* Added a new flag for write_stat(): B_STAT_SIZE_INSECURE, which allows a
  file system to not fill newly created space with zeros.
  BFile::SetSize() now uses this, while [f]truncate() does not. This
  is only a temporary work-around until BFS supports sparse files.
* Apps that want to reserve space to fill up later should use
  BFile::SetSize() for now, as this will be a lot faster than
  [f]truncate() (see the sketch after this list).
* cache_io() and the functions below now use a special mode when you
  pass in a NULL buffer: for read access, the cache is only populated
  (useful for prefetching); for write access, the file is filled with
  zeros.
* Implemented BFS's Inode::FillGapWithZeros() using this method now.
* Removed extraneous white space.
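
A minimal sketch of the pattern recommended above, assuming only the public
Storage Kit API (BFile from <File.h>); the helper name, path and size are
illustrative and error handling is abbreviated:

#include <File.h>

// Reserve "size" bytes in a file up front. With this change, SetSize()
// passes B_STAT_SIZE_INSECURE down to the file system, so BFS can grow
// the file without zeroing the newly allocated blocks first.
status_t
ReserveSpace(const char *path, off_t size)
{
	BFile file(path, B_READ_WRITE | B_CREATE_FILE);
	status_t status = file.InitCheck();
	if (status != B_OK)
		return status;

	return file.SetSize(size);
}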


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@24555 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-03-24 13:39:02 +00:00
parent c8ee6ba8fb
commit a121b8c83c
7 changed files with 123 additions and 135 deletions

View File

@ -27,8 +27,10 @@ typedef void *fs_cookie;
typedef void *fs_vnode;
/* additional flags passed to write_stat() (see NodeMonitor.h for the others) */
// NOTE: Changing the constants here or in NodeMonitor.h will break
// src/kits/storage/LibBeAdapter.cpp:_kern_write_stat().
#define B_STAT_SIZE_INSECURE 0x2000
// TODO: this should be faded out once BFS supports sparse files
/* passed to write_fs_info() */
#define FS_WRITE_FSINFO_NAME 0x0001

View File

@ -874,6 +874,9 @@
#define fs_cookie fssh_fs_cookie
#define fs_vnode fssh_fs_vnode
/* additional flags passed to write_stat() */
#define B_STAT_SIZE_INSECURE FSSH_B_STAT_SIZE_INSECURE
/* passed to write_fs_info() */
#define FS_WRITE_FSINFO_NAME FSSH_FS_WRITE_FSINFO_NAME

View File

@ -29,6 +29,7 @@ typedef void *fssh_fs_cookie;
typedef void *fssh_fs_vnode;
/* additional flags passed to write_stat() */
#define FSSH_B_STAT_SIZE_INSECURE 0x2000
/* passed to write_fs_info() */
#define FSSH_FS_WRITE_FSINFO_NAME 0x0001

View File

@ -176,7 +176,7 @@ InodeAllocator::~InodeAllocator()
}
status_t
InodeAllocator::New(block_run *parentRun, mode_t mode, block_run &run,
Inode **_inode)
{
@ -273,7 +273,7 @@ InodeAllocator::_TransactionListener(int32 id, int32 event, void *_inode)
// #pragma mark -
status_t
bfs_inode::InitCheck(Volume *volume)
{
if (Magic1() != INODE_MAGIC1
@ -407,7 +407,7 @@ Inode::InitCheck(bool checkNode)
}
if (IsContainer()) {
// inodes that have a
// inodes that have a B+tree
if (fTree == NULL)
RETURN_ERROR(B_NO_MEMORY);
@ -715,7 +715,7 @@ Inode::_AddSmallData(Transaction &transaction, NodeGetter &nodeGetter,
small_data *next = item->Next();
if (!next->IsLast(node))
memmove((uint8 *)item + spaceNeeded, next, (uint8 *)last - (uint8 *)next);
// Move the "last" one to its new location and
// correctly terminate the small_data section
last = (small_data *)((uint8 *)last - ((uint8 *)next - ((uint8 *)item + spaceNeeded)));
@ -967,7 +967,7 @@ Inode::ReadAttribute(const char *name, int32 type, off_t pos, uint8 *buffer,
}
if (length + pos > smallData->DataSize())
length = smallData->DataSize() - pos;
memcpy(buffer, smallData->Data() + pos, length);
*_length = length;
return B_OK;
@ -1190,7 +1190,7 @@ Inode::CreateAttribute(Transaction &transaction, const char *name, uint32 type,
{
// do we need to create the attribute directory first?
if (Attributes().IsZero()) {
status_t status = Inode::Create(transaction, this, NULL,
S_ATTR_DIR | S_DIRECTORY | 0666, 0, 0, NULL);
if (status < B_OK)
RETURN_ERROR(status);
@ -1201,7 +1201,7 @@ Inode::CreateAttribute(Transaction &transaction, const char *name, uint32 type,
return B_ERROR;
// Inode::Create() locks the inode for us
return Inode::Create(transaction, attributes, name,
S_ATTR | S_FILE | 0666, 0, type, NULL, NULL, attribute);
}
@ -1426,8 +1426,6 @@ Inode::WriteAt(Transaction &transaction, off_t pos, const uint8 *buffer,
locker.Lock();
if (pos + length > Size()) {
off_t oldSize = Size();
// let's grow the data stream to the size needed
status_t status = SetFileSize(transaction, pos + length);
if (status < B_OK) {
@ -1438,11 +1436,6 @@ Inode::WriteAt(Transaction &transaction, off_t pos, const uint8 *buffer,
// index here as part of the current transaction - this might just
// be a bit too expensive, but worth a try.
// If the position of the write was beyond the file size, we
// have to fill the gap between that position and the old file
// size with zeros.
FillGapWithZeros(oldSize, pos);
// we need to write back the inode here because it has to
// go into this transaction (we cannot wait until the file
// is closed)
@ -1472,81 +1465,21 @@ Inode::WriteAt(Transaction &transaction, off_t pos, const uint8 *buffer,
status_t
Inode::FillGapWithZeros(off_t pos, off_t newSize)
{
// ToDo: we currently do anything here, same as original BFS!
//if (pos >= newSize)
return B_OK;
#if 0
block_run run;
off_t offset;
if (FindBlockRun(pos, run, offset) < B_OK)
RETURN_ERROR(B_BAD_VALUE);
while (pos < newSize) {
size_t size;
if (newSize > pos + 1024 * 1024 * 1024)
size = 1024 * 1024 * 1024;
else
size = newSize - pos;
off_t length = newSize - pos;
uint32 bytesWritten = 0;
uint32 blockSize = fVolume->BlockSize();
uint32 blockShift = fVolume->BlockShift();
uint8 *block;
status_t status = file_cache_write(FileCache(), NULL, pos, NULL, &size);
if (status < B_OK)
return status;
// the first block_run we write could not be aligned to the block_size boundary
// (write partial block at the beginning)
// pos % block_size == (pos - offset) % block_size, offset % block_size == 0
if (pos % blockSize != 0) {
run.start += (pos - offset) / blockSize;
run.length -= (pos - offset) / blockSize;
CachedBlock cached(fVolume,run);
if ((block = cached.Block()) == NULL)
RETURN_ERROR(B_BAD_VALUE);
bytesWritten = blockSize - (pos % blockSize);
if (length < bytesWritten)
bytesWritten = length;
memset(block + (pos % blockSize), 0, bytesWritten);
if (fVolume->WriteBlocks(cached.BlockNumber(), block, 1) < B_OK)
RETURN_ERROR(B_IO_ERROR);
pos += bytesWritten;
length -= bytesWritten;
if (length == 0)
return B_OK;
if (FindBlockRun(pos, run, offset) < B_OK)
RETURN_ERROR(B_BAD_VALUE);
pos += size;
}
while (length > 0) {
// offset is the offset to the current pos in the block_run
run.start = HOST_ENDIAN_TO_BFS_INT16(run.Start() + ((pos - offset) >> blockShift));
run.length = HOST_ENDIAN_TO_BFS_INT16(run.Length() - ((pos - offset) >> blockShift));
CachedBlock cached(fVolume);
off_t blockNumber = fVolume->ToBlock(run);
for (int32 i = 0; i < run.Length(); i++) {
if ((block = cached.SetTo(blockNumber + i, true)) == NULL)
RETURN_ERROR(B_IO_ERROR);
if (fVolume->WriteBlocks(cached.BlockNumber(), block, 1) < B_OK)
RETURN_ERROR(B_IO_ERROR);
}
int32 bytes = run.Length() << blockShift;
length -= bytes;
bytesWritten += bytes;
// since we don't respect a last partial block, length can be lower
if (length <= 0)
break;
pos += bytes;
if (FindBlockRun(pos, run, offset) < B_OK)
RETURN_ERROR(B_BAD_VALUE);
}
return B_OK;
#endif
}
@ -2264,7 +2197,7 @@ Inode::Sync()
if (indirectRuns[k].IsZero())
return B_OK;
block = fVolume->ToBlock(indirectRuns[k]);
for (int32 j = 0; j < indirectRuns[k].Length(); j++) {
block_run *runs = (block_run *)directCached.SetTo(block + j);
if (runs == NULL)
@ -2532,7 +2465,7 @@ Inode::Create(Transaction &transaction, Inode *parent, const char *name,
// handle this case on their own (or other cases where "parent" is
// NULL)
if (status < B_OK)
RETURN_ERROR(status);
// Update the main indices (name, size & last_modified)
// (live queries might want to access us after this)

View File

@ -708,7 +708,7 @@ bfs_write_stat(void *_ns, void *_node, const struct stat *stat, uint32 mask)
// TODO: we should definitely check a bit more if the new stats are
// valid - or even better, the VFS should check this before calling us
status_t status = inode->CheckPermissions(W_OK);
if (status < B_OK)
RETURN_ERROR(status);
@ -734,7 +734,8 @@ bfs_write_stat(void *_ns, void *_node, const struct stat *stat, uint32 mask)
return status;
// fill the new blocks (if any) with zeros
inode->FillGapWithZeros(inode->OldSize(), inode->Size());
if ((mask & B_STAT_SIZE_INSECURE) == 0)
inode->FillGapWithZeros(inode->OldSize(), inode->Size());
if (!inode->IsDeleted()) {
Index index(volume);
@ -784,7 +785,7 @@ bfs_write_stat(void *_ns, void *_node, const struct stat *stat, uint32 mask)
}
status_t
bfs_create(void *_ns, void *_directory, const char *name, int openMode,
int mode, void **_cookie, ino_t *_vnodeID)
{
@ -804,7 +805,7 @@ bfs_create(void *_ns, void *_directory, const char *name, int openMode,
// to remove the inode if we don't have enough free memory later...
file_cookie *cookie = (file_cookie *)malloc(sizeof(file_cookie));
if (cookie == NULL)
RETURN_ERROR(B_NO_MEMORY);
// initialize the cookie
cookie->open_mode = openMode;
@ -832,7 +833,7 @@ bfs_create(void *_ns, void *_directory, const char *name, int openMode,
}
static status_t
bfs_create_symlink(void *_ns, void *_directory, const char *name,
const char *path, int mode)
{
@ -895,7 +896,7 @@ bfs_create_symlink(void *_ns, void *_directory, const char *name,
}
status_t
bfs_link(void *ns, void *dir, const char *name, void *node)
{
FUNCTION_START(("name = \"%s\"\n", name));
@ -906,7 +907,7 @@ bfs_link(void *ns, void *dir, const char *name, void *node)
}
status_t
bfs_unlink(void *_ns, void *_directory, const char *name)
{
FUNCTION_START(("name = \"%s\"\n", name));
@ -935,7 +936,7 @@ bfs_unlink(void *_ns, void *_directory, const char *name)
}
status_t
bfs_rename(void *_ns, void *_oldDir, const char *oldName, void *_newDir, const char *newName)
{
FUNCTION_START(("oldDir = %p, oldName = \"%s\", newDir = %p, newName = \"%s\"\n", _oldDir, oldName, _newDir, newName));
@ -1150,7 +1151,7 @@ bfs_open(void *_fs, void *_node, int openMode, void **_cookie)
file_cookie *cookie = (file_cookie *)malloc(sizeof(file_cookie));
if (cookie == NULL)
RETURN_ERROR(B_NO_MEMORY);
// initialize the cookie
cookie->open_mode = openMode;
@ -1347,7 +1348,7 @@ static status_t
bfs_access(void *_ns, void *_node, int accessMode)
{
//FUNCTION();
if (_ns == NULL || _node == NULL)
return B_BAD_VALUE;
@ -1438,7 +1439,7 @@ static status_t
bfs_remove_dir(void *_ns, void *_directory, const char *name)
{
FUNCTION_START(("name = \"%s\"\n", name));
if (_ns == NULL || _directory == NULL || name == NULL || *name == '\0')
return B_BAD_VALUE;
@ -1467,10 +1468,10 @@ static status_t
bfs_open_dir(void *_ns, void *_node, void **_cookie)
{
FUNCTION();
if (_ns == NULL || _node == NULL || _cookie == NULL)
RETURN_ERROR(B_BAD_VALUE);
Inode *inode = (Inode *)_node;
status_t status = inode->CheckPermissions(R_OK);
@ -1496,7 +1497,7 @@ bfs_open_dir(void *_ns, void *_node, void **_cookie)
static status_t
bfs_read_dir(void *_ns, void *_node, void *_cookie, struct dirent *dirent,
size_t bufferSize, uint32 *_num)
{
FUNCTION();
@ -1537,7 +1538,7 @@ bfs_rewind_dir(void * /*ns*/, void * /*node*/, void *_cookie)
if (iterator == NULL)
RETURN_ERROR(B_BAD_VALUE);
return iterator->Rewind();
}
@ -1555,7 +1556,7 @@ static status_t
bfs_free_dir_cookie(void *ns, void *node, void *_cookie)
{
TreeIterator *iterator = (TreeIterator *)_cookie;
if (iterator == NULL)
RETURN_ERROR(B_BAD_VALUE);
@ -1610,7 +1611,7 @@ static status_t
bfs_rewind_attr_dir(void *_ns, void *_node, void *_cookie)
{
FUNCTION();
AttributeIterator *iterator = (AttributeIterator *)_cookie;
if (iterator == NULL)
RETURN_ERROR(B_BAD_VALUE);
@ -2234,7 +2235,7 @@ static file_system_module_info sBeFileSystem = {
&bfs_free_dir_cookie,
&bfs_read_dir,
&bfs_rewind_dir,
/* attribute directory operations */
&bfs_open_attr_dir,
&bfs_close_attr_dir,

View File

@ -14,6 +14,7 @@
#include <Directory.h>
#include <Entry.h>
#include <File.h>
#include <fs_interface.h>
#include <NodeMonitor.h>
#include <syscalls.h>
@ -77,7 +78,7 @@ BFile::BFile(const BEntry *entry, uint32 openMode)
/*! \brief Creates a BFile and initializes it to the file referred to by
the supplied path name and according to the specified open mode.
\param path the file's path name
\param openMode the mode in which the file should be opened
\see SetTo() for values for \a openMode
*/
@ -217,7 +218,7 @@ BFile::SetTo(const BEntry *entry, uint32 openMode)
/*! \brief Re-initializes the BFile to the file referred to by the
supplied path name and according to the specified open mode.
\param path the file's path name
\param openMode the mode in which the file should be opened
\return
- \c B_OK: Everything went fine.
@ -352,7 +353,7 @@ BFile::ReadAt(off_t location, void *buffer, size_t size)
return InitCheck();
if (location < 0)
return B_BAD_VALUE;
return _kern_read(get_fd(), location, buffer, size);
}
@ -453,7 +454,7 @@ BFile::SetSize(off_t size)
return B_BAD_VALUE;
struct stat statData;
statData.st_size = size;
return set_stat(statData, B_STAT_SIZE);
return set_stat(statData, B_STAT_SIZE | B_STAT_SIZE_INSECURE);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*/
@ -51,7 +51,7 @@ struct file_cache_ref {
};
typedef status_t (*cache_func)(file_cache_ref *ref, void *cookie, off_t offset,
int32 pageOffset, addr_t buffer, size_t bufferSize,
int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
size_t lastReservedPages, size_t reservePages);
@ -169,7 +169,7 @@ reserve_pages(file_cache_ref *ref, size_t reservePages, bool isWrite)
*/
static status_t
read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
int32 pageOffset, addr_t buffer, size_t bufferSize,
int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
size_t lastReservedPages, size_t reservePages)
{
TRACE(("read_into_cache(offset = %Ld, pageOffset = %ld, buffer = %#lx, "
@ -240,14 +240,14 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
return status;
}
// copy the pages and unmap them again
// copy the pages if needed and unmap them again
for (int32 i = 0; i < vecCount; i++) {
addr_t base = (addr_t)vecs[i].iov_base;
size_t size = vecs[i].iov_len;
// copy to user buffer if necessary
if (bufferSize != 0) {
if (useBuffer && bufferSize != 0) {
size_t bytes = min_c(bufferSize, size - pageOffset);
user_memcpy((void *)buffer, (void *)(base + pageOffset), bytes);
@ -278,12 +278,15 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
static status_t
read_from_file(file_cache_ref *ref, void *cookie, off_t offset,
int32 pageOffset, addr_t buffer, size_t bufferSize,
int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
size_t lastReservedPages, size_t reservePages)
{
TRACE(("read_from_file(offset = %Ld, pageOffset = %ld, buffer = %#lx, "
"bufferSize = %lu\n", offset, pageOffset, buffer, bufferSize));
if (!useBuffer)
return B_OK;
iovec vec;
vec.iov_base = (void *)buffer;
vec.iov_len = bufferSize;
@ -310,7 +313,7 @@ read_from_file(file_cache_ref *ref, void *cookie, off_t offset,
*/
static status_t
write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
int32 pageOffset, addr_t buffer, size_t bufferSize,
int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
size_t lastReservedPages, size_t reservePages)
{
// TODO: We're using way too much stack! Rather allocate a sufficiently
@ -400,8 +403,13 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
addr_t base = (addr_t)vecs[i].iov_base;
size_t bytes = min_c(bufferSize, size_t(vecs[i].iov_len - pageOffset));
// copy data from user buffer
user_memcpy((void *)(base + pageOffset), (void *)buffer, bytes);
if (useBuffer) {
// copy data from user buffer
user_memcpy((void *)(base + pageOffset), (void *)buffer, bytes);
} else {
// clear buffer instead
memset((void *)(base + pageOffset), 0, bytes);
}
bufferSize -= bytes;
if (bufferSize == 0)
@ -454,9 +462,21 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
static status_t
write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
addr_t buffer, size_t bufferSize, size_t lastReservedPages,
addr_t buffer, size_t bufferSize, bool useBuffer, size_t lastReservedPages,
size_t reservePages)
{
size_t chunkSize;
if (!useBuffer) {
// we need to allocate a zero buffer
// TODO: use smaller buffers if this fails
chunkSize = min_c(bufferSize, B_PAGE_SIZE);
buffer = (addr_t)malloc(chunkSize);
if (buffer == 0)
return B_NO_MEMORY;
memset((void *)buffer, 0, chunkSize);
}
iovec vec;
vec.iov_base = (void *)buffer;
vec.iov_len = bufferSize;
@ -465,8 +485,26 @@ write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
mutex_unlock(&ref->cache->lock);
vm_page_unreserve_pages(lastReservedPages);
status_t status = vfs_write_pages(ref->vnode, cookie, offset + pageOffset,
&vec, 1, &bufferSize, false);
status_t status;
if (!useBuffer) {
while (bufferSize > 0) {
if (bufferSize < chunkSize)
chunkSize = bufferSize;
status = vfs_write_pages(ref->vnode, cookie, offset + pageOffset,
&vec, 1, &chunkSize, false);
if (status < B_OK)
break;
bufferSize -= chunkSize;
pageOffset += chunkSize;
}
} else {
status = vfs_write_pages(ref->vnode, cookie, offset + pageOffset,
&vec, 1, &bufferSize, false);
}
if (status == B_OK)
reserve_pages(ref, reservePages, true);
@ -478,9 +516,10 @@ write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
static inline status_t
satisfy_cache_io(file_cache_ref *ref, void *cookie, cache_func function,
off_t offset, addr_t buffer, int32 &pageOffset, size_t bytesLeft,
size_t &reservePages, off_t &lastOffset, addr_t &lastBuffer,
int32 &lastPageOffset, size_t &lastLeft, size_t &lastReservedPages)
off_t offset, addr_t buffer, bool useBuffer, int32 &pageOffset,
size_t bytesLeft, size_t &reservePages, off_t &lastOffset,
addr_t &lastBuffer, int32 &lastPageOffset, size_t &lastLeft,
size_t &lastReservedPages)
{
if (lastBuffer == buffer)
return B_OK;
@ -490,7 +529,7 @@ satisfy_cache_io(file_cache_ref *ref, void *cookie, cache_func function,
+ lastPageOffset + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
status_t status = function(ref, cookie, lastOffset, lastPageOffset,
lastBuffer, requestSize, lastReservedPages, reservePages);
lastBuffer, requestSize, useBuffer, lastReservedPages, reservePages);
if (status == B_OK) {
lastReservedPages = reservePages;
lastBuffer = buffer;
@ -513,6 +552,7 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
file_cache_ref *ref = (file_cache_ref *)_cacheRef;
vm_cache *cache = ref->cache;
off_t fileSize = cache->virtual_size;
bool useBuffer = buffer != 0;
TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %lu, %s)\n",
ref, offset, (void *)buffer, *_size, doWrite ? "write" : "read"));
@ -575,8 +615,9 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
// we didn't get yet (to make sure no one else interferes in the
// mean time).
status_t status = satisfy_cache_io(ref, cookie, function, offset,
buffer, pageOffset, bytesLeft, reservePages, lastOffset,
lastBuffer, lastPageOffset, lastLeft, lastReservedPages);
buffer, useBuffer, pageOffset, bytesLeft, reservePages,
lastOffset, lastBuffer, lastPageOffset, lastLeft,
lastReservedPages);
if (status != B_OK)
return status;
@ -606,13 +647,18 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
// and copy the contents of the page already in memory
if (doWrite) {
user_memcpy((void *)(virtualAddress + pageOffset),
(void *)buffer, bytesInPage);
if (useBuffer) {
user_memcpy((void *)(virtualAddress + pageOffset),
(void *)buffer, bytesInPage);
} else {
user_memset((void *)(virtualAddress + pageOffset),
0, bytesInPage);
}
// make sure the page is in the modified list
if (page->state != PAGE_STATE_MODIFIED)
vm_page_set_state(page, PAGE_STATE_MODIFIED);
} else {
} else if (useBuffer) {
user_memcpy((void *)buffer,
(void *)(virtualAddress + pageOffset), bytesInPage);
}
@ -643,8 +689,9 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
if (buffer - lastBuffer + lastPageOffset >= kMaxChunkSize) {
status_t status = satisfy_cache_io(ref, cookie, function, offset,
buffer, pageOffset, bytesLeft, reservePages, lastOffset,
lastBuffer, lastPageOffset, lastLeft, lastReservedPages);
buffer, useBuffer, pageOffset, bytesLeft, reservePages,
lastOffset, lastBuffer, lastPageOffset, lastLeft,
lastReservedPages);
if (status != B_OK)
return status;
}
@ -653,7 +700,7 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
// fill the last remaining bytes of the request (either write or read)
return function(ref, cookie, lastOffset, lastPageOffset, lastBuffer,
lastLeft, lastReservedPages, 0);
lastLeft, useBuffer, lastReservedPages, 0);
}
@ -837,7 +884,7 @@ cache_node_closed(struct vnode *vnode, int32 fdType, vm_cache *cache,
}
extern "C" void
extern "C" void
cache_node_launched(size_t argCount, char * const *args)
{
if (sCacheModule == NULL || sCacheModule->node_launched == NULL)
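
A hedged, caller-side sketch of the NULL-buffer convention introduced above:
writing with a NULL buffer zero-fills the range through the file cache, while
reading with a NULL buffer only populates the cache (prefetch). The helper
names below are made up for illustration; the file_cache_write()/
file_cache_read() signatures are assumed to match the calls visible elsewhere
in this commit, and the kernel's file cache declarations are assumed to be in
scope.

// Fill [pos, end) of a file with zeros via the cache, without allocating
// or copying a caller buffer. "cache" is what file_cache_create() returned.
static status_t
zero_fill_range(void *cache, void *cookie, off_t pos, off_t end)
{
	while (pos < end) {
		// limit each request, mirroring Inode::FillGapWithZeros()
		size_t size;
		if (end > pos + 1024 * 1024 * 1024)
			size = 1024 * 1024 * 1024;
		else
			size = end - pos;

		// NULL buffer + write: the range is filled with zeros
		status_t status = file_cache_write(cache, cookie, pos, NULL, &size);
		if (status < B_OK)
			return status;

		pos += size;
	}
	return B_OK;
}

// Bring a range into the cache ahead of time; nothing is copied out.
static void
prefetch_range(void *cache, void *cookie, off_t pos, size_t size)
{
	// NULL buffer + read: the pages are only populated in the cache
	file_cache_read(cache, cookie, pos, NULL, &size);
}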