Fixed another bad bug in the file cache: when one iovec had to be divided into several parts, it could happen that the whole data portion was overwritten with data beyond the part that should have been read.

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@9706 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 69f792c6bc
commit e4211d820c

src/kernel/core/cache/file_cache.cpp (16 lines changed)
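For context on the bug described in the commit message, here is a minimal, hypothetical sketch (the names fill_from_extent, extent, and diskData are illustrative, not the actual readwrite_pages() code) of why splitting one iovec across file_io_vec boundaries needs the copy length clamped against the remaining part of the iovec rather than its full iov_len:

// Hypothetical, simplified model of one iovec being filled from two on-disk
// extents (file_io_vecs). Illustrative names only.
#include <algorithm>
#include <cstdio>
#include <cstring>
#include <sys/types.h>
#include <sys/uio.h>

struct file_io_vec { off_t offset; off_t length; };

// Copy only the part of the iovec that falls into this extent.
static size_t fill_from_extent(const iovec &vec, size_t vecOffset,
	const file_io_vec &extent, const char *diskData)
{
	// The clamp uses the *remaining* part of the iovec (iov_len - vecOffset);
	// clamping against the full iov_len would copy too much on the second call.
	size_t size = std::min(vec.iov_len - vecOffset, (size_t)extent.length);
	memcpy((char *)vec.iov_base + vecOffset, diskData + extent.offset, size);
	return size;
}

int main()
{
	char buffer[16];
	memset(buffer, 'x', sizeof(buffer));

	iovec vec = { buffer, 8 };                        // caller asked for 8 bytes
	const char disk[] = "ABCDEFGHIJKLMNOP";           // pretend disk contents
	file_io_vec extents[] = { { 0, 5 }, { 5, 11 } };  // file stored as two extents

	size_t vecOffset = fill_from_extent(vec, 0, extents[0], disk);
	vecOffset += fill_from_extent(vec, vecOffset, extents[1], disk);

	printf("%.8s (buffer[8..15] untouched)\n", buffer);  // ABCDEFGH
	return 0;
}

If the second call clamped against the full iov_len instead, it would copy eight bytes starting at offset 5 and run past the requested part of the buffer, which is the kind of overwrite the commit message describes.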
@@ -135,12 +135,18 @@ readwrite_pages(file_cache_ref *ref, off_t offset, const iovec *vecs, size_t count
		size = min_c(vecs[i].iov_len - vecOffset, fileVec.length);
		tempVecs[0].iov_len = size;
		vecOffset = 0;

		TRACE(("fill vec %ld, offset = %lu, size = %lu\n", i, vecOffset, size));

		if (size >= fileVec.length)
			vecOffset += size;
		else
			vecOffset = 0;

		while (size < fileVec.length && ++i < count) {
			tempVecs[tempCount].iov_base = vecs[i].iov_base;
			tempCount++;

			// is this iovec larger than the file_io_vec?
			if (vecs[i].iov_len + size > fileVec.length) {
				size += tempVecs[tempCount].iov_len = vecOffset = fileVec.length - size;
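A hedged reading of the hunk above (the loop structure and names below are illustrative, not a copy of readwrite_pages()): vecOffset records how far into the current iovec the previous file_io_vec already got, so the next extent resumes from that point; it is advanced when an extent ends inside an iovec and reset to zero once that iovec has been fully consumed. A compact sketch of that bookkeeping:

// Illustrative sketch of carrying vecOffset across file_io_vec boundaries.
#include <algorithm>
#include <cstdio>
#include <sys/types.h>
#include <sys/uio.h>

struct file_io_vec { off_t offset; off_t length; };

int main()
{
	char a[6], b[10];
	iovec vecs[] = { { a, sizeof(a) }, { b, sizeof(b) } };   // 16 bytes requested
	const size_t count = sizeof(vecs) / sizeof(vecs[0]);
	file_io_vec extents[] = { { 100, 4 }, { 200, 12 } };     // two on-disk pieces

	size_t vecOffset = 0;   // where the next extent resumes inside vecs[i]
	size_t i = 0;
	for (const file_io_vec &fileVec : extents) {
		// First piece: the not-yet-consumed remainder of vecs[i].
		size_t size = std::min(vecs[i].iov_len - vecOffset, (size_t)fileVec.length);
		printf("extent %lld: %zu bytes of vec %zu from offset %zu\n",
			(long long)fileVec.offset, size, i, vecOffset);

		if (size >= fileVec.length) {
			vecOffset += size;           // extent ends inside vecs[i], remember where
		} else {
			vecOffset = 0;               // vecs[i] done, later iovecs start at 0
			while (size < (size_t)fileVec.length && ++i < count) {
				size_t part = std::min(vecs[i].iov_len,
					(size_t)fileVec.length - size);
				printf("  plus %zu bytes of vec %zu\n", part, i);
				if (part < vecs[i].iov_len)
					vecOffset = part;    // this iovec only partially consumed
				size += part;
			}
		}
	}
	return 0;
}

Run against the two extents above, the sketch hands the first 4 bytes and then the remaining 2 bytes of the first iovec to the two extents before moving on to the second iovec; no extent ever writes past the part of an iovec that was actually requested.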
@@ -158,13 +164,13 @@ readwrite_pages(file_cache_ref *ref, off_t offset, const iovec *vecs, size_t count
		if (status < B_OK)
			return status;

		totalSize += size;

		if (size != bytes) {
			// there are no more bytes, let's bail out
			*_numBytes = size + totalSize;
			*_numBytes = totalSize;
			return B_OK;
		}

		totalSize += size;
	}

	return B_OK;
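In the second hunk the running total is updated before the short-transfer early-out, so the value stored in *_numBytes already includes the bytes of the current, possibly partial, transfer. A small, purely illustrative sketch of that accounting order (hypothetical numbers, not taken from the code):

#include <cstddef>
#include <cstdio>

int main()
{
	size_t requested[]   = { 4096, 4096 };  // bytes asked for per iteration
	size_t transferred[] = { 4096, 512 };   // bytes actually transferred

	size_t totalSize = 0;
	for (int i = 0; i < 2; i++) {
		size_t bytes = requested[i];
		size_t size = transferred[i];

		totalSize += size;                  // count this transfer first
		if (size != bytes) {
			// Short transfer: totalSize already covers the partial chunk,
			// so it can be reported directly (as *_numBytes is in the hunk).
			printf("stopping early, %zu bytes done\n", totalSize);   // 4608
			return 0;
		}
	}
	printf("%zu bytes done\n", totalSize);
	return 0;
}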