Removed one TODO from the list: in case pages_io() fails, read_chunk_into_cache()
will no longer panic, but will free its allocated pages instead.
I ran into this because BFS managed to create a file without a data stream but
with a length greater than 0...


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15093 a95241bf-73f2-0310-859d-f6bbb57e9c96
Author: Axel Dörfler, 2005-11-23 18:36:38 +00:00
Parent: 76c0041c51
Commit: 2b028fcaa0


@@ -292,8 +292,11 @@ pages_io(file_cache_ref *ref, off_t offset, const iovec *vecs, size_t count,
 	size_t numBytes = *_numBytes;
 	status_t status = get_file_map(ref, offset, numBytes, fileVecs, &fileVecCount);
-	if (status < B_OK)
+	if (status < B_OK) {
+		TRACE(("get_file_map(offset = %Ld, numBytes = %lu) failed\n", offset,
+			numBytes));
 		return status;
+	}
 	// ToDo: handle array overflow gracefully!
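
The double parentheses in the added TRACE call work because TRACE takes a
parenthesized argument list. A minimal sketch of how such a tracing macro is
conventionally defined; the actual definition is not part of this diff, so
treat this as an assumption:

// hypothetical reconstruction of the file's tracing switch
#ifdef TRACE_FILE_CACHE
#	define TRACE(x) dprintf x	// TRACE((...)) expands to dprintf(...)
#else
#	define TRACE(x) ;		// compiled out in normal builds
#endif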
@@ -454,9 +457,16 @@ read_chunk_into_cache(file_cache_ref *ref, off_t offset, size_t size,
 	// read file into reserved pages
 	status_t status = pages_io(ref, offset, vecs, vecCount, &size, false);
 	if (status < B_OK) {
-		// ToDo: remove allocated pages...
-		panic("file_cache: remove allocated pages! read pages failed: %s\n", strerror(status));
+		// reading failed, free allocated pages
+		dprintf("file_cache: read pages failed: %s\n", strerror(status));
+		mutex_lock(&cache->lock);
+		for (int32 i = 0; i < pageIndex; i++) {
+			vm_cache_remove_page(cache, pages[i]);
+			vm_page_set_state(pages[i], PAGE_STATE_FREE);
+		}
 		return status;
 	}
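
The fix follows a common kernel error-path idiom: on a failed read, walk back
over exactly the pages allocated so far (pageIndex of them) and release them
before propagating the error, instead of panicking. A minimal, self-contained
C sketch of that pattern (all names below are hypothetical stand-ins, not the
Haiku VM API):

#include <stdio.h>
#include <stdlib.h>

typedef int status_t;
#define B_OK 0
#define B_IO_ERROR (-1)

/* hypothetical stand-in for a vm_page */
typedef struct page page;
struct page { int id; };

static page *alloc_page(void) { return calloc(1, sizeof(page)); }
static void free_page(page *p) { free(p); }
static status_t do_io(void) { return B_IO_ERROR; /* simulate a failed read */ }

/* Allocate `count` pages, attempt the read, and on failure free exactly
   the pages allocated so far, the pattern the commit adopts. */
static status_t read_chunk(int count)
{
	page *pages[16];
	int pageIndex = 0;

	for (; pageIndex < count && pageIndex < 16; pageIndex++)
		pages[pageIndex] = alloc_page();

	status_t status = do_io();
	if (status < B_OK) {
		/* reading failed, free allocated pages */
		fprintf(stderr, "read pages failed: %d\n", status);
		for (int i = 0; i < pageIndex; i++)
			free_page(pages[i]);
		return status;
	}
	return B_OK;
}

int main(void)
{
	return read_chunk(4) == B_OK ? 0 : 1;
}

Note that the real change also re-acquires cache->lock before touching the
cache, since the function apparently returns with that lock held; the sketch
leaves locking out.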