Vastly improved the file map implementation:

* adjacent vecs are now joined.
* partial invalidation no longer frees all cached extents.
* the array can now be larger than the needed number of entries, allowing
  for a saner array allocation policy (see the sketch after this list).
* it no longer reads the whole file map when the first translation is
  requested, but only as much as required (it will still ask the file system
  for the maximum file size, but it won't traverse further as long as the
  initial request is fulfilled).
* This should help a lot with the ext2 file system, which doesn't support
  real file extents (but keeps a list of blocks).
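
Below is a minimal standalone sketch (not part of the commit) of the growth
rule behind the new allocation policy, as implemented by _MakeSpace() in the
diff further down: the capacity is doubled until it reaches 32768 entries and
grows in 32768-entry steps afterwards. The grow_capacity() helper name is made
up for illustration only.

	#include <stddef.h>

	// Illustrative helper mirroring the growth loop in file_map::_MakeSpace():
	// starting from a non-zero current capacity (CACHED_FILE_EXTENTS in the
	// real code), double it until 32768 entries are reached, then grow
	// linearly in 32768-entry steps until "needed" entries fit.
	static size_t
	grow_capacity(size_t maxCount, size_t needed)
	{
		while (maxCount < needed) {
			if (maxCount < 32768)
				maxCount <<= 1;
			else
				maxCount += 32768;
		}
		return maxCount;
	}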


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26260 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-07-05 17:58:47 +00:00
parent 609865fd21
commit 9b74b9cda8


@@ -55,6 +55,8 @@ struct file_map {
 	void Invalidate(off_t offset, off_t size);
 	void Free();
 
+	status_t _MakeSpace(size_t amount);
+
 	union {
 		file_extent direct[CACHED_FILE_EXTENTS];
 		file_extent_array indirect;
@@ -128,48 +130,95 @@ file_map::FindExtent(off_t offset, uint32 *_index)
 }
 
 
+status_t
+file_map::_MakeSpace(size_t amount)
+{
+	if (amount <= CACHED_FILE_EXTENTS) {
+		// just use the reserved area in the file_cache_ref structure
+		if (amount <= CACHED_FILE_EXTENTS && count > CACHED_FILE_EXTENTS) {
+			// the new size is smaller than the minimal array size
+			file_extent *array = indirect.array;
+			memcpy(direct, array, sizeof(file_extent) * amount);
+			free(array);
+		}
+	} else {
+		// resize array if needed
+		file_extent *oldArray = NULL;
+		size_t maxCount = CACHED_FILE_EXTENTS;
+
+		if (count > CACHED_FILE_EXTENTS) {
+			oldArray = indirect.array;
+			maxCount = indirect.max_count;
+		}
+
+		if (amount > maxCount) {
+			// allocate new array
+			while (maxCount < amount) {
+				if (maxCount < 32768)
+					maxCount <<= 1;
+				else
+					maxCount += 32768;
+			}
+
+			file_extent *newArray = (file_extent *)realloc(oldArray,
+				maxCount * sizeof(file_extent));
+			if (newArray == NULL)
+				return B_NO_MEMORY;
+
+			if (count > 0 && count <= CACHED_FILE_EXTENTS)
+				memcpy(newArray, direct, sizeof(file_extent) * count);
+
+			indirect.array = newArray;
+			indirect.max_count = maxCount;
+		}
+	}
+
+	count = amount;
+	return B_OK;
+}
+
+
 status_t
 file_map::Add(file_io_vec *vecs, size_t vecCount, off_t &lastOffset)
 {
 	TRACE(("file_map@%p::Add(vecCount = %ld)\n", this, vecCount));
 
+	uint32 start = count;
 	off_t offset = 0;
 
-	if (vecCount <= CACHED_FILE_EXTENTS && count == 0) {
-		// just use the reserved area in the file_cache_ref structure
-	} else {
-		// TODO: once we can invalidate only parts of the file map,
-		// we might need to copy the previously cached file extends
-		// from the direct range
-		file_extent *newMap = (file_extent *)realloc(indirect.array,
-			(count + vecCount) * sizeof(file_extent));
-		if (newMap == NULL)
-			return B_NO_MEMORY;
+	status_t status = _MakeSpace(count + vecCount);
+	if (status != B_OK)
+		return status;
 
-		indirect.array = newMap;
-
-		if (count != 0) {
-			file_extent *extent = ExtentAt(count - 1);
-			offset = extent->offset + extent->disk.length;
-		}
+	file_extent *lastExtent = NULL;
+	if (start != 0) {
+		lastExtent = ExtentAt(start - 1);
+		offset = lastExtent->offset + lastExtent->disk.length;
 	}
 
-	int32 start = count;
-	count += vecCount;
-
 	for (uint32 i = 0; i < vecCount; i++) {
-		file_extent *extent = ExtentAt(start + i);
+		if (lastExtent != NULL) {
+			if (lastExtent->disk.offset + lastExtent->disk.length
+					== vecs[i].offset) {
+				lastExtent->disk.length += vecs[i].length;
+				offset += vecs[i].length;
+				start--;
+				_MakeSpace(count - 1);
+				continue;
+			}
+		}
+
+		file_extent *extent = ExtentAt(start + i);
 		extent->offset = offset;
 		extent->disk = vecs[i];
 		offset += extent->disk.length;
+		lastExtent = extent;
 	}
 
 #ifdef TRACE_FILE_MAP
-	for (uint32 i = start; i < count; i++) {
+	for (uint32 i = 0; i < count; i++) {
 		file_extent *extent = ExtentAt(i);
-		dprintf("[%ld] extend offset %Ld, disk offset %Ld, length %Ld\n",
+		dprintf("[%ld] extent offset %Ld, disk offset %Ld, length %Ld\n",
 			i, extent->offset, extent->disk.offset, extent->disk.length);
 	}
 #endif
@@ -179,11 +228,25 @@ file_map::Add(file_io_vec *vecs, size_t vecCount, off_t &lastOffset)
 }
 
 
+/*!	Invalidates or removes the specified part of the file map.
+*/
 void
 file_map::Invalidate(off_t offset, off_t size)
 {
-	// TODO: honour offset/size parameters
-	Free();
+	// TODO: honour size, we currently always remove everything after "offset"
+	if (offset == 0) {
+		Free();
+		return;
+	}
+
+	uint32 index;
+	file_extent *extent = FindExtent(offset, &index);
+	if (extent != NULL) {
+		_MakeSpace(index);
+
+		if (extent->offset + extent->disk.length > offset)
+			extent->disk.length = offset - extent->offset;
+	}
 }
@@ -193,7 +256,6 @@ file_map::Free()
 	if (count > CACHED_FILE_EXTENTS)
 		free(indirect.array);
-	indirect.array = NULL;
 
 	count = 0;
 }
@@ -281,50 +343,45 @@ file_map_translate(void *_map, off_t offset, size_t size, file_io_vec *vecs,
 	if (offset + size > map.size)
 		size = map.size - offset;
 
-	if (map.count == 0) {
-		// we don't yet have the map of this file, so let's grab it
-		// (ordered by offset, so that we can do a binary search on them)
+	// First, we need to make sure that we have already cached all file
+	// extents needed for this request.
+
+	file_extent *lastExtent = NULL;
+	if (map.count > 0)
+		lastExtent = map.ExtentAt(map.count - 1);
+
+	off_t mapOffset = 0;
+	if (lastExtent != NULL)
+		mapOffset = lastExtent->offset + lastExtent->disk.length;
+
+	off_t end = offset + size;
+	while (mapOffset < end) {
+		// We don't have the requested extents yet, retrieve them
 		size_t vecCount = maxVecs;
-		off_t mapOffset = 0;
+		status = vfs_get_file_map(map.vnode, mapOffset, ~0UL, vecs,
+			&vecCount);
+		if (status < B_OK && status != B_BUFFER_OVERFLOW)
+			return status;
 
-		while (true) {
-			status = vfs_get_file_map(map.vnode, mapOffset, ~0UL, vecs,
-				&vecCount);
-			if (status < B_OK && status != B_BUFFER_OVERFLOW)
-				return status;
-
-			status_t addStatus = map.Add(vecs, vecCount, mapOffset);
-			if (addStatus != B_OK) {
-				// only clobber the status in case of failure
-				status = addStatus;
-			}
-
-			if (status != B_BUFFER_OVERFLOW)
-				break;
-
-			// when we are here, the map has been stored in the array, and
-			// the array size was still too small to cover the whole file
-			vecCount = maxVecs;
-		}
+		status_t addStatus = map.Add(vecs, vecCount, mapOffset);
+		if (addStatus != B_OK) {
+			// only clobber the status in case of failure
+			status = addStatus;
+		}
+
+		if (status != B_BUFFER_OVERFLOW)
+			break;
 	}
 
-	if (status != B_OK) {
-		// We must invalidate the (part of the) map we already
-		// have, as we cannot know if it's complete or not
-		map.Free();
+	if (status != B_OK)
 		return status;
-	}
 
-	// We now have cached the map of this file, we now need to
-	// translate it for the requested access.
+	// We now have cached the map of this file as far as we need it, now
+	// we need to translate it for the requested access.
 
 	uint32 index;
 	file_extent *fileExtent = map.FindExtent(offset, &index);
+	if (fileExtent == NULL) {
+		// access outside file bounds? But that's not our problem
+		*_count = 0;
+		return B_OK;
+	}
 
 	offset -= fileExtent->offset;
 	vecs[0].offset = fileExtent->disk.offset + offset;