Squashed a TODO in the block allocator:

* BlockAllocator::AllocateBlocks() was implemented rather poorly: instead of
  just remembering the best run on the first pass, it made a second pass through
  all allocation groups when it couldn't fulfill the maximum request.
* Even worse, it would then also only allocate the first run that satisfied the
  minimum request. Now it will always choose the best allocation (sketched
  below), leading to less fragmentation and an improved runtime.
* Now mmlr hopefully won't need to wait 10 minutes for the block allocator to
  create the swap file on his fragmented volume...
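
Conceptually, the new code is a single-pass best-fit scan: it takes the first
run that covers the full request, and otherwise remembers the longest run seen
so far, falling back to it if it at least satisfies the minimum. A minimal
standalone sketch of that idea (FindBestRun, Run, and the plain free-bit vector
are made up for illustration; this is not the actual BFS code):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Run { int32_t start; int32_t length; };

// Scan the free-bit map once; a run that reaches "maximum" wins immediately,
// otherwise the longest run found is used if it still satisfies "minimum".
static Run FindBestRun(const std::vector<bool>& freeBits, int32_t minimum,
	int32_t maximum)
{
	Run best = { -1, 0 };
	int32_t rangeStart = 0;
	int32_t range = 0;

	for (int32_t i = 0; i < (int32_t)freeBits.size(); i++) {
		if (freeBits[i]) {
			if (range == 0)
				rangeStart = i;
			if (++range >= maximum)
				return Run{rangeStart, maximum};
		} else {
			// end of a free range - keep it if it's the best fit so far
			if (range > best.length)
				best = Run{rangeStart, range};
			range = 0;
		}
	}
	// the bitmap may end inside a free range
	if (range > best.length)
		best = Run{rangeStart, range};

	if (best.length < minimum)
		return Run{-1, 0};	// nothing large enough; caller reports "device full"
	return best;
}

int main()
{
	// free runs of length 2, 4, and 3 - with minimum 3 and maximum 6 the
	// best-fit scan picks the run of 4 starting at bit 3 in a single pass
	std::vector<bool> freeBits = {1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1};
	Run run = FindBestRun(freeBits, 3, 6);
	std::printf("best run: start %d, length %d\n", (int)run.start,
		(int)run.length);
	return 0;
}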


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@28071 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-10-14 08:38:33 +00:00
parent adc7239906
commit 976eeb4da0
1 changed file with 47 additions and 51 deletions


@@ -652,27 +652,19 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 group,
AllocationBlock cached(fVolume);
MutexLocker lock(fLock);
// The first scan through all allocation groups will look for the
// wanted maximum of blocks, the second scan will just look to
// satisfy the minimal requirement
uint16 numBlocks = maximum;
uint32 bitsPerFullBlock = fVolume->BlockSize() << 3;
for (int32 i = 0; i < fNumGroups * 2; i++, group++, start = 0) {
// Find the block_run that can fulfill the request best
int32 bestGroup = -1;
int32 bestStart = -1;
int32 bestLength = -1;
for (int32 i = 0; i < fNumGroups; i++, group++, start = 0) {
group = group % fNumGroups;
if (start >= fGroups[group].NumBits() || fGroups[group].IsFull())
continue;
if (i >= fNumGroups) {
// If the minimum is the same as the maximum, it's not necessary to
// search for in the allocation groups a second time
if (maximum == minimum)
return B_DEVICE_FULL;
numBlocks = minimum;
}
// The wanted maximum is smaller than the largest free block in the
// group or already smaller than the minimum
// TODO: disabled because it's currently not maintained after the first
@@ -707,39 +699,52 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 group,
}
// have we found a range large enough to hold numBlocks?
if (++range >= maximum)
break;
} else if (i >= fNumGroups && range >= minimum) {
// we have found a block larger than the required minimum
// (second pass)
if (++range >= maximum) {
bestGroup = group;
bestStart = rangeStart;
bestLength = range;
break;
}
} else {
// end of a range
if (range > bestLength) {
bestGroup = group;
bestStart = rangeStart;
bestLength = range;
}
range = 0;
}
}
// TODO: we could also remember a "largest free block that fits the
// minimal requirement" in the group, and use that - this would
// avoid the need for a second run
T(Block("alloc-out", block, cached.Block(),
fVolume->BlockSize(), group, rangeStart));
// if we found a suitable block, mark the blocks as in use, and
if (bestLength >= maximum)
break;
// start from the beginning of the next block
start = 0;
}
if (bestLength >= maximum)
break;
}
// If we found a suitable range, mark the blocks as in use, and
// write the updated block bitmap back to disk
if (range >= numBlocks) {
// adjust allocation size
if (numBlocks < maximum)
numBlocks = range;
if (bestLength < minimum)
return B_DEVICE_FULL;
if (fGroups[group].Allocate(transaction, rangeStart, numBlocks)
if (fGroups[bestGroup].Allocate(transaction, bestStart, bestLength)
< B_OK)
RETURN_ERROR(B_IO_ERROR);
run.allocation_group = HOST_ENDIAN_TO_BFS_INT32(group);
run.start = HOST_ENDIAN_TO_BFS_INT16(rangeStart);
run.length = HOST_ENDIAN_TO_BFS_INT16(numBlocks);
run.allocation_group = HOST_ENDIAN_TO_BFS_INT32(bestGroup);
run.start = HOST_ENDIAN_TO_BFS_INT16(bestStart);
run.length = HOST_ENDIAN_TO_BFS_INT16(bestLength);
fVolume->SuperBlock().used_blocks =
HOST_ENDIAN_TO_BFS_INT64(fVolume->UsedBlocks() + numBlocks);
HOST_ENDIAN_TO_BFS_INT64(fVolume->UsedBlocks() + bestLength);
// We are not writing back the disk's super block - it's
// either done by the journaling code, or when the disk
// is unmounted.
@@ -747,18 +752,9 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 group,
// fixed anyway.
T(Allocate(run));
T(Block("alloc-out", block, cached.Block(),
fVolume->BlockSize(), group, rangeStart));
return B_OK;
}
// start from the beginning of the next block
start = 0;
}
}
return B_DEVICE_FULL;
}
status_t
BlockAllocator::AllocateForInode(Transaction& transaction,