kernel/fs: Larger sizes for EntryCache if there is >= 1GB RAM available.
Instead of 8096 maximum entries, there will now be about 130k. As there are around ~32k files in the Haiku git tree, this has a serious impact on "git status" performance: in my testing it sped up from around 0.95s to 0.39s — less than half — with a "hot" cache (in a VM backed by an NVMe SSD; the improvement may be more dramatic on a spinning-disk system). Compile performance does not seem much improved, however.
This commit is contained in:
parent
3a19a89f1a
commit
e83979afa4
@@ -7,6 +7,7 @@
|
||||
#include "EntryCache.h"
|
||||
|
||||
#include <new>
|
||||
#include <vm/vm.h>
|
||||
|
||||
|
||||
// Sentinel index value: marks a cache entry that is not currently stored
// in a generation's entry array (presumably checked against stored indices
// elsewhere in EntryCache — the uses are outside this view).
static const int32 kEntryNotInArray = -1;
|
||||
@@ -78,8 +79,15 @@ EntryCache::Init()
|
||||
if (error != B_OK)
|
||||
return error;
|
||||
|
||||
fGenerationCount = 8;
|
||||
int32 entriesSize = 1024;
|
||||
fGenerationCount = 8;
|
||||
|
||||
// TODO: Choose generation size/count more scientifically?
|
||||
// TODO: Add low_resource handler hook?
|
||||
if (vm_available_memory() >= (1024*1024*1024)) {
|
||||
entriesSize = 8096;
|
||||
fGenerationCount = 16;
|
||||
}
|
||||
|
||||
fGenerations = new(std::nothrow) EntryCacheGeneration[fGenerationCount];
|
||||
for (int32 i = 0; i < fGenerationCount; i++) {
|
||||
|
Loading…
Reference in New Issue
Block a user