* The page cache hash table size was fixed to 1024 slots, but even when
  freshly booted, it would already contain > 20000 pages. The size is
  now initialized to half of the available pages. Ideally it would
  grow/shrink dynamically, though.
* Changed the hash function to yield a better distribution.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22211 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2007-09-09 17:36:13 +00:00
parent 40c3aedf8d
commit cf8b3687f4
1 changed files with 5 additions and 6 deletions

View File

@ -34,9 +34,6 @@
#endif
/* hash table of pages keyed by cache they're in and offset */
#define PAGE_TABLE_SIZE 1024 /* TODO: make this dynamic */
static hash_table *sPageCacheTable;
static spinlock sPageCacheTableLock;
@ -73,7 +70,8 @@ page_hash_func(void *_p, const void *_key, uint32 range)
vm_page *page = (vm_page *)_p;
const struct page_lookup_key *key = (page_lookup_key *)_key;
#define HASH(offset, ref) ((offset) ^ ((uint32)(ref) >> 4))
#define HASH(offset, ref) ((offset) + ((uint32)(ref) >> 6) * 997)
// sizeof(vm_cache) >= 64, hence (uint32)(ref) >> 6 is still unique
if (page)
return HASH(page->cache_offset, page->cache) % range;
@ -107,8 +105,9 @@ acquire_unreferenced_cache_pseudo_ref(vm_cache* cache)
status_t
vm_cache_init(kernel_args *args)
{
sPageCacheTable = hash_init(PAGE_TABLE_SIZE, offsetof(vm_page, hash_next),
&page_compare_func, &page_hash_func);
// TODO: The table should grow/shrink dynamically.
sPageCacheTable = hash_init(vm_page_num_pages() / 2,
offsetof(vm_page, hash_next), &page_compare_func, &page_hash_func);
if (sPageCacheTable == NULL)
panic("vm_cache_init: no memory\n");