Allocate the page buckets out of kernel_map, not kmem_map. Saves 16
or so kmem_map pages on a 32MB SPARCstation 2.
parent 952281e8fb
commit 9671588a30
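The savings figure in the commit message can be sanity-checked from the rehash logic changed below: uvm_page_rehash() rounds the bucket count up to a power of two and allocates one struct pglist per bucket. Here is a minimal user-space sketch of that arithmetic; the 4 KB page size, the 8-byte struct pglist (two pointers on a 32-bit SPARC), and the free-page estimate are assumptions for illustration, not values taken from the commit.

#include <stdio.h>

int
main(void)
{
	/* assumed parameters for a 32 MB sun4c SPARCstation 2 */
	unsigned long page_size = 4096;		/* bytes per page (assumed) */
	unsigned long pglist_size = 8;		/* sizeof(struct pglist): two pointers on ILP32 (assumed) */
	unsigned long freepages = 32UL * 1024 * 1024 / page_size;	/* roughly 8192 */
	unsigned long bucketcount, bytes;

	/* uvm_page_rehash() rounds the bucket count up to a power of two */
	for (bucketcount = 1; bucketcount < freepages; bucketcount *= 2)
		continue;

	bytes = bucketcount * pglist_size;
	printf("%lu buckets, %lu bytes, %lu pages\n",
	    bucketcount, bytes, (bytes + page_size - 1) / page_size);
	/* prints: 8192 buckets, 65536 bytes, 16 pages */
	return 0;
}

Under those assumptions the bucket table comes to 16 pages, matching the "16 or so kmem_map pages" that moving the allocation into kernel_map frees up.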
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.29 1999/12/30 16:09:47 eeh Exp $ */
+/* $NetBSD: uvm_page.c,v 1.30 2000/02/13 03:34:40 thorpej Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -108,8 +108,8 @@ static vaddr_t virtual_space_end;
 
 /*
  * we use a hash table with only one bucket during bootup. we will
- * later rehash (resize) the hash table once malloc() is ready.
- * we static allocate the bootstrap bucket below...
+ * later rehash (resize) the hash table once the allocator is ready.
+ * we static allocate the one bootstrap bucket below...
  */
 
 static struct pglist uvm_bootbucket;
@@ -228,7 +228,7 @@ uvm_page_init(kvm_startp, kvm_endp)
 	/*
 	 * step 2: init the <obj,offset> => <page> hash table. for now
 	 * we just have one bucket (the bootstrap bucket). later on we
-	 * will malloc() new buckets as we dynamically resize the hash table.
+	 * will allocate new buckets as we dynamically resize the hash table.
 	 */
 
 	uvm.page_nhash = 1;		/* 1 bucket */
@@ -733,6 +733,7 @@ uvm_page_rehash()
 	int freepages, lcv, bucketcount, s, oldcount;
 	struct pglist *newbuckets, *oldbuckets;
 	struct vm_page *pg;
+	size_t newsize, oldsize;
 
 	/*
 	 * compute number of pages that can go in the free pool
@@ -752,13 +753,21 @@ uvm_page_rehash()
 		bucketcount = bucketcount * 2;
 
 	/*
-	 * malloc new buckets
+	 * compute the size of the current table and new table.
 	 */
 
-	MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
-	    M_VMPBUCKET, M_NOWAIT);
+	oldbuckets = uvm.page_hash;
+	oldcount = uvm.page_nhash;
+	oldsize = round_page(sizeof(struct pglist) * oldcount);
+	newsize = round_page(sizeof(struct pglist) * bucketcount);
+
+	/*
+	 * allocate the new buckets
+	 */
+
+	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
 	if (newbuckets == NULL) {
-		printf("vm_page_physrehash: WARNING: could not grow page "
+		printf("uvm_page_physrehash: WARNING: could not grow page "
 		    "hash table\n");
 		return;
 	}
@@ -771,9 +780,6 @@ uvm_page_rehash()
 
 	s = splimp();
 	simple_lock(&uvm.hashlock);
-	/* swap old for new ... */
-	oldbuckets = uvm.page_hash;
-	oldcount = uvm.page_nhash;
 	uvm.page_hash = newbuckets;
 	uvm.page_nhash = bucketcount;
 	uvm.page_hashmask = bucketcount - 1;	/* power of 2 */
@@ -791,11 +797,11 @@ uvm_page_rehash()
 	splx(s);
 
 	/*
-	 * free old bucket array if we malloc'd it previously
+	 * free old bucket array if is not the boot-time table
 	 */
 
 	if (oldbuckets != &uvm_bootbucket)
-		FREE(oldbuckets, M_VMPBUCKET);
+		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);
 
 	/*
 	 * done