add new version of non-contiguous memory code, written by chuck cranor,

called "MACHINE_NEW_NONCONGIG".  this is required for UVM, the new VM
system (also written by chuck) that is coming soon.  adds new functions:
	vm_page_physload() -- tells the VM system about an area of physical
		memory.
	vm_physseg_find() -- returns the index into the vm_physmem[] array
		of the segment that this address falls in.
and several new versions of old functions/macros defined in vm_page.h.

this is the MI portion.  the sparc portion comes next, followed by i386.
all other ports need to change to this ASAP!  (alpha is already being
worked on)
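
a rough usage sketch (not part of this commit) of how a converted port
might use the new calls.  the example_* function names and the
byte-address arguments are invented for illustration; only
vm_page_physload(), vm_physseg_find() and atop() come from this change:

	#include <vm/vm.h>
	#include <vm/vm_page.h>

	/*
	 * sketch only: register one segment of physical RAM with the VM
	 * system.  all four vm_page_physload() arguments are page frame
	 * numbers, so byte addresses are converted with atop().  each
	 * call adds one entry to vm_physmem[] (up to VM_PHYSSEG_MAX),
	 * and is normally made from MD bootstrap code before
	 * vm_mem_init() runs.
	 */
	void
	example_load_segment(seg_start, seg_end, free_start, free_end)
		vm_offset_t seg_start, seg_end;   /* segment bounds (bytes) */
		vm_offset_t free_start, free_end; /* free part of segment */
	{
		vm_page_physload(atop(seg_start), atop(seg_end),
		    atop(free_start), atop(free_end));
	}

	/*
	 * sketch only: ask whether a physical address is managed.
	 * vm_physseg_find() returns the vm_physmem[] index of the
	 * segment holding the page frame, or -1 if the page is not
	 * managed; a non-NULL second argument also receives the offset
	 * within the segment.
	 */
	int
	example_is_managed(pa)
		vm_offset_t pa;
	{
		return (vm_physseg_find(atop(pa), NULL) != -1);
	}
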
committed by mrg, 1998-01-08 11:36:16 +00:00
commit a20d56e92e (parent 0876a69653)
8 changed files with 921 additions and 143 deletions

File: init_main.c

@ -1,4 +1,4 @@
/* $NetBSD: init_main.c,v 1.112 1998/01/07 00:41:43 thorpej Exp $ */
/* $NetBSD: init_main.c,v 1.113 1998/01/08 11:36:16 mrg Exp $ */
/*
* Copyright (c) 1995 Christopher G. Demetriou. All rights reserved.
@ -187,6 +187,9 @@ main(framep)
vm_mem_init();
kmeminit();
#if defined(MACHINE_NEW_NONCONTIG)
vm_page_physrehash();
#endif
disk_init(); /* must come before autoconfiguration */
tty_init(); /* initialise tty list */
#if NRND > 0

File: malloc.h

@ -1,4 +1,4 @@
/* $NetBSD: malloc.h,v 1.31 1997/06/12 17:59:13 mrg Exp $ */
/* $NetBSD: malloc.h,v 1.32 1998/01/08 11:36:18 mrg Exp $ */
/*
* Copyright (c) 1987, 1993
@ -128,6 +128,8 @@
#define M_NFSBIGFH 74 /* NFS big filehandle */
#define M_EXT2FSNODE 75 /* EXT2FS vnode private part */
#define M_VMSWAP 76 /* VM swap structures */
#define M_VMPAGE 77 /* VM page structures */
#define M_VMPBUCKET 78 /* VM page buckets */
#define M_TEMP 84 /* misc temporary data buffers */
#define M_LAST 85 /* Must be last type + 1 */
@ -209,8 +211,13 @@
"NFS bigfh", /* 74 M_NFSBIGFH */ \
"EXT2FS node", /* 75 M_EXT2FSNODE */ \
"VM swap", /* 76 M_VMSWAP */ \
NULL, NULL, NULL, NULL, \
NULL, NULL, NULL, \
"VM page", /* 77 M_VMPAGE */ \
"VM page bucket", /* 78 M_VMPBUCKET */ \
NULL, /* 79 */ \
NULL, /* 80 */ \
NULL, /* 81 */ \
NULL, /* 82 */ \
NULL, /* 83 */ \
"temp", /* 84 M_TEMP */ \
}

File: pmap.h

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.17 1998/01/03 01:13:14 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.18 1998/01/08 11:36:19 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@ -114,7 +114,7 @@ vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
int pmap_page_index __P((vm_offset_t));
#endif
#ifndef MACHINE_NONCONTIG
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
void pmap_init __P((vm_offset_t, vm_offset_t));
#else
void pmap_init __P((void));
@ -139,6 +139,8 @@ void pmap_zero_page __P((vm_offset_t));
u_int pmap_free_pages __P((void));
boolean_t pmap_next_page __P((vm_offset_t *));
void pmap_startup __P((vm_offset_t *, vm_offset_t *));
#endif
#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
vm_offset_t pmap_steal_memory __P((vm_size_t));
void pmap_virtual_space __P((vm_offset_t *, vm_offset_t *));
#endif

File: vm_init.c

@ -1,4 +1,4 @@
/* $NetBSD: vm_init.c,v 1.9 1994/06/29 06:48:00 cgd Exp $ */
/* $NetBSD: vm_init.c,v 1.10 1998/01/08 11:36:20 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@ -69,6 +69,7 @@
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@ -83,7 +84,7 @@
void vm_mem_init()
{
#ifndef MACHINE_NONCONTIG
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
extern vm_offset_t avail_start, avail_end;
extern vm_offset_t virtual_avail, virtual_end;
#else
@ -95,8 +96,11 @@ void vm_mem_init()
* From here on, all physical memory is accounted for,
* and we use only virtual addresses.
*/
if (cnt.v_page_size == 0) {
printf("vm_mem_init: WARN: MD code did not set page size\n");
vm_set_page_size();
#ifndef MACHINE_NONCONTIG
}
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
vm_page_startup(&avail_start, &avail_end);
#else
vm_page_bootstrap(&start, &end);
@ -105,13 +109,13 @@ void vm_mem_init()
/*
* Initialize other VM packages
*/
#ifndef MACHINE_NONCONTIG
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
vm_object_init(virtual_end - VM_MIN_KERNEL_ADDRESS);
#else
vm_object_init(end - VM_MIN_KERNEL_ADDRESS);
#endif
vm_map_startup();
#ifndef MACHINE_NONCONTIG
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
kmem_init(virtual_avail, virtual_end);
pmap_init(avail_start, avail_end);
#else

File: vm_map.c

@ -1,4 +1,4 @@
/* $NetBSD: vm_map.c,v 1.32 1998/01/06 08:36:26 thorpej Exp $ */
/* $NetBSD: vm_map.c,v 1.33 1998/01/08 11:36:23 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@ -139,8 +139,16 @@
* maps and requires map entries.
*/
#if defined(MACHINE_NEW_NONCONTIG)
u_int8_t kentry_data_store[MAX_KMAP*sizeof(struct vm_map) +
MAX_KMAPENT*sizeof(struct vm_map_entry)];
vm_offset_t kentry_data = (vm_offset_t) kentry_data_store;
vm_size_t kentry_data_size = sizeof(kentry_data_store);
#else
/* NUKE NUKE NUKE */
vm_offset_t kentry_data;
vm_size_t kentry_data_size;
#endif
vm_map_entry_t kentry_free;
vm_map_t kmap_free;
@ -154,6 +162,12 @@ vm_map_startup()
register vm_map_entry_t mep;
vm_map_t mp;
/*
* zero kentry area
* XXX necessary?
*/
bzero((caddr_t)kentry_data, kentry_data_size);
/*
* Static map structures for allocation before initialization of
* kernel map or kmem map. vm_map_create knows how to deal with them.

File: vm_meter.c

@ -1,4 +1,4 @@
/* $NetBSD: vm_meter.c,v 1.20 1997/09/07 19:31:52 pk Exp $ */
/* $NetBSD: vm_meter.c,v 1.21 1998/01/08 11:36:24 mrg Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@ -45,7 +45,7 @@
struct loadavg averunnable; /* load average, of runnable procs */
int maxslp = MAXSLP;
#ifndef MACHINE_NONCONTIG
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
int saferss = SAFERSS;
#endif /* MACHINE_NONCONTIG */

File: vm_page.c

@ -1,4 +1,4 @@
/* $NetBSD: vm_page.c,v 1.32 1997/09/16 00:08:09 thorpej Exp $ */
/* $NetBSD: vm_page.c,v 1.33 1998/01/08 11:36:25 mrg Exp $ */
#define VM_PAGE_ALLOC_MEMORY_STATS
@ -110,6 +110,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@ -118,15 +119,28 @@
#include <machine/cpu.h>
#ifdef MACHINE_NONCONTIG
#if defined(MACHINE_NEW_NONCONTIG)
/*
* physical memory config is stored in vm_physmem.
*/
struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
int vm_nphysseg = 0;
static int vm_page_lost_count = 0; /* XXXCDC: DEBUG DEBUG */
#endif
#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
/*
* These variables record the values returned by vm_page_bootstrap,
* for debugging purposes. The implementation of pmap_steal_memory
* and pmap_startup here also uses them internally.
*/
vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
#endif /* MACHINE_NONCONTIG */
static vm_offset_t virtual_space_start;
static vm_offset_t virtual_space_end;
#endif
/*
* Associated with page of user-allocatable memory is a
@ -137,6 +151,9 @@ struct pglist *vm_page_buckets; /* Array of buckets */
int vm_page_bucket_count = 0; /* How big is array? */
int vm_page_hash_mask; /* Mask for hash function */
simple_lock_data_t bucket_lock; /* lock for all buckets XXX */
#if defined(MACHINE_NEW_NONCONTIG)
struct pglist vm_page_bootbucket; /* bootstrap bucket */
#endif
struct pglist vm_page_queue_free;
struct pglist vm_page_queue_active;
@ -149,17 +166,45 @@ boolean_t vm_page_startup_initialized;
vm_page_t vm_page_array;
int vm_page_count;
#ifndef MACHINE_NONCONTIG
#if defined(MACHINE_NEW_NONCONTIG)
/* NOTHING NEEDED HERE */
#elif defined(MACHINE_NONCONTIG)
/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
u_long first_page;
int vm_page_count;
#else
/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
#else
u_long first_page;
#endif /* MACHINE_NONCONTIG */
int vm_page_count;
#endif
vm_size_t page_mask;
int page_shift;
#if defined(MACHINE_NEW_NONCONTIG)
/*
* local prototypes
*/
static boolean_t vm_page_physget __P((vm_offset_t *));
#endif
/*
* macros
*/
/*
* vm_page_hash:
*
* Distributes the object/offset key pair among hash buckets.
*
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
*/
#define vm_page_hash(object, offset) \
(((unsigned long)object+(unsigned long)atop(offset))&vm_page_hash_mask)
/*
* vm_set_page_size:
*
@ -183,8 +228,514 @@ vm_set_page_size()
break;
}
#if defined(MACHINE_NEW_NONCONTIG)
/*
* vm_page_bootstrap: initialize the resident memory module (called
* from vm_mem_init()).
*
* - startp and endp are out params which return the boundaries of the
* free part of the kernel's virtual address space.
*/
void
vm_page_bootstrap(startp, endp)
vm_offset_t *startp, *endp; /* OUT, OUT */
{
vm_offset_t paddr;
vm_page_t pagearray;
int lcv, freepages, pagecount, n, i;
/*
* first init all the locks and queues.
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
/*
* init the <OBJ,OFFSET> => <PAGE> hash table buckets. for now
* we just have one bucket (the bootstrap bucket). later on we
* will malloc() new buckets as we dynamically resize the hash table.
*/
vm_page_bucket_count = 1;
vm_page_hash_mask = 0;
vm_page_buckets = &vm_page_bootbucket;
TAILQ_INIT(vm_page_buckets);
simple_lock_init(&bucket_lock);
/*
* before calling this function the MD code is expected to register
* some free RAM with the vm_page_physload() function. our job
* now is to allocate vm_page structures for this preloaded memory.
*/
if (vm_nphysseg == 0)
panic("vm_page_bootstrap: no memory pre-allocated");
/*
* first calculate the number of free pages... note that start/end
* are inclusive so you have to add one to get the number of pages.
*
* note that we use start/end rather than avail_start/avail_end.
* this allows us to allocate extra vm_page structures in case we
* want to return some memory to the pool after booting.
*/
freepages = 0;
for (lcv = 0; lcv < vm_nphysseg; lcv++) {
freepages = freepages +
(vm_physmem[lcv].end - vm_physmem[lcv].start);
}
/*
* we now know we have (PAGE_SIZE * freepages) bytes of memory we can
* use. for each page of memory we use we need a vm_page structure.
* thus, the total number of pages we can use is the total size of
* the memory divided by the PAGE_SIZE plus the size of the vm_page
* structure. we add one to freepages as a fudge factor to avoid
* truncation errors (since we can only allocate in terms of whole
* pages).
*/
pagecount = (PAGE_SIZE * (freepages + 1)) /
(PAGE_SIZE + sizeof(struct vm_page));
pagearray = (vm_page_t)
pmap_steal_memory(pagecount * sizeof(struct vm_page));
bzero(pagearray, pagecount * sizeof(struct vm_page));
/*
* now init the page frames
*/
for (lcv = 0; lcv < vm_nphysseg; lcv++) {
n = vm_physmem[lcv].end - vm_physmem[lcv].start;
if (n > pagecount) {
printf("vm_init: lost %d page(s) in init\n",
n - pagecount);
vm_page_lost_count += (n - pagecount);
n = pagecount;
}
/* set up page array pointers */
vm_physmem[lcv].pgs = pagearray;
pagearray += n;
pagecount -= n;
vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
/* init and free vm_pages (we've already bzero'd them) */
paddr = ptoa(vm_physmem[lcv].start);
for (i = 0; i < n; i++, paddr += PAGE_SIZE) {
vm_physmem[lcv].pgs[i].phys_addr = paddr;
if (atop(paddr) >= vm_physmem[lcv].avail_start &&
atop(paddr) <= vm_physmem[lcv].avail_end)
vm_page_free(&vm_physmem[lcv].pgs[i]);
}
}
/*
* pass up the values of virtual_space_start and virtual_space_end
* (obtained by pmap_steal_memory) to the upper layers of the VM.
*/
*startp = round_page(virtual_space_start);
*endp = trunc_page(virtual_space_end);
/*
* init pagedaemon lock
*/
simple_lock_init(&vm_pages_needed_lock);
}
/*
* pmap_steal_memory: steal memory from physmem for bootstrapping
*/
vm_offset_t pmap_steal_memory(size)
vm_size_t size;
{
vm_offset_t addr, vaddr, paddr;
/* round the size to an integer multiple */
size = (size + 3) &~ 3; /* XXX */
/*
* on first call to this function init ourselves. we detect this
* by checking virtual_space_start/end which are in the zero'd BSS area.
*/
if (virtual_space_start == virtual_space_end) {
pmap_virtual_space(&virtual_space_start, &virtual_space_end);
/* round it the way we like it */
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
}
/*
* allocate virtual memory for this request
*/
addr = virtual_space_start;
virtual_space_start += size;
/*
* allocate and mapin physical pages to back new virtual pages
*/
for (vaddr = round_page(addr); vaddr < addr + size;
vaddr += PAGE_SIZE) {
if (!vm_page_physget(&paddr))
panic("pmap_steal_memory: out of memory");
/* XXX: should be wired, but some pmaps don't like that ... */
pmap_enter(pmap_kernel(), vaddr, paddr,
VM_PROT_READ|VM_PROT_WRITE, FALSE);
}
return(addr);
}
/*
* vm_page_physget: "steal" one page from the vm_physmem structure.
*
* - attempt to allocate it off the end of a segment in which the "avail"
* values match the start/end values. if we can't do that, then we
* will advance both values (making them equal, and removing some
* vm_page structures from the non-avail area).
* - return false if out of memory.
*/
static boolean_t
vm_page_physget(paddrp)
vm_offset_t *paddrp;
{
int lcv, x;
/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
{
if (vm_physmem[lcv].pgs)
panic("vm_page_physget: called _after_ bootstrap");
/* try from front */
if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
*paddrp = ptoa(vm_physmem[lcv].avail_start);
vm_physmem[lcv].avail_start++;
vm_physmem[lcv].start++;
/* nothing left? nuke it */
if (vm_physmem[lcv].avail_start ==
vm_physmem[lcv].end) {
if (vm_nphysseg == 1)
panic("vm_page_physget: out of memory!");
vm_nphysseg--;
for (x = lcv; x < vm_nphysseg; x++)
/* structure copy */
vm_physmem[x] = vm_physmem[x+1];
}
return(TRUE);
}
/* try from rear */
if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
vm_physmem[lcv].avail_end--;
vm_physmem[lcv].end--;
/* nothing left? nuke it */
if (vm_physmem[lcv].avail_end ==
vm_physmem[lcv].start) {
if (vm_nphysseg == 1)
panic("vm_page_physget: out of memory!");
vm_nphysseg--;
for (x = lcv; x < vm_nphysseg; x++)
/* structure copy */
vm_physmem[x] = vm_physmem[x+1];
}
return(TRUE);
}
}
/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
{
/* any room in this bank? */
if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
continue; /* nope */
*paddrp = ptoa(vm_physmem[lcv].avail_start);
vm_physmem[lcv].avail_start++;
vm_physmem[lcv].start = vm_physmem[lcv].avail_start; /* truncate! */
/* nothing left? nuke it */
if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
if (vm_nphysseg == 1)
panic("vm_page_physget: out of memory!");
vm_nphysseg--;
for (x = lcv; x < vm_nphysseg; x++)
vm_physmem[x] = vm_physmem[x+1]; /* structure copy */
}
return(TRUE);
}
return(FALSE); /* whoops! */
}
/*
* vm_page_physload: load physical memory into VM system
*
* - all args are PFs
* - all pages in start/end get vm_page structures
* - areas marked by avail_start/avail_end get added to the free page pool
* - we are limited to VM_PHYSSEG_MAX physical memory segments
*/
void
vm_page_physload(start, end, avail_start, avail_end)
vm_offset_t start, end, avail_start, avail_end;
{
struct vm_page *pgs;
struct vm_physseg *ps;
int preload, lcv, npages, x;
if (cnt.v_page_size == 0)
panic("vm_page_physload: page size not set!");
/*
* do we have room?
*/
if (vm_nphysseg == VM_PHYSSEG_MAX) {
printf("vm_page_physload: unable to load physical memory segment\n");
printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
VM_PHYSSEG_MAX, start, end);
return;
}
/*
* check to see if this is a "preload" (i.e. vm_mem_init hasn't been
* called yet, so malloc is not available).
*/
for (lcv = 0; lcv < vm_nphysseg; lcv++) {
if (vm_physmem[lcv].pgs)
break;
}
preload = (lcv == vm_nphysseg);
/*
* if VM is already running, attempt to malloc() vm_page structures
*/
if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
panic("vm_page_physload: tried to add RAM after vm_mem_init");
#else
/* XXXCDC: need some sort of lockout for this case */
vm_offset_t paddr;
/* # of pages */
npages = end - start;
MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
M_VMPAGE, M_NOWAIT);
if (pgs == NULL) {
printf("vm_page_physload: can not malloc vm_page structs for segment\n");
printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
return;
}
/* zero data, init phys_addr, and free pages */
bzero(pgs, sizeof(struct vm_page) * npages);
for (lcv = 0, paddr = ptoa(start); lcv < npages;
lcv++, paddr += PAGE_SIZE) {
pgs[lcv].phys_addr = paddr;
if (atop(paddr) >= avail_start &&
atop(paddr) <= avail_end)
vm_page_free(&pgs[i]);
}
/* XXXCDC: incomplete: need to update v_free_count, what else? */
/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
} else {
/* XXX/gcc complains if these don't get init'd */
pgs = NULL;
npages = 0;
}
/*
* now insert us in the proper place in vm_physmem[]
*/
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
/* random: put it at the end (easy!) */
ps = &vm_physmem[vm_nphysseg];
#else
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
/* sort by address for binary search */
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
if (start < vm_physmem[lcv].start)
break;
ps = &vm_physmem[lcv];
/* move back other entries, if necessary ... */
for (x = vm_nphysseg ; x > lcv ; x--)
/* structure copy */
vm_physmem[x] = vm_physmem[x - 1];
#else
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
/* sort by largest segment first */
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
if ((end - start) >
(vm_physmem[lcv].end - vm_physmem[lcv].start))
break;
ps = &vm_physmem[lcv];
/* move back other entries, if necessary ... */
for (x = vm_nphysseg ; x > lcv ; x--)
/* structure copy */
vm_physmem[x] = vm_physmem[x - 1];
#else
panic("vm_page_physload: unknown physseg strategy selected!");
#endif
#endif
#endif
ps->start = start;
ps->end = end;
ps->avail_start = avail_start;
ps->avail_end = avail_end;
if (preload) {
ps->pgs = NULL;
} else {
ps->pgs = pgs;
ps->lastpg = pgs + npages - 1;
}
vm_nphysseg++;
/*
* done!
*/
return;
}
/*
* vm_page_physrehash: reallocate hash table based on number of
* free pages.
*/
void
vm_page_physrehash()
{
struct pglist *newbuckets, *oldbuckets;
struct vm_page *pg;
int freepages, lcv, bucketcount, s, oldcount;
/*
* compute number of pages that can go in the free pool
*/
freepages = 0;
for (lcv = 0; lcv < vm_nphysseg; lcv++)
freepages = freepages + (vm_physmem[lcv].avail_end -
vm_physmem[lcv].avail_start);
/*
* compute number of buckets needed for this number of pages
*/
bucketcount = 1;
while (bucketcount < freepages)
bucketcount = bucketcount * 2;
/*
* malloc new buckets
*/
MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
M_VMPBUCKET, M_NOWAIT);
if (newbuckets == NULL) {
printf("vm_page_physrehash: WARNING: could not grow page hash table\n");
return;
}
for (lcv = 0; lcv < bucketcount; lcv++)
TAILQ_INIT(&newbuckets[lcv]);
/*
* now replace the old buckets with the new ones and rehash everything
*/
s = splimp();
simple_lock(&bucket_lock);
/* swap old for new ... */
oldbuckets = vm_page_buckets;
oldcount = vm_page_bucket_count;
vm_page_buckets = newbuckets;
vm_page_bucket_count = bucketcount;
vm_page_hash_mask = bucketcount - 1; /* power of 2 */
/* ... and rehash */
for (lcv = 0 ; lcv < oldcount ; lcv++) {
while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
TAILQ_INSERT_TAIL(&vm_page_buckets[
vm_page_hash(pg->object, pg->offset)], pg, hashq);
}
}
simple_unlock(&bucket_lock);
splx(s);
/*
* free old bucket array if we malloc'd it previously
*/
if (oldbuckets != &vm_page_bootbucket)
FREE(oldbuckets, M_VMPBUCKET);
/*
* done
*/
return;
}
#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
void vm_page_physdump __P((void)); /* SHUT UP GCC */
/* call from DDB */
void
vm_page_physdump()
{
int lcv;
printf("rehash: physical memory config [segs=%d of %d]:\n",
vm_nphysseg, VM_PHYSSEG_MAX);
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
vm_physmem[lcv].avail_end);
printf("STRATEGY = ");
switch (VM_PHYSSEG_STRAT) {
case VM_PSTRAT_RANDOM:
printf("RANDOM\n");
break;
case VM_PSTRAT_BSEARCH:
printf("BSEARCH\n");
break;
case VM_PSTRAT_BIGFIRST:
printf("BIGFIRST\n");
break;
default:
printf("<<UNKNOWN>>!!!!\n");
}
printf("number of buckets = %d\n", vm_page_bucket_count);
printf("number of lost pages = %d\n", vm_page_lost_count);
}
#endif
#elif defined(MACHINE_NONCONTIG)
/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
#ifdef MACHINE_NONCONTIG
/*
* vm_page_bootstrap:
*
@ -210,7 +761,6 @@ vm_page_bootstrap(startp, endp)
/*
* Initialize the locks
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
@ -218,7 +768,6 @@ vm_page_bootstrap(startp, endp)
* Initialize the queue headers for the free queue,
* the active queue and the inactive queue.
*/
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
@ -242,7 +791,6 @@ vm_page_bootstrap(startp, endp)
/*
* Validate these zone addresses.
*/
bzero((caddr_t) kentry_data, kentry_data_size);
/*
@ -256,7 +804,6 @@ vm_page_bootstrap(startp, endp)
* Note:
* This computation can be tweaked if desired.
*/
if (vm_page_bucket_count == 0) {
unsigned int npages = pmap_free_pages();
@ -285,7 +832,6 @@ vm_page_bootstrap(startp, endp)
* to the kernel. We don't trust the pmap module
* to get the alignment right.
*/
pmap_startup(&virtual_space_start, &virtual_space_end);
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
@ -297,7 +843,7 @@ vm_page_bootstrap(startp, endp)
}
#else /* MACHINE_NONCONTIG */
/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
/*
* vm_page_startup:
*
@ -324,7 +870,6 @@ vm_page_startup(start, end)
/*
* Initialize the locks
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
@ -332,7 +877,6 @@ vm_page_startup(start, end)
* Initialize the queue headers for the free queue,
* the active queue and the inactive queue.
*/
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
@ -347,7 +891,6 @@ vm_page_startup(start, end)
* Note:
* This computation can be tweaked if desired.
*/
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
while (vm_page_bucket_count < atop(*end - *start))
@ -373,7 +916,6 @@ vm_page_startup(start, end)
/*
* Truncate the remainder of physical memory to our page size.
*/
*end = trunc_page(*end);
/*
@ -387,7 +929,6 @@ vm_page_startup(start, end)
* since people shouldn't be cluttering up the kernel
* map (they should use their own maps).
*/
kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
MAX_KMAPENT*sizeof(struct vm_map_entry));
kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
@ -397,7 +938,6 @@ vm_page_startup(start, end)
* available for use (taking into account the overhead
* of a page structure per page).
*/
cnt.v_free_count = vm_page_count =
(*end - *start + sizeof(struct vm_page)) /
(PAGE_SIZE + sizeof(struct vm_page));
@ -406,7 +946,6 @@ vm_page_startup(start, end)
* Record the extent of physical memory that the
* virtual memory system manages.
*/
first_page = *start;
first_page += vm_page_count * sizeof(struct vm_page);
first_page = atop(round_page(first_page));
@ -415,11 +954,9 @@ vm_page_startup(start, end)
first_phys_addr = ptoa(first_page);
last_phys_addr = ptoa(last_page) + PAGE_MASK;
/*
* Allocate and clear the mem entry structures.
*/
m = vm_page_array = (vm_page_t)
pmap_bootstrap_alloc(vm_page_count * sizeof(struct vm_page));
bzero(vm_page_array, vm_page_count * sizeof(struct vm_page));
@ -428,7 +965,6 @@ vm_page_startup(start, end)
* Initialize the mem entry structures now, and
* put them in the free queue.
*/
pa = first_phys_addr;
npages = vm_page_count;
while (npages--) {
@ -592,16 +1128,6 @@ pmap_startup(startp, endp)
}
#endif /* MACHINE_NONCONTIG && !MACHINE_PAGES */
/*
* vm_page_hash:
*
* Distributes the object/offset key pair among hash buckets.
*
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
*/
#define vm_page_hash(object, offset) \
(((unsigned long)object+(unsigned long)atop(offset))&vm_page_hash_mask)
/*
* vm_page_insert: [ internal use only ]
*
@ -1085,6 +1611,10 @@ vm_page_alloc_memory(size, low, high, alignment, boundary,
int nsegs, waitok;
{
vm_offset_t try, idxpa, lastidxpa;
#if defined(MACHINE_NEW_NONCONTIG)
int psi;
struct vm_page *vm_page_array;
#endif
int s, tryidx, idx, end, error;
vm_page_t m;
u_long pagemask;
@ -1139,6 +1669,19 @@ vm_page_alloc_memory(size, low, high, alignment, boundary,
/*
* Make sure this is a managed physical page.
*/
#if defined(MACHINE_NEW_NONCONTIG)
if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
continue; /* managed? */
if (vm_physseg_find(atop(try + size), NULL) != psi)
continue; /* end must be in this segment */
tryidx = idx;
end = idx + (size / PAGE_SIZE);
vm_page_array = vm_physmem[psi].pgs;
/* XXX: emulates old global vm_page_array */
#else
if (IS_VM_PHYSADDR(try) == 0)
continue;
@ -1150,6 +1693,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary,
*/
goto out;
}
#endif
/*
* Found a suitable starting page. See of the range
@ -1165,6 +1709,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary,
idxpa = VM_PAGE_TO_PHYS(&vm_page_array[idx]);
#if !defined(MACHINE_NEW_NONCONTIG)
/*
* Make sure this is a managed physical page.
* XXX Necessary? I guess only if there
@ -1172,6 +1717,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary,
*/
if (IS_VM_PHYSADDR(idxpa) == 0)
break;
#endif
if (idx > tryidx) {
lastidxpa =

File: vm_page.h

@ -1,4 +1,4 @@
/* $NetBSD: vm_page.h,v 1.20 1997/06/06 23:10:25 thorpej Exp $ */
/* $NetBSD: vm_page.h,v 1.21 1998/01/08 11:36:27 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@ -67,7 +67,6 @@
/*
* Resident memory system definitions.
*/
#ifndef _VM_PAGE_
#define _VM_PAGE_
@ -95,7 +94,6 @@
* object that the page belongs to (O) or by the lock on the page
* queues (P).
*/
TAILQ_HEAD(pglist, vm_page);
struct vm_page {
@ -144,31 +142,42 @@ struct vm_page {
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
#define PG_PTPAGE 0x8000 /* DEBUG: is a user page table page */
#if VM_PAGE_DEBUG
#ifndef MACHINE_NONCONTIG
#define VM_PAGE_CHECK(mem) { \
if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > \
((unsigned int) &vm_page_array[last_page-first_page])) || \
((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
(PG_ACTIVE | PG_INACTIVE))) \
panic("vm_page_check: not valid!"); \
}
#else /* MACHINE_NONCONTIG */
#define VM_PAGE_CHECK(mem) { \
if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > \
((unsigned int) &vm_page_array[vm_page_count])) || \
((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
(PG_ACTIVE | PG_INACTIVE))) \
panic("vm_page_check: not valid!"); \
}
#endif /* MACHINE_NONCONTIG */
#else /* VM_PAGE_DEBUG */
#define VM_PAGE_CHECK(mem)
#endif /* VM_PAGE_DEBUG */
#if defined(MACHINE_NEW_NONCONTIG)
/*
* physical memory layout structure
*
* MD vmparam.h must #define:
* VM_PHYSSEG_MAX = max number of physical memory segments we support
* (if this is "1" then we revert to a "contig" case)
* VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
* - VM_PSTRAT_RANDOM: linear search (random order)
* - VM_PSTRAT_BSEARCH: binary search (sorted by address)
* - VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
* - others?
* XXXCDC: eventually we should remove contig and old non-contig cases
* and purge all left-over global variables...
*/
#define VM_PSTRAT_RANDOM 1
#define VM_PSTRAT_BSEARCH 2
#define VM_PSTRAT_BIGFIRST 3
/*
* vm_physseg: describes one segment of physical memory
*/
struct vm_physseg {
vm_offset_t start; /* PF# of first page in segment */
vm_offset_t end; /* (PF# of last page in segment) + 1 */
vm_offset_t avail_start; /* PF# of first free page in segment */
vm_offset_t avail_end; /* (PF# of last free page in segment) +1 */
struct vm_page *pgs; /* vm_page structures (from start) */
struct vm_page *lastpg; /* vm_page structure for end */
struct pmap_physseg pmseg; /* pmap specific (MD) data */
};
#endif /* MACHINE_NEW_NONCONTIG */
#if defined(_KERNEL)
#ifdef _KERNEL
/*
* Each pageable resident page falls into one of three lists:
*
@ -195,7 +204,27 @@ struct pglist vm_page_queue_inactive; /* inactive memory queue */
extern
vm_page_t vm_page_array; /* First resident page in table */
#ifndef MACHINE_NONCONTIG
#if defined(MACHINE_NEW_NONCONTIG)
/*
* physical memory config is stored in vm_physmem.
*/
extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;
#else
#if defined(MACHINE_NONCONTIG)
/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
extern
u_long first_page; /* first physical page number */
extern
int vm_page_count; /* How many pages do we manage? */
#define VM_PAGE_INDEX(pa) \
(pmap_page_index((pa)) - first_page)
#else
/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
extern
long first_page; /* first physical page number */
/* ... represented in vm_page_array */
@ -207,34 +236,187 @@ extern
vm_offset_t first_phys_addr; /* physical address for first_page */
extern
vm_offset_t last_phys_addr; /* physical address for last_page */
#else /* MACHINE_NONCONTIG */
extern
u_long first_page; /* first physical page number */
extern
int vm_page_count; /* How many pages do we manage? */
#endif /* MACHINE_NONCONTIG */
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
#ifndef MACHINE_NONCONTIG
#define IS_VM_PHYSADDR(pa) \
((pa) >= first_phys_addr && (pa) <= last_phys_addr)
#define VM_PAGE_INDEX(pa) \
(atop((pa)) - first_page)
#else
#define IS_VM_PHYSADDR(pa) \
({ \
int __pmapidx = pmap_page_index(pa); \
(__pmapidx >= 0 && __pmapidx >= first_page); \
})
#define VM_PAGE_INDEX(pa) \
(pmap_page_index((pa)) - first_page)
#endif /* MACHINE_NONCONTIG */
#endif /* MACHINE_NEW_NONCONTIG */
/*
* prototypes
*/
#if defined(MACHINE_NEW_NONCONTIG)
static struct vm_page *PHYS_TO_VM_PAGE __P((vm_offset_t));
static int vm_physseg_find __P((vm_offset_t, int *));
#endif
/* XXX probably should be elsewhere. */
#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
vm_offset_t pmap_steal_memory __P((vm_size_t));
#if !defined(MACHINE_NEW_NONCONTIG)
void pmap_startup __P((vm_offset_t *, vm_offset_t *));
#endif
#endif
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
int vm_page_alloc_memory __P((vm_size_t size, vm_offset_t low,
vm_offset_t high, vm_offset_t alignment, vm_offset_t boundary,
struct pglist *rlist, int nsegs, int waitok));
void vm_page_free_memory __P((struct pglist *list));
#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
void vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
#endif
void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
#if defined(MACHINE_NEW_NONCONTIG)
void vm_page_physload __P((vm_offset_t, vm_offset_t,
vm_offset_t, vm_offset_t));
void vm_page_physrehash __P((void));
#endif
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
void vm_page_startup __P((vm_offset_t *, vm_offset_t *));
#endif
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
boolean_t vm_page_zero_fill __P((vm_page_t));
/*
* macros and inlines
*/
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
#if defined(MACHINE_NEW_NONCONTIG)
/*
* when VM_PHYSSEG_MAX is 1, we can simplify these functions
*/
/*
* vm_physseg_find: find vm_physseg structure that belongs to a PA
*/
static __inline int
vm_physseg_find(pframe, offp)
vm_offset_t pframe;
int *offp;
{
#if VM_PHYSSEG_MAX == 1
/* 'contig' case */
if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
if (offp)
*offp = pframe - vm_physmem[0].start;
return(0);
}
return(-1);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
/* binary search for it */
int start, len, try;
/*
* if try is too large (thus target is less than try) we reduce
* the length to trunc(len/2) [i.e. everything smaller than "try"]
*
* if the try is too small (thus target is greater than try) then
* we set the new start to be (try + 1). this means we need to
* reduce the length to (round(len/2) - 1).
*
* note "adjust" below which takes advantage of the fact that
* (round(len/2) - 1) == trunc((len - 1) / 2)
* for any value of len we may have
*/
for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
try = start + (len / 2); /* try in the middle */
/* start past our try? */
if (pframe >= vm_physmem[try].start) {
/* was try correct? */
if (pframe < vm_physmem[try].end) {
if (offp)
*offp = pframe - vm_physmem[try].start;
return(try); /* got it */
}
start = try + 1; /* next time, start here */
len--; /* "adjust" */
} else {
/*
* pframe before try, just reduce length of
* region, done in "for" loop
*/
}
}
return(-1);
#else
/* linear search for it */
int lcv;
for (lcv = 0; lcv < vm_nphysseg; lcv++) {
if (pframe >= vm_physmem[lcv].start &&
pframe < vm_physmem[lcv].end) {
if (offp)
*offp = pframe - vm_physmem[lcv].start;
return(lcv); /* got it */
}
}
return(-1);
#endif
}
/*
* IS_VM_PHYSADDR: only used by mips/pmax/pica trap/pmap.
*/
#define IS_VM_PHYSADDR(PA) (vm_physseg_find(atop(PA), NULL) != -1)
/*
* PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
* back from an I/O mapping (ugh!). used in some MD code as well.
*/
static __inline struct vm_page *
PHYS_TO_VM_PAGE(pa)
vm_offset_t pa;
{
vm_offset_t pf = atop(pa);
int off;
int psi;
psi = vm_physseg_find(pf, &off);
if (psi != -1)
return(&vm_physmem[psi].pgs[off]);
return(NULL);
}
#elif defined(MACHINE_NONCONTIG)
/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
#define IS_VM_PHYSADDR(pa) \
(pmap_page_index(pa) >= 0)
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[VM_PAGE_INDEX((pa))])
(&vm_page_array[pmap_page_index(pa) - first_page])
#else
/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
#define IS_VM_PHYSADDR(pa) \
((pa) >= first_phys_addr && (pa) <= last_phys_addr)
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[atop(pa) - first_page ])
#endif /* (OLD) MACHINE_NONCONTIG */
#define VM_PAGE_IS_FREE(entry) ((entry)->flags & PG_FREE)
@ -244,10 +426,6 @@ simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
extern /* lock on free page queue */
simple_lock_data_t vm_page_queue_free_lock;
/*
* Functions implemented as macros
*/
#define PAGE_ASSERT_WAIT(m, interruptible) { \
(m)->flags |= PG_WANTED; \
assert_wait((m), (interruptible)); \
@ -266,7 +444,10 @@ simple_lock_data_t vm_page_queue_free_lock;
#define vm_page_set_modified(m) { (m)->flags &= ~PG_CLEAN; }
#ifndef MACHINE_NONCONTIG
/*
* XXXCDC: different versions of this should die
*/
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
#define VM_PAGE_INIT(mem, obj, offset) { \
(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
vm_page_insert((mem), (obj), (offset)); \
@ -283,37 +464,58 @@ simple_lock_data_t vm_page_queue_free_lock;
}
#endif /* MACHINE_NONCONTIG */
/* XXX what is this here for? */
void vm_set_page_size __P((void));
#if VM_PAGE_DEBUG
#if defined(MACHINE_NEW_NONCONTIG)
/*
* VM_PAGE_CHECK: debugging check of a vm_page structure
*/
static __inline void
VM_PAGE_CHECK(mem)
struct vm_page *mem;
{
int lcv;
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
if ((unsigned int) mem >= (unsigned int) vm_physmem[lcv].pgs &&
(unsigned int) mem <= (unsigned int) vm_physmem[lcv].lastpg)
break;
}
if (lcv == vm_nphysseg ||
(mem->flags & (PG_ACTIVE|PG_INACTIVE)) == (PG_ACTIVE|PG_INACTIVE))
panic("vm_page_check: not valid!");
return;
}
#elif defined(MACHINE_NONCONTIG)
/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
#define VM_PAGE_CHECK(mem) { \
if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > \
((unsigned int) &vm_page_array[vm_page_count])) || \
((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
(PG_ACTIVE | PG_INACTIVE))) \
panic("vm_page_check: not valid!"); \
}
#else
/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
#define VM_PAGE_CHECK(mem) { \
if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > \
((unsigned int) &vm_page_array[last_page-first_page])) || \
((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
(PG_ACTIVE | PG_INACTIVE))) \
panic("vm_page_check: not valid!"); \
}
/* XXX probably should be elsewhere. */
#ifdef MACHINE_NONCONTIG
vm_offset_t pmap_steal_memory __P((vm_size_t));
void pmap_startup __P((vm_offset_t *, vm_offset_t *));
#endif
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
int vm_page_alloc_memory __P((vm_size_t, vm_offset_t,
vm_offset_t, vm_offset_t, vm_offset_t,
struct pglist *, int, int));
void vm_page_free_memory __P((struct pglist *));
#ifdef MACHINE_NONCONTIG
void vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
#endif
void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
#ifndef MACHINE_NONCONTIG
void vm_page_startup __P((vm_offset_t *, vm_offset_t *));
#endif
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
boolean_t vm_page_zero_fill __P((vm_page_t));
#else /* VM_PAGE_DEBUG */
#define VM_PAGE_CHECK(mem)
#endif /* VM_PAGE_DEBUG */
#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */