add UBC memory-usage balancing. we track the number of pages in use for
each of the basic types (anonymous data, executable image, cached files)
and prevent the pagedaemon from reusing a given page if that would reduce
the count of that type of page below a sysctl-settable minimum threshold.
the thresholds are controlled via three new sysctl tunables:
vm.anonmin, vm.vnodemin, and vm.vtextmin.  these tunables are the
percentages of pageable memory reserved for each usage, and we do not allow
the sum of the minimums to be more than 95% so that there's always some
memory that can be reused.
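
each threshold is kept in two forms: the raw percentage for sysctl
reporting, and a fixed-point fraction in 256ths so the pagedaemon can
test it with a shift instead of a division.  a minimal standalone
sketch of that arithmetic (hypothetical names; the committed code keeps
the values in struct uvmexp and does the check in uvmpd_scan_inactive(),
further down):

    #include <stdio.h>

    /* convert a sysctl percentage into a fixed-point fraction of 256 */
    static int
    pct_to_fixed(int pct)
    {
        return pct * 256 / 100;
    }

    /*
     * would reclaiming one more page of this type drop its count below
     * the minimum?  if so, the pagedaemon reactivates the page instead.
     */
    static int
    below_min(int typepages, int totalpages, int minfixed)
    {
        return typepages <= (totalpages * minfixed) >> 8;
    }

    int
    main(void)
    {
        int total = 16384;              /* pageable pages (assumed) */
        int anonmin = pct_to_fixed(10); /* vm.anonmin=10 -> 25/256 */

        /* threshold is 1600 pages, i.e. ~9.8% after truncation */
        printf("threshold: %d pages\n", (total * anonmin) >> 8);
        printf("reactivate: %d\n", below_min(1500, total, anonmin));
        return 0;
    }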
commit 83d071a318 (parent ca0ffe95fb)
chs 2001-03-09 01:02:10 +00:00
12 changed files with 210 additions and 117 deletions


@@ -1,4 +1,4 @@
.\" $NetBSD: sysctl.8,v 1.64 2001/02/07 08:59:49 itojun Exp $
.\" $NetBSD: sysctl.8,v 1.65 2001/03/09 01:02:10 chs Exp $
.\"
.\" Copyright (c) 1993
.\" The Regents of the University of California. All rights reserved.
@@ -346,9 +346,16 @@ privilege can change the value.
.It user.posix2_upe integer no
.It user.posix2_version integer no
.It user.re_dup_max integer no
.It vfs.generic.usermount integer yes
.It vendor.<vendor>.* ? vendor specific
.It vfs.generic.usermount integer yes
.It vm.anonmin integer yes
.It vm.loadavg struct no
.It vm.nkmempages integer no
.It vm.uvmexp struct no
.It vm.uvmexp2 struct no
.It vm.vmmeter struct no
.It vm.vnodemin integer yes
.It vm.vtextmin integer yes
.El
.Pp
Entries found under


@@ -1,4 +1,4 @@
/* $NetBSD: sysctl.c,v 1.45 2001/02/19 22:56:23 cgd Exp $ */
/* $NetBSD: sysctl.c,v 1.46 2001/03/09 01:02:11 chs Exp $ */
/*
* Copyright (c) 1993
@@ -44,7 +44,7 @@ __COPYRIGHT(
#if 0
static char sccsid[] = "@(#)sysctl.c 8.1 (Berkeley) 6/6/93";
#else
__RCSID("$NetBSD: sysctl.c,v 1.45 2001/02/19 22:56:23 cgd Exp $");
__RCSID("$NetBSD: sysctl.c,v 1.46 2001/03/09 01:02:11 chs Exp $");
#endif
#endif /* not lint */
@@ -298,6 +298,7 @@ parse(string, flags)
struct list *lp;
int mib[CTL_MAXNAME];
char *cp, *bufp, buf[BUFSIZ];
double loads[3];
bufp = buf;
snprintf(buf, BUFSIZ, "%s", string);
@@ -377,7 +378,7 @@ parse(string, flags)
case KERN_NTPTIME:
if (flags == 0)
return;
warnx("Use xntpdc -c kerninfo to view %s information",
warnx("Use ntpdc -c kerninfo to view %s information",
string);
return;
case KERN_MBUF:
@@ -403,35 +404,25 @@ parse(string, flags)
break;
case CTL_VM:
if (mib[1] == VM_LOADAVG) {
double loads[3];
switch (mib[1]) {
case VM_LOADAVG:
getloadavg(loads, 3);
if (!nflag)
printf("%s: ", string);
printf("%.2f %.2f %.2f\n", loads[0], loads[1],
loads[2]);
return;
}
if (mib[1] == VM_NKMEMPAGES) {
size_t nkmempages_len;
int nkmempages;
nkmempages_len = sizeof(nkmempages);
if (sysctl(mib, 2, &nkmempages, &nkmempages_len,
NULL, 0)) {
warn("unable to get %s", string);
return;
case VM_METER:
case VM_UVMEXP:
case VM_UVMEXP2:
if (flags) {
warnx("Use vmstat or systat to view %s "
"information", string);
}
if (!nflag)
printf("%s: ", string);
printf("%d\n", nkmempages);
}
if (flags == 0)
return;
warnx("Use vmstat or systat to view %s information", string);
return;
}
break;
case CTL_NET:
if (mib[1] == PF_INET) {
@@ -1051,7 +1042,7 @@ sysctl_proc(string, bufpp, mib, flags, typep)
cp = &name[strlen(name)];
*cp++ = '.';
strcpy(cp, "curproc");
parse (name, Aflag);
parse(name, Aflag);
return (-1);
}
cp = strsep(bufpp, ".");


@@ -1,4 +1,4 @@
/* $NetBSD: vfs_subr.c,v 1.146 2001/02/21 21:40:00 jdolecek Exp $ */
/* $NetBSD: vfs_subr.c,v 1.147 2001/03/09 01:02:11 chs Exp $ */
/*-
* Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
@@ -1217,6 +1217,10 @@ vput(vp)
else
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
if (vp->v_flag & VTEXT) {
uvmexp.vtextpages -= vp->v_uvm.u_obj.uo_npages;
uvmexp.vnodepages += vp->v_uvm.u_obj.uo_npages;
}
vp->v_flag &= ~VTEXT;
simple_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
@@ -1257,6 +1261,10 @@ vrele(vp)
else
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
if (vp->v_flag & VTEXT) {
uvmexp.vtextpages -= vp->v_uvm.u_obj.uo_npages;
uvmexp.vnodepages += vp->v_uvm.u_obj.uo_npages;
}
vp->v_flag &= ~VTEXT;
if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
VOP_INACTIVE(vp, p);
@@ -1481,6 +1489,10 @@ vclean(vp, flags, p)
if (vp->v_flag & VXLOCK)
panic("vclean: deadlock, vp %p", vp);
vp->v_flag |= VXLOCK;
if (vp->v_flag & VTEXT) {
uvmexp.vtextpages -= vp->v_uvm.u_obj.uo_npages;
uvmexp.vnodepages += vp->v_uvm.u_obj.uo_npages;
}
vp->v_flag &= ~VTEXT;
/*


@@ -1,4 +1,4 @@
/* $NetBSD: vfs_vnops.c,v 1.45 2000/11/27 08:39:44 chs Exp $ */
/* $NetBSD: vfs_vnops.c,v 1.46 2001/03/09 01:02:11 chs Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -195,6 +195,10 @@ void
vn_marktext(vp)
struct vnode *vp;
{
if ((vp->v_flag & VTEXT) == 0) {
uvmexp.vnodepages -= vp->v_uvm.u_obj.uo_npages;
uvmexp.vtextpages += vp->v_uvm.u_obj.uo_npages;
}
vp->v_flag |= VTEXT;
}
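
together, vput()/vrele()/vclean() and vn_marktext() keep vnodepages and
vtextpages complementary: whenever a vnode gains or loses VTEXT, its
resident pages move between the two counters in bulk.  a hypothetical
helper restating the two transfers (the commit open-codes them at each
call site):

    /* assumes the struct vnode and uvmexp fields used in the diffs above */
    static void
    vnode_count_text(struct vnode *vp, int marktext)
    {
        int npages = vp->v_uvm.u_obj.uo_npages;

        if (marktext && (vp->v_flag & VTEXT) == 0) {
            /* vn_marktext(): cached-file pages become text pages */
            uvmexp.vnodepages -= npages;
            uvmexp.vtextpages += npages;
        } else if (!marktext && (vp->v_flag & VTEXT)) {
            /* vput()/vrele()/vclean(): text pages become file pages */
            uvmexp.vtextpages -= npages;
            uvmexp.vnodepages += npages;
        }
    }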


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.56 2001/02/06 17:01:52 eeh Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
*
@@ -251,9 +251,12 @@ struct uvmexp {
int inactive; /* number of pages that we free'd but may want back */
int paging; /* number of pages in the process of being paged out */
int wired; /* number of wired pages */
/* XXX: Adding anything before this line will break binary
* compatibility with top(1) on NetBSD 1.5.
/*
* Adding anything before this line will break binary compatibility
* with top(1) on NetBSD 1.5.
*/
int zeropages; /* number of zero'd pages */
int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
int reserve_kernel; /* number of pages reserved for kernel */
@@ -266,6 +269,12 @@ struct uvmexp {
int freetarg; /* target number of free pages */
int inactarg; /* target number of inactive pages */
int wiredmax; /* max number of wired pages */
int anonmin; /* min threshold for anon pages */
int vtextmin; /* min threshold for vtext pages */
int vnodemin; /* min threshold for vnode pages */
int anonminpct; /* min percent anon pages */
int vtextminpct;/* min percent vtext pages */
int vnodeminpct;/* min percent vnode pages */
/* swap */
int nswapdev; /* number of configured swap devices in system */
@@ -333,6 +342,9 @@ struct uvmexp {
int pdpageouts; /* number of times daemon started a pageout */
int pdpending; /* number of times daemon got a pending pagout */
int pddeact; /* number of pages daemon deactivates */
int pdreanon; /* anon pages reactivated due to min threshold */
int pdrevnode; /* vnode pages reactivated due to min threshold */
int pdrevtext; /* vtext pages reactivated due to min threshold */
/* kernel memory objects: managed by uvm_km_kmemalloc() only! */
struct uvm_object *kmem_object;


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_meter.c,v 1.16 2000/11/30 11:04:44 simonb Exp $ */
/* $NetBSD: uvm_meter.c,v 1.17 2001/03/09 01:02:12 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -130,6 +130,7 @@ uvm_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
struct proc *p;
{
struct vmtotal vmtotals;
int rv, t;
/* all sysctl names at this level are terminal */
if (namelen != 1)
@@ -156,6 +157,45 @@ uvm_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
case VM_NKMEMPAGES:
return (sysctl_rdint(oldp, oldlenp, newp, nkmempages));
case VM_ANONMIN:
t = uvmexp.anonminpct;
rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
if (rv) {
return rv;
}
if (t + uvmexp.vtextminpct + uvmexp.vnodeminpct > 95 || t < 0) {
return EINVAL;
}
uvmexp.anonminpct = t;
uvmexp.anonmin = t * 256 / 100;
return rv;
case VM_VTEXTMIN:
t = uvmexp.vtextminpct;
rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
if (rv) {
return rv;
}
if (uvmexp.anonminpct + t + uvmexp.vnodeminpct > 95 || t < 0) {
return EINVAL;
}
uvmexp.vtextminpct = t;
uvmexp.vtextmin = t * 256 / 100;
return rv;
case VM_VNODEMIN:
t = uvmexp.vnodeminpct;
rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
if (rv) {
return rv;
}
if (uvmexp.anonminpct + uvmexp.vtextminpct + t > 95 || t < 0) {
return EINVAL;
}
uvmexp.vnodeminpct = t;
uvmexp.vnodemin = t * 256 / 100;
return rv;
default:
return (EOPNOTSUPP);
}
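
each handler follows the same pattern: copy the current percentage, let
sysctl_int() overwrite it, validate, then refresh both the percentage
and its fixed-point copy.  the 95% cap is checked against the other two
current percentages, so with vm.vnodemin=10 and vm.vtextmin=5 in place,
vm.anonmin accepts at most 80.  a standalone sketch of the shared
validation rule (hypothetical helper; the commit repeats the check
inline in each case):

    #include <errno.h>

    /*
     * no minimum may be negative, and the three minimums must sum to
     * at most 95% so some pageable memory always remains reusable.
     */
    static int
    check_minpcts(int anonpct, int vtextpct, int vnodepct)
    {
        if (anonpct < 0 || vtextpct < 0 || vnodepct < 0)
            return EINVAL;
        if (anonpct + vtextpct + vnodepct > 95)
            return EINVAL;
        return 0;
    }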


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_object.h,v 1.10 2001/01/28 22:23:06 thorpej Exp $ */
/* $NetBSD: uvm_object.h,v 1.11 2001/03/09 01:02:12 chs Exp $ */
/*
*
@@ -89,6 +89,11 @@ extern struct uvm_pagerops uvm_vnodeops;
#define UVM_OBJ_IS_VNODE(uobj) \
((uobj)->pgops == &uvm_vnodeops)
#define UVM_OBJ_IS_VTEXT(uobj) \
((uobj)->pgops == &uvm_vnodeops && \
((struct vnode *)uobj)->v_flag & VTEXT)
#endif /* _KERNEL */
#endif /* _UVM_UVM_OBJECT_H_ */
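
UVM_OBJ_IS_VTEXT() is true only for vnode-backed objects whose vnode
currently has VTEXT set, so the accounting and pagedaemon checks below
treat executable-image pages as a distinct subset of vnode pages.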


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.50 2001/01/28 22:23:04 thorpej Exp $ */
/* $NetBSD: uvm_page.c,v 1.51 2001/03/09 01:02:12 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -77,6 +77,7 @@
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#define UVM_PAGE /* pull in uvm_page.h functions */
#include <uvm/uvm.h>
@@ -151,11 +152,7 @@ uvm_pageinsert(pg)
struct pglist *buck;
int s;
#ifdef DIAGNOSTIC
if (pg->flags & PG_TABLED)
panic("uvm_pageinsert: already inserted");
#endif
KASSERT((pg->flags & PG_TABLED) == 0);
buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
s = splvm();
simple_lock(&uvm.hashlock);
@@ -190,8 +187,11 @@
simple_unlock(&uvm.hashlock);
splx(s);
if (UVM_OBJ_IS_VNODE(pg->uobject))
if (UVM_OBJ_IS_VTEXT(pg->uobject)) {
uvmexp.vtextpages--;
} else if (UVM_OBJ_IS_VNODE(pg->uobject)) {
uvmexp.vnodepages--;
}
/* object should be locked */
TAILQ_REMOVE(&pg->uobject->memq, pg, listq);
@@ -218,8 +218,9 @@ uvm_page_init(kvm_startp, kvm_endp)
paddr_t paddr;
/*
* step 1: init the page queues and page queue locks
* init the page queues and page queue locks
*/
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
for (i = 0; i < PGFL_NQUEUES; i++)
TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
@@ -231,8 +232,8 @@ uvm_page_init(kvm_startp, kvm_endp)
simple_lock_init(&uvm.fpageqlock);
/*
* step 2: init the <obj,offset> => <page> hash table. for now
* we just have one bucket (the bootstrap bucket). later on we
* init the <obj,offset> => <page> hash table. for now
* we just have one bucket (the bootstrap bucket). later on we
* will allocate new buckets as we dynamically resize the hash table.
*/
@@ -243,7 +244,7 @@ uvm_page_init(kvm_startp, kvm_endp)
simple_lock_init(&uvm.hashlock); /* init hash table lock */
/*
* step 3: allocate vm_page structures.
* allocate vm_page structures.
*/
/*
@@ -285,8 +286,7 @@ uvm_page_init(kvm_startp, kvm_endp)
memset(pagearray, 0, pagecount * sizeof(struct vm_page));
/*
* step 4: init the vm_page structures and put them in the correct
* place...
* init the vm_page structures and put them in the correct place.
*/
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
@@ -297,6 +297,7 @@ uvm_page_init(kvm_startp, kvm_endp)
panic("uvm_page_init"); /* XXXCDC: shouldn't happen? */
/* n = pagecount; */
}
/* set up page array pointers */
vm_physmem[lcv].pgs = pagearray;
pagearray += n;
@@ -317,7 +318,7 @@ uvm_page_init(kvm_startp, kvm_endp)
}
/*
* step 5: pass up the values of virtual_space_start and
* pass up the values of virtual_space_start and
* virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
* layers of the VM.
*/
@@ -326,23 +327,30 @@ uvm_page_init(kvm_startp, kvm_endp)
*kvm_endp = trunc_page(virtual_space_end);
/*
* step 6: init locks for kernel threads
* init locks for kernel threads
*/
simple_lock_init(&uvm.pagedaemon_lock);
simple_lock_init(&uvm.aiodoned_lock);
/*
* step 7: init reserve thresholds
* init various thresholds.
* XXXCDC - values may need adjusting
*/
uvmexp.reserve_pagedaemon = 1;
uvmexp.reserve_kernel = 5;
uvmexp.anonminpct = 10;
uvmexp.vnodeminpct = 10;
uvmexp.vtextminpct = 5;
uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;
/*
* step 8: determine if we should zero pages in the idle
* loop.
* determine if we should zero pages in the idle loop.
*/
uvm.page_idle_zero = vm_page_zero_enable;
/*

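with the defaults set above, the derived fixed-point values are
anonmin = 10 * 256 / 100 = 25, vnodemin = 25 and vtextmin = 12, so the
fractions actually enforced are 25/256 (about 9.8%), 25/256 and 12/256
(about 4.7%), slightly under the nominal percentages because the
conversion truncates.
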

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_param.h,v 1.4 2001/01/09 13:55:20 pk Exp $ */
/* $NetBSD: uvm_param.h,v 1.5 2001/03/09 01:02:12 chs Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -109,7 +109,11 @@ typedef int boolean_t;
#define VM_UVMEXP 3 /* struct uvmexp */
#define VM_NKMEMPAGES 4 /* kmem_map pages */
#define VM_UVMEXP2 5 /* struct uvmexp_sysctl */
#define VM_MAXID 6 /* number of valid vm ids */
#define VM_ANONMIN 6
#define VM_VTEXTMIN 7
#define VM_VNODEMIN 8
#define VM_MAXID 9 /* number of valid vm ids */
#define CTL_VM_NAMES { \
{ 0, 0 }, \
@@ -118,9 +122,11 @@ typedef int boolean_t;
{ "uvmexp", CTLTYPE_STRUCT }, \
{ "nkmempages", CTLTYPE_INT }, \
{ "uvmexp2", CTLTYPE_STRUCT }, \
{ "anonmin", CTLTYPE_INT }, \
{ "vtextmin", CTLTYPE_INT }, \
{ "vnodemin", CTLTYPE_INT }, \
}
/*
* Return values from the VM routines.
*/


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdaemon.c,v 1.29 2001/01/28 23:30:46 thorpej Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.30 2001/03/09 01:02:12 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -78,6 +78,7 @@
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <uvm/uvm.h>
@@ -99,7 +100,6 @@ static void uvmpd_scan __P((void));
static boolean_t uvmpd_scan_inactive __P((struct pglist *));
static void uvmpd_tune __P((void));
/*
* uvm_wait: wait (sleep) for the page daemon to free some pages
*
@@ -250,10 +250,7 @@ uvm_pageout(void *arg)
*/
if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
uvmexp.inactive < uvmexp.inactarg ||
uvmexp.vnodepages >
(uvmexp.active + uvmexp.inactive + uvmexp.wired +
uvmexp.free) * 13 / 16) {
uvmexp.inactive < uvmexp.inactarg) {
uvmpd_scan();
}
@@ -371,9 +368,9 @@ uvmpd_scan_inactive(pglst)
int swnpages, swcpages; /* XXX: see below */
int swslot;
struct vm_anon *anon;
boolean_t swap_backed, vnode_only;
boolean_t swap_backed;
vaddr_t start;
int dirtyreacts, vpgs;
int dirtyreacts, t;
UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
/*
@@ -394,7 +391,6 @@ uvmpd_scan_inactive(pglst)
swnpages = swcpages = 0;
free = 0;
dirtyreacts = 0;
vnode_only = FALSE;
for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
@@ -417,33 +413,24 @@ uvmpd_scan_inactive(pglst)
free = uvmexp.free;
uvm_unlock_fpageq(s);
/* XXXUBC */
vpgs = uvmexp.vnodepages -
(uvmexp.active + uvmexp.inactive +
uvmexp.wired + uvmexp.free) * 13 / 16;
if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
vpgs > 0 || dirtyreacts == UVMPD_NUMDIRTYREACTS) {
if (vpgs <= 0) {
UVMHIST_LOG(pdhist," met free target: "
"exit loop", 0, 0, 0, 0);
retval = TRUE;
dirtyreacts == UVMPD_NUMDIRTYREACTS) {
UVMHIST_LOG(pdhist," met free target: "
"exit loop", 0, 0, 0, 0);
retval = TRUE;
if (swslot == 0)
/* exit now if no
swap-i/o pending */
break;
/* set p to null to signal final
swap i/o */
p = NULL;
} else {
vnode_only = TRUE;
if (swslot == 0) {
/* exit now if no swap-i/o pending */
break;
}
/* set p to null to signal final swap i/o */
p = NULL;
}
}
if (p) { /* if (we have a new page to consider) */
/*
* we are below target and have a new page to consider.
*/
@@ -452,16 +439,44 @@
/*
* move referenced pages back to active queue and
* skip to next page (unlikely to happen since
* inactive pages shouldn't have any valid mappings
* and we cleared reference before deactivating).
* skip to next page.
*/
if (pmap_is_referenced(p)) {
uvm_pageactivate(p);
uvmexp.pdreact++;
continue;
}
/*
* enforce the minimum thresholds on different
* types of memory usage. if reusing the current
* page would reduce that type of usage below its
* minimum, reactivate the page instead and move
* on to the next page.
*/
t = uvmexp.active + uvmexp.inactive + uvmexp.free;
if (p->uanon &&
uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8) {
uvm_pageactivate(p);
uvmexp.pdreanon++;
continue;
}
if (p->uobject && UVM_OBJ_IS_VTEXT(p->uobject) &&
uvmexp.vtextpages <= (t * uvmexp.vtextmin) >> 8) {
uvm_pageactivate(p);
uvmexp.pdrevtext++;
continue;
}
if (p->uobject && UVM_OBJ_IS_VNODE(p->uobject) &&
!UVM_OBJ_IS_VTEXT(p->uobject) &&
uvmexp.vnodepages <= (t * uvmexp.vnodemin) >> 8) {
uvm_pageactivate(p);
uvmexp.pdrevnode++;
continue;
}
/*
* first we attempt to lock the object that this page
* belongs to. if our attempt fails we skip on to
@@ -477,18 +492,15 @@
* case, the anon can "take over" the loaned page
* and make it its own.
*/
/* is page part of an anon or ownerless ? */
if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
if (vnode_only) {
uvm_pageactivate(p);
continue;
}
anon = p->uanon;
KASSERT(anon != NULL);
if (!simple_lock_try(&anon->an_lock))
if (!simple_lock_try(&anon->an_lock)) {
/* lock failed, skip this page */
continue;
}
/*
* if the page is ownerless, claim it in the
@@ -511,15 +523,10 @@
} else {
uobj = p->uobject;
KASSERT(uobj != NULL);
if (vnode_only &&
UVM_OBJ_IS_VNODE(uobj) == 0) {
uvm_pageactivate(p);
continue;
}
if (!simple_lock_try(&uobj->vmobjlock))
if (!simple_lock_try(&uobj->vmobjlock)) {
/* lock failed, skip this page */
continue;
}
if (p->flags & PG_BUSY) {
simple_unlock(&uobj->vmobjlock);
uvmexp.pdbusy++;
@@ -538,8 +545,9 @@
*/
pmap_page_protect(p, VM_PROT_NONE);
if ((p->flags & PG_CLEAN) != 0 && pmap_is_modified(p))
if ((p->flags & PG_CLEAN) != 0 && pmap_is_modified(p)) {
p->flags &= ~PG_CLEAN;
}
if (p->flags & PG_CLEAN) {
if (p->pqflags & PQ_SWAPBACKED) {
@@ -577,8 +585,7 @@
* free target when all the current pageouts complete.
*/
if (free + uvmexp.paging > uvmexp.freetarg << 2 &&
!vnode_only) {
if (free + uvmexp.paging > uvmexp.freetarg << 2) {
if (anon) {
simple_unlock(&anon->an_lock);
} else {
@@ -634,7 +641,7 @@
* first mark the page busy so that no one else will
* touch the page.
*/
swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0);
p->flags |= PG_BUSY; /* now we own it */
UVM_PAGE_OWN(p, "scan_inactive");
@@ -936,7 +943,7 @@
*/
nextpg = NULL;
/*
* lock page queues here just so they're always locked
* at the end of the loop.

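the three threshold checks in uvmpd_scan_inactive() are ordered anon,
then vtext, then plain vnode, each against t = active + inactive + free
at the time of the scan, and each bumps its own statistics counter
(pdreanon, pdrevtext, pdrevnode).  as a worked example with assumed
numbers: at t = 10000 pages and the default anonmin of 25/256, an anon
page is reactivated rather than reclaimed whenever
anonpages <= (10000 * 25) >> 8 = 976.
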

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_stat.c,v 1.17 2001/02/04 10:55:58 mrg Exp $ */
/* $NetBSD: uvm_stat.c,v 1.18 2001/03/09 01:02:13 chs Exp $ */
/*
*
@@ -218,7 +218,10 @@ uvmexp_print(void (*pr)(const char *, ...))
(*pr)(" %d VM pages: %d active, %d inactive, %d wired, %d free\n",
uvmexp.npages, uvmexp.active, uvmexp.inactive, uvmexp.wired,
uvmexp.free);
(*pr)(" %d anon, %d vnode, %d vtext\n",
(*pr)(" min %d%% (%d) anon, %d%% (%d) vnode, %d%% (%d) vtext\n",
uvmexp.anonminpct, uvmexp.anonmin, uvmexp.vnodeminpct,
uvmexp.vnodemin, uvmexp.vtextminpct, uvmexp.vtextmin);
(*pr)(" pages %d anon, %d vnode, %d vtext\n",
uvmexp.anonpages, uvmexp.vnodepages, uvmexp.vtextpages);
(*pr)(" freemin=%d, free-target=%d, inactive-target=%d, "
"wired-max=%d\n", uvmexp.freemin, uvmexp.freetarg, uvmexp.inactarg,


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_vnode.c,v 1.46 2001/02/22 01:02:09 enami Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.47 2001/03/09 01:02:13 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -830,7 +830,7 @@ uvn_cluster(uobj, offset, loffset, hoffset)
struct uvm_vnode *uvn = (struct uvm_vnode *)uobj;
*loffset = offset;
*hoffset = min(offset + MAXBSIZE, round_page(uvn->u_size));
*hoffset = MIN(offset + MAXBSIZE, round_page(uvn->u_size));
}
/*
@@ -937,13 +937,7 @@ uvn_findpage(uobj, offset, pgp, flags)
UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
return 0;
}
if (uvmexp.vnodepages >
(uvmexp.active + uvmexp.inactive + uvmexp.wired +
uvmexp.free) * 7 / 8) {
pg = NULL;
} else {
pg = uvm_pagealloc(uobj, offset, NULL, 0);
}
pg = uvm_pagealloc(uobj, offset, NULL, 0);
if (pg == NULL) {
if (flags & UFP_NOWAIT) {
UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
@@ -954,7 +948,11 @@ uvn_findpage(uobj, offset, pgp, flags)
simple_lock(&uobj->vmobjlock);
continue;
}
uvmexp.vnodepages++;
if (UVM_OBJ_IS_VTEXT(uobj)) {
uvmexp.vtextpages++;
} else {
uvmexp.vnodepages++;
}
UVMHIST_LOG(ubchist, "alloced",0,0,0,0);
break;
} else if (flags & UFP_NOCACHE) {