add uvmexp.swpgonly and use it to detect out-of-swap conditions.
commit d97d75d81b
parent 92045bbba9
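In outline, the new counter works like this: uvmexp.swpgonly counts swap pages whose only copy of the data lives on swap (no resident page), and updates to it are made under the new uvm.swap_data_lock. It is decremented when a swap-only page is read back into RAM (uvm_swap_get) or when such a page's swap slot is freed without a resident copy (uvm_anon_dropswap, uao_free), and incremented where swap pages are taken permanently out of circulation (the disklabel and miniroot regions in swap_on) or when a swap-in fails and the page really is still only on swap. The fault handler can then treat swpgonly == swpages as an out-of-swap condition and fail the fault with KERN_RESOURCE_SHORTAGE instead of sleeping in uvm_wait() for memory that can never appear. The following is a minimal userspace sketch of that bookkeeping, not kernel code; struct swap_counters and the helper names are illustrative only:

/*
 * Hedged sketch, not kernel code: a userspace model of the uvmexp.swpgonly
 * accounting this commit introduces.  "struct swap_counters" and the helper
 * names below are illustrative; in the kernel the fields live in struct
 * uvmexp and each update is bracketed by simple_lock(&uvm.swap_data_lock).
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct swap_counters {
	int swpages;	/* total configured swap pages */
	int swpginuse;	/* swap pages currently allocated */
	int swpgonly;	/* swap pages holding the only copy of their data */
};

/* a swap-only page is being read back into RAM (cf. uvm_swap_get) */
static void
page_swapped_in(struct swap_counters *sc)
{
	sc->swpgonly--;			/* no longer only in swap */
}

/* a swap slot is freed (cf. uvm_anon_dropswap / uao_dropswap callers) */
static void
swap_slot_freed(struct swap_counters *sc, bool page_resident)
{
	sc->swpginuse--;
	if (!page_resident)
		sc->swpgonly--;		/* the swap copy was the only copy */
}

/* the test the fault handler now uses before giving up with
 * KERN_RESOURCE_SHORTAGE rather than waiting for memory forever */
static bool
out_of_swap(const struct swap_counters *sc)
{
	return sc->swpgonly == sc->swpages;
}

int
main(void)
{
	struct swap_counters sc = { .swpages = 4, .swpginuse = 4, .swpgonly = 4 };

	assert(out_of_swap(&sc));	/* every swap page is the only copy */
	page_swapped_in(&sc);		/* a fault brings one page back in */
	assert(!out_of_swap(&sc));
	swap_slot_freed(&sc, true);	/* its slot is then released */
	printf("swpages=%d swpginuse=%d swpgonly=%d\n",
	    sc.swpages, sc.swpginuse, sc.swpgonly);
	return 0;
}

The #ifdef DIAGNOSTIC blocks added to uvm_fault.c panic if swpgonly ever exceeds swpages, guarding the same invariant the sketch's out_of_swap() check relies on. The per-file changes follow.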
Index: uvm.h
@@ -1,4 +1,4 @@
/* $NetBSD: uvm.h,v 1.14 1999/03/25 18:48:49 mrg Exp $ */
/* $NetBSD: uvm.h,v 1.15 1999/03/26 17:34:15 chs Exp $ */

/*
*
@@ -92,6 +92,7 @@ struct uvm {
int page_nhash; /* number of buckets */
int page_hashmask; /* hash mask */
simple_lock_data_t hashlock; /* lock on page_hash array */

/* anon stuff */
struct vm_anon *afree; /* anon free list */
simple_lock_data_t afreelock; /* lock on anon free list */
@@ -107,6 +108,9 @@ struct uvm {
vaddr_t pager_sva; /* start of pager VA area */
vaddr_t pager_eva; /* end of pager VA area */

/* swap-related items */
simple_lock_data_t swap_data_lock;

/* kernel object: to support anonymous pageable kernel memory */
struct uvm_object *kernel_object;
};
Index: uvm_anon.c
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_anon.c,v 1.1 1999/01/24 23:53:15 chuck Exp $ */
/* $NetBSD: uvm_anon.c,v 1.2 1999/03/26 17:34:15 chs Exp $ */

/*
*
@@ -205,7 +205,6 @@ uvm_anfree(anon)
if ((pg->flags & PG_BUSY) != 0) {
/* tell them to dump it when done */
pg->flags |= PG_RELEASED;
simple_unlock(&anon->an_lock);
UVMHIST_LOG(maphist,
" anon 0x%x, page 0x%x: BUSY (released!)",
anon, pg, 0, 0);
@@ -223,19 +222,9 @@ uvm_anfree(anon)
}

/*
* are we using any backing store resources? if so, free them.
* free any swap resources.
*/
if (anon->an_swslot) {
/*
* on backing store: no I/O in progress. sole amap reference
* is ours and we've got it locked down. thus we can free,
* and be done.
*/
UVMHIST_LOG(maphist," freeing anon 0x%x, paged to swslot 0x%x",
anon, anon->an_swslot, 0, 0);
uvm_swap_free(anon->an_swslot, 1);
anon->an_swslot = 0;
}
uvm_anon_dropswap(anon);

/*
* now that we've stripped the data areas from the anon, free the anon
@@ -249,6 +238,33 @@ uvm_anfree(anon)
UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

/*
* uvm_anon_dropswap: release any swap resources from this anon.
*
* => anon must be locked or have a reference count of 0.
*/
void
uvm_anon_dropswap(anon)
struct vm_anon *anon;
{
UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
if (anon->an_swslot == 0) {
return;
}

UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
anon, anon->an_swslot, 0, 0);
uvm_swap_free(anon->an_swslot, 1);
anon->an_swslot = 0;

if (anon->u.an_page == NULL) {
/* this page is no longer only in swap. */
simple_lock(&uvm.swap_data_lock);
uvmexp.swpgonly--;
simple_unlock(&uvm.swap_data_lock);
}
}

/*
* uvm_anon_lockloanpg: given a locked anon, lock its resident page
*
Index: uvm_anon.h
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_anon.h,v 1.9 1999/01/24 23:53:15 chuck Exp $ */
/* $NetBSD: uvm_anon.h,v 1.10 1999/03/26 17:34:15 chs Exp $ */

/*
*
@@ -101,5 +101,6 @@ void uvm_anfree __P((struct vm_anon *));
void uvm_anon_init __P((void));
void uvm_anon_add __P((int));
struct vm_page *uvm_anon_lockloanpg __P((struct vm_anon *));
void uvm_anon_dropswap __P((struct vm_anon *));

#endif /* _UVM_UVM_ANON_H_ */
Index: uvm_aobj.c
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_aobj.c,v 1.17 1999/03/25 18:48:49 mrg Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.18 1999/03/26 17:34:15 chs Exp $ */

/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -423,8 +423,17 @@ uao_free(aobj)
{
int slot = elt->slots[j];

if (slot)
if (slot) {
uvm_swap_free(slot, 1);

/*
* this page is no longer
* only in swap.
*/
simple_lock(&uvm.swap_data_lock);
uvmexp.swpgonly--;
simple_unlock(&uvm.swap_data_lock);
}
}

next = elt->list.le_next;
@@ -443,8 +452,14 @@ uao_free(aobj)
{
int slot = aobj->u_swslots[i];

if (slot)
if (slot) {
uvm_swap_free(slot, 1);

/* this page is no longer only in swap. */
simple_lock(&uvm.swap_data_lock);
uvmexp.swpgonly--;
simple_unlock(&uvm.swap_data_lock);
}
}
FREE(aobj->u_swslots, M_UVMAOBJ);
}
@@ -661,7 +676,6 @@ uao_detach(uobj)

busybody = FALSE;
for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
int swslot;

if (pg->flags & PG_BUSY) {
pg->flags |= PG_RELEASED;
@@ -669,16 +683,9 @@ uao_detach(uobj)
continue;
}


/* zap the mappings, free the swap slot, free the page */
pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);

swslot = uao_set_swslot(&aobj->u_obj,
pg->offset >> PAGE_SHIFT, 0);
if (swslot) {
uvm_swap_free(swslot, 1);
}

uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
uvm_lock_pageq();
uvm_pagefree(pg);
uvm_unlock_pageq();
@@ -1037,7 +1044,6 @@ static boolean_t uao_releasepg(pg, nextpgp)
struct vm_page **nextpgp; /* OUT */
{
struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
int slot;

#ifdef DIAGNOSTIC
if ((pg->flags & PG_RELEASED) == 0)
@@ -1048,9 +1054,7 @@ static boolean_t uao_releasepg(pg, nextpgp)
* dispose of the page [caller handles PG_WANTED] and swap slot.
*/
pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
slot = uao_set_swslot(&aobj->u_obj, pg->offset >> PAGE_SHIFT, 0);
if (slot)
uvm_swap_free(slot, 1);
uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
uvm_lock_pageq();
if (nextpgp)
*nextpgp = pg->pageq.tqe_next; /* next page for daemon */
@@ -1087,3 +1091,22 @@ static boolean_t uao_releasepg(pg, nextpgp)

return FALSE;
}

/*
* uao_dropswap: release any swap resources from this aobj page.
*
* => aobj must be locked or have a reference count of 0.
*/

void
uao_dropswap(uobj, pageidx)
struct uvm_object *uobj;
int pageidx;
{
int slot;

slot = uao_set_swslot(uobj, pageidx, 0);
if (slot) {
uvm_swap_free(slot, 1);
}
}
Index: uvm_aobj.h
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_aobj.h,v 1.7 1999/03/25 18:48:50 mrg Exp $ */
/* $NetBSD: uvm_aobj.h,v 1.8 1999/03/26 17:34:15 chs Exp $ */

/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -63,6 +63,7 @@
*/

int uao_set_swslot __P((struct uvm_object *, int, int));
void uao_dropswap __P((struct uvm_object *, int));

/*
* globals
Index: uvm_extern.h
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.22 1999/03/25 18:48:50 mrg Exp $ */
/* $NetBSD: uvm_extern.h,v 1.23 1999/03/26 17:34:15 chs Exp $ */

/*
*
@@ -177,6 +177,7 @@ struct uvmexp {
int nswapdev; /* number of configured swap devices in system */
int swpages; /* number of PAGE_SIZE'ed swap pages */
int swpginuse; /* number of swap pages in use */
int swpgonly; /* number of swap pages in use, not also in RAM */
int nswget; /* number of times fault calls uvm_swap_get() */
int nanon; /* number total of anon's in system */
int nfreeanon; /* number of free anon's */
Index: uvm_fault.c
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_fault.c,v 1.21 1999/03/25 18:48:50 mrg Exp $ */
/* $NetBSD: uvm_fault.c,v 1.22 1999/03/26 17:34:16 chs Exp $ */

/*
*
@@ -1146,13 +1146,18 @@ ReFault:
if (anon)
uvm_anfree(anon);
uvmfault_unlockall(&ufi, amap, uobj, oanon);
if (anon == NULL) {
#ifdef DIAGNOSTIC
if (uvmexp.swpgonly > uvmexp.swpages) {
panic("uvmexp.swpgonly botch");
}
#endif
if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
UVMHIST_LOG(maphist,
"<- failed. out of VM",0,0,0,0);
uvmexp.fltnoanon++;
/* XXX: OUT OF VM, ??? */
return (KERN_RESOURCE_SHORTAGE);
}

uvmexp.fltnoram++;
uvm_wait("flt_noram3"); /* out of RAM, wait for more */
goto ReFault;
@@ -1207,6 +1212,7 @@ ReFault:

if (fault_type == VM_FAULT_WIRE) {
uvm_pagewire(pg);
uvm_anon_dropswap(anon);
} else {
/* activate it */
uvm_pageactivate(pg);
@@ -1538,13 +1544,18 @@ Case2:

/* unlock and fail ... */
uvmfault_unlockall(&ufi, amap, uobj, NULL);
if (anon == NULL) {
#ifdef DIAGNOSTIC
if (uvmexp.swpgonly > uvmexp.swpages) {
panic("uvmexp.swpgonly botch");
}
#endif
if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
UVMHIST_LOG(maphist, " promote: out of VM",
0,0,0,0);
uvmexp.fltnoanon++;
/* XXX: out of VM */
return (KERN_RESOURCE_SHORTAGE);
}

UVMHIST_LOG(maphist, " out of RAM, waiting for more",
0,0,0,0);
uvm_anfree(anon);
@@ -1625,6 +1636,9 @@ Case2:

if (fault_type == VM_FAULT_WIRE) {
uvm_pagewire(pg);
if (pg->pqflags & PQ_AOBJ) {
uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
}
} else {

/* activate it */
Index: uvm_km.c
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_km.c,v 1.20 1999/03/25 18:48:52 mrg Exp $ */
/* $NetBSD: uvm_km.c,v 1.21 1999/03/26 17:34:16 chs Exp $ */

/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -575,12 +575,7 @@ uvm_km_pgremove(uobj, start, end)
* if this kernel object is an aobj, free the swap slot.
*/
if (is_aobj) {
int slot = uao_set_swslot(uobj,
curoff >> PAGE_SHIFT,
0);

if (slot)
uvm_swap_free(slot, 1);
uao_dropswap(uobj, curoff >> PAGE_SHIFT);
}

uvm_lock_pageq();
@@ -615,11 +610,7 @@ loop_by_list:
* if this kernel object is an aobj, free the swap slot.
*/
if (is_aobj) {
int slot = uao_set_swslot(uobj,
pp->offset >> PAGE_SHIFT, 0);

if (slot)
uvm_swap_free(slot, 1);
uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
}

uvm_lock_pageq();
Index: uvm_stat.c
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_stat.c,v 1.11 1999/03/25 18:48:56 mrg Exp $ */
/* $NetBSD: uvm_stat.c,v 1.12 1999/03/26 17:34:16 chs Exp $ */

/*
*
@@ -244,6 +244,8 @@ uvm_dump()
uvmexp.pdpending, uvmexp.nswget);
printf(" nswapdev=%d, nanon=%d, nfreeanon=%d\n", uvmexp.nswapdev,
uvmexp.nanon, uvmexp.nfreeanon);
printf(" swpages=%d, swpginuse=%d, swpgonly=%d paging=%d\n",
uvmexp.swpages, uvmexp.swpginuse, uvmexp.swpgonly, uvmexp.paging);

printf(" kernel pointers:\n");
printf(" objs(kern/kmem/mb)=%p/%p/%p\n", uvm.kernel_object,
Index: uvm_swap.c
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_swap.c,v 1.25 1999/03/18 01:45:29 chs Exp $ */
/* $NetBSD: uvm_swap.c,v 1.26 1999/03/26 17:34:16 chs Exp $ */

/*
* Copyright (c) 1995, 1996, 1997 Matthew R. Green
@@ -88,7 +88,7 @@
* - swap_syscall_lock (sleep lock): this lock serializes the swapctl
* system call and prevents the swap priority list from changing
* while we are in the middle of a system call (e.g. SWAP_STATS).
* - swap_data_lock (simple_lock): this lock protects all swap data
* - uvm.swap_data_lock (simple_lock): this lock protects all swap data
* structures including the priority list, the swapdev structures,
* and the swapmap extent.
* - swap_buf_lock (simple_lock): this lock protects the free swapbuf
@@ -238,7 +238,6 @@ static struct swap_priority swap_priority;

/* locks */
lock_data_t swap_syscall_lock;
static simple_lock_data_t swap_data_lock;

/*
* prototypes
@@ -287,7 +286,7 @@ uvm_swap_init()
LIST_INIT(&swap_priority);
uvmexp.nswapdev = 0;
lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
simple_lock_init(&swap_data_lock);
simple_lock_init(&uvm.swap_data_lock);

if (bdevvp(swapdev, &swapdev_vp))
panic("uvm_swap_init: can't get vnode for swap device");
@@ -341,7 +340,7 @@ uvm_swap_init()
/*
* swaplist_insert: insert swap device "sdp" into the global list
*
* => caller must hold both swap_syscall_lock and swap_data_lock
* => caller must hold both swap_syscall_lock and uvm.swap_data_lock
* => caller must provide a newly malloc'd swappri structure (we will
* FREE it if we don't need it... this it to prevent malloc blocking
* here while adding swap)
@@ -401,7 +400,7 @@ swaplist_insert(sdp, newspp, priority)
* swaplist_find: find and optionally remove a swap device from the
* global list.
*
* => caller must hold both swap_syscall_lock and swap_data_lock
* => caller must hold both swap_syscall_lock and uvm.swap_data_lock
* => we return the swapdev we found (and removed)
*/
static struct swapdev *
@@ -437,7 +436,7 @@ swaplist_find(vp, remove)
* swaplist_trim: scan priority list for empty priority entries and kill
* them.
*
* => caller must hold both swap_syscall_lock and swap_data_lock
* => caller must hold both swap_syscall_lock and uvm.swap_data_lock
*/
static void
swaplist_trim()
@@ -457,7 +456,7 @@ swaplist_trim()
* swapdrum_add: add a "swapdev"'s blocks into /dev/drum's area.
*
* => caller must hold swap_syscall_lock
* => swap_data_lock should be unlocked (we may sleep)
* => uvm.swap_data_lock should be unlocked (we may sleep)
*/
static void
swapdrum_add(sdp, npages)
@@ -479,7 +478,7 @@ swapdrum_add(sdp, npages)
* to the "swapdev" that maps that section of the drum.
*
* => each swapdev takes one big contig chunk of the drum
* => caller must hold swap_data_lock
* => caller must hold uvm.swap_data_lock
*/
static struct swapdev *
swapdrum_getsdp(pgno)
@@ -553,7 +552,7 @@ sys_swapctl(p, v, retval)
*
* note that the swap_priority list can't change as long
* as we are holding the swap_syscall_lock. we don't want
* to grab the swap_data_lock because we may fault&sleep during
* to grab the uvm.swap_data_lock because we may fault&sleep during
* copyout() and we don't want to be holding that lock then!
*/
if (SCARG(uap, cmd) == SWAP_STATS
@@ -675,14 +674,14 @@ sys_swapctl(p, v, retval)
priority = SCARG(uap, misc);
spp = (struct swappri *)
malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
simple_lock(&swap_data_lock);
simple_lock(&uvm.swap_data_lock);
if ((sdp = swaplist_find(vp, 1)) == NULL) {
error = ENOENT;
} else {
swaplist_insert(sdp, spp, priority);
swaplist_trim();
}
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
if (error)
free(spp, M_VMSWAP);
break;
@@ -695,10 +694,10 @@ sys_swapctl(p, v, retval)
* it.
*/
priority = SCARG(uap, misc);
simple_lock(&swap_data_lock);
simple_lock(&uvm.swap_data_lock);
if ((sdp = swaplist_find(vp, 0)) != NULL) {
error = EBUSY;
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
break;
}
sdp = (struct swapdev *)
@@ -717,7 +716,7 @@ sys_swapctl(p, v, retval)
sdp->swd_cred = crdup(p->p_ucred);
#endif
swaplist_insert(sdp, spp, priority);
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);

sdp->swd_pathlen = len;
sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
@@ -730,10 +729,10 @@ sys_swapctl(p, v, retval)
* if swap_on is a success, it will clear the SWF_FAKE flag
*/
if ((error = swap_on(p, sdp)) != 0) {
simple_lock(&swap_data_lock);
simple_lock(&uvm.swap_data_lock);
(void) swaplist_find(vp, 1); /* kill fake entry */
swaplist_trim();
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
#ifdef SWAP_TO_FILES
if (vp->v_type == VREG)
crfree(sdp->swd_cred);
@@ -756,9 +755,9 @@ sys_swapctl(p, v, retval)
/*
* find the entry of interest and ensure it is enabled.
*/
simple_lock(&swap_data_lock);
simple_lock(&uvm.swap_data_lock);
if ((sdp = swaplist_find(vp, 0)) == NULL) {
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
error = ENXIO;
break;
}
@@ -767,7 +766,7 @@ sys_swapctl(p, v, retval)
* can't stop swapping from it (again).
*/
if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
error = EBUSY;
break;
}
@@ -783,7 +782,7 @@ sys_swapctl(p, v, retval)
error = ENXIO;
break;
}
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
free((caddr_t)sdp, M_VMSWAP);
#else
error = EINVAL;
@@ -814,7 +813,7 @@ out:
*
* => we avoid the start of the disk (to protect disk labels)
* => we also avoid the miniroot, if we are swapping to root.
* => caller should leave swap_data_lock unlocked, we may lock it
* => caller should leave uvm.swap_data_lock unlocked, we may lock it
* if needed.
*/
static int
@@ -957,10 +956,12 @@ swap_on(p, sdp)
if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
panic("disklabel region");
sdp->swd_npginuse += addr;
simple_lock(&uvm.swap_data_lock);
uvmexp.swpginuse += addr;
uvmexp.swpgonly += addr;
simple_unlock(&uvm.swap_data_lock);
}


/*
* if the vnode we are swapping to is the root vnode
* (i.e. we are swapping to the miniroot) then we want
@@ -983,8 +984,11 @@ swap_on(p, sdp)
rootpages, EX_WAITOK))
panic("swap_on: unable to preserve miniroot");

simple_lock(&uvm.swap_data_lock);
sdp->swd_npginuse += (rootpages - addr);
uvmexp.swpginuse += (rootpages - addr);
uvmexp.swpgonly += (rootpages - addr);
simple_unlock(&uvm.swap_data_lock);

printf("Preserved %d pages of miniroot ", rootpages);
printf("leaving %d pages of swap\n", size - rootpages);
@@ -993,12 +997,12 @@ swap_on(p, sdp)
/*
* now add the new swapdev to the drum and enable.
*/
simple_lock(&swap_data_lock);
simple_lock(&uvm.swap_data_lock);
swapdrum_add(sdp, npages);
sdp->swd_npages = npages;
sdp->swd_flags &= ~SWF_FAKE; /* going live */
sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
uvmexp.swpages += npages;

/*
@@ -1160,9 +1164,9 @@ swstrategy(bp)
* in it (i.e. the blocks we are doing I/O on).
*/
pageno = dbtob(bp->b_blkno) >> PAGE_SHIFT;
simple_lock(&swap_data_lock);
simple_lock(&uvm.swap_data_lock);
sdp = swapdrum_getsdp(pageno);
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
if (sdp == NULL) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
@@ -1553,7 +1557,7 @@ sw_reg_iodone(bp)
* allocate in a priority we "rotate" the circle queue.
* => space can be freed with uvm_swap_free
* => we return the page slot number in /dev/drum (0 == invalid slot)
* => we lock swap_data_lock
* => we lock uvm.swap_data_lock
* => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
*/
int
@@ -1575,7 +1579,7 @@ uvm_swap_alloc(nslots, lessok)
/*
* lock data lock, convert slots into blocks, and enter loop
*/
simple_lock(&swap_data_lock);
simple_lock(&uvm.swap_data_lock);

ReTry: /* XXXMRG */
for (spp = swap_priority.lh_first; spp != NULL;
@@ -1601,7 +1605,7 @@ ReTry: /* XXXMRG */
CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
sdp->swd_npginuse += *nslots;
uvmexp.swpginuse += *nslots;
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
/* done! return drum slot number */
UVMHIST_LOG(pdhist,
"success! returning %d slots starting at %d",
@@ -1629,7 +1633,7 @@ panic("uvm_swap_alloc: allocating unmapped swap block!");
}
/* XXXMRG: END HACK */

simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
return 0; /* failed */
}

@@ -1637,7 +1641,7 @@ panic("uvm_swap_alloc: allocating unmapped swap block!");
* uvm_swap_free: free swap slots
*
* => this can be all or part of an allocation made by uvm_swap_alloc
* => we lock swap_data_lock
* => we lock uvm.swap_data_lock
*/
void
uvm_swap_free(startslot, nslots)
@@ -1654,7 +1658,7 @@ uvm_swap_free(startslot, nslots)
* in the extent, and return. must hold pri lock to do
* lookup and access the extent.
*/
simple_lock(&swap_data_lock);
simple_lock(&uvm.swap_data_lock);
sdp = swapdrum_getsdp(startslot);

#ifdef DIAGNOSTIC
@@ -1677,7 +1681,7 @@ uvm_swap_free(startslot, nslots)
if (sdp->swd_npginuse < 0)
panic("uvm_swap_free: inuse < 0");
#endif
simple_unlock(&swap_data_lock);
simple_unlock(&uvm.swap_data_lock);
}

/*
@@ -1724,9 +1728,25 @@ uvm_swap_get(page, swslot, flags)
printf("uvm_swap_get: ASYNC get requested?\n");
#endif

/*
* this page is (about to be) no longer only in swap.
*/
simple_lock(&uvm.swap_data_lock);
uvmexp.swpgonly--;
simple_unlock(&uvm.swap_data_lock);

result = uvm_swap_io(&page, swslot, 1, B_READ |
((flags & PGO_SYNCIO) ? 0 : B_ASYNC));

if (result != VM_PAGER_OK && result != VM_PAGER_PEND) {
/*
* oops, the read failed so it really is still only in swap.
*/
simple_lock(&uvm.swap_data_lock);
uvmexp.swpgonly++;
simple_unlock(&uvm.swap_data_lock);
}

return (result);
}