Wrap swap-related code in #ifdef VMSWAP; always #define VMSWAP for now.

This commit is contained in:
yamt 2005-09-13 22:00:05 +00:00
parent 6ed9366c27
commit 6fbf5bf6f1
9 changed files with 155 additions and 28 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap.c,v 1.64 2005/07/31 04:04:46 yamt Exp $ */
/* $NetBSD: uvm_amap.c,v 1.65 2005/09/13 22:00:05 yamt Exp $ */
/*
*
@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.64 2005/07/31 04:04:46 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.65 2005/09/13 22:00:05 yamt Exp $");
#undef UVM_AMAP_INLINE /* enable/disable amap inlines */
@ -1236,6 +1236,8 @@ amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
#endif
#if defined(VMSWAP)
/*
* amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
*
@ -1342,3 +1344,5 @@ next:
return rv;
}
#endif /* defined(VMSWAP) */

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_anon.c,v 1.36 2005/07/31 04:04:47 yamt Exp $ */
/* $NetBSD: uvm_anon.c,v 1.37 2005/09/13 22:00:05 yamt Exp $ */
/*
*
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.36 2005/07/31 04:04:47 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.37 2005/09/13 22:00:05 yamt Exp $");
#include "opt_uvmhist.h"
@ -76,7 +76,9 @@ uvm_anon_ctor(void *arg, void *object, int flags)
anon->an_ref = 0;
simple_lock_init(&anon->an_lock);
anon->an_page = NULL;
#if defined(VMSWAP)
anon->an_swslot = 0;
#endif /* defined(VMSWAP) */
return 0;
}
@ -96,7 +98,9 @@ uvm_analloc(void)
KASSERT(anon->an_ref == 0);
LOCK_ASSERT(simple_lock_held(&anon->an_lock) == 0);
KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */
anon->an_ref = 1;
simple_lock(&anon->an_lock);
}
@ -187,6 +191,7 @@ uvm_anfree(struct vm_anon *anon)
"freed now!", anon, pg, 0, 0);
}
}
#if defined(VMSWAP)
if (pg == NULL && anon->an_swslot > 0) {
/* this page is no longer only in swap. */
simple_lock(&uvm.swap_data_lock);
@ -194,6 +199,7 @@ uvm_anfree(struct vm_anon *anon)
uvmexp.swpgonly--;
simple_unlock(&uvm.swap_data_lock);
}
#endif /* defined(VMSWAP) */
/*
* free any swap resources.
@ -207,12 +213,16 @@ uvm_anfree(struct vm_anon *anon)
*/
KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */
pool_cache_put(&uvm_anon_pool_cache, anon);
UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
#if defined(VMSWAP)
/*
* uvm_anon_dropswap: release any swap resources from this anon.
*
@ -232,6 +242,8 @@ uvm_anon_dropswap(struct vm_anon *anon)
anon->an_swslot = 0;
}
#endif /* defined(VMSWAP) */
/*
* uvm_anon_lockloanpg: given a locked anon, lock its resident page
*
@ -320,6 +332,8 @@ uvm_anon_lockloanpg(struct vm_anon *anon)
return(pg);
}
#if defined(VMSWAP)
/*
* fetch an anon's page.
*
@ -401,6 +415,8 @@ uvm_anon_pagein(struct vm_anon *anon)
return FALSE;
}
#endif /* defined(VMSWAP) */
/*
* uvm_anon_release: release an anon and its page.
*

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_anon.h,v 1.20 2005/07/31 04:04:47 yamt Exp $ */
/* $NetBSD: uvm_anon.h,v 1.21 2005/09/13 22:00:05 yamt Exp $ */
/*
*
@ -39,6 +39,14 @@
* uvm_anon.h
*/
#if defined(_KERNEL_OPT)
#if 0 /* notyet */
#include "opt_vmswap.h"
#else
#define VMSWAP
#endif
#endif
/*
* anonymous memory management
*
@ -51,9 +59,11 @@ struct vm_anon {
int an_ref; /* reference count [an_lock] */
struct simplelock an_lock; /* lock for an_ref */
struct vm_page *an_page;/* if in RAM [an_lock] */
#if defined(VMSWAP) || 1 /* XXX libkvm */
int an_swslot; /* drum swap slot # (if != 0)
[an_lock. also, it is ok to read
an_swslot if we hold an_page PG_BUSY] */
#endif /* defined(VMSWAP) */
};
/*
@ -97,7 +107,11 @@ struct vm_anon *uvm_analloc(void);
void uvm_anfree(struct vm_anon *);
void uvm_anon_init(void);
struct vm_page *uvm_anon_lockloanpg(struct vm_anon *);
#if defined(VMSWAP)
void uvm_anon_dropswap(struct vm_anon *);
#else /* defined(VMSWAP) */
#define uvm_anon_dropswap(a) /* nothing */
#endif /* defined(VMSWAP) */
void uvm_anon_release(struct vm_anon *);
boolean_t uvm_anon_pagein(struct vm_anon *);
#endif /* _KERNEL */

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_aobj.c,v 1.71 2005/09/13 19:54:09 yamt Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.72 2005/09/13 22:00:05 yamt Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@ -43,7 +43,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.71 2005/09/13 19:54:09 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.72 2005/09/13 22:00:05 yamt Exp $");
#include "opt_uvmhist.h"
@ -176,15 +176,18 @@ MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");
* local functions
*/
static struct uao_swhash_elt *uao_find_swhash_elt
(struct uvm_aobj *, int, boolean_t);
static void uao_free(struct uvm_aobj *);
static int uao_get(struct uvm_object *, voff_t, struct vm_page **,
int *, int, vm_prot_t, int, int);
static boolean_t uao_put(struct uvm_object *, voff_t, voff_t, int);
#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
(struct uvm_aobj *, int, boolean_t);
static boolean_t uao_pagein(struct uvm_aobj *, int, int);
static boolean_t uao_pagein_page(struct uvm_aobj *, int);
#endif /* defined(VMSWAP) */
/*
* aobj_pager
@ -216,6 +219,8 @@ static struct simplelock uao_list_lock;
* hash table/array related functions
*/
#if defined(VMSWAP)
/*
* uao_find_swhash_elt: find (or create) a hash table entry for a page
* offset.
@ -375,6 +380,8 @@ uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
return (oldslot);
}
#endif /* defined(VMSWAP) */
/*
* end of hash/array functions
*/
@ -391,6 +398,8 @@ uao_free(struct uvm_aobj *aobj)
int swpgonlydelta = 0;
simple_unlock(&aobj->u_obj.vmobjlock);
#if defined(VMSWAP)
if (UAO_USES_SWHASH(aobj)) {
int i, hashbuckets = aobj->u_swhashmask + 1;
@ -439,6 +448,8 @@ uao_free(struct uvm_aobj *aobj)
free(aobj->u_swslots, M_UVMAOBJ);
}
#endif /* defined(VMSWAP) */
/*
* finally free the aobj itself
*/
@ -511,6 +522,7 @@ uao_create(vsize_t size, int flags)
*/
if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
M_NOWAIT : M_WAITOK;
@ -527,6 +539,7 @@ uao_create(vsize_t size, int flags)
panic("uao_create: malloc swslots failed");
memset(aobj->u_swslots, 0, pages * sizeof(int));
}
#endif /* defined(VMSWAP) */
if (flags) {
aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
@ -924,10 +937,12 @@ static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
#if defined(VMSWAP)
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
#endif /* defined(VMSWAP) */
voff_t current_offset;
struct vm_page *ptmp = NULL; /* Quell compiler warning */
int lcv, gotpages, maxpages, swslot, error, pageidx;
int lcv, gotpages, maxpages, swslot, pageidx;
boolean_t done;
UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
@ -1139,6 +1154,9 @@ gotpage:
uvm_pagezero(ptmp);
} else {
#if defined(VMSWAP)
int error;
UVMHIST_LOG(pdhist, "pagein from swslot %d",
swslot, 0,0,0);
@ -1180,6 +1198,9 @@ gotpage:
simple_unlock(&uobj->vmobjlock);
return error;
}
#else /* defined(VMSWAP) */
panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
}
/*
@ -1206,6 +1227,8 @@ gotpage:
return 0;
}
#if defined(VMSWAP)
/*
* uao_dropswap: release any swap resources from this aobj page.
*
@ -1443,3 +1466,5 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
return FALSE;
}
#endif /* defined(VMSWAP) */

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_aobj.h,v 1.14 2005/07/31 04:04:47 yamt Exp $ */
/* $NetBSD: uvm_aobj.h,v 1.15 2005/09/13 22:00:05 yamt Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@ -57,6 +57,13 @@
#define UAO_FLAG_NOSWAP 0x8 /* aobj can't swap (kernel obj only!) */
#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#if 0 /* notyet */
#include "opt_vmswap.h"
#else
#define VMSWAP
#endif
#endif
/*
* prototypes
@ -64,9 +71,14 @@
void uao_init(void);
int uao_set_swslot(struct uvm_object *, int, int);
#if defined(VMSWAP)
int uao_find_swslot(struct uvm_object *, int);
void uao_dropswap(struct uvm_object *, int);
int uao_swap_off(int, int);
#else /* defined(VMSWAP) */
#define uao_find_swslot(obj, off) 0
#define uao_dropswap(obj, off) /* nothing */
#endif /* defined(VMSWAP) */
/*
* globals

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_fault.c,v 1.100 2005/07/31 04:04:47 yamt Exp $ */
/* $NetBSD: uvm_fault.c,v 1.101 2005/09/13 22:00:05 yamt Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.100 2005/07/31 04:04:47 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.101 2005/09/13 22:00:05 yamt Exp $");
#include "opt_uvmhist.h"
@ -359,6 +359,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
"anonget2",0);
}
} else {
#if defined(VMSWAP)
/*
* no page, we must try and bring it in.
@ -395,6 +396,9 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
* "we_own" case
*/
}
#else /* defined(VMSWAP) */
panic("%s: no page", __func__);
#endif /* defined(VMSWAP) */
}
/*
@ -420,6 +424,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
*/
if (we_own) {
#if defined(VMSWAP)
if (pg->flags & PG_WANTED) {
wakeup(pg);
}
@ -493,6 +498,9 @@ released:
UVM_PAGE_OWN(pg, NULL);
if (!locked)
simple_unlock(&anon->an_lock);
#else /* defined(VMSWAP) */
panic("%s: we_own", __func__);
#endif /* defined(VMSWAP) */
}
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pager.c,v 1.70 2005/07/31 04:04:47 yamt Exp $ */
/* $NetBSD: uvm_pager.c,v 1.71 2005/09/13 22:00:05 yamt Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.70 2005/07/31 04:04:47 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.71 2005/09/13 22:00:05 yamt Exp $");
#include "opt_uvmhist.h"
@ -321,20 +321,27 @@ uvm_aio_aiodone(struct buf *bp)
slock = &uobj->vmobjlock;
simple_lock(slock);
uvm_lock_pageq();
} else if (error) {
if (pg->uobject != NULL) {
swslot = uao_find_swslot(pg->uobject,
pg->offset >> PAGE_SHIFT);
} else {
swslot = pg->uanon->an_swslot;
} else {
#if defined(VMSWAP)
if (error) {
if (pg->uobject != NULL) {
swslot = uao_find_swslot(pg->uobject,
pg->offset >> PAGE_SHIFT);
} else {
swslot = pg->uanon->an_swslot;
}
KASSERT(swslot);
}
KASSERT(swslot);
#else /* defined(VMSWAP) */
panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
}
for (i = 0; i < npages; i++) {
pg = pgs[i];
KASSERT(swap || pg->uobject == uobj);
UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);
#if defined(VMSWAP)
/*
* for swap i/os, lock each page's object (or anon)
* individually since each page may need a different lock.
@ -349,6 +356,7 @@ uvm_aio_aiodone(struct buf *bp)
simple_lock(slock);
uvm_lock_pageq();
}
#endif /* defined(VMSWAP) */
/*
* process errors. for reads, just mark the page to be freed.
@ -375,6 +383,7 @@ uvm_aio_aiodone(struct buf *bp)
} else
slot = SWSLOT_BAD;
#if defined(VMSWAP)
if (swap) {
if (pg->uobject != NULL) {
int oldslot;
@ -387,6 +396,7 @@ uvm_aio_aiodone(struct buf *bp)
pg->uanon->an_swslot = slot;
}
}
#endif /* defined(VMSWAP) */
}
/*
@ -415,6 +425,7 @@ uvm_aio_aiodone(struct buf *bp)
pg->flags |= PG_RELEASED;
}
#if defined(VMSWAP)
/*
* for swap pages, unlock everything for this page now.
*/
@ -430,12 +441,14 @@ uvm_aio_aiodone(struct buf *bp)
simple_unlock(slock);
}
}
#endif /* defined(VMSWAP) */
}
if (!swap) {
uvm_page_unbusy(pgs, npages);
uvm_unlock_pageq();
simple_unlock(slock);
} else {
#if defined(VMSWAP)
KASSERT(write);
/* these pages are now only in swap. */
@ -451,6 +464,7 @@ uvm_aio_aiodone(struct buf *bp)
uvm_swap_free(swslot, npages);
}
uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
}
s = splbio();
if (write && (bp->b_flags & B_AGE) != 0) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdaemon.c,v 1.67 2005/07/31 04:04:47 yamt Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.68 2005/09/13 22:00:05 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.67 2005/07/31 04:04:47 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.68 2005/09/13 22:00:05 yamt Exp $");
#include "opt_uvmhist.h"
@ -382,15 +382,18 @@ uvm_aiodone_daemon(void *arg)
static void
uvmpd_scan_inactive(struct pglist *pglst)
{
int error;
struct vm_page *p, *nextpg = NULL; /* Quell compiler warning */
struct uvm_object *uobj;
struct vm_anon *anon;
#if defined(VMSWAP)
struct vm_page *swpps[round_page(MAXPHYS) >> PAGE_SHIFT];
int error;
int result;
#endif /* defined(VMSWAP) */
struct simplelock *slock;
int swnpages, swcpages;
int swslot;
int dirtyreacts, t, result;
int dirtyreacts, t;
boolean_t anonunder, fileunder, execunder;
boolean_t anonover, fileover, execover;
boolean_t anonreact, filereact, execreact;
@ -424,6 +427,13 @@ uvmpd_scan_inactive(struct pglist *pglst)
if (filereact && execreact && (anonreact || uvm_swapisfull())) {
anonreact = filereact = execreact = FALSE;
}
#if !defined(VMSWAP)
/*
* XXX no point to put swap-backed pages on the page queue.
*/
anonreact = TRUE;
#endif /* !defined(VMSWAP) */
for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
uobj = NULL;
anon = NULL;
@ -525,6 +535,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
}
uvmexp.pdobscan++;
} else {
#if defined(VMSWAP)
KASSERT(anon != NULL);
slock = &anon->an_lock;
if (!simple_lock_try(slock)) {
@ -547,6 +558,9 @@ uvmpd_scan_inactive(struct pglist *pglst)
continue;
}
uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
}
@ -569,6 +583,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
continue;
}
#if defined(VMSWAP)
/*
* the page is swap-backed. remove all the permissions
* from the page so we can sync the modified info
@ -716,8 +731,13 @@ uvmpd_scan_inactive(struct pglist *pglst)
if (swcpages < swnpages) {
continue;
}
#else /* defined(VMSWAP) */
panic("%s: swap-backed", __func__);
#endif /* defined(VMSWAP) */
}
#if defined(VMSWAP)
/*
* if this is the final pageout we could have a few
* unused swap blocks. if so, free them now.
@ -753,6 +773,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
nextpg = TAILQ_FIRST(pglst);
}
#endif /* defined(VMSWAP) */
}
}
@ -872,6 +893,7 @@ uvmpd_scan(void)
continue;
}
#if defined(VMSWAP)
/*
* if there's a shortage of swap, free any swap allocated
* to this page so that other pages can be paged out.
@ -893,6 +915,7 @@ uvmpd_scan(void)
}
}
}
#endif /* defined(VMSWAP) */
/*
* if there's a shortage of inactive pages, deactivate.

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_swap.h,v 1.10 2005/07/31 04:04:47 yamt Exp $ */
/* $NetBSD: uvm_swap.h,v 1.11 2005/09/13 22:00:05 yamt Exp $ */
/*
* Copyright (c) 1997 Matthew R. Green
@ -36,9 +36,17 @@
#define SWSLOT_BAD (-1)
#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#if 0 /* notyet */
#include "opt_vmswap.h"
#else
#define VMSWAP
#endif
#endif
struct swapent;
#if defined(VMSWAP)
int uvm_swap_get(struct vm_page *, int, int);
int uvm_swap_put(int, struct vm_page **, int, int);
int uvm_swap_alloc(int *, boolean_t);
@ -46,6 +54,9 @@ void uvm_swap_free(int, int);
void uvm_swap_markbad(int, int);
void uvm_swap_stats(int, struct swapent *, int, register_t *);
boolean_t uvm_swapisfull(void);
#else /* defined(VMSWAP) */
#define uvm_swapisfull() TRUE
#endif /* defined(VMSWAP) */
#endif /* _KERNEL */