/*	$NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
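
/*
 * worked example of the cluster arithmetic above (illustrative note,
 * not from the original source): with UAO_SWHASH_CLUSTER_SHIFT == 4 a
 * cluster covers 16 pages.  for pageidx 0x123, UAO_SWHASH_ELT_TAG()
 * yields tag 0x12, UAO_SWHASH_ELT_PAGESLOT() selects
 * slots[0x123 & 0xf] == slots[3], and UAO_SWHASH_ELT_PAGEIDX_BASE()
 * maps tag 0x12 back to base pageidx 0x120.
 */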

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */
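
/*
 * with the defaults above the threshold works out to 16 * 4 == 64
 * pages: an aobj of at most 64 pages uses the flat u_swslots array,
 * anything larger uses the hash table.
 */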

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	    UAO_SWHASH_MAXBUCKETS))
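
/*
 * for example, a 1024-page aobj gets min(1024 >> 4, 256) == 64 buckets,
 * i.e. one bucket per cluster, until the 256-bucket cap kicks in at
 * 4096 pages and beyond.
 */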

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */

	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
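
/*
 * because u_obj is the first member of struct uvm_aobj, a
 * (struct uvm_object *) known to point at an aobj can simply be cast
 * to (struct uvm_aobj *); uao_set_swslot() and uao_detach_locked()
 * below start with exactly that cast.
 */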

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
						int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						voff_t, voff_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, voff_t,
						vm_page_t *, int *, int,
						vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						struct vm_page **));
static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
			KASSERT(slot == 0);
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
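
/*
 * note on usage: uao_set_swslot() returns the previous slot, which the
 * caller still owns; uao_dropswap() below, for instance, clears a slot
 * with uao_set_swslot(uobj, pageidx, 0) and then hands the returned
 * slot to uvm_swap_free().
 */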

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);

					/*
					 * this page is no longer
					 * only in swap.
					 */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly--;
					simple_unlock(&uvm.swap_data_lock);
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case no need to worry about locking since
	 * we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
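
/*
 * hypothetical caller sketch (not from the original source): for normal
 * use an anonymous object is created with flags == 0 and released with
 * uao_detach() once the last reference is dropped, e.g.:
 *
 *	struct uvm_object *uobj = uao_create(64 * PAGE_SIZE, 0);
 *	... map it, fault pages in via the pager's uao_get(), etc ...
 *	uao_detach(uobj);
 */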

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = TAILQ_FIRST(&uobj->memq);
	     pg != NULL;
	     pg = TAILQ_NEXT(pg, listq)) {
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define UAO_HASH_PENALTY 4	/* XXX: a guess */
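
/*
 * concrete example of the penalty heuristic (illustrative note, not
 * from the original source): flushing a 10-page range of an object
 * that currently has 50 resident pages compares
 * 50 <= 10 * UAO_HASH_PENALTY (40), which is false, so uao_flush()
 * below walks the range with per-page hash lookups instead of the
 * full memq list.
 */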

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of loop or we
	 * will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */

	if (by_list) {
		pp = uobj->memq.tqh_first;
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = TAILQ_NEXT(pp, listq);

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pp);
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
	    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!   Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
				    SWSLOT_BAD);
				uvm_swap_markbad(swslot, 1);

				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return the next page on the queue, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

	KASSERT(pg->flags & PG_RELEASED);

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;
	KASSERT(aobj->u_obj.uo_refs == 0);

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

	KASSERT(TAILQ_EMPTY(&aobj->u_obj.memq));

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}
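
/*
 * note: this is the common cleanup path for a page's swap backing;
 * uao_detach_locked(), the PGO_FREE case of uao_flush(), and
 * uao_releasepg() above all call uao_dropswap() right before freeing
 * the page itself.
 */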

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);

	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {
		boolean_t rv;

		/*
		 * try to get the object lock,
		 * start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, slot, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;

	}
	KASSERT((pg->flags & PG_RELEASED) == 0);

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);
	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}