2001-08-17 09:53:02 +04:00
|
|
|
/* $NetBSD: uvm_vnode.c,v 1.51 2001/08/17 05:53:02 chs Exp $ */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
|
|
|
* Copyright (c) 1991, 1993
|
2001-05-25 08:06:11 +04:00
|
|
|
* The Regents of the University of California.
|
1998-02-05 09:25:08 +03:00
|
|
|
* Copyright (c) 1990 University of Utah.
|
|
|
|
*
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to Berkeley by
|
|
|
|
* the Systems Programming Group of the University of Utah Computer
|
|
|
|
* Science Department.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by Charles D. Cranor,
|
2001-05-25 08:06:11 +04:00
|
|
|
* Washington University, the University of California, Berkeley and
|
1998-02-05 09:25:08 +03:00
|
|
|
* its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94
|
1998-02-07 14:07:38 +03:00
|
|
|
* from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
1998-02-19 03:55:04 +03:00
|
|
|
#include "fs_nfs.h"
|
1998-02-10 17:08:44 +03:00
|
|
|
#include "opt_uvmhist.h"
|
2000-11-27 11:39:39 +03:00
|
|
|
#include "opt_ddb.h"
|
1998-02-10 17:08:44 +03:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* uvm_vnode.c: the vnode pager.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2000-11-27 11:39:39 +03:00
|
|
|
#include <sys/kernel.h>
|
1998-02-05 09:25:08 +03:00
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/vnode.h>
|
1998-07-08 03:22:13 +04:00
|
|
|
#include <sys/disklabel.h>
|
|
|
|
#include <sys/ioctl.h>
|
|
|
|
#include <sys/fcntl.h>
|
|
|
|
#include <sys/conf.h>
|
2000-11-27 11:39:39 +03:00
|
|
|
#include <sys/pool.h>
|
|
|
|
#include <sys/mount.h>
|
1998-07-08 03:22:13 +04:00
|
|
|
|
|
|
|
#include <miscfs/specfs/specdev.h>
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
#include <uvm/uvm.h>
|
|
|
|
#include <uvm/uvm_vnode.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* functions
|
|
|
|
*/
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
static void uvn_cluster __P((struct uvm_object *, voff_t, voff_t *,
|
|
|
|
voff_t *));
|
|
|
|
static void uvn_detach __P((struct uvm_object *));
|
|
|
|
static int uvn_findpage __P((struct uvm_object *, voff_t,
|
|
|
|
struct vm_page **, int));
|
|
|
|
static boolean_t uvn_flush __P((struct uvm_object *, voff_t, voff_t,
|
|
|
|
int));
|
2001-05-27 01:27:10 +04:00
|
|
|
static int uvn_get __P((struct uvm_object *, voff_t,
|
|
|
|
struct vm_page **, int *, int, vm_prot_t,
|
|
|
|
int, int));
|
|
|
|
static int uvn_put __P((struct uvm_object *, struct vm_page **,
|
|
|
|
int, boolean_t));
|
2000-11-27 11:39:39 +03:00
|
|
|
static void uvn_reference __P((struct uvm_object *));
|
|
|
|
static boolean_t uvn_releasepg __P((struct vm_page *,
|
|
|
|
struct vm_page **));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* master pager structure
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct uvm_pagerops uvm_vnodeops = {
|
2000-11-27 11:39:39 +03:00
|
|
|
NULL,
|
1998-03-09 03:58:55 +03:00
|
|
|
uvn_reference,
|
|
|
|
uvn_detach,
|
2000-11-27 11:39:39 +03:00
|
|
|
NULL,
|
1998-03-09 03:58:55 +03:00
|
|
|
uvn_flush,
|
|
|
|
uvn_get,
|
|
|
|
uvn_put,
|
|
|
|
uvn_cluster,
|
2000-11-27 11:39:39 +03:00
|
|
|
uvm_mk_pcluster,
|
1998-03-09 03:58:55 +03:00
|
|
|
uvn_releasepg,
|
1998-02-05 09:25:08 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* the ops!
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvn_attach
|
|
|
|
*
|
|
|
|
* attach a vnode structure to a VM object. if the vnode is already
|
|
|
|
* attached, then just bump the reference count by one and return the
|
|
|
|
* VM object. if not already attached, attach and return the new VM obj.
|
|
|
|
* the "accessprot" tells the max access the attaching thread wants to
|
|
|
|
* our pages.
|
|
|
|
*
|
|
|
|
* => caller must _not_ already be holding the lock on the uvm_object.
|
|
|
|
* => in fact, nothing should be locked so that we can sleep here.
|
|
|
|
* => note that uvm_object is first thing in vnode structure, so their
|
|
|
|
* pointers are equiv.
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
struct uvm_object *
|
|
|
|
uvn_attach(arg, accessprot)
|
|
|
|
void *arg;
|
|
|
|
vm_prot_t accessprot;
|
|
|
|
{
|
|
|
|
struct vnode *vp = arg;
|
|
|
|
struct uvm_vnode *uvn = &vp->v_uvm;
|
|
|
|
struct vattr vattr;
|
2000-11-27 11:39:39 +03:00
|
|
|
int result;
|
1998-07-08 03:22:13 +04:00
|
|
|
struct partinfo pi;
|
2000-11-27 11:39:39 +03:00
|
|
|
voff_t used_vnode_size;
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
|
2000-11-27 11:39:39 +03:00
|
|
|
used_vnode_size = (voff_t)0;
|
1998-07-08 03:22:13 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* first get a lock on the uvn.
|
|
|
|
*/
|
|
|
|
simple_lock(&uvn->u_obj.vmobjlock);
|
2000-11-27 11:39:39 +03:00
|
|
|
while (uvn->u_flags & VXLOCK) {
|
|
|
|
uvn->u_flags |= VXWANT;
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0);
|
|
|
|
UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE,
|
|
|
|
"uvn_attach", 0);
|
|
|
|
simple_lock(&uvn->u_obj.vmobjlock);
|
|
|
|
UVMHIST_LOG(maphist," WOKE UP",0,0,0,0);
|
|
|
|
}
|
|
|
|
|
1998-07-08 03:22:13 +04:00
|
|
|
/*
|
1999-01-29 15:56:17 +03:00
|
|
|
* if we're mapping a BLK device, make sure it is a disk.
|
1998-07-08 03:22:13 +04:00
|
|
|
*/
|
|
|
|
if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
|
2000-11-27 11:39:39 +03:00
|
|
|
simple_unlock(&uvn->u_obj.vmobjlock);
|
1998-07-08 03:22:13 +04:00
|
|
|
UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
|
|
|
|
return(NULL);
|
|
|
|
}
|
2001-08-17 09:53:02 +04:00
|
|
|
KASSERT(vp->v_type == VREG || vp->v_type == VBLK);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* set up our idea of the size
|
|
|
|
* if this hasn't been done already.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
if (uvn->u_size == VSIZENOTSET) {
|
|
|
|
|
|
|
|
uvn->u_flags |= VXLOCK;
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock in case we sleep */
|
|
|
|
/* XXX: curproc? */
|
1998-07-08 03:22:13 +04:00
|
|
|
if (vp->v_type == VBLK) {
|
|
|
|
/*
|
|
|
|
* We could implement this as a specfs getattr call, but:
|
|
|
|
*
|
|
|
|
* (1) VOP_GETATTR() would get the file system
|
|
|
|
* vnode operation, not the specfs operation.
|
|
|
|
*
|
|
|
|
* (2) All we want is the size, anyhow.
|
|
|
|
*/
|
|
|
|
result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
|
|
|
|
DIOCGPART, (caddr_t)&pi, FREAD, curproc);
|
|
|
|
if (result == 0) {
|
|
|
|
/* XXX should remember blocksize */
|
2000-11-27 11:39:39 +03:00
|
|
|
used_vnode_size = (voff_t)pi.disklab->d_secsize *
|
|
|
|
(voff_t)pi.part->p_size;
|
1998-07-08 03:22:13 +04:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc);
|
|
|
|
if (result == 0)
|
|
|
|
used_vnode_size = vattr.va_size;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* relock object */
|
2000-11-27 11:39:39 +03:00
|
|
|
simple_lock(&uvn->u_obj.vmobjlock);
|
|
|
|
|
|
|
|
if (uvn->u_flags & VXWANT)
|
|
|
|
wakeup(uvn);
|
|
|
|
uvn->u_flags &= ~(VXLOCK|VXWANT);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
if (result != 0) {
|
|
|
|
simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */
|
|
|
|
UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
|
|
|
|
return(NULL);
|
|
|
|
}
|
|
|
|
uvn->u_size = used_vnode_size;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/* unlock and return */
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_unlock(&uvn->u_obj.vmobjlock);
|
2000-11-27 11:39:39 +03:00
|
|
|
UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs,
|
|
|
|
0, 0, 0);
|
|
|
|
return (&uvn->u_obj);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvn_reference
|
|
|
|
*
|
|
|
|
* duplicate a reference to a VM object. Note that the reference
|
2001-05-25 08:06:11 +04:00
|
|
|
* count must already be at least one (the passed in reference) so
|
1998-02-05 09:25:08 +03:00
|
|
|
* there is no chance of the uvn being killed or locked out here.
|
|
|
|
*
|
2001-05-25 08:06:11 +04:00
|
|
|
* => caller must call with object unlocked.
|
1998-02-05 09:25:08 +03:00
|
|
|
* => caller must be using the same accessprot as was used at attach time
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
 * uvn_reference: add a reference to a vnode-backed uvm_object.
 *
 * simply takes a vnode reference; the uvm_object is the first member
 * of the vnode, so the cast is safe.
 * => caller must call with object unlocked.
 */
static void
uvn_reference(uobj)
	struct uvm_object *uobj;
{
	struct vnode *vp = (struct vnode *)uobj;

	VREF(vp);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvn_detach
|
|
|
|
*
|
|
|
|
* remove a reference to a VM object.
|
|
|
|
*
|
|
|
|
* => caller must call with object unlocked and map locked.
|
|
|
|
*/
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
 * uvn_detach: drop a reference to a vnode-backed uvm_object.
 *
 * maps directly onto releasing the vnode reference.
 * => caller must call with object unlocked.
 */
static void
uvn_detach(uobj)
	struct uvm_object *uobj;
{
	struct vnode *vp = (struct vnode *)uobj;

	vrele(vp);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvn_releasepg: handled a released page in a uvn
|
|
|
|
*
|
|
|
|
* => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
|
|
|
|
* to dispose of.
|
|
|
|
* => caller must handled PG_WANTED case
|
|
|
|
* => called with page's object locked, pageq's unlocked
|
|
|
|
* => returns TRUE if page's object is still alive, FALSE if we
|
|
|
|
* killed the page's object. if we return TRUE, then we
|
|
|
|
* return with the object locked.
|
2000-11-27 11:39:39 +03:00
|
|
|
* => if (nextpgp != NULL) => we return the next page on the queue, and return
|
1998-02-05 09:25:08 +03:00
|
|
|
* with the page queues locked [for pagedaemon]
|
|
|
|
* => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
|
|
|
|
* => we kill the uvn if it is not referenced and we are suppose to
|
|
|
|
* kill it ("relkill").
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
boolean_t
|
|
|
|
uvn_releasepg(pg, nextpgp)
|
|
|
|
struct vm_page *pg;
|
|
|
|
struct vm_page **nextpgp; /* OUT */
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT(pg->flags & PG_RELEASED);
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* dispose of the page [caller handles PG_WANTED]
|
|
|
|
*/
|
1999-09-12 05:16:55 +04:00
|
|
|
pmap_page_protect(pg, VM_PROT_NONE);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_lock_pageq();
|
|
|
|
if (nextpgp)
|
2000-11-27 11:39:39 +03:00
|
|
|
*nextpgp = TAILQ_NEXT(pg, pageq);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pagefree(pg);
|
|
|
|
if (!nextpgp)
|
|
|
|
uvm_unlock_pageq();
|
|
|
|
|
|
|
|
return (TRUE);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* issues to consider:
|
|
|
|
* there are two tailq's in the uvm. structure... one for pending async
|
|
|
|
* i/o and one for "done" async i/o. to do an async i/o one puts
|
2001-02-18 22:40:25 +03:00
|
|
|
* a buf on the "pending" list (protected by splbio()), starts the
|
2001-03-11 01:46:45 +03:00
|
|
|
* i/o and returns 0. when the i/o is done, we expect
|
1998-02-05 09:25:08 +03:00
|
|
|
* some sort of "i/o done" function to be called (at splbio(), interrupt
|
2001-02-18 22:40:25 +03:00
|
|
|
* time). this function should remove the buf from the pending list
|
1998-02-05 09:25:08 +03:00
|
|
|
* and place it on the "done" list and wakeup the daemon. the daemon
|
|
|
|
* will run at normal spl() and will remove all items from the "done"
|
2001-02-18 22:40:25 +03:00
|
|
|
* list and call the iodone hook for each done request (see uvm_pager.c).
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
|
|
|
* => return KERN_SUCCESS (aio finished, free it). otherwise requeue for
|
|
|
|
* later collection.
|
|
|
|
* => called with pageq's locked by the daemon.
|
|
|
|
*
|
|
|
|
* general outline:
|
|
|
|
* - "try" to lock object. if fail, just return (will try again later)
|
|
|
|
* - drop "u_nio" (this req is done!)
|
|
|
|
* - if (object->iosync && u_naio == 0) { wakeup &uvn->u_naio }
|
|
|
|
* - get "page" structures (atop?).
|
|
|
|
* - handle "wanted" pages
|
|
|
|
* - handle "released" pages [using pgo_releasepg]
|
|
|
|
* >>> pgo_releasepg may kill the object
|
|
|
|
* dont forget to look at "object" wanted flag in all cases.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvn_flush: flush pages out of a uvm object.
|
|
|
|
*
|
2001-02-06 13:53:23 +03:00
|
|
|
* => "stop == 0" means flush all pages at or after "start".
|
1998-02-05 09:25:08 +03:00
|
|
|
* => object should be locked by caller. we may _unlock_ the object
|
2001-01-08 09:21:13 +03:00
|
|
|
* if (and only if) we need to clean a page (PGO_CLEANIT), or
|
|
|
|
* if PGO_SYNCIO is set and there are pages busy.
|
1998-02-05 09:25:08 +03:00
|
|
|
* we return with the object locked.
|
2001-01-08 09:21:13 +03:00
|
|
|
* => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
|
|
|
|
* thus, a caller might want to unlock higher level resources
|
|
|
|
* (e.g. vm_map) before calling flush.
|
|
|
|
* => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
|
|
|
|
* unlock the object nor block.
|
|
|
|
* => if PGO_ALLPAGES is set, then all pages in the object are valid targets
|
1998-02-05 09:25:08 +03:00
|
|
|
* for flushing.
|
|
|
|
* => NOTE: we rely on the fact that the object's memq is a TAILQ and
|
|
|
|
* that new pages are inserted on the tail end of the list. thus,
|
|
|
|
* we can make a complete pass through the object in one go by starting
|
|
|
|
* at the head and working towards the tail (new pages are put in
|
|
|
|
* front of us).
|
|
|
|
* => NOTE: we are allowed to lock the page queues, so the caller
|
|
|
|
* must not be holding the lock on them [e.g. pagedaemon had
|
|
|
|
* better not call us with the queues locked]
|
|
|
|
* => we return TRUE unless we encountered some sort of I/O error
|
|
|
|
*
|
|
|
|
* comment on "cleaning" object and PG_BUSY pages:
|
|
|
|
* this routine is holding the lock on the object. the only time
|
|
|
|
* that it can run into a PG_BUSY page that it does not own is if
|
|
|
|
* some other process has started I/O on the page (e.g. either
|
|
|
|
* a pagein, or a pageout). if the PG_BUSY page is being paged
|
|
|
|
* in, then it can not be dirty (!PG_CLEAN) because no one has
|
|
|
|
* had a chance to modify it yet. if the PG_BUSY page is being
|
|
|
|
* paged out then it means that someone else has already started
|
2001-05-25 08:06:11 +04:00
|
|
|
* cleaning the page for us (how nice!). in this case, if we
|
1998-02-05 09:25:08 +03:00
|
|
|
* have syncio specified, then after we make our pass through the
|
2001-05-25 08:06:11 +04:00
|
|
|
* object we need to wait for the other PG_BUSY pages to clear
|
1998-02-05 09:25:08 +03:00
|
|
|
* off (i.e. we need to do an iosync). also note that once a
|
|
|
|
* page is PG_BUSY it must stay in its object until it is un-busyed.
|
|
|
|
*
|
|
|
|
* note on page traversal:
|
|
|
|
* we can traverse the pages in an object either by going down the
|
|
|
|
* linked list in "uobj->memq", or we can go over the address range
|
|
|
|
* by page doing hash table lookups for each address. depending
|
2001-05-25 08:06:11 +04:00
|
|
|
* on how many pages are in the object it may be cheaper to do one
|
1998-02-05 09:25:08 +03:00
|
|
|
* or the other. we set "by_list" to true if we are using memq.
|
|
|
|
* if the cost of a hash lookup was equal to the cost of the list
|
|
|
|
* traversal we could compare the number of pages in the start->stop
|
|
|
|
* range to the total number of pages in the object. however, it
|
|
|
|
* seems that a hash table lookup is more expensive than the linked
|
2001-05-25 08:06:11 +04:00
|
|
|
* list traversal, so we multiply the number of pages in the
|
1998-02-05 09:25:08 +03:00
|
|
|
* start->stop range by a penalty which we define below.
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
#define UVN_HASH_PENALTY 4 /* XXX: a guess */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
static boolean_t
|
|
|
|
uvn_flush(uobj, start, stop, flags)
|
|
|
|
struct uvm_object *uobj;
|
2000-03-27 00:54:45 +04:00
|
|
|
voff_t start, stop;
|
1998-03-09 03:58:55 +03:00
|
|
|
int flags;
|
|
|
|
{
|
2000-11-27 11:39:39 +03:00
|
|
|
struct uvm_vnode *uvn = (struct uvm_vnode *)uobj;
|
|
|
|
struct vnode *vp = (struct vnode *)uobj;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_page *pp, *ppnext, *ptmp;
|
2000-11-27 11:39:39 +03:00
|
|
|
struct vm_page *pps[256], **ppsp;
|
|
|
|
int s;
|
1998-03-09 03:58:55 +03:00
|
|
|
int npages, result, lcv;
|
2000-11-27 11:39:39 +03:00
|
|
|
boolean_t retval, need_iosync, by_list, needs_clean, all, wasclean;
|
2001-03-11 01:46:45 +03:00
|
|
|
boolean_t async = (flags & PGO_SYNCIO) == 0;
|
2000-03-27 00:54:45 +04:00
|
|
|
voff_t curoff;
|
1998-03-09 03:58:55 +03:00
|
|
|
u_short pp_version;
|
|
|
|
UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist);
|
2000-11-27 11:39:39 +03:00
|
|
|
UVMHIST_LOG(maphist, "uobj %p start 0x%x stop 0x%x flags 0x%x",
|
|
|
|
uobj, start, stop, flags);
|
|
|
|
KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
|
|
|
|
|
2001-02-18 22:40:25 +03:00
|
|
|
if (uobj->uo_npages == 0) {
|
|
|
|
if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
|
|
|
|
(vp->v_flag & VONWORKLST)) {
|
|
|
|
vp->v_flag &= ~VONWORKLST;
|
|
|
|
LIST_REMOVE(vp, v_synclist);
|
|
|
|
}
|
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
#ifdef DEBUG
|
|
|
|
if (uvn->u_size == VSIZENOTSET) {
|
|
|
|
printf("uvn_flush: size not set vp %p\n", uvn);
|
|
|
|
vprint("uvn_flush VSIZENOTSET", vp);
|
|
|
|
flags |= PGO_ALLPAGES;
|
|
|
|
}
|
|
|
|
#endif
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* get init vals and determine how we are going to traverse object
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-02-06 13:53:23 +03:00
|
|
|
if (stop == 0) {
|
|
|
|
stop = trunc_page(LLONG_MAX);
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
curoff = 0;
|
1998-03-09 03:58:55 +03:00
|
|
|
need_iosync = FALSE;
|
2000-11-27 11:39:39 +03:00
|
|
|
retval = TRUE;
|
|
|
|
wasclean = TRUE;
|
1998-03-09 03:58:55 +03:00
|
|
|
if (flags & PGO_ALLPAGES) {
|
2000-03-27 00:54:45 +04:00
|
|
|
all = TRUE;
|
2000-11-27 11:39:39 +03:00
|
|
|
by_list = TRUE;
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
|
|
|
start = trunc_page(start);
|
|
|
|
stop = round_page(stop);
|
2000-03-27 00:54:45 +04:00
|
|
|
all = FALSE;
|
2001-05-25 08:06:11 +04:00
|
|
|
by_list = (uobj->uo_npages <=
|
1998-10-19 03:49:59 +04:00
|
|
|
((stop - start) >> PAGE_SHIFT) * UVN_HASH_PENALTY);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
" flush start=0x%x, stop=0x%x, by_list=%d, flags=0x%x",
|
|
|
|
start, stop, by_list, flags);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as
|
|
|
|
* a _hint_ as to how up to date the PG_CLEAN bit is. if the hint
|
|
|
|
* is wrong it will only prevent us from clustering... it won't break
|
|
|
|
* anything. we clear all PG_CLEANCHK bits here, and pgo_mk_pcluster
|
|
|
|
* will set them as it syncs PG_CLEAN. This is only an issue if we
|
|
|
|
* are looking at non-inactive pages (because inactive page's PG_CLEAN
|
|
|
|
* bit is always up to date since there are no mappings).
|
|
|
|
* [borrowed PG_CLEANCHK idea from FreeBSD VM]
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if ((flags & PGO_CLEANIT) != 0 &&
|
|
|
|
uobj->pgops->pgo_mk_pcluster != NULL) {
|
|
|
|
if (by_list) {
|
2000-11-27 11:39:39 +03:00
|
|
|
TAILQ_FOREACH(pp, &uobj->memq, listq) {
|
2000-03-27 00:54:45 +04:00
|
|
|
if (!all &&
|
|
|
|
(pp->offset < start || pp->offset >= stop))
|
1998-03-09 03:58:55 +03:00
|
|
|
continue;
|
|
|
|
pp->flags &= ~PG_CLEANCHK;
|
|
|
|
}
|
|
|
|
|
|
|
|
} else { /* by hash */
|
|
|
|
for (curoff = start ; curoff < stop;
|
|
|
|
curoff += PAGE_SIZE) {
|
|
|
|
pp = uvm_pagelookup(uobj, curoff);
|
|
|
|
if (pp)
|
|
|
|
pp->flags &= ~PG_CLEANCHK;
|
|
|
|
}
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* now do it. note: we must update ppnext in body of loop or we
|
|
|
|
* will get stuck. we need to use ppnext because we may free "pp"
|
|
|
|
* before doing the next loop.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (by_list) {
|
2000-11-27 11:39:39 +03:00
|
|
|
pp = TAILQ_FIRST(&uobj->memq);
|
1998-02-05 09:25:08 +03:00
|
|
|
} else {
|
1998-03-09 03:58:55 +03:00
|
|
|
curoff = start;
|
|
|
|
pp = uvm_pagelookup(uobj, curoff);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
ppnext = NULL;
|
|
|
|
ppsp = NULL;
|
|
|
|
uvm_lock_pageq();
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/* locked: both page queues and uobj */
|
2001-05-25 08:06:11 +04:00
|
|
|
for ( ; (by_list && pp != NULL) ||
|
2000-11-27 11:39:39 +03:00
|
|
|
(!by_list && curoff < stop) ; pp = ppnext) {
|
1998-03-09 03:58:55 +03:00
|
|
|
if (by_list) {
|
2000-03-27 00:54:45 +04:00
|
|
|
if (!all &&
|
|
|
|
(pp->offset < start || pp->offset >= stop)) {
|
2000-11-27 11:39:39 +03:00
|
|
|
ppnext = TAILQ_NEXT(pp, listq);
|
1998-03-09 03:58:55 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
curoff += PAGE_SIZE;
|
|
|
|
if (pp == NULL) {
|
|
|
|
if (curoff < stop)
|
|
|
|
ppnext = uvm_pagelookup(uobj, curoff);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* handle case where we do not need to clean page (either
|
|
|
|
* because we are not clean or because page is not dirty or
|
|
|
|
* is busy):
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-03-09 03:58:55 +03:00
|
|
|
* NOTE: we are allowed to deactivate a non-wired active
|
|
|
|
* PG_BUSY page, but once a PG_BUSY page is on the inactive
|
|
|
|
* queue it must stay put until it is !PG_BUSY (so as not to
|
|
|
|
* confuse pagedaemon).
|
|
|
|
*/
|
|
|
|
|
|
|
|
if ((flags & PGO_CLEANIT) == 0 || (pp->flags & PG_BUSY) != 0) {
|
|
|
|
needs_clean = FALSE;
|
2001-03-11 01:46:45 +03:00
|
|
|
if (!async)
|
1998-03-09 03:58:55 +03:00
|
|
|
need_iosync = TRUE;
|
|
|
|
} else {
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* freeing: nuke all mappings so we can sync
|
|
|
|
* PG_CLEAN bit with no race
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
if ((pp->flags & PG_CLEAN) != 0 &&
|
1998-03-09 03:58:55 +03:00
|
|
|
(flags & PGO_FREE) != 0 &&
|
Page scanner improvements, behavior is actually a bit more like
Mach VM's now. Specific changes:
- Pages now need not have all of their mappings removed before being
put on the inactive list. They only need to have the "referenced"
attribute cleared. This makes putting pages onto the inactive list
much more efficient. In order to eliminate redundant clearings of
"refrenced", callers of uvm_pagedeactivate() must now do this
themselves.
- When checking the "modified" attribute for a page (for clearing
PG_CLEAN), make sure to only do it if PG_CLEAN is currently set on
the page (saves a potentially expensive pmap operation).
- When scanning the inactive list, if a page is referenced, reactivate
it (this part was actually added in uvm_pdaemon.c,v 1.27). This
now works properly now that pages on the inactive list are allowed to
have mappings.
- When scanning the inactive list and considering a page for freeing,
remove all mappings, and then check the "modified" attribute if the
page is marked PG_CLEAN.
- When scanning the active list, if the page was referenced since its
last sweep by the scanner, don't deactivate it. (This part was
actually added in uvm_pdaemon.c,v 1.28.)
These changes greatly improve interactive performance during
moderate to high memory and I/O load.
2001-01-29 02:30:42 +03:00
|
|
|
/* XXX ACTIVE|INACTIVE test unnecessary? */
|
|
|
|
(pp->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) != 0)
|
1999-09-12 05:16:55 +04:00
|
|
|
pmap_page_protect(pp, VM_PROT_NONE);
|
1998-03-09 03:58:55 +03:00
|
|
|
if ((pp->flags & PG_CLEAN) != 0 &&
|
1999-09-12 05:16:55 +04:00
|
|
|
pmap_is_modified(pp))
|
1998-03-09 03:58:55 +03:00
|
|
|
pp->flags &= ~(PG_CLEAN);
|
2000-11-27 11:39:39 +03:00
|
|
|
pp->flags |= PG_CLEANCHK;
|
1998-03-09 03:58:55 +03:00
|
|
|
needs_clean = ((pp->flags & PG_CLEAN) == 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if we don't need a clean... load ppnext and dispose of pp
|
|
|
|
*/
|
|
|
|
if (!needs_clean) {
|
|
|
|
if (by_list)
|
2000-11-27 11:39:39 +03:00
|
|
|
ppnext = TAILQ_NEXT(pp, listq);
|
1998-03-09 03:58:55 +03:00
|
|
|
else {
|
|
|
|
if (curoff < stop)
|
|
|
|
ppnext = uvm_pagelookup(uobj, curoff);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & PGO_DEACTIVATE) {
|
|
|
|
if ((pp->pqflags & PQ_INACTIVE) == 0 &&
|
2000-12-16 09:17:09 +03:00
|
|
|
(pp->flags & PG_BUSY) == 0 &&
|
1998-03-09 03:58:55 +03:00
|
|
|
pp->wire_count == 0) {
|
Page scanner improvements, behavior is actually a bit more like
Mach VM's now. Specific changes:
- Pages now need not have all of their mappings removed before being
put on the inactive list. They only need to have the "referenced"
attribute cleared. This makes putting pages onto the inactive list
much more efficient. In order to eliminate redundant clearings of
"refrenced", callers of uvm_pagedeactivate() must now do this
themselves.
- When checking the "modified" attribute for a page (for clearing
PG_CLEAN), make sure to only do it if PG_CLEAN is currently set on
the page (saves a potentially expensive pmap operation).
- When scanning the inactive list, if a page is referenced, reactivate
it (this part was actually added in uvm_pdaemon.c,v 1.27). This
now works properly now that pages on the inactive list are allowed to
have mappings.
- When scanning the inactive list and considering a page for freeing,
remove all mappings, and then check the "modified" attribute if the
page is marked PG_CLEAN.
- When scanning the active list, if the page was referenced since its
last sweep by the scanner, don't deactivate it. (This part was
actually added in uvm_pdaemon.c,v 1.28.)
These changes greatly improve interactive performance during
moderate to high memory and I/O load.
2001-01-29 02:30:42 +03:00
|
|
|
pmap_clear_reference(pp);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pagedeactivate(pp);
|
|
|
|
}
|
|
|
|
|
|
|
|
} else if (flags & PGO_FREE) {
|
|
|
|
if (pp->flags & PG_BUSY) {
|
|
|
|
pp->flags |= PG_RELEASED;
|
|
|
|
} else {
|
1999-09-12 05:16:55 +04:00
|
|
|
pmap_page_protect(pp, VM_PROT_NONE);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pagefree(pp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* ppnext is valid so we can continue... */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* pp points to a page in the locked object that we are
|
|
|
|
* working on. if it is !PG_CLEAN,!PG_BUSY and we asked
|
|
|
|
* for cleaning (PGO_CLEANIT). we clean it now.
|
|
|
|
*
|
|
|
|
* let uvm_pager_put attempted a clustered page out.
|
|
|
|
* note: locked: uobj and page queues.
|
|
|
|
*/
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
wasclean = FALSE;
|
1998-03-09 03:58:55 +03:00
|
|
|
pp->flags |= PG_BUSY; /* we 'own' page now */
|
|
|
|
UVM_PAGE_OWN(pp, "uvn_flush");
|
1999-09-12 05:16:55 +04:00
|
|
|
pmap_page_protect(pp, VM_PROT_READ);
|
1998-03-09 03:58:55 +03:00
|
|
|
pp_version = pp->version;
|
|
|
|
ppsp = pps;
|
|
|
|
npages = sizeof(pps) / sizeof(struct vm_page *);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* locked: page queues, uobj */
|
2001-05-25 08:06:11 +04:00
|
|
|
result = uvm_pager_put(uobj, pp, &ppsp, &npages,
|
2000-11-27 11:39:39 +03:00
|
|
|
flags | PGO_DOACTCLUST, start, stop);
|
1998-03-09 03:58:55 +03:00
|
|
|
/* unlocked: page queues, uobj */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* at this point nothing is locked. if we did an async I/O
|
2001-05-25 08:06:11 +04:00
|
|
|
* it is remotely possible for the async i/o to complete and
|
|
|
|
* the page "pp" be freed or what not before we get a chance
|
1998-03-09 03:58:55 +03:00
|
|
|
* to relock the object. in order to detect this, we have
|
|
|
|
* saved the version number of the page in "pp_version".
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* relock! */
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
uvm_lock_pageq();
|
|
|
|
|
|
|
|
/*
|
2001-03-11 01:46:45 +03:00
|
|
|
* the cleaning operation is now done. finish up. note that
|
|
|
|
* on error uvm_pager_put drops the cluster for us.
|
|
|
|
* on success uvm_pager_put returns the cluster to us in
|
|
|
|
* ppsp/npages.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* for pending async i/o if we are not deactivating/freeing
|
|
|
|
* we can move on to the next page.
|
|
|
|
*/
|
|
|
|
|
2001-03-11 01:46:45 +03:00
|
|
|
if (result == 0 && async &&
|
2000-11-27 11:39:39 +03:00
|
|
|
(flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
|
|
|
|
* no per-page ops: refresh ppnext and continue
|
|
|
|
*/
|
|
|
|
if (by_list) {
|
|
|
|
if (pp->version == pp_version)
|
|
|
|
ppnext = TAILQ_NEXT(pp, listq);
|
|
|
|
else
|
|
|
|
ppnext = TAILQ_FIRST(&uobj->memq);
|
|
|
|
} else {
|
|
|
|
if (curoff < stop)
|
|
|
|
ppnext = uvm_pagelookup(uobj, curoff);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
continue;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-05-25 08:06:11 +04:00
|
|
|
* need to look at each page of the I/O operation. we defer
|
|
|
|
* processing "pp" until the last trip through this "for" loop
|
1998-03-09 03:58:55 +03:00
|
|
|
* so that we can load "ppnext" for the main loop after we
|
2001-05-25 08:06:11 +04:00
|
|
|
* play with the cluster pages [thus the "npages + 1" in the
|
1998-03-09 03:58:55 +03:00
|
|
|
* loop below].
|
|
|
|
*/
|
|
|
|
|
|
|
|
for (lcv = 0 ; lcv < npages + 1 ; lcv++) {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* handle ppnext for outside loop, and saving pp
|
|
|
|
* until the end.
|
|
|
|
*/
|
|
|
|
if (lcv < npages) {
|
|
|
|
if (ppsp[lcv] == pp)
|
|
|
|
continue; /* skip pp until the end */
|
|
|
|
ptmp = ppsp[lcv];
|
|
|
|
} else {
|
|
|
|
ptmp = pp;
|
|
|
|
|
|
|
|
/* set up next page for outer loop */
|
|
|
|
if (by_list) {
|
|
|
|
if (pp->version == pp_version)
|
2000-11-27 11:39:39 +03:00
|
|
|
ppnext = TAILQ_NEXT(pp, listq);
|
1998-03-09 03:58:55 +03:00
|
|
|
else
|
2000-11-27 11:39:39 +03:00
|
|
|
ppnext = TAILQ_FIRST(
|
|
|
|
&uobj->memq);
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
|
|
|
if (curoff < stop)
|
2000-11-27 11:39:39 +03:00
|
|
|
ppnext = uvm_pagelookup(uobj,
|
|
|
|
curoff);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* verify the page wasn't moved while obj was
|
1998-03-09 03:58:55 +03:00
|
|
|
* unlocked
|
|
|
|
*/
|
2001-03-11 01:46:45 +03:00
|
|
|
if (result == 0 && async && ptmp->uobject != uobj)
|
1998-03-09 03:58:55 +03:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unbusy the page if I/O is done. note that for
|
2001-03-11 01:46:45 +03:00
|
|
|
* async I/O it is possible that the I/O op
|
1998-03-09 03:58:55 +03:00
|
|
|
* finished before we relocked the object (in
|
|
|
|
* which case the page is no longer busy).
|
|
|
|
*/
|
|
|
|
|
2001-03-11 01:46:45 +03:00
|
|
|
if (result != 0 || !async) {
|
2000-11-27 11:39:39 +03:00
|
|
|
if (ptmp->flags & PG_WANTED) {
|
1998-03-09 03:58:55 +03:00
|
|
|
/* still holding object lock */
|
1999-07-23 02:58:38 +04:00
|
|
|
wakeup(ptmp);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
ptmp->flags &= ~(PG_WANTED|PG_BUSY);
|
|
|
|
UVM_PAGE_OWN(ptmp, NULL);
|
|
|
|
if (ptmp->flags & PG_RELEASED) {
|
|
|
|
uvm_unlock_pageq();
|
2000-11-27 11:39:39 +03:00
|
|
|
if (!uvn_releasepg(ptmp, NULL)) {
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
"released %p",
|
|
|
|
ptmp, 0,0,0);
|
1998-03-09 03:58:55 +03:00
|
|
|
return (TRUE);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
|
|
|
uvm_lock_pageq();
|
|
|
|
continue;
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
2000-11-27 11:39:39 +03:00
|
|
|
if ((flags & PGO_WEAK) == 0 &&
|
2001-03-11 01:46:45 +03:00
|
|
|
!(result == EIO &&
|
2000-11-27 11:39:39 +03:00
|
|
|
curproc == uvm.pagedaemon_proc)) {
|
|
|
|
ptmp->flags |=
|
|
|
|
(PG_CLEAN|PG_CLEANCHK);
|
|
|
|
if ((flags & PGO_FREE) == 0) {
|
|
|
|
pmap_clear_modify(ptmp);
|
|
|
|
}
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
}
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* dispose of page
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (flags & PGO_DEACTIVATE) {
|
|
|
|
if ((pp->pqflags & PQ_INACTIVE) == 0 &&
|
2000-12-16 09:17:09 +03:00
|
|
|
(pp->flags & PG_BUSY) == 0 &&
|
1998-03-09 03:58:55 +03:00
|
|
|
pp->wire_count == 0) {
|
Page scanner improvements, behavior is actually a bit more like
Mach VM's now. Specific changes:
- Pages now need not have all of their mappings removed before being
put on the inactive list. They only need to have the "referenced"
attribute cleared. This makes putting pages onto the inactive list
much more efficient. In order to eliminate redundant clearings of
"refrenced", callers of uvm_pagedeactivate() must now do this
themselves.
- When checking the "modified" attribute for a page (for clearing
PG_CLEAN), make sure to only do it if PG_CLEAN is currently set on
the page (saves a potentially expensive pmap operation).
- When scanning the inactive list, if a page is referenced, reactivate
it (this part was actually added in uvm_pdaemon.c,v 1.27). This
now works properly now that pages on the inactive list are allowed to
have mappings.
- When scanning the inactive list and considering a page for freeing,
remove all mappings, and then check the "modified" attribute if the
page is marked PG_CLEAN.
- When scanning the active list, if the page was referenced since its
last sweep by the scanner, don't deactivate it. (This part was
actually added in uvm_pdaemon.c,v 1.28.)
These changes greatly improve interactive performance during
moderate to high memory and I/O load.
2001-01-29 02:30:42 +03:00
|
|
|
pmap_clear_reference(ptmp);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pagedeactivate(ptmp);
|
|
|
|
}
|
|
|
|
} else if (flags & PGO_FREE) {
|
2001-03-11 01:46:45 +03:00
|
|
|
if (result == 0 && async) {
|
1998-03-09 03:58:55 +03:00
|
|
|
if ((ptmp->flags & PG_BUSY) != 0)
|
|
|
|
/* signal for i/o done */
|
|
|
|
ptmp->flags |= PG_RELEASED;
|
|
|
|
} else {
|
2001-03-11 01:46:45 +03:00
|
|
|
if (result != 0) {
|
1998-03-09 03:58:55 +03:00
|
|
|
printf("uvn_flush: obj=%p, "
|
2000-11-27 11:39:39 +03:00
|
|
|
"offset=0x%llx. error %d\n",
|
2000-03-27 00:54:45 +04:00
|
|
|
pp->uobject,
|
2000-11-27 11:39:39 +03:00
|
|
|
(long long)pp->offset,
|
|
|
|
result);
|
1998-03-09 03:58:55 +03:00
|
|
|
printf("uvn_flush: WARNING: "
|
|
|
|
"changes to page may be "
|
|
|
|
"lost!\n");
|
|
|
|
retval = FALSE;
|
|
|
|
}
|
1999-09-12 05:16:55 +04:00
|
|
|
pmap_page_protect(ptmp, VM_PROT_NONE);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pagefree(ptmp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} /* end of "lcv" for loop */
|
|
|
|
} /* end of "pp" for loop */
|
|
|
|
|
|
|
|
uvm_unlock_pageq();
|
2000-11-27 11:39:39 +03:00
|
|
|
if ((flags & PGO_CLEANIT) && all && wasclean &&
|
|
|
|
LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
|
|
|
|
(vp->v_flag & VONWORKLST)) {
|
|
|
|
vp->v_flag &= ~VONWORKLST;
|
|
|
|
LIST_REMOVE(vp, v_synclist);
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
if (need_iosync) {
|
|
|
|
UVMHIST_LOG(maphist," <<DOING IOSYNC>>",0,0,0,0);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX this doesn't use the new two-flag scheme,
|
|
|
|
* but to use that, all i/o initiators will have to change.
|
|
|
|
*/
|
|
|
|
|
|
|
|
s = splbio();
|
|
|
|
while (vp->v_numoutput != 0) {
|
|
|
|
UVMHIST_LOG(ubchist, "waiting for vp %p num %d",
|
|
|
|
vp, vp->v_numoutput,0,0);
|
|
|
|
|
|
|
|
vp->v_flag |= VBWAIT;
|
|
|
|
UVM_UNLOCK_AND_WAIT(&vp->v_numoutput,
|
2001-05-25 08:06:11 +04:00
|
|
|
&uvn->u_obj.vmobjlock,
|
2000-11-27 11:39:39 +03:00
|
|
|
FALSE, "uvn_flush",0);
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_lock(&uvn->u_obj.vmobjlock);
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
splx(s);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* return, with object locked! */
|
|
|
|
UVMHIST_LOG(maphist,"<- done (retval=0x%x)",retval,0,0,0);
|
|
|
|
return(retval);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * uvn_cluster
 *
 * we are about to do I/O in an object at offset. this function is called
 * to establish a range of offsets around "offset" in which we can cluster
 * I/O.
 *
 * - currently doesn't matter if obj locked or not.
 */

static void
uvn_cluster(uobj, offset, loffset, hoffset)
	struct uvm_object *uobj;
	voff_t offset;
	voff_t *loffset, *hoffset; /* OUT */
{
	struct uvm_vnode *uvn = (struct uvm_vnode *)uobj;

	/*
	 * cluster starts at the requested offset and extends at most
	 * MAXBSIZE bytes, clamped to the vnode's size rounded up to a
	 * page boundary so we never cluster past end-of-file.
	 */
	*loffset = offset;
	*hoffset = MIN(offset + MAXBSIZE, round_page(uvn->u_size));
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvn_put: flush page data to backing store.
|
|
|
|
*
|
|
|
|
* => object must be locked! we will _unlock_ it before starting I/O.
|
|
|
|
* => flags: PGO_SYNCIO -- use sync. I/O
|
|
|
|
* => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
static int
|
|
|
|
uvn_put(uobj, pps, npages, flags)
|
|
|
|
struct uvm_object *uobj;
|
|
|
|
struct vm_page **pps;
|
|
|
|
int npages, flags;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2000-11-27 11:39:39 +03:00
|
|
|
struct vnode *vp = (struct vnode *)uobj;
|
|
|
|
int error;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
error = VOP_PUTPAGES(vp, pps, npages, flags, NULL);
|
2001-03-11 01:46:45 +03:00
|
|
|
return error;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvn_get: get pages (synchronously) from backing store
|
|
|
|
*
|
|
|
|
* => prefer map unlocked (not required)
|
|
|
|
* => object must be locked! we will _unlock_ it before starting any I/O.
|
|
|
|
* => flags: PGO_ALLPAGES: get all of the pages
|
|
|
|
* PGO_LOCKED: fault data structures are locked
|
|
|
|
* => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
|
|
|
|
* => NOTE: caller must check for released pages!!
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
static int
|
|
|
|
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
|
|
|
|
struct uvm_object *uobj;
|
2000-03-27 00:54:45 +04:00
|
|
|
voff_t offset;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_page **pps; /* IN/OUT */
|
|
|
|
int *npagesp; /* IN (OUT if PGO_LOCKED) */
|
2000-11-27 11:39:39 +03:00
|
|
|
int centeridx;
|
1998-03-09 03:58:55 +03:00
|
|
|
vm_prot_t access_type;
|
2000-11-27 11:39:39 +03:00
|
|
|
int advice, flags;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2000-11-27 11:39:39 +03:00
|
|
|
struct vnode *vp = (struct vnode *)uobj;
|
|
|
|
int error;
|
|
|
|
UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);
|
|
|
|
|
|
|
|
UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
|
|
|
|
error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
|
|
|
|
access_type, advice, flags);
|
2001-03-11 01:46:45 +03:00
|
|
|
return error;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* uvn_findpages:
|
|
|
|
* return the page for the uobj and offset requested, allocating if needed.
|
|
|
|
* => uobj must be locked.
|
|
|
|
* => returned page will be BUSY.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
void
|
|
|
|
uvn_findpages(uobj, offset, npagesp, pps, flags)
|
|
|
|
struct uvm_object *uobj;
|
|
|
|
voff_t offset;
|
|
|
|
int *npagesp;
|
|
|
|
struct vm_page **pps;
|
|
|
|
int flags;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2000-11-27 11:39:39 +03:00
|
|
|
int i, rv, npages;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
rv = 0;
|
|
|
|
npages = *npagesp;
|
|
|
|
for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
|
|
|
|
rv += uvn_findpage(uobj, offset, &pps[i], flags);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
*npagesp = rv;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
static int
|
|
|
|
uvn_findpage(uobj, offset, pgp, flags)
|
|
|
|
struct uvm_object *uobj;
|
|
|
|
voff_t offset;
|
|
|
|
struct vm_page **pgp;
|
|
|
|
int flags;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2000-11-27 11:39:39 +03:00
|
|
|
struct vm_page *pg;
|
|
|
|
UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
|
|
|
|
UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
if (*pgp != NULL) {
|
|
|
|
UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
|
|
|
|
return 0;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
for (;;) {
|
|
|
|
/* look for an existing page */
|
|
|
|
pg = uvm_pagelookup(uobj, offset);
|
|
|
|
|
|
|
|
/* nope? allocate one now */
|
|
|
|
if (pg == NULL) {
|
|
|
|
if (flags & UFP_NOALLOC) {
|
|
|
|
UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
|
|
|
|
return 0;
|
|
|
|
}
|
2001-03-09 04:02:10 +03:00
|
|
|
pg = uvm_pagealloc(uobj, offset, NULL, 0);
|
2000-11-27 11:39:39 +03:00
|
|
|
if (pg == NULL) {
|
|
|
|
if (flags & UFP_NOWAIT) {
|
|
|
|
UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
|
|
|
uvm_wait("uvn_fp1");
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
continue;
|
|
|
|
}
|
2001-03-09 04:02:10 +03:00
|
|
|
if (UVM_OBJ_IS_VTEXT(uobj)) {
|
|
|
|
uvmexp.vtextpages++;
|
|
|
|
} else {
|
|
|
|
uvmexp.vnodepages++;
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
UVMHIST_LOG(ubchist, "alloced",0,0,0,0);
|
|
|
|
break;
|
|
|
|
} else if (flags & UFP_NOCACHE) {
|
|
|
|
UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
|
|
|
|
return 0;
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/* page is there, see if we need to wait on it */
|
|
|
|
if ((pg->flags & (PG_BUSY|PG_RELEASED)) != 0) {
|
|
|
|
if (flags & UFP_NOWAIT) {
|
|
|
|
UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
pg->flags |= PG_WANTED;
|
|
|
|
UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
|
|
|
|
"uvn_fp2", 0);
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
continue;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2001-05-25 08:06:11 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/* skip PG_RDONLY pages if requested */
|
|
|
|
if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
|
|
|
|
UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
|
|
|
|
return 0;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/* mark the page BUSY and we're done. */
|
|
|
|
pg->flags |= PG_BUSY;
|
|
|
|
UVM_PAGE_OWN(pg, "uvn_findpage");
|
|
|
|
UVMHIST_LOG(ubchist, "found",0,0,0,0);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
*pgp = pg;
|
|
|
|
return 1;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_vnp_setsize: grow or shrink a vnode uvn
|
|
|
|
*
|
|
|
|
* grow => just update size value
|
|
|
|
* shrink => toss un-needed pages
|
|
|
|
*
|
2001-05-25 08:06:11 +04:00
|
|
|
* => we assume that the caller has a reference of some sort to the
|
1998-02-05 09:25:08 +03:00
|
|
|
* vnode in question so that it will not be yanked out from under
|
|
|
|
* us.
|
|
|
|
*
|
|
|
|
* called from:
|
|
|
|
* => truncate fns (ext2fs_truncate, ffs_truncate, detrunc[msdos])
|
|
|
|
* => "write" fns (ext2fs_write, WRITE [ufs/ufs], msdosfs_write, nfs_write)
|
|
|
|
* => ffs_balloc [XXX: why? doesn't WRITE handle?]
|
|
|
|
* => NFS: nfs_loadattrcache, nfs_getattrcache, nfs_setattr
|
|
|
|
* => union fs: union_newsize
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
|
|
|
uvm_vnp_setsize(vp, newsize)
|
|
|
|
struct vnode *vp;
|
2000-03-27 00:54:45 +04:00
|
|
|
voff_t newsize;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
struct uvm_vnode *uvn = &vp->v_uvm;
|
2001-02-22 04:02:09 +03:00
|
|
|
voff_t pgend = round_page(newsize);
|
2000-11-27 11:39:39 +03:00
|
|
|
UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_lock(&uvn->u_obj.vmobjlock);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
UVMHIST_LOG(ubchist, "old 0x%x new 0x%x", uvn->u_size, newsize, 0,0);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* now check if the size has changed: if we shrink we had better
|
|
|
|
* toss some pages...
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2001-02-22 04:02:09 +03:00
|
|
|
if (uvn->u_size > pgend && uvn->u_size != VSIZENOTSET) {
|
|
|
|
(void) uvn_flush(&uvn->u_obj, pgend, 0, PGO_FREE);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
|
|
|
uvn->u_size = newsize;
|
|
|
|
simple_unlock(&uvn->u_obj.vmobjlock);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* uvm_vnp_zerorange: set a range of bytes in a file to zero.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
2000-11-27 11:39:39 +03:00
|
|
|
uvm_vnp_zerorange(vp, off, len)
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vnode *vp;
|
2000-11-27 11:39:39 +03:00
|
|
|
off_t off;
|
|
|
|
size_t len;
|
|
|
|
{
|
|
|
|
void *win;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
|
|
|
|
* XXXUBC invent kzero() and use it
|
|
|
|
*/
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
while (len) {
|
|
|
|
vsize_t bytelen = len;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
win = ubc_alloc(&vp->v_uvm.u_obj, off, &bytelen, UBC_WRITE);
|
|
|
|
memset(win, 0, bytelen);
|
|
|
|
ubc_release(win, 0);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
off += bytelen;
|
|
|
|
len -= bytelen;
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|