2003-09-26 15:51:53 +04:00
|
|
|
/* $NetBSD: nfs_bio.c,v 1.110 2003/09/26 11:51:53 yamt Exp $ */
|
1994-06-29 10:39:25 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1994-06-08 15:33:09 +04:00
|
|
|
* Copyright (c) 1989, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
|
|
|
* This code is derived from software contributed to Berkeley by
|
|
|
|
* Rick Macklem at The University of Guelph.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2003-08-07 20:26:28 +04:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
1993-03-21 12:45:37 +03:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1996-02-18 14:53:36 +03:00
|
|
|
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
|
|
|
|
2001-11-10 13:59:08 +03:00
|
|
|
#include <sys/cdefs.h>
|
2003-09-26 15:51:53 +04:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.110 2003/09/26 11:51:53 yamt Exp $");
|
2001-11-10 13:59:08 +03:00
|
|
|
|
2000-09-19 21:04:50 +04:00
|
|
|
#include "opt_nfs.h"
|
2000-11-27 11:39:39 +03:00
|
|
|
#include "opt_ddb.h"
|
2000-09-19 21:04:50 +04:00
|
|
|
|
1993-12-18 03:40:47 +03:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
1994-06-08 15:33:09 +04:00
|
|
|
#include <sys/resourcevar.h>
|
1996-02-18 14:53:36 +03:00
|
|
|
#include <sys/signalvar.h>
|
1993-12-18 03:40:47 +03:00
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/buf.h>
|
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/mount.h>
|
1994-06-08 15:33:09 +04:00
|
|
|
#include <sys/kernel.h>
|
1996-02-10 00:48:19 +03:00
|
|
|
#include <sys/namei.h>
|
1997-10-10 05:53:17 +04:00
|
|
|
#include <sys/dirent.h>
|
2000-11-27 11:39:39 +03:00
|
|
|
#include <sys/malloc.h>
|
1994-06-08 15:33:09 +04:00
|
|
|
|
1998-02-05 10:59:28 +03:00
|
|
|
#include <uvm/uvm_extern.h>
|
2000-11-27 11:39:39 +03:00
|
|
|
#include <uvm/uvm.h>
|
1998-02-05 10:59:28 +03:00
|
|
|
|
1994-06-08 15:33:09 +04:00
|
|
|
#include <nfs/rpcv2.h>
|
1996-02-18 14:53:36 +03:00
|
|
|
#include <nfs/nfsproto.h>
|
1993-12-18 03:40:47 +03:00
|
|
|
#include <nfs/nfs.h>
|
|
|
|
#include <nfs/nfsmount.h>
|
1994-06-08 15:33:09 +04:00
|
|
|
#include <nfs/nqnfs.h>
|
1996-02-18 14:53:36 +03:00
|
|
|
#include <nfs/nfsnode.h>
|
1996-02-10 00:48:19 +03:00
|
|
|
#include <nfs/nfs_var.h>
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1994-06-08 15:33:09 +04:00
|
|
|
extern int nfs_numasync;
|
2002-01-26 05:52:19 +03:00
|
|
|
extern int nfs_commitsize;
|
1996-02-18 14:53:36 +03:00
|
|
|
extern struct nfsstats nfsstats;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
static int nfs_doio_read __P((struct buf *, struct uio *));
|
|
|
|
static int nfs_doio_write __P((struct buf *, struct uio *));
|
|
|
|
static int nfs_doio_phys __P((struct buf *, struct uio *));
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
|
|
|
* Vnode op for read using bio
|
|
|
|
* Any similarity to readip() is purely coincidental
|
|
|
|
*/
|
1996-02-10 00:48:19 +03:00
|
|
|
int
|
1997-10-10 05:53:17 +04:00
|
|
|
nfs_bioread(vp, uio, ioflag, cred, cflag)
|
2000-03-30 16:51:13 +04:00
|
|
|
struct vnode *vp;
|
|
|
|
struct uio *uio;
|
1997-10-10 05:53:17 +04:00
|
|
|
int ioflag, cflag;
|
1993-03-21 12:45:37 +03:00
|
|
|
struct ucred *cred;
|
|
|
|
{
|
2000-03-30 16:51:13 +04:00
|
|
|
struct nfsnode *np = VTONFS(vp);
|
1996-02-10 00:48:19 +03:00
|
|
|
struct buf *bp = NULL, *rabp;
|
1993-03-21 12:45:37 +03:00
|
|
|
struct vattr vattr;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1996-02-18 14:53:36 +03:00
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
1997-10-19 05:46:15 +04:00
|
|
|
struct nfsdircache *ndp = NULL, *nndp = NULL;
|
1997-10-10 05:53:17 +04:00
|
|
|
caddr_t baddr, ep, edp;
|
2000-11-27 11:39:39 +03:00
|
|
|
int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
|
1997-10-10 05:53:17 +04:00
|
|
|
int enough = 0;
|
|
|
|
struct dirent *dp, *pdp;
|
2000-11-27 11:39:39 +03:00
|
|
|
off_t curoff = 0;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (uio->uio_rw != UIO_READ)
|
|
|
|
panic("nfs_read mode");
|
|
|
|
#endif
|
|
|
|
if (uio->uio_resid == 0)
|
|
|
|
return (0);
|
1997-10-10 05:53:17 +04:00
|
|
|
if (vp->v_type != VDIR && uio->uio_offset < 0)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (EINVAL);
|
2003-06-30 02:28:00 +04:00
|
|
|
p = uio->uio_procp;
|
2000-09-19 21:04:50 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
1997-10-10 05:53:17 +04:00
|
|
|
if ((nmp->nm_flag & NFSMNT_NFSV3) &&
|
|
|
|
!(nmp->nm_iflag & NFSMNT_GOTFSINFO))
|
2003-06-30 02:28:00 +04:00
|
|
|
(void)nfs_fsinfo(nmp, vp, cred, p);
|
2000-09-19 21:04:50 +04:00
|
|
|
#endif
|
1997-10-10 05:53:17 +04:00
|
|
|
if (vp->v_type != VDIR &&
|
|
|
|
(uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
|
1997-07-18 03:54:27 +04:00
|
|
|
return (EFBIG);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1994-06-08 15:33:09 +04:00
|
|
|
* For nfs, cache consistency can only be maintained approximately.
|
|
|
|
* Although RFC1094 does not specify the criteria, the following is
|
|
|
|
* believed to be compatible with the reference port.
|
|
|
|
* For nqnfs, full cache consistency is maintained within the loop.
|
|
|
|
* For nfs:
|
1993-03-21 12:45:37 +03:00
|
|
|
* If the file's modify time on the server has changed since the
|
|
|
|
* last read rpc or you have written to the file,
|
|
|
|
* you may have lost data cache consistency with the
|
|
|
|
* server, so flush all of the file's data out of the cache.
|
|
|
|
* Then force a getattr rpc to ensure that you have up to date
|
|
|
|
* attributes.
|
|
|
|
* NB: This implies that cache data can be read when up to
|
|
|
|
* NFS_ATTRTIMEO seconds out of date. If you find that you need current
|
|
|
|
* attributes this could be forced by setting n_attrstamp to 0 before
|
1994-06-08 15:33:09 +04:00
|
|
|
* the VOP_GETATTR() call.
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1994-06-08 15:33:09 +04:00
|
|
|
if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
|
1993-03-21 12:45:37 +03:00
|
|
|
if (np->n_flag & NMODIFIED) {
|
1996-02-18 14:53:36 +03:00
|
|
|
if (vp->v_type != VREG) {
|
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
panic("nfs: bioread, not dir");
|
1997-10-19 05:46:15 +04:00
|
|
|
nfs_invaldircache(vp, 0);
|
|
|
|
np->n_direofoffset = 0;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
|
1996-02-10 00:48:19 +03:00
|
|
|
if (error)
|
1994-06-08 15:33:09 +04:00
|
|
|
return (error);
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
np->n_attrstamp = 0;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = VOP_GETATTR(vp, &vattr, cred, p);
|
1996-02-10 00:48:19 +03:00
|
|
|
if (error)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (error);
|
2003-09-26 15:51:53 +04:00
|
|
|
np->n_mtime = vattr.va_mtime;
|
1993-03-21 12:45:37 +03:00
|
|
|
} else {
|
2003-06-30 02:28:00 +04:00
|
|
|
error = VOP_GETATTR(vp, &vattr, cred, p);
|
1996-02-18 14:53:36 +03:00
|
|
|
if (error)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (error);
|
2003-09-26 15:51:53 +04:00
|
|
|
if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
|
1997-10-19 05:46:15 +04:00
|
|
|
if (vp->v_type == VDIR) {
|
|
|
|
nfs_invaldircache(vp, 0);
|
|
|
|
np->n_direofoffset = 0;
|
|
|
|
}
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
|
1996-02-10 00:48:19 +03:00
|
|
|
if (error)
|
1994-06-08 15:33:09 +04:00
|
|
|
return (error);
|
2003-09-26 15:51:53 +04:00
|
|
|
np->n_mtime = vattr.va_mtime;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
1994-06-08 15:33:09 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
|
|
|
|
* update the cached read creds for this node.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (np->n_rcred) {
|
|
|
|
crfree(np->n_rcred);
|
|
|
|
}
|
|
|
|
np->n_rcred = cred;
|
|
|
|
crhold(cred);
|
|
|
|
|
|
|
|
do {
|
2000-09-20 03:26:25 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
1994-06-08 15:33:09 +04:00
|
|
|
/*
|
|
|
|
* Get a valid lease. If cached data is stale, flush it.
|
|
|
|
*/
|
|
|
|
if (nmp->nm_flag & NFSMNT_NQNFS) {
|
1996-02-18 14:53:36 +03:00
|
|
|
if (NQNFS_CKINVALID(vp, np, ND_READ)) {
|
1994-06-08 15:33:09 +04:00
|
|
|
do {
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nqnfs_getlease(vp, ND_READ, cred, p);
|
1994-06-08 15:33:09 +04:00
|
|
|
} while (error == NQNFS_EXPIRED);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if (np->n_lrev != np->n_brev ||
|
|
|
|
(np->n_flag & NQNFSNONCACHE) ||
|
|
|
|
((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
|
1997-10-19 05:46:15 +04:00
|
|
|
if (vp->v_type == VDIR) {
|
|
|
|
nfs_invaldircache(vp, 0);
|
|
|
|
np->n_direofoffset = 0;
|
|
|
|
}
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
|
1996-02-10 00:48:19 +03:00
|
|
|
if (error)
|
1994-06-08 15:33:09 +04:00
|
|
|
return (error);
|
|
|
|
np->n_brev = np->n_lrev;
|
|
|
|
}
|
|
|
|
} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
|
1997-10-19 05:46:15 +04:00
|
|
|
nfs_invaldircache(vp, 0);
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
|
1997-10-19 05:46:15 +04:00
|
|
|
np->n_direofoffset = 0;
|
1996-02-10 00:48:19 +03:00
|
|
|
if (error)
|
1994-06-08 15:33:09 +04:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
}
|
2000-09-20 03:26:25 +04:00
|
|
|
#endif
|
1996-05-24 02:47:27 +04:00
|
|
|
/*
|
|
|
|
* Don't cache symlinks.
|
|
|
|
*/
|
|
|
|
if (np->n_flag & NQNFSNONCACHE
|
|
|
|
|| ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
|
1994-06-08 15:33:09 +04:00
|
|
|
switch (vp->v_type) {
|
|
|
|
case VREG:
|
2000-11-27 11:39:39 +03:00
|
|
|
return (nfs_readrpc(vp, uio));
|
1994-06-08 15:33:09 +04:00
|
|
|
case VLNK:
|
1996-02-18 14:53:36 +03:00
|
|
|
return (nfs_readlinkrpc(vp, uio, cred));
|
1994-06-08 15:33:09 +04:00
|
|
|
case VDIR:
|
1996-02-10 00:48:19 +03:00
|
|
|
break;
|
1996-02-18 14:53:36 +03:00
|
|
|
default:
|
1996-10-13 05:39:03 +04:00
|
|
|
printf(" NQNFSNONCACHE: type %x unexpected\n",
|
1996-10-11 03:31:17 +04:00
|
|
|
vp->v_type);
|
1994-06-08 15:33:09 +04:00
|
|
|
};
|
|
|
|
}
|
|
|
|
baddr = (caddr_t)0;
|
1993-03-21 12:45:37 +03:00
|
|
|
switch (vp->v_type) {
|
|
|
|
case VREG:
|
|
|
|
nfsstats.biocache_reads++;
|
1994-06-08 15:33:09 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
error = 0;
|
2001-04-16 18:37:43 +04:00
|
|
|
if (uio->uio_offset >= np->n_size) {
|
|
|
|
break;
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
while (uio->uio_resid > 0) {
|
|
|
|
void *win;
|
2001-02-05 15:27:18 +03:00
|
|
|
vsize_t bytelen = MIN(np->n_size - uio->uio_offset,
|
2000-11-27 11:39:39 +03:00
|
|
|
uio->uio_resid);
|
1994-06-08 15:33:09 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
if (bytelen == 0)
|
|
|
|
break;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
|
2000-11-27 11:39:39 +03:00
|
|
|
&bytelen, UBC_READ);
|
|
|
|
error = uiomove(win, bytelen, uio);
|
|
|
|
ubc_release(win, 0);
|
|
|
|
if (error) {
|
|
|
|
break;
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
n = 0;
|
1993-03-21 12:45:37 +03:00
|
|
|
break;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
case VLNK:
|
|
|
|
nfsstats.biocache_readlinks++;
|
2003-06-30 02:28:00 +04:00
|
|
|
bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
|
1994-06-08 15:33:09 +04:00
|
|
|
if (!bp)
|
|
|
|
return (EINTR);
|
|
|
|
if ((bp->b_flags & B_DONE) == 0) {
|
|
|
|
bp->b_flags |= B_READ;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_doio(bp, p);
|
1996-02-18 14:53:36 +03:00
|
|
|
if (error) {
|
1994-06-08 15:33:09 +04:00
|
|
|
brelse(bp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
}
|
2001-02-27 07:37:44 +03:00
|
|
|
n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
|
1994-06-08 15:33:09 +04:00
|
|
|
got_buf = 1;
|
|
|
|
on = 0;
|
1993-03-21 12:45:37 +03:00
|
|
|
break;
|
|
|
|
case VDIR:
|
1997-10-10 05:53:17 +04:00
|
|
|
diragain:
|
1993-03-21 12:45:37 +03:00
|
|
|
nfsstats.biocache_readdirs++;
|
1997-10-19 05:46:15 +04:00
|
|
|
ndp = nfs_searchdircache(vp, uio->uio_offset,
|
|
|
|
(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
|
|
|
|
if (!ndp) {
|
|
|
|
/*
|
|
|
|
* We've been handed a cookie that is not
|
|
|
|
* in the cache. If we're not translating
|
|
|
|
* 32 <-> 64, it may be a value that was
|
|
|
|
* flushed out of the cache because it grew
|
|
|
|
* too big. Let the server judge if it's
|
|
|
|
* valid or not. In the translation case,
|
|
|
|
* we have no way of validating this value,
|
|
|
|
* so punt.
|
|
|
|
*/
|
|
|
|
if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
|
|
|
|
return (EINVAL);
|
|
|
|
ndp = nfs_enterdircache(vp, uio->uio_offset,
|
|
|
|
uio->uio_offset, 0, 0);
|
|
|
|
}
|
|
|
|
|
1997-10-10 05:53:17 +04:00
|
|
|
if (uio->uio_offset != 0 &&
|
1997-10-19 05:46:15 +04:00
|
|
|
ndp->dc_cookie == np->n_direofoffset) {
|
|
|
|
nfsstats.direofcache_hits++;
|
1997-10-10 05:53:17 +04:00
|
|
|
return (0);
|
1997-10-19 05:46:15 +04:00
|
|
|
}
|
|
|
|
|
2003-06-30 02:28:00 +04:00
|
|
|
bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
|
1994-06-08 15:33:09 +04:00
|
|
|
if (!bp)
|
1996-02-18 14:53:36 +03:00
|
|
|
return (EINTR);
|
1994-06-08 15:33:09 +04:00
|
|
|
if ((bp->b_flags & B_DONE) == 0) {
|
1996-02-18 14:53:36 +03:00
|
|
|
bp->b_flags |= B_READ;
|
1997-10-19 05:46:15 +04:00
|
|
|
bp->b_dcookie = ndp->dc_blkcookie;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_doio(bp, p);
|
1996-02-18 14:53:36 +03:00
|
|
|
if (error) {
|
1997-10-10 05:53:17 +04:00
|
|
|
/*
|
|
|
|
* Yuck! The directory has been modified on the
|
|
|
|
* server. Punt and let the userland code
|
|
|
|
* deal with it.
|
|
|
|
*/
|
1996-02-18 14:53:36 +03:00
|
|
|
brelse(bp);
|
1997-10-10 05:53:17 +04:00
|
|
|
if (error == NFSERR_BAD_COOKIE) {
|
1997-10-19 05:46:15 +04:00
|
|
|
nfs_invaldircache(vp, 0);
|
2003-06-30 02:28:00 +04:00
|
|
|
nfs_vinvalbuf(vp, 0, cred, p, 1);
|
1997-10-10 05:53:17 +04:00
|
|
|
error = EINVAL;
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
1997-10-10 05:53:17 +04:00
|
|
|
return (error);
|
1996-02-18 14:53:36 +03:00
|
|
|
}
|
1997-11-23 16:52:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Just return if we hit EOF right away with this
|
|
|
|
* block. Always check here, because direofoffset
|
|
|
|
* may have been set by an nfsiod since the last
|
|
|
|
* check.
|
|
|
|
*/
|
|
|
|
if (np->n_direofoffset != 0 &&
|
1997-10-23 18:12:14 +04:00
|
|
|
ndp->dc_blkcookie == np->n_direofoffset) {
|
1997-11-23 16:52:24 +03:00
|
|
|
brelse(bp);
|
|
|
|
return (0);
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
|
|
|
|
1997-10-10 05:53:17 +04:00
|
|
|
/*
|
|
|
|
* Find the entry we were looking for in the block.
|
|
|
|
*/
|
|
|
|
|
|
|
|
en = ndp->dc_entry;
|
|
|
|
|
|
|
|
pdp = dp = (struct dirent *)bp->b_data;
|
2001-04-03 19:07:23 +04:00
|
|
|
edp = bp->b_data + bp->b_bcount - bp->b_resid;
|
1997-10-10 05:53:17 +04:00
|
|
|
enn = 0;
|
|
|
|
while (enn < en && (caddr_t)dp < edp) {
|
|
|
|
pdp = dp;
|
|
|
|
dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
|
|
|
|
enn++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the entry number was bigger than the number of
|
|
|
|
* entries in the block, or the cookie of the previous
|
|
|
|
* entry doesn't match, the directory cache is
|
|
|
|
* stale. Flush it and try again (i.e. go to
|
|
|
|
* the server).
|
|
|
|
*/
|
|
|
|
if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
|
1997-10-19 05:46:15 +04:00
|
|
|
(en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
|
1997-10-10 05:53:17 +04:00
|
|
|
#ifdef DEBUG
|
1997-10-21 02:08:44 +04:00
|
|
|
printf("invalid cache: %p %p %p off %lx %lx\n",
|
|
|
|
pdp, dp, edp,
|
1997-10-10 05:53:17 +04:00
|
|
|
(unsigned long)uio->uio_offset,
|
|
|
|
(unsigned long)NFS_GETCOOKIE(pdp));
|
|
|
|
#endif
|
|
|
|
brelse(bp);
|
1997-10-19 05:46:15 +04:00
|
|
|
nfs_invaldircache(vp, 0);
|
2003-06-30 02:28:00 +04:00
|
|
|
nfs_vinvalbuf(vp, 0, cred, p, 0);
|
1997-10-10 05:53:17 +04:00
|
|
|
goto diragain;
|
|
|
|
}
|
|
|
|
|
|
|
|
on = (caddr_t)dp - bp->b_data;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cache all entries that may be exported to the
|
|
|
|
* user, as they may be thrown back at us. The
|
|
|
|
* NFSBIO_CACHECOOKIES flag indicates that all
|
|
|
|
* entries are being 'exported', so cache them all.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (en == 0 && pdp == dp) {
|
|
|
|
dp = (struct dirent *)
|
|
|
|
((caddr_t)dp + dp->d_reclen);
|
|
|
|
enn++;
|
|
|
|
}
|
|
|
|
|
2001-04-03 19:07:23 +04:00
|
|
|
if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
|
1997-10-10 05:53:17 +04:00
|
|
|
n = uio->uio_resid;
|
|
|
|
enough = 1;
|
|
|
|
} else
|
2001-04-03 19:07:23 +04:00
|
|
|
n = bp->b_bcount - bp->b_resid - on;
|
1997-10-10 05:53:17 +04:00
|
|
|
|
|
|
|
ep = bp->b_data + on + n;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find last complete entry to copy, caching entries
|
|
|
|
* (if requested) as we go.
|
|
|
|
*/
|
|
|
|
|
|
|
|
while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
|
1997-10-19 05:46:15 +04:00
|
|
|
if (cflag & NFSBIO_CACHECOOKIES) {
|
|
|
|
nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
|
|
|
|
ndp->dc_blkcookie, enn, bp->b_lblkno);
|
|
|
|
if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
|
|
|
|
NFS_STASHCOOKIE32(pdp,
|
|
|
|
nndp->dc_cookie32);
|
|
|
|
}
|
|
|
|
}
|
1997-10-10 05:53:17 +04:00
|
|
|
pdp = dp;
|
|
|
|
dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
|
|
|
|
enn++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the last requested entry was not the last in the
|
|
|
|
* buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
|
|
|
|
* cache the cookie of the last requested one, and
|
|
|
|
* set of the offset to it.
|
|
|
|
*/
|
|
|
|
|
2001-04-03 19:07:23 +04:00
|
|
|
if ((on + n) < bp->b_bcount - bp->b_resid) {
|
1997-10-10 05:53:17 +04:00
|
|
|
curoff = NFS_GETCOOKIE(pdp);
|
1997-10-19 05:46:15 +04:00
|
|
|
nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
|
|
|
|
enn, bp->b_lblkno);
|
|
|
|
if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
|
|
|
|
NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
|
|
|
|
curoff = nndp->dc_cookie32;
|
|
|
|
}
|
1997-10-10 05:53:17 +04:00
|
|
|
} else
|
|
|
|
curoff = bp->b_dcookie;
|
|
|
|
|
1997-10-19 05:46:15 +04:00
|
|
|
/*
|
|
|
|
* Always cache the entry for the next block,
|
|
|
|
* so that readaheads can use it.
|
|
|
|
*/
|
|
|
|
nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0,0);
|
|
|
|
if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
|
|
|
|
if (curoff == bp->b_dcookie) {
|
|
|
|
NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
|
|
|
|
curoff = nndp->dc_cookie32;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1997-10-10 05:53:17 +04:00
|
|
|
n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);
|
|
|
|
|
1994-06-08 15:33:09 +04:00
|
|
|
/*
|
|
|
|
* If not eof and read aheads are enabled, start one.
|
|
|
|
* (You need the current block first, so that you have the
|
1996-02-18 14:53:36 +03:00
|
|
|
* directory offset cookie of the next block.)
|
1994-06-08 15:33:09 +04:00
|
|
|
*/
|
|
|
|
if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
|
1997-10-10 05:53:17 +04:00
|
|
|
np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
|
1997-10-19 05:46:15 +04:00
|
|
|
rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
|
2003-06-30 02:28:00 +04:00
|
|
|
NFS_DIRBLKSIZ, p);
|
1994-06-08 15:33:09 +04:00
|
|
|
if (rabp) {
|
|
|
|
if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
|
1997-10-19 05:46:15 +04:00
|
|
|
rabp->b_dcookie = nndp->dc_cookie;
|
1994-06-08 15:33:09 +04:00
|
|
|
rabp->b_flags |= (B_READ | B_ASYNC);
|
2000-11-27 11:39:39 +03:00
|
|
|
if (nfs_asyncio(rabp)) {
|
1994-06-08 15:33:09 +04:00
|
|
|
rabp->b_flags |= B_INVAL;
|
|
|
|
brelse(rabp);
|
|
|
|
}
|
1995-01-12 15:08:23 +03:00
|
|
|
} else
|
|
|
|
brelse(rabp);
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
got_buf = 1;
|
1993-03-21 12:45:37 +03:00
|
|
|
break;
|
1996-02-18 14:53:36 +03:00
|
|
|
default:
|
1996-10-13 05:39:03 +04:00
|
|
|
printf(" nfsbioread: type %x unexpected\n",vp->v_type);
|
1996-02-10 00:48:19 +03:00
|
|
|
break;
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
1994-06-08 15:33:09 +04:00
|
|
|
|
|
|
|
if (n > 0) {
|
|
|
|
if (!baddr)
|
|
|
|
baddr = bp->b_data;
|
|
|
|
error = uiomove(baddr + on, (int)n, uio);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
switch (vp->v_type) {
|
1996-02-18 14:53:36 +03:00
|
|
|
case VREG:
|
|
|
|
break;
|
1993-03-21 12:45:37 +03:00
|
|
|
case VLNK:
|
|
|
|
n = 0;
|
|
|
|
break;
|
|
|
|
case VDIR:
|
1996-02-18 14:53:36 +03:00
|
|
|
if (np->n_flag & NQNFSNONCACHE)
|
|
|
|
bp->b_flags |= B_INVAL;
|
1997-10-10 05:53:17 +04:00
|
|
|
uio->uio_offset = curoff;
|
|
|
|
if (enough)
|
|
|
|
n = 0;
|
1996-02-10 00:48:19 +03:00
|
|
|
break;
|
1996-02-18 14:53:36 +03:00
|
|
|
default:
|
1996-10-13 05:39:03 +04:00
|
|
|
printf(" nfsbioread: type %x unexpected\n",vp->v_type);
|
1996-02-18 14:53:36 +03:00
|
|
|
}
|
1994-06-08 15:33:09 +04:00
|
|
|
if (got_buf)
|
|
|
|
brelse(bp);
|
|
|
|
} while (error == 0 && uio->uio_resid > 0 && n > 0);
|
1993-03-21 12:45:37 +03:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode op for write using bio
|
|
|
|
*/
|
1996-02-10 00:48:19 +03:00
|
|
|
int
|
|
|
|
nfs_write(v)
|
|
|
|
void *v;
|
|
|
|
{
|
1994-06-08 15:33:09 +04:00
|
|
|
struct vop_write_args /* {
|
1996-02-18 14:53:36 +03:00
|
|
|
struct vnode *a_vp;
|
1994-06-08 15:33:09 +04:00
|
|
|
struct uio *a_uio;
|
|
|
|
int a_ioflag;
|
|
|
|
struct ucred *a_cred;
|
1996-02-10 00:48:19 +03:00
|
|
|
} */ *ap = v;
|
2000-03-30 16:51:13 +04:00
|
|
|
struct uio *uio = ap->a_uio;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p = uio->uio_procp;
|
2000-03-30 16:51:13 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
1993-03-21 12:45:37 +03:00
|
|
|
struct nfsnode *np = VTONFS(vp);
|
2000-03-30 16:51:13 +04:00
|
|
|
struct ucred *cred = ap->a_cred;
|
1994-06-08 15:33:09 +04:00
|
|
|
int ioflag = ap->a_ioflag;
|
1993-03-21 12:45:37 +03:00
|
|
|
struct vattr vattr;
|
1996-02-18 14:53:36 +03:00
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
void *win;
|
|
|
|
voff_t oldoff, origoff;
|
|
|
|
vsize_t bytelen;
|
2003-05-03 20:28:57 +04:00
|
|
|
int error = 0;
|
2002-10-23 13:10:23 +04:00
|
|
|
int extended = 0, wrotedta = 0;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (uio->uio_rw != UIO_WRITE)
|
|
|
|
panic("nfs_write mode");
|
2003-06-30 02:28:00 +04:00
|
|
|
if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
|
1993-03-21 12:45:37 +03:00
|
|
|
panic("nfs_write proc");
|
|
|
|
#endif
|
|
|
|
if (vp->v_type != VREG)
|
|
|
|
return (EIO);
|
1994-06-08 15:33:09 +04:00
|
|
|
if (np->n_flag & NWRITEERR) {
|
|
|
|
np->n_flag &= ~NWRITEERR;
|
|
|
|
return (np->n_error);
|
|
|
|
}
|
2000-09-19 21:04:50 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
1997-10-10 05:53:17 +04:00
|
|
|
if ((nmp->nm_flag & NFSMNT_NFSV3) &&
|
|
|
|
!(nmp->nm_iflag & NFSMNT_GOTFSINFO))
|
2003-06-30 02:28:00 +04:00
|
|
|
(void)nfs_fsinfo(nmp, vp, cred, p);
|
2000-09-19 21:04:50 +04:00
|
|
|
#endif
|
1993-03-21 12:45:37 +03:00
|
|
|
if (ioflag & (IO_APPEND | IO_SYNC)) {
|
|
|
|
if (np->n_flag & NMODIFIED) {
|
1994-06-08 15:33:09 +04:00
|
|
|
np->n_attrstamp = 0;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
|
1996-02-10 00:48:19 +03:00
|
|
|
if (error)
|
1994-06-08 15:33:09 +04:00
|
|
|
return (error);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
if (ioflag & IO_APPEND) {
|
|
|
|
np->n_attrstamp = 0;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = VOP_GETATTR(vp, &vattr, cred, p);
|
1996-02-10 00:48:19 +03:00
|
|
|
if (error)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (error);
|
|
|
|
uio->uio_offset = np->n_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (uio->uio_offset < 0)
|
|
|
|
return (EINVAL);
|
1997-07-18 03:54:27 +04:00
|
|
|
if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
|
|
|
|
return (EFBIG);
|
1993-03-21 12:45:37 +03:00
|
|
|
if (uio->uio_resid == 0)
|
|
|
|
return (0);
|
|
|
|
/*
|
|
|
|
* Maybe this should be above the vnode op call, but so long as
|
|
|
|
* file servers have no limits, i don't think it matters
|
|
|
|
*/
|
1994-06-08 15:33:09 +04:00
|
|
|
if (p && uio->uio_offset + uio->uio_resid >
|
1993-03-21 12:45:37 +03:00
|
|
|
p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
|
|
|
|
psignal(p, SIGXFSZ);
|
|
|
|
return (EFBIG);
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* update the cached write creds for this node.
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
1994-06-08 15:33:09 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
if (np->n_wcred) {
|
|
|
|
crfree(np->n_wcred);
|
|
|
|
}
|
|
|
|
np->n_wcred = cred;
|
|
|
|
crhold(cred);
|
|
|
|
|
|
|
|
if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
|
2003-05-03 20:28:57 +04:00
|
|
|
int iomode = NFSV3WRITE_FILESYNC;
|
|
|
|
boolean_t stalewriteverf = FALSE;
|
|
|
|
|
|
|
|
lockmgr(&nmp->nm_writeverflock, LK_SHARED, NULL);
|
2003-05-21 17:27:19 +04:00
|
|
|
error = nfs_writerpc(vp, uio, &iomode, FALSE, &stalewriteverf);
|
2003-05-03 20:28:57 +04:00
|
|
|
lockmgr(&nmp->nm_writeverflock, LK_RELEASE, NULL);
|
2003-04-09 18:30:30 +04:00
|
|
|
if (stalewriteverf)
|
2000-11-27 11:39:39 +03:00
|
|
|
nfs_clearcommit(vp->v_mount);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
origoff = uio->uio_offset;
|
2000-11-27 11:39:39 +03:00
|
|
|
do {
|
2002-10-29 13:15:16 +03:00
|
|
|
boolean_t extending; /* if we are extending whole pages */
|
|
|
|
u_quad_t oldsize;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
oldoff = uio->uio_offset;
|
|
|
|
bytelen = uio->uio_resid;
|
1994-07-13 01:03:14 +04:00
|
|
|
|
2000-09-20 03:26:25 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
1994-06-08 15:33:09 +04:00
|
|
|
/*
|
|
|
|
* Check for a valid write lease.
|
|
|
|
*/
|
|
|
|
if ((nmp->nm_flag & NFSMNT_NQNFS) &&
|
1996-02-18 14:53:36 +03:00
|
|
|
NQNFS_CKINVALID(vp, np, ND_WRITE)) {
|
1994-06-08 15:33:09 +04:00
|
|
|
do {
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nqnfs_getlease(vp, ND_WRITE, cred, p);
|
1994-06-08 15:33:09 +04:00
|
|
|
} while (error == NQNFS_EXPIRED);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if (np->n_lrev != np->n_brev ||
|
|
|
|
(np->n_flag & NQNFSNONCACHE)) {
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
|
1996-02-10 00:48:19 +03:00
|
|
|
if (error)
|
1994-06-08 15:33:09 +04:00
|
|
|
return (error);
|
|
|
|
np->n_brev = np->n_lrev;
|
|
|
|
}
|
|
|
|
}
|
2000-09-20 03:26:25 +04:00
|
|
|
#endif
|
1993-03-21 12:45:37 +03:00
|
|
|
nfsstats.biocache_writes++;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2002-10-29 13:15:16 +03:00
|
|
|
oldsize = np->n_size;
|
1994-06-08 15:33:09 +04:00
|
|
|
np->n_flag |= NMODIFIED;
|
2000-11-27 11:39:39 +03:00
|
|
|
if (np->n_size < uio->uio_offset + bytelen) {
|
|
|
|
np->n_size = uio->uio_offset + bytelen;
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
2002-10-29 13:15:16 +03:00
|
|
|
extending = ((uio->uio_offset & PAGE_MASK) == 0 &&
|
2002-04-10 07:06:57 +04:00
|
|
|
(bytelen & PAGE_MASK) == 0 &&
|
2002-10-29 13:15:16 +03:00
|
|
|
uio->uio_offset >= vp->v_size);
|
2003-08-03 22:20:53 +04:00
|
|
|
win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
|
|
|
|
UBC_WRITE | (extending ? UBC_FAULTBUSY : 0));
|
2000-11-27 11:39:39 +03:00
|
|
|
error = uiomove(win, bytelen, uio);
|
|
|
|
ubc_release(win, 0);
|
2000-12-27 08:15:43 +03:00
|
|
|
if (error) {
|
2002-10-29 13:15:16 +03:00
|
|
|
if (extending) {
|
|
|
|
/*
|
|
|
|
* backout size and free pages past eof.
|
|
|
|
*/
|
|
|
|
np->n_size = oldsize;
|
2003-08-26 20:40:10 +04:00
|
|
|
simple_lock(&vp->v_interlock);
|
2002-10-29 13:15:16 +03:00
|
|
|
(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
|
|
|
|
0, PGO_SYNCIO | PGO_FREE);
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
break;
|
|
|
|
}
|
2002-10-23 13:10:23 +04:00
|
|
|
wrotedta = 1;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* update UVM's notion of the size now that we've
|
|
|
|
* copied the data into the vnode's pages.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (vp->v_size < uio->uio_offset) {
|
|
|
|
uvm_vnp_setsize(vp, uio->uio_offset);
|
2002-10-23 13:10:23 +04:00
|
|
|
extended = 1;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if ((oldoff & ~(nmp->nm_wsize - 1)) !=
|
|
|
|
(uio->uio_offset & ~(nmp->nm_wsize - 1))) {
|
2001-11-30 10:08:53 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
error = VOP_PUTPAGES(vp,
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
|
|
|
|
round_page((uio->uio_offset + nmp->nm_wsize - 1) &
|
2002-03-25 05:08:09 +03:00
|
|
|
~(nmp->nm_wsize - 1)), PGO_CLEANIT);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
} while (uio->uio_resid > 0);
|
2002-10-23 13:10:23 +04:00
|
|
|
if (wrotedta)
|
|
|
|
VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
|
2001-11-30 10:08:53 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
error = VOP_PUTPAGES(vp,
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
trunc_page(origoff & ~(nmp->nm_wsize - 1)),
|
|
|
|
round_page((uio->uio_offset + nmp->nm_wsize - 1) &
|
|
|
|
~(nmp->nm_wsize - 1)),
|
2001-11-30 10:08:53 +03:00
|
|
|
PGO_CLEANIT | PGO_SYNCIO);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
return error;
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get an nfs cache block.
|
|
|
|
* Allocate a new one if the block isn't currently in the cache
|
|
|
|
* and return the block marked busy. If the calling process is
|
|
|
|
* interrupted by a signal for an interruptible mount point, return
|
|
|
|
* NULL.
|
|
|
|
*/
|
|
|
|
struct buf *
|
2003-06-30 02:28:00 +04:00
|
|
|
nfs_getcacheblk(vp, bn, size, p)
|
1994-06-08 15:33:09 +04:00
|
|
|
struct vnode *vp;
|
|
|
|
daddr_t bn;
|
|
|
|
int size;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:33:09 +04:00
|
|
|
{
|
2000-03-30 16:51:13 +04:00
|
|
|
struct buf *bp;
|
1994-06-08 15:33:09 +04:00
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
|
|
|
|
|
|
|
if (nmp->nm_flag & NFSMNT_INT) {
|
|
|
|
bp = getblk(vp, bn, size, PCATCH, 0);
|
2000-11-27 11:39:39 +03:00
|
|
|
while (bp == NULL) {
|
2003-06-30 02:28:00 +04:00
|
|
|
if (nfs_sigintr(nmp, NULL, p))
|
2000-11-27 11:39:39 +03:00
|
|
|
return (NULL);
|
1994-06-08 15:33:09 +04:00
|
|
|
bp = getblk(vp, bn, size, 0, 2 * hz);
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
bp = getblk(vp, bn, size, 0, 0);
|
|
|
|
return (bp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush and invalidate all dirty buffers. If another process is already
|
|
|
|
* doing the flush, just wait for completion.
|
|
|
|
*/
|
1996-02-10 00:48:19 +03:00
|
|
|
int
|
2003-06-30 02:28:00 +04:00
|
|
|
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
|
1994-06-08 15:33:09 +04:00
|
|
|
struct vnode *vp;
|
|
|
|
int flags;
|
|
|
|
struct ucred *cred;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:33:09 +04:00
|
|
|
int intrflg;
|
|
|
|
{
|
2000-03-30 16:51:13 +04:00
|
|
|
struct nfsnode *np = VTONFS(vp);
|
1994-06-08 15:33:09 +04:00
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
|
|
|
int error = 0, slpflag, slptimeo;
|
|
|
|
|
|
|
|
if ((nmp->nm_flag & NFSMNT_INT) == 0)
|
|
|
|
intrflg = 0;
|
|
|
|
if (intrflg) {
|
|
|
|
slpflag = PCATCH;
|
|
|
|
slptimeo = 2 * hz;
|
|
|
|
} else {
|
|
|
|
slpflag = 0;
|
|
|
|
slptimeo = 0;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* First wait for any other process doing a flush to complete.
|
|
|
|
*/
|
2003-05-22 19:59:24 +04:00
|
|
|
simple_lock(&vp->v_interlock);
|
1994-06-08 15:33:09 +04:00
|
|
|
while (np->n_flag & NFLUSHINPROG) {
|
|
|
|
np->n_flag |= NFLUSHWANT;
|
2003-05-22 19:59:24 +04:00
|
|
|
error = ltsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
|
|
|
|
slptimeo, &vp->v_interlock);
|
2003-06-30 02:28:00 +04:00
|
|
|
if (error && intrflg && nfs_sigintr(nmp, NULL, p)) {
|
2003-05-22 19:59:24 +04:00
|
|
|
simple_unlock(&vp->v_interlock);
|
|
|
|
return EINTR;
|
|
|
|
}
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now, flush as required.
|
|
|
|
*/
|
|
|
|
np->n_flag |= NFLUSHINPROG;
|
2003-05-22 19:59:24 +04:00
|
|
|
simple_unlock(&vp->v_interlock);
|
2003-06-30 02:28:00 +04:00
|
|
|
error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
|
1994-06-08 15:33:09 +04:00
|
|
|
while (error) {
|
2003-06-30 02:28:00 +04:00
|
|
|
if (intrflg && nfs_sigintr(nmp, NULL, p)) {
|
2003-05-22 19:59:24 +04:00
|
|
|
error = EINTR;
|
|
|
|
break;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2003-06-30 02:28:00 +04:00
|
|
|
error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2003-05-22 19:59:24 +04:00
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
if (error == 0)
|
|
|
|
np->n_flag &= ~NMODIFIED;
|
|
|
|
np->n_flag &= ~NFLUSHINPROG;
|
1994-06-08 15:33:09 +04:00
|
|
|
if (np->n_flag & NFLUSHWANT) {
|
|
|
|
np->n_flag &= ~NFLUSHWANT;
|
2003-05-22 19:59:24 +04:00
|
|
|
wakeup(&np->n_flag);
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
2003-05-22 19:59:24 +04:00
|
|
|
simple_unlock(&vp->v_interlock);
|
|
|
|
return error;
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initiate asynchronous I/O. Return an error if no nfsiods are available.
|
|
|
|
* This is mainly to avoid queueing async I/O requests when the nfsiods
|
|
|
|
* are all hung on a dead server.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1996-02-10 00:48:19 +03:00
|
|
|
int
|
2000-11-27 11:39:39 +03:00
|
|
|
nfs_asyncio(bp)
|
2000-03-30 16:51:13 +04:00
|
|
|
struct buf *bp;
|
1994-06-08 15:33:09 +04:00
|
|
|
{
|
2000-03-30 16:51:13 +04:00
|
|
|
int i;
|
|
|
|
struct nfsmount *nmp;
|
1996-12-03 01:55:39 +03:00
|
|
|
int gotiod, slpflag = 0, slptimeo = 0, error;
|
1994-06-08 15:33:09 +04:00
|
|
|
|
|
|
|
if (nfs_numasync == 0)
|
|
|
|
return (EIO);
|
1996-12-03 01:55:39 +03:00
|
|
|
|
|
|
|
nmp = VFSTONFS(bp->b_vp->v_mount);
|
|
|
|
again:
|
|
|
|
if (nmp->nm_flag & NFSMNT_INT)
|
|
|
|
slpflag = PCATCH;
|
|
|
|
gotiod = FALSE;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find a free iod to process this request.
|
|
|
|
*/
|
|
|
|
|
2003-04-09 18:22:33 +04:00
|
|
|
for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
|
|
|
|
struct nfs_iod *iod = &nfs_asyncdaemon[i];
|
|
|
|
|
2003-05-07 20:18:53 +04:00
|
|
|
simple_lock(&iod->nid_slock);
|
2003-04-09 18:22:33 +04:00
|
|
|
if (iod->nid_want) {
|
1996-12-03 01:55:39 +03:00
|
|
|
/*
|
|
|
|
* Found one, so wake it up and tell it which
|
|
|
|
* mount to process.
|
|
|
|
*/
|
2003-04-09 18:22:33 +04:00
|
|
|
iod->nid_want = NULL;
|
|
|
|
iod->nid_mount = nmp;
|
2003-05-07 20:18:53 +04:00
|
|
|
wakeup(&iod->nid_want);
|
|
|
|
simple_lock(&nmp->nm_slock);
|
|
|
|
simple_unlock(&iod->nid_slock);
|
1996-12-03 01:55:39 +03:00
|
|
|
nmp->nm_bufqiods++;
|
|
|
|
gotiod = TRUE;
|
1997-04-20 20:24:44 +04:00
|
|
|
break;
|
1996-12-03 01:55:39 +03:00
|
|
|
}
|
2003-05-07 20:18:53 +04:00
|
|
|
simple_unlock(&iod->nid_slock);
|
2003-04-09 18:22:33 +04:00
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
/*
|
|
|
|
* If none are free, we may already have an iod working on this mount
|
|
|
|
* point. If so, it will process our request.
|
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
|
2003-05-07 20:18:53 +04:00
|
|
|
if (!gotiod) {
|
|
|
|
simple_lock(&nmp->nm_slock);
|
|
|
|
if (nmp->nm_bufqiods > 0)
|
|
|
|
gotiod = TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
LOCK_ASSERT(simple_lock_held(&nmp->nm_slock));
|
1996-12-03 01:55:39 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we have an iod which can process the request, then queue
|
|
|
|
* the buffer.
|
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
if (gotiod) {
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
/*
|
|
|
|
* Ensure that the queue never grows too large.
|
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
while (nmp->nm_bufqlen >= 2*nfs_numasync) {
|
|
|
|
nmp->nm_bufqwant = TRUE;
|
2003-05-07 20:18:53 +04:00
|
|
|
error = ltsleep(&nmp->nm_bufq,
|
|
|
|
slpflag | PRIBIO | PNORELOCK,
|
|
|
|
"nfsaio", slptimeo, &nmp->nm_slock);
|
1996-12-03 01:55:39 +03:00
|
|
|
if (error) {
|
2003-06-30 02:28:00 +04:00
|
|
|
if (nfs_sigintr(nmp, NULL, curproc))
|
1996-12-03 01:55:39 +03:00
|
|
|
return (EINTR);
|
|
|
|
if (slpflag == PCATCH) {
|
|
|
|
slpflag = 0;
|
|
|
|
slptimeo = 2 * hz;
|
|
|
|
}
|
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
/*
|
|
|
|
* We might have lost our iod while sleeping,
|
|
|
|
* so check and loop if nescessary.
|
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
if (nmp->nm_bufqiods == 0)
|
|
|
|
goto again;
|
2003-05-07 20:18:53 +04:00
|
|
|
|
|
|
|
simple_lock(&nmp->nm_slock);
|
1996-12-03 01:55:39 +03:00
|
|
|
}
|
|
|
|
TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
|
|
|
|
nmp->nm_bufqlen++;
|
2003-05-07 20:18:53 +04:00
|
|
|
simple_unlock(&nmp->nm_slock);
|
1994-06-08 15:33:09 +04:00
|
|
|
return (0);
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2003-05-07 20:18:53 +04:00
|
|
|
simple_unlock(&nmp->nm_slock);
|
1996-02-18 14:53:36 +03:00
|
|
|
|
|
|
|
/*
|
1996-12-03 01:55:39 +03:00
|
|
|
* All the iods are busy on other mounts, so return EIO to
|
|
|
|
* force the caller to process the i/o synchronously.
|
1996-02-18 14:53:36 +03:00
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
return (EIO);
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-04-12 18:26:58 +04:00
|
|
|
* nfs_doio for read.
|
1994-06-08 15:33:09 +04:00
|
|
|
*/
|
2003-04-12 18:26:58 +04:00
|
|
|
static int
|
|
|
|
nfs_doio_read(bp, uiop)
|
2000-03-30 16:51:13 +04:00
|
|
|
struct buf *bp;
|
|
|
|
struct uio *uiop;
|
2003-04-12 18:26:58 +04:00
|
|
|
{
|
|
|
|
struct vnode *vp = bp->b_vp;
|
|
|
|
struct nfsnode *np = VTONFS(vp);
|
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
|
|
|
int error = 0;
|
1994-06-08 15:33:09 +04:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
uiop->uio_rw = UIO_READ;
|
|
|
|
switch (vp->v_type) {
|
|
|
|
case VREG:
|
1994-06-08 15:33:09 +04:00
|
|
|
nfsstats.read_bios++;
|
2000-11-27 11:39:39 +03:00
|
|
|
error = nfs_readrpc(vp, uiop);
|
|
|
|
if (!error && uiop->uio_resid) {
|
2003-04-12 18:26:58 +04:00
|
|
|
int diff, len;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1994-06-08 15:33:09 +04:00
|
|
|
/*
|
|
|
|
* If len > 0, there is a hole in the file and
|
|
|
|
* no writes after the hole have been pushed to
|
|
|
|
* the server yet.
|
|
|
|
* Just zero fill the rest of the valid area.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1994-06-08 15:33:09 +04:00
|
|
|
diff = bp->b_bcount - uiop->uio_resid;
|
2000-11-27 11:39:39 +03:00
|
|
|
len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
|
1994-06-08 15:33:09 +04:00
|
|
|
+ diff);
|
|
|
|
if (len > 0) {
|
2001-02-27 07:37:44 +03:00
|
|
|
len = MIN(len, uiop->uio_resid);
|
2000-11-27 11:39:39 +03:00
|
|
|
memset((char *)bp->b_data + diff, 0, len);
|
|
|
|
}
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
2003-06-30 02:28:00 +04:00
|
|
|
if (uiop->uio_procp && (vp->v_flag & VTEXT) &&
|
2003-09-26 15:51:53 +04:00
|
|
|
(((nmp->nm_flag & NFSMNT_NQNFS) &&
|
|
|
|
NQNFS_CKINVALID(vp, np, ND_READ) &&
|
|
|
|
np->n_lrev != np->n_brev) ||
|
|
|
|
(!(nmp->nm_flag & NFSMNT_NQNFS) &&
|
|
|
|
timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)))) {
|
2000-11-27 11:39:39 +03:00
|
|
|
uprintf("Process killed due to "
|
|
|
|
"text file modification\n");
|
2003-06-30 02:28:00 +04:00
|
|
|
psignal(uiop->uio_procp, SIGKILL);
|
2003-01-18 11:51:40 +03:00
|
|
|
#if 0 /* XXX NJWLWP */
|
2003-06-30 02:28:00 +04:00
|
|
|
uiop->uio_procp->p_holdcnt++;
|
2003-01-18 11:51:40 +03:00
|
|
|
#endif
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
|
|
|
break;
|
2003-04-12 18:26:58 +04:00
|
|
|
case VLNK:
|
|
|
|
KASSERT(uiop->uio_offset == (off_t)0);
|
1994-06-08 15:33:09 +04:00
|
|
|
nfsstats.readlink_bios++;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
|
1994-06-08 15:33:09 +04:00
|
|
|
break;
|
2003-04-12 18:26:58 +04:00
|
|
|
case VDIR:
|
1994-06-08 15:33:09 +04:00
|
|
|
nfsstats.readdir_bios++;
|
1997-10-10 05:53:17 +04:00
|
|
|
uiop->uio_offset = bp->b_dcookie;
|
1996-02-18 14:53:36 +03:00
|
|
|
if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_readdirplusrpc(vp, uiop, curproc->p_ucred);
|
1996-02-18 14:53:36 +03:00
|
|
|
if (error == NFSERR_NOTSUPP)
|
|
|
|
nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
|
|
|
|
}
|
|
|
|
if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_readdirrpc(vp, uiop, curproc->p_ucred);
|
1997-10-10 05:53:17 +04:00
|
|
|
if (!error) {
|
|
|
|
bp->b_dcookie = uiop->uio_offset;
|
|
|
|
}
|
1996-02-18 14:53:36 +03:00
|
|
|
break;
|
2003-04-12 18:26:58 +04:00
|
|
|
default:
|
|
|
|
printf("nfs_doio: type %x unexpected\n", vp->v_type);
|
1994-06-08 15:33:09 +04:00
|
|
|
break;
|
2003-04-12 18:26:58 +04:00
|
|
|
}
|
|
|
|
if (error) {
|
1994-06-08 15:33:09 +04:00
|
|
|
bp->b_flags |= B_ERROR;
|
|
|
|
bp->b_error = error;
|
2003-04-12 18:26:58 +04:00
|
|
|
}
|
|
|
|
return error;
|
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
/*
|
|
|
|
* nfs_doio for write.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
nfs_doio_write(bp, uiop)
|
|
|
|
struct buf *bp;
|
|
|
|
struct uio *uiop;
|
|
|
|
{
|
|
|
|
struct vnode *vp = bp->b_vp;
|
|
|
|
struct nfsnode *np = VTONFS(vp);
|
2003-05-03 20:28:57 +04:00
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
2003-04-12 18:26:58 +04:00
|
|
|
int iomode;
|
2003-05-03 20:28:57 +04:00
|
|
|
boolean_t stalewriteverf = FALSE;
|
2003-04-12 18:26:58 +04:00
|
|
|
int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
|
|
|
struct vm_page *pgs[npages];
|
|
|
|
boolean_t needcommit = TRUE;
|
2003-05-21 17:27:19 +04:00
|
|
|
boolean_t pageprotected;
|
2003-04-12 18:26:58 +04:00
|
|
|
struct uvm_object *uobj = &vp->v_uobj;
|
|
|
|
int error;
|
|
|
|
off_t off, cnt;
|
2002-01-26 05:52:19 +03:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
|
|
|
|
iomode = NFSV3WRITE_UNSTABLE;
|
|
|
|
} else {
|
|
|
|
iomode = NFSV3WRITE_FILESYNC;
|
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
|
2003-05-03 20:28:57 +04:00
|
|
|
again:
|
|
|
|
lockmgr(&nmp->nm_writeverflock, LK_SHARED, NULL);
|
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
for (i = 0; i < npages; i++) {
|
2003-04-15 17:48:40 +04:00
|
|
|
pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
|
2003-05-15 18:34:06 +04:00
|
|
|
if (pgs[i]->uobject == uobj &&
|
|
|
|
pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
|
2003-05-16 21:16:05 +04:00
|
|
|
KASSERT(pgs[i]->flags & PG_BUSY);
|
2003-05-15 18:34:06 +04:00
|
|
|
/*
|
|
|
|
* this page belongs to our object.
|
|
|
|
*/
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
|
|
|
|
iomode = NFSV3WRITE_FILESYNC;
|
|
|
|
if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
|
|
|
|
needcommit = FALSE;
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
|
|
|
} else {
|
2003-04-12 18:26:58 +04:00
|
|
|
iomode = NFSV3WRITE_FILESYNC;
|
2003-05-15 18:34:06 +04:00
|
|
|
needcommit = FALSE;
|
2003-04-12 18:26:58 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
|
2003-05-15 18:34:06 +04:00
|
|
|
simple_lock(&uobj->vmobjlock);
|
2003-04-12 18:26:58 +04:00
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
|
|
|
|
pmap_page_protect(pgs[i], VM_PROT_READ);
|
|
|
|
}
|
2003-05-15 18:34:06 +04:00
|
|
|
simple_unlock(&uobj->vmobjlock);
|
2003-05-21 17:27:19 +04:00
|
|
|
pageprotected = TRUE; /* pages can't be modified during i/o. */
|
|
|
|
} else
|
|
|
|
pageprotected = FALSE;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
/*
|
|
|
|
* Send the data to the server if necessary,
|
|
|
|
* otherwise just send a commit rpc.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (needcommit) {
|
2002-01-26 05:52:19 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the buffer is in the range that we already committed,
|
|
|
|
* there's nothing to do.
|
|
|
|
*
|
|
|
|
* If it's in the range that we need to commit, push the
|
|
|
|
* whole range at once, otherwise only push the buffer.
|
|
|
|
* In both these cases, acquire the commit lock to avoid
|
|
|
|
* other processes modifying the range.
|
|
|
|
*/
|
|
|
|
|
2003-04-09 18:27:58 +04:00
|
|
|
off = uiop->uio_offset;
|
|
|
|
cnt = bp->b_bcount;
|
2002-01-26 05:52:19 +03:00
|
|
|
lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
|
|
|
|
if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
|
2003-04-12 18:26:58 +04:00
|
|
|
boolean_t pushedrange;
|
2002-01-26 05:52:19 +03:00
|
|
|
if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
|
2003-04-12 18:26:58 +04:00
|
|
|
pushedrange = TRUE;
|
2002-01-26 05:52:19 +03:00
|
|
|
off = np->n_pushlo;
|
|
|
|
cnt = np->n_pushhi - np->n_pushlo;
|
|
|
|
} else {
|
2003-04-12 18:26:58 +04:00
|
|
|
pushedrange = FALSE;
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_commit(vp, off, cnt, curproc);
|
2002-01-26 05:52:19 +03:00
|
|
|
if (error == 0) {
|
|
|
|
if (pushedrange) {
|
|
|
|
nfs_merge_commit_ranges(vp);
|
|
|
|
} else {
|
|
|
|
nfs_add_committed_range(vp, off, cnt);
|
|
|
|
}
|
|
|
|
}
|
2003-04-18 19:19:02 +04:00
|
|
|
} else {
|
|
|
|
error = 0;
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
|
|
|
lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
|
2003-05-03 20:28:57 +04:00
|
|
|
lockmgr(&nmp->nm_writeverflock, LK_RELEASE, NULL);
|
2002-01-26 05:52:19 +03:00
|
|
|
if (!error) {
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* pages are now on stable storage.
|
|
|
|
*/
|
2003-04-12 18:41:28 +04:00
|
|
|
uiop->uio_resid = 0;
|
2002-01-26 05:52:19 +03:00
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
|
|
|
|
}
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
2003-04-12 18:26:58 +04:00
|
|
|
return 0;
|
2002-01-26 05:52:19 +03:00
|
|
|
} else if (error == NFSERR_STALEWRITEVERF) {
|
2003-05-03 20:28:57 +04:00
|
|
|
nfs_clearcommit(vp->v_mount);
|
|
|
|
goto again;
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2003-05-03 20:28:57 +04:00
|
|
|
if (error) {
|
|
|
|
bp->b_flags |= B_ERROR;
|
|
|
|
bp->b_error = np->n_error = error;
|
|
|
|
np->n_flag |= NWRITEERR;
|
|
|
|
}
|
|
|
|
return error;
|
2003-04-12 18:26:58 +04:00
|
|
|
}
|
|
|
|
off = uiop->uio_offset;
|
|
|
|
cnt = bp->b_bcount;
|
|
|
|
uiop->uio_rw = UIO_WRITE;
|
|
|
|
nfsstats.write_bios++;
|
2003-05-21 17:27:19 +04:00
|
|
|
error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
|
2003-04-12 18:26:58 +04:00
|
|
|
if (!error && iomode == NFSV3WRITE_UNSTABLE) {
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* we need to commit pages later.
|
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
|
|
|
|
nfs_add_tobecommitted_range(vp, off, cnt);
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* if there can be too many uncommitted pages, commit them now.
|
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
|
|
|
|
off = np->n_pushlo;
|
|
|
|
cnt = nfs_commitsize >> 1;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = nfs_commit(vp, off, cnt, curproc);
|
2002-01-26 05:52:19 +03:00
|
|
|
if (!error) {
|
|
|
|
nfs_add_committed_range(vp, off, cnt);
|
|
|
|
nfs_del_tobecommitted_range(vp, off, cnt);
|
|
|
|
}
|
2003-05-03 20:46:39 +04:00
|
|
|
if (error == NFSERR_STALEWRITEVERF) {
|
|
|
|
stalewriteverf = TRUE;
|
|
|
|
error = 0; /* it isn't a real error */
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* re-dirty pages so that they will be passed
|
|
|
|
* to us later again.
|
|
|
|
*/
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pgs[i]->flags &= ~PG_CLEAN;
|
|
|
|
}
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
|
|
|
lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
|
2003-05-03 20:46:39 +04:00
|
|
|
} else if (!error) {
|
|
|
|
/*
|
|
|
|
* pages are now on stable storage.
|
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
|
|
|
|
nfs_del_committed_range(vp, off, cnt);
|
|
|
|
lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
|
|
|
|
}
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
2003-04-12 18:26:58 +04:00
|
|
|
} else {
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* we got an error.
|
|
|
|
*/
|
|
|
|
bp->b_flags |= B_ERROR;
|
|
|
|
bp->b_error = np->n_error = error;
|
|
|
|
np->n_flag |= NWRITEERR;
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
2003-05-03 20:28:57 +04:00
|
|
|
|
|
|
|
lockmgr(&nmp->nm_writeverflock, LK_RELEASE, NULL);
|
|
|
|
|
|
|
|
if (stalewriteverf) {
|
2000-11-27 11:39:39 +03:00
|
|
|
nfs_clearcommit(vp->v_mount);
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2003-04-12 18:26:58 +04:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nfs_doio for B_PHYS.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
nfs_doio_phys(bp, uiop)
|
|
|
|
struct buf *bp;
|
|
|
|
struct uio *uiop;
|
|
|
|
{
|
|
|
|
struct vnode *vp = bp->b_vp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
|
|
|
|
if (bp->b_flags & B_READ) {
|
|
|
|
uiop->uio_rw = UIO_READ;
|
|
|
|
nfsstats.read_physios++;
|
|
|
|
error = nfs_readrpc(vp, uiop);
|
|
|
|
} else {
|
|
|
|
int iomode = NFSV3WRITE_DATASYNC;
|
2003-05-03 20:28:57 +04:00
|
|
|
boolean_t stalewriteverf;
|
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
2003-04-12 18:26:58 +04:00
|
|
|
|
|
|
|
uiop->uio_rw = UIO_WRITE;
|
|
|
|
nfsstats.write_physios++;
|
2003-05-03 20:28:57 +04:00
|
|
|
lockmgr(&nmp->nm_writeverflock, LK_SHARED, NULL);
|
2003-05-21 17:27:19 +04:00
|
|
|
error = nfs_writerpc(vp, uiop, &iomode, FALSE, &stalewriteverf);
|
2003-05-03 20:28:57 +04:00
|
|
|
lockmgr(&nmp->nm_writeverflock, LK_RELEASE, NULL);
|
2003-04-12 18:26:58 +04:00
|
|
|
if (stalewriteverf) {
|
|
|
|
nfs_clearcommit(bp->b_vp->v_mount);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (error) {
|
|
|
|
bp->b_flags |= B_ERROR;
|
|
|
|
bp->b_error = error;
|
|
|
|
}
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do an I/O operation to/from a cache block. This may be called
|
|
|
|
* synchronously or from an nfsiod.
|
|
|
|
*/
|
|
|
|
int
|
2003-06-30 02:28:00 +04:00
|
|
|
nfs_doio(bp, p)
|
2003-04-12 18:26:58 +04:00
|
|
|
struct buf *bp;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
2003-04-12 18:26:58 +04:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct uio uio;
|
|
|
|
struct uio *uiop = &uio;
|
|
|
|
struct iovec io;
|
|
|
|
UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);
|
|
|
|
|
|
|
|
uiop->uio_iov = &io;
|
|
|
|
uiop->uio_iovcnt = 1;
|
|
|
|
uiop->uio_segflg = UIO_SYSSPACE;
|
2003-06-30 02:28:00 +04:00
|
|
|
uiop->uio_procp = p;
|
2003-04-12 18:26:58 +04:00
|
|
|
uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
|
|
|
|
io.iov_base = bp->b_data;
|
|
|
|
io.iov_len = uiop->uio_resid = bp->b_bcount;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Historically, paging was done with physio, but no more...
|
|
|
|
*/
|
|
|
|
if (bp->b_flags & B_PHYS) {
|
|
|
|
/*
|
|
|
|
* ...though reading /dev/drum still gets us here.
|
|
|
|
*/
|
|
|
|
error = nfs_doio_phys(bp, uiop);
|
|
|
|
} else if (bp->b_flags & B_READ) {
|
|
|
|
error = nfs_doio_read(bp, uiop);
|
|
|
|
} else {
|
|
|
|
error = nfs_doio_write(bp, uiop);
|
|
|
|
}
|
|
|
|
bp->b_resid = uiop->uio_resid;
|
2000-11-27 11:39:39 +03:00
|
|
|
biodone(bp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode op for VM getpages.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
int
|
|
|
|
nfs_getpages(v)
|
|
|
|
void *v;
|
|
|
|
{
|
|
|
|
struct vop_getpages_args /* {
|
|
|
|
struct vnode *a_vp;
|
|
|
|
voff_t a_offset;
|
2001-05-27 01:27:10 +04:00
|
|
|
struct vm_page **a_m;
|
2000-11-27 11:39:39 +03:00
|
|
|
int *a_count;
|
|
|
|
int a_centeridx;
|
|
|
|
vm_prot_t a_access_type;
|
|
|
|
int a_advice;
|
|
|
|
int a_flags;
|
|
|
|
} */ *ap = v;
|
|
|
|
|
|
|
|
struct vnode *vp = ap->a_vp;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
struct uvm_object *uobj = &vp->v_uobj;
|
2000-11-27 11:39:39 +03:00
|
|
|
struct nfsnode *np = VTONFS(vp);
|
2002-05-06 04:07:51 +04:00
|
|
|
const int npages = *ap->a_count;
|
|
|
|
struct vm_page *pg, **pgs, *opgs[npages];
|
2002-01-26 05:52:19 +03:00
|
|
|
off_t origoffset, len;
|
2002-05-06 04:07:51 +04:00
|
|
|
int i, error;
|
2000-11-27 11:39:39 +03:00
|
|
|
boolean_t v3 = NFS_ISV3(vp);
|
|
|
|
boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
|
2001-12-31 10:16:47 +03:00
|
|
|
boolean_t locked = (ap->a_flags & PGO_LOCKED) != 0;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* update the cached read creds for this node.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (np->n_rcred) {
|
|
|
|
crfree(np->n_rcred);
|
|
|
|
}
|
2003-06-30 02:28:00 +04:00
|
|
|
np->n_rcred = curproc->p_ucred;
|
2000-11-27 11:39:39 +03:00
|
|
|
crhold(np->n_rcred);
|
|
|
|
|
|
|
|
/*
|
2002-05-06 04:07:51 +04:00
|
|
|
* call the genfs code to get the pages. `pgs' may be NULL
|
|
|
|
* when doing read-ahead.
|
2000-11-27 11:39:39 +03:00
|
|
|
*/
|
|
|
|
|
2002-05-06 04:07:51 +04:00
|
|
|
pgs = ap->a_m;
|
2002-05-06 07:20:54 +04:00
|
|
|
if (write && locked && v3) {
|
2002-05-06 04:07:51 +04:00
|
|
|
KASSERT(pgs != NULL);
|
|
|
|
#ifdef DEBUG
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If PGO_LOCKED is set, real pages shouldn't exist
|
|
|
|
* in the array.
|
|
|
|
*/
|
|
|
|
|
|
|
|
for (i = 0; i < npages; i++)
|
|
|
|
KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
|
|
|
|
#endif
|
|
|
|
memcpy(opgs, pgs, npages * sizeof(struct vm_pages *));
|
|
|
|
}
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
error = genfs_getpages(v);
|
2002-03-17 02:05:25 +03:00
|
|
|
if (error) {
|
2002-05-06 04:07:51 +04:00
|
|
|
return (error);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2002-03-17 02:05:25 +03:00
|
|
|
* for read faults where the nfs node is not yet marked NMODIFIED,
|
|
|
|
* set PG_RDONLY on the pages so that we come back here if someone
|
|
|
|
* tries to modify later via the mapping that will be entered for
|
|
|
|
* this fault.
|
2000-11-27 11:39:39 +03:00
|
|
|
*/
|
|
|
|
|
2002-03-17 02:05:25 +03:00
|
|
|
if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
|
|
|
|
if (!locked) {
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
}
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pg = pgs[i];
|
|
|
|
if (pg == NULL || pg == PGO_DONTCARE) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
pg->flags |= PG_RDONLY;
|
|
|
|
}
|
|
|
|
if (!locked) {
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!write) {
|
2002-05-06 04:07:51 +04:00
|
|
|
return (0);
|
2002-03-17 02:05:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* this is a write fault, update the commit info.
|
|
|
|
*/
|
|
|
|
|
2002-01-26 05:52:19 +03:00
|
|
|
origoffset = ap->a_offset;
|
|
|
|
len = npages << PAGE_SHIFT;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2002-03-17 02:05:25 +03:00
|
|
|
if (v3) {
|
2002-05-06 04:07:51 +04:00
|
|
|
error = lockmgr(&np->n_commitlock,
|
|
|
|
LK_EXCLUSIVE | (locked ? LK_NOWAIT : 0), NULL);
|
|
|
|
if (error) {
|
|
|
|
KASSERT(locked != 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Since PGO_LOCKED is set, we need to unbusy
|
|
|
|
* all pages fetched by genfs_getpages() above,
|
|
|
|
* tell the caller that there are no pages
|
|
|
|
* available and put back original pgs array.
|
|
|
|
*/
|
|
|
|
|
|
|
|
uvm_lock_pageq();
|
|
|
|
uvm_page_unbusy(pgs, npages);
|
|
|
|
uvm_unlock_pageq();
|
|
|
|
*ap->a_count = 0;
|
|
|
|
memcpy(pgs, opgs,
|
|
|
|
npages * sizeof(struct vm_pages *));
|
|
|
|
return (error);
|
|
|
|
}
|
2002-03-17 02:05:25 +03:00
|
|
|
nfs_del_committed_range(vp, origoffset, len);
|
|
|
|
nfs_del_tobecommitted_range(vp, origoffset, len);
|
|
|
|
}
|
2002-05-06 04:07:51 +04:00
|
|
|
np->n_flag |= NMODIFIED;
|
2001-12-31 10:16:47 +03:00
|
|
|
if (!locked) {
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
for (i = 0; i < npages; i++) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
pg = pgs[i];
|
|
|
|
if (pg == NULL || pg == PGO_DONTCARE) {
|
2000-11-27 11:39:39 +03:00
|
|
|
continue;
|
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
2001-12-31 10:16:47 +03:00
|
|
|
if (!locked) {
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
|
|
|
}
|
2002-03-17 02:05:25 +03:00
|
|
|
if (v3) {
|
|
|
|
lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
|
|
|
|
}
|
2002-05-06 04:07:51 +04:00
|
|
|
return (0);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|