/*	$NetBSD: nfs_bio.c,v 1.184 2010/04/23 15:38:47 pooka Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.184 2010/04/23 15:38:47 pooka Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;

static int nfs_doio_read(struct buf *, struct uio *);
static int nfs_doio_write(struct buf *, struct uio *);
static int nfs_doio_phys(struct buf *, struct uio *);

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag,
    kauth_cred_t cred, int cflag)
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	void *baddr;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp, *edp, *ep;
	off_t curoff = 0;
	int advice;
	struct lwp *l = curlwp;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * nfs_attrtimeo seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */

	if (vp->v_type != VLNK) {
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return error;
	}

	do {
	    /*
	     * Don't cache symlinks.
	     */
	    if ((vp->v_vflag & VV_ROOT) && vp->v_type == VLNK) {
		return (nfs_readlinkrpc(vp, uio, cred));
	    }
	    baddr = (void *)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;

		advice = IO_ADV_DECODE(ioflag);
		error = 0;
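		/*
		 * Regular-file reads are served from the UBC page
		 * cache: ubc_uiomove() maps a window of the vnode's
		 * pages and copies them into the uio.  UBC_PARTIALOK
		 * means a short copy is reported as an error rather
		 * than treated as fatal, which the server-side
		 * truncation check below relies on.
		 */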
		while (uio->uio_resid > 0) {
			vsize_t bytelen;

			nfs_delayedtruncate(vp);
			if (np->n_size <= uio->uio_offset) {
				break;
			}
			bytelen =
			    MIN(np->n_size - uio->uio_offset, uio->uio_resid);
			error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
			    advice, UBC_READ | UBC_PARTIALOK |
			    (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
			if (error) {
				/*
				 * XXXkludge
				 * the file has been truncated on the server.
				 * there isn't much we can do.
				 */
				if (uio->uio_offset >= np->n_size) {
					/* end of file */
					error = 0;
				} else {
					break;
				}
			}
		}
		break;

	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, l);
		if (!bp)
			return (EINTR);
		if ((bp->b_oflags & BO_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp);
			if (error) {
				brelse(bp, 0);
				return (error);
			}
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
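	    /*
	     * Directory reads go through a per-directory cookie cache
	     * (struct nfsdircache) that maps uio offsets to NFS
	     * directory cookies, so a readdir can be resumed at an
	     * arbitrary offset instead of rereading the directory from
	     * the start.  With NFSMNT_XLATECOOKIE the cache also hands
	     * out stable 32-bit stand-ins for 64-bit server cookies.
	     */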
	    case VDIR:
diragain:
		nfsstats.biocache_readdirs++;
		ndp = nfs_searchdircache(vp, uio->uio_offset,
			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
		if (!ndp) {
			/*
			 * We've been handed a cookie that is not
			 * in the cache. If we're not translating
			 * 32 <-> 64, it may be a value that was
			 * flushed out of the cache because it grew
			 * too big. Let the server judge if it's
			 * valid or not. In the translation case,
			 * we have no way of validating this value,
			 * so punt.
			 */
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
				return (EINVAL);
			ndp = nfs_enterdircache(vp, uio->uio_offset,
			    uio->uio_offset, 0, 0);
		}

		if (NFS_EOFVALID(np) &&
		    ndp->dc_cookie == np->n_direofoffset) {
			nfs_putdircache(np, ndp);
			nfsstats.direofcache_hits++;
			return (0);
		}

		bp = nfs_getcacheblk(vp, NFSDC_BLKNO(ndp), NFS_DIRBLKSIZ, l);
		if (!bp)
			return (EINTR);
		if ((bp->b_oflags & BO_DONE) == 0) {
			bp->b_flags |= B_READ;
			bp->b_dcookie = ndp->dc_blkcookie;
			error = nfs_doio(bp);
			if (error) {
				/*
				 * Yuck! The directory has been modified on the
				 * server. Punt and let the userland code
				 * deal with it.
				 */
				nfs_putdircache(np, ndp);
				brelse(bp, 0);
				/*
				 * nfs_request maps NFSERR_BAD_COOKIE to EINVAL.
				 */
				if (error == EINVAL) { /* NFSERR_BAD_COOKIE */
					nfs_invaldircache(vp, 0);
					nfs_vinvalbuf(vp, 0, cred, l, 1);
				}
				return (error);
			}
		}

		/*
		 * Just return if we hit EOF right away with this
		 * block. Always check here, because direofoffset
		 * may have been set by an nfsiod since the last
		 * check.
		 *
		 * also, empty block implies EOF.
		 */
		if (bp->b_bcount == bp->b_resid ||
		    (NFS_EOFVALID(np) &&
		    ndp->dc_blkcookie == np->n_direofoffset)) {
			KASSERT(bp->b_bcount != bp->b_resid ||
			    ndp->dc_blkcookie == bp->b_dcookie);
			nfs_putdircache(np, ndp);
			brelse(bp, BC_NOCACHE);
			return 0;
		}

		/*
		 * Find the entry we were looking for in the block.
		 */

		en = ndp->dc_entry;

		pdp = dp = (struct dirent *)bp->b_data;
		edp = (struct dirent *)(void *)((char *)bp->b_data +
		    bp->b_bcount - bp->b_resid);
		enn = 0;
		while (enn < en && dp < edp) {
			pdp = dp;
			dp = _DIRENT_NEXT(dp);
			enn++;
		}

		/*
		 * If the entry number was bigger than the number of
		 * entries in the block, or the cookie of the previous
		 * entry doesn't match, the directory cache is
		 * stale. Flush it and try again (i.e. go to
		 * the server).
		 */
		if (dp >= edp || (struct dirent *)_DIRENT_NEXT(dp) > edp ||
		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
			printf("invalid cache: %p %p %p off %jx %jx\n",
			    pdp, dp, edp,
			    (uintmax_t)uio->uio_offset,
			    (uintmax_t)NFS_GETCOOKIE(pdp));
#endif
			nfs_putdircache(np, ndp);
			brelse(bp, 0);
			nfs_invaldircache(vp, 0);
			nfs_vinvalbuf(vp, 0, cred, l, 0);
			goto diragain;
		}

		on = (char *)dp - (char *)bp->b_data;

		/*
		 * Cache all entries that may be exported to the
		 * user, as they may be thrown back at us. The
		 * NFSBIO_CACHECOOKIES flag indicates that all
		 * entries are being 'exported', so cache them all.
		 */

		if (en == 0 && pdp == dp) {
			dp = _DIRENT_NEXT(dp);
			enn++;
		}

		if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
			n = uio->uio_resid;
			enough = 1;
		} else
			n = bp->b_bcount - bp->b_resid - on;

		ep = (struct dirent *)(void *)((char *)bp->b_data + on + n);

		/*
		 * Find last complete entry to copy, caching entries
		 * (if requested) as we go.
		 */

		while (dp < ep && (struct dirent *)_DIRENT_NEXT(dp) <= ep) {
			if (cflag & NFSBIO_CACHECOOKIES) {
				nndp = nfs_enterdircache(vp,
				    NFS_GETCOOKIE(pdp), ndp->dc_blkcookie,
				    enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp,
					    nndp->dc_cookie32);
				}
				nfs_putdircache(np, nndp);
			}
			pdp = dp;
			dp = _DIRENT_NEXT(dp);
			enn++;
		}
		nfs_putdircache(np, ndp);

		/*
		 * If the last requested entry was not the last in the
		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
		 * cache the cookie of the last requested one, and
		 * set the offset to it.
		 */

		if ((on + n) < bp->b_bcount - bp->b_resid) {
			curoff = NFS_GETCOOKIE(pdp);
			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
			    enn, bp->b_lblkno);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
			nfs_putdircache(np, nndp);
		} else
			curoff = bp->b_dcookie;

		/*
		 * Always cache the entry for the next block,
		 * so that readaheads can use it.
		 */
		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie,
		    0, 0);
		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
			if (curoff == bp->b_dcookie) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		}
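
		/*
		 * Copy out only whole entries: n runs from the first
		 * requested entry to the end of the last complete
		 * dirent that fits in the caller's buffer.
		 */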
		n = (char *)_DIRENT_NEXT(pdp) - ((char *)bp->b_data + on);

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    !NFS_EOFVALID(np)) {
			rabp = nfs_getcacheblk(vp, NFSDC_BLKNO(nndp),
			    NFS_DIRBLKSIZ, l);
			if (rabp) {
				if ((rabp->b_oflags & (BO_DONE | BO_DELWRI)) == 0) {
					rabp->b_dcookie = nndp->dc_cookie;
					rabp->b_flags |= (B_READ | B_ASYNC);
					if (nfs_asyncio(rabp)) {
						brelse(rabp, BC_INVAL);
					}
				} else
					brelse(rabp, 0);
			}
		}
		nfs_putdircache(np, nndp);
		got_buf = 1;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove((char *)baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		uio->uio_offset = curoff;
		if (enough)
			n = 0;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
	    }
	    if (got_buf)
		brelse(bp, 0);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	kauth_cred_t cred = ap->a_cred;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	voff_t oldoff, origoff;
	vsize_t bytelen;
	int error = 0;
	int ioflag = ap->a_ioflag;
	int extended = 0, wrotedata = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (ioflag & IO_APPEND) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;

		/*
		 * This is already checked above VOP_WRITE, but recheck
		 * the append case here to make sure our idea of the
		 * file size is as fresh as possible.
		 */
		if (uio->uio_offset + uio->uio_resid >
		    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
			mutex_enter(proc_lock);
			psignal(l->l_proc, SIGXFSZ);
			mutex_exit(proc_lock);
			return (EFBIG);
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
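
	/*
	 * Like the read side, writes go through the UBC page cache:
	 * each pass of the loop below copies as much as ubc_uiomove()
	 * manages (UBC_PARTIALOK) into the vnode's pages, then pushes
	 * completed nm_wsize blocks to the server via VOP_PUTPAGES().
	 */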
	origoff = uio->uio_offset;
	do {
		bool overwrite; /* if we are overwriting whole pages */
		u_quad_t oldsize;
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

		nfsstats.biocache_writes++;

		oldsize = np->n_size;
		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
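		/*
		 * If the write is page aligned and covers only whole
		 * pages, UBC_FAULTBUSY can be used below: the pages
		 * are allocated busy without faulting their old
		 * contents in from the server, since every byte of
		 * them is about to be overwritten.
		 */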
		overwrite = false;
		if ((uio->uio_offset & PAGE_MASK) == 0) {
			if ((vp->v_vflag & VV_MAPPED) == 0 &&
			    bytelen > PAGE_SIZE) {
				bytelen = trunc_page(bytelen);
				overwrite = true;
			} else if ((bytelen & PAGE_MASK) == 0 &&
			    uio->uio_offset >= vp->v_size) {
				overwrite = true;
			}
		}
		if (vp->v_size < uio->uio_offset + bytelen) {
			uvm_vnp_setwritesize(vp, uio->uio_offset + bytelen);
		}
		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    UVM_ADV_RANDOM, UBC_WRITE | UBC_PARTIALOK |
		    (overwrite ? UBC_FAULTBUSY : 0) |
		    (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
		if (error) {
			uvm_vnp_setwritesize(vp, vp->v_size);
			if (overwrite && np->n_size != oldsize) {
				/*
				 * backout size and free pages past eof.
				 */
				np->n_size = oldsize;
				mutex_enter(&vp->v_interlock);
				(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
				    0, PGO_SYNCIO | PGO_FREE);
			}
			break;
		}
		wrotedata = 1;

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
			extended = 1;
		}
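
		/*
		 * Once the write has crossed an nm_wsize boundary,
		 * push the completed block(s) to the server, so that
		 * dirty pages are flushed in whole write-size chunks
		 * instead of piling up until the end of the write.
		 */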
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			mutex_enter(&vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
				~(nmp->nm_wsize - 1)), PGO_CLEANIT);
		}
	} while (uio->uio_resid > 0);
	if (wrotedata)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error == 0 && (ioflag & IO_SYNC) != 0) {
		mutex_enter(&vp->v_interlock);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct lwp *l)
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
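		/*
		 * If getblk() returns NULL, the PCATCH sleep was
		 * interrupted by a signal; poll with a two second
		 * timeout instead, giving up only if nfs_sigintr()
		 * says the pending signal should interrupt NFS.
		 */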
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, l))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred,
    struct lwp *l, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slptimeo;
	bool catch;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		catch = true;
		slptimeo = 2 * hz;
	} else {
		catch = false;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	mutex_enter(&vp->v_interlock);
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = mtsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo, &vp->v_interlock);
		if (error && intrflg && nfs_sigintr(nmp, NULL, l)) {
			mutex_exit(&vp->v_interlock);
			return EINTR;
		}
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	mutex_exit(&vp->v_interlock);
	error = vinvalbuf(vp, flags, cred, l, catch, 0);
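	/*
	 * vinvalbuf() can fail transiently, e.g. when an
	 * interruptible sleep is broken by a signal; retry with a
	 * timed, non-interruptible sleep, bailing out only if
	 * nfs_sigintr() says the signal should interrupt NFS.
	 */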
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, l)) {
			error = EINTR;
			break;
		}
		error = vinvalbuf(vp, flags, cred, l, 0, slptimeo);
	}
	mutex_enter(&vp->v_interlock);
	if (error == 0)
		np->n_flag &= ~NMODIFIED;
	np->n_flag &= ~NFLUSHINPROG;
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	mutex_exit(&vp->v_interlock);
	return error;
}

/*
 * nfs_flushstalebuf: flush cache if it's stale.
 *
 * => caller shouldn't own any pages or buffers which belong to the vnode.
 */
int
nfs_flushstalebuf(struct vnode *vp, kauth_cred_t cred, struct lwp *l,
    int flags)
{
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;
|
|
|
|
|
|
|
|
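/*
 * If there is locally modified data, flush it to the server
 * first, unless NFS_FLUSHSTALEBUF_MYWRITE says the dirty data
 * on this regular file is our own.  Otherwise compare mtimes
 * to detect changes made by other clients.
 */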
if (np->n_flag & NMODIFIED) {
|
|
|
|
if ((flags & NFS_FLUSHSTALEBUF_MYWRITE) == 0
|
|
|
|
|| vp->v_type != VREG) {
|
2005-12-11 15:16:03 +03:00
|
|
|
error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
|
2004-12-14 12:13:13 +03:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
if (vp->v_type == VDIR) {
|
|
|
|
nfs_invaldircache(vp, 0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* XXX assuming writes are ours.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
NFS_INVALIDATE_ATTRCACHE(np);
|
2007-11-26 22:01:26 +03:00
|
|
|
error = VOP_GETATTR(vp, &vattr, cred);
|
2004-12-14 12:13:13 +03:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
np->n_mtime = vattr.va_mtime;
|
|
|
|
} else {
|
2007-11-26 22:01:26 +03:00
|
|
|
error = VOP_GETATTR(vp, &vattr, cred);
|
2004-12-14 12:13:13 +03:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
|
|
|
|
if (vp->v_type == VDIR) {
|
|
|
|
nfs_invaldircache(vp, 0);
|
|
|
|
}
|
2005-12-11 15:16:03 +03:00
|
|
|
error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
|
2004-12-14 12:13:13 +03:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
np->n_mtime = vattr.va_mtime;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
1994-06-08 15:33:09 +04:00
|
|
|
/*
|
|
|
|
* Initiate asynchronous I/O. Return an error if no nfsiods are available.
|
|
|
|
* This is mainly to avoid queueing async I/O requests when the nfsiods
|
|
|
|
* are all hung on a dead server.
|
|
|
|
*/
|
2001-09-16 00:36:31 +04:00
|
|
|
|
1996-02-10 00:48:19 +03:00
|
|
|
int
|
2008-03-29 16:48:00 +03:00
|
|
|
nfs_asyncio(struct buf *bp)
|
1994-06-08 15:33:09 +04:00
|
|
|
{
|
2007-08-10 19:12:56 +04:00
|
|
|
struct nfs_iod *iod;
|
2000-03-30 16:51:13 +04:00
|
|
|
struct nfsmount *nmp;
|
2007-08-10 19:12:56 +04:00
|
|
|
int slptimeo = 0, error;
|
2007-04-29 14:30:18 +04:00
|
|
|
bool catch = false;
|
1994-06-08 15:33:09 +04:00
|
|
|
|
|
|
|
if (nfs_numasync == 0)
|
|
|
|
return (EIO);
|
1996-12-03 01:55:39 +03:00
|
|
|
|
|
|
|
nmp = VFSTONFS(bp->b_vp->v_mount);
|
|
|
|
again:
|
|
|
|
if (nmp->nm_flag & NFSMNT_INT)
|
2007-04-29 14:30:18 +04:00
|
|
|
catch = true;
|
2005-02-27 01:31:44 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
/*
|
|
|
|
* Find a free iod to process this request.
|
|
|
|
*/
|
|
|
|
|
2007-08-08 16:27:56 +04:00
|
|
|
mutex_enter(&nfs_iodlist_lock);
|
2007-08-10 19:12:56 +04:00
|
|
|
iod = LIST_FIRST(&nfs_iodlist_idle);
|
|
|
|
if (iod) {
|
|
|
|
/*
|
|
|
|
* Found one, so wake it up and tell it which
|
|
|
|
* mount to process.
|
|
|
|
*/
|
|
|
|
LIST_REMOVE(iod, nid_idle);
|
2007-04-29 14:30:18 +04:00
|
|
|
mutex_enter(&iod->nid_lock);
|
2007-08-10 19:12:56 +04:00
|
|
|
mutex_exit(&nfs_iodlist_lock);
|
|
|
|
KASSERT(iod->nid_mount == NULL);
|
|
|
|
iod->nid_mount = nmp;
|
|
|
|
cv_signal(&iod->nid_cv);
|
|
|
|
mutex_enter(&nmp->nm_lock);
|
2007-04-29 14:30:18 +04:00
|
|
|
mutex_exit(&iod->nid_lock);
|
2007-08-10 19:12:56 +04:00
|
|
|
nmp->nm_bufqiods++;
|
|
|
|
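/*
 * The new iod raised the queue limit, so wake up
 * anyone waiting in the queue-full loop below.
 */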
if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
|
|
|
|
cv_broadcast(&nmp->nm_aiocv);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
mutex_exit(&nfs_iodlist_lock);
|
2007-04-29 14:30:18 +04:00
|
|
|
mutex_enter(&nmp->nm_lock);
|
2003-05-07 20:18:53 +04:00
|
|
|
}
|
|
|
|
|
2007-04-29 14:30:18 +04:00
|
|
|
KASSERT(mutex_owned(&nmp->nm_lock));
|
1996-12-03 01:55:39 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we have an iod which can process the request, then queue
|
2005-02-27 01:31:44 +03:00
|
|
|
* the buffer. However, even if we have an iod, do not initiate
|
2003-11-17 03:28:32 +03:00
|
|
|
* queue cleaning if curproc is the pageout daemon. if the NFS mount
|
|
|
|
* is via local loopback, we may put curproc (pagedaemon) to sleep
|
|
|
|
* waiting for the writes to complete. But the server (ourself)
|
|
|
|
* may block the write, waiting for its (i.e., our) pagedaemon
|
|
|
|
* to produce clean pages to handle the write: deadlock.
|
|
|
|
* XXX: start non-loopback mounts straight away? If "lots free",
|
|
|
|
* let pagedaemon start loopback writes anyway?
|
1996-12-03 01:55:39 +03:00
|
|
|
*/
|
2007-08-10 19:12:56 +04:00
|
|
|
if (nmp->nm_bufqiods > 0) {
|
2005-02-27 01:31:44 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
/*
|
|
|
|
* Ensure that the queue never grows too large.
|
|
|
|
*/
|
2007-07-10 00:51:58 +04:00
|
|
|
if (curlwp == uvm.pagedaemon_lwp) {
|
2003-11-17 04:44:49 +03:00
|
|
|
/* Enqueue for later, to avoid free-page deadlock */
|
2007-08-08 16:27:56 +04:00
|
|
|
} else while (nmp->nm_bufqlen >= 2 * nmp->nm_bufqiods) {
|
2007-04-29 14:30:18 +04:00
|
|
|
if (catch) {
|
2007-12-04 20:42:30 +03:00
|
|
|
error = cv_timedwait_sig(&nmp->nm_aiocv,
|
2007-04-29 14:30:18 +04:00
|
|
|
&nmp->nm_lock, slptimeo);
|
|
|
|
} else {
|
|
|
|
error = cv_timedwait(&nmp->nm_aiocv,
|
|
|
|
&nmp->nm_lock, slptimeo);
|
|
|
|
}
|
1996-12-03 01:55:39 +03:00
|
|
|
if (error) {
|
2007-07-12 22:29:43 +04:00
|
|
|
if (nfs_sigintr(nmp, NULL, curlwp)) {
|
|
|
|
mutex_exit(&nmp->nm_lock);
|
1996-12-03 01:55:39 +03:00
|
|
|
return (EINTR);
|
2007-07-12 22:29:43 +04:00
|
|
|
}
|
2007-04-29 14:30:18 +04:00
|
|
|
if (catch) {
|
|
|
|
catch = false;
|
1996-12-03 01:55:39 +03:00
|
|
|
slptimeo = 2 * hz;
|
|
|
|
}
|
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
/*
|
|
|
|
* We might have lost our iod while sleeping,
|
2007-07-17 14:23:33 +04:00
|
|
|
* so check and loop if necessary.
|
1996-12-03 01:55:39 +03:00
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
|
2007-04-29 14:30:18 +04:00
|
|
|
if (nmp->nm_bufqiods == 0) {
|
|
|
|
mutex_exit(&nmp->nm_lock);
|
1996-12-03 01:55:39 +03:00
|
|
|
goto again;
|
2007-04-29 14:30:18 +04:00
|
|
|
}
|
1996-12-03 01:55:39 +03:00
|
|
|
}
|
|
|
|
TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
|
|
|
|
nmp->nm_bufqlen++;
|
2007-04-29 14:30:18 +04:00
|
|
|
mutex_exit(&nmp->nm_lock);
|
1994-06-08 15:33:09 +04:00
|
|
|
return (0);
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2007-04-29 14:30:18 +04:00
|
|
|
mutex_exit(&nmp->nm_lock);
|
1996-02-18 14:53:36 +03:00
|
|
|
|
|
|
|
/*
|
1996-12-03 01:55:39 +03:00
|
|
|
* All the iods are busy on other mounts, so return EIO to
|
|
|
|
* force the caller to process the i/o synchronously.
|
1996-02-18 14:53:36 +03:00
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
|
1996-12-03 01:55:39 +03:00
|
|
|
return (EIO);
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-04-12 18:26:58 +04:00
|
|
|
* nfs_doio for read.
|
1994-06-08 15:33:09 +04:00
|
|
|
*/
|
2003-04-12 18:26:58 +04:00
|
|
|
static int
|
2008-03-29 16:48:00 +03:00
|
|
|
nfs_doio_read(struct buf *bp, struct uio *uiop)
|
2003-04-12 18:26:58 +04:00
|
|
|
{
|
|
|
|
struct vnode *vp = bp->b_vp;
|
|
|
|
struct nfsnode *np = VTONFS(vp);
|
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
|
|
|
int error = 0;
|
1994-06-08 15:33:09 +04:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
uiop->uio_rw = UIO_READ;
|
|
|
|
switch (vp->v_type) {
|
|
|
|
case VREG:
|
1994-06-08 15:33:09 +04:00
|
|
|
nfsstats.read_bios++;
|
2000-11-27 11:39:39 +03:00
|
|
|
error = nfs_readrpc(vp, uiop);
|
|
|
|
if (!error && uiop->uio_resid) {
|
2003-04-12 18:26:58 +04:00
|
|
|
int diff, len;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1994-06-08 15:33:09 +04:00
|
|
|
/*
|
2004-07-18 11:43:00 +04:00
|
|
|
* If uio_resid > 0, there is a hole in the file and
|
1994-06-08 15:33:09 +04:00
|
|
|
* no writes after the hole have been pushed to
|
2004-07-18 11:43:00 +04:00
|
|
|
* the server yet or the file has been truncated
|
|
|
|
* on the server.
|
1994-06-08 15:33:09 +04:00
|
|
|
* Just zero fill the rest of the valid area.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2004-07-18 11:43:00 +04:00
|
|
|
KASSERT(vp->v_size >=
|
|
|
|
uiop->uio_offset + uiop->uio_resid);
|
1994-06-08 15:33:09 +04:00
|
|
|
diff = bp->b_bcount - uiop->uio_resid;
|
2004-07-18 11:43:00 +04:00
|
|
|
len = uiop->uio_resid;
|
|
|
|
memset((char *)bp->b_data + diff, 0, len);
|
2006-01-14 11:57:40 +03:00
|
|
|
uiop->uio_resid = 0;
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
2006-03-01 15:38:10 +03:00
|
|
|
#if 0
|
2007-12-04 20:42:30 +03:00
|
|
|
if (uiop->uio_lwp && (vp->v_iflag & VI_TEXT) &&
|
2006-12-27 15:10:09 +03:00
|
|
|
timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)) {
|
2008-04-24 19:35:27 +04:00
|
|
|
mutex_enter(proc_lock);
|
2005-12-11 15:16:03 +03:00
|
|
|
killproc(uiop->uio_lwp->l_proc, "process text file was modified");
|
2008-04-24 19:35:27 +04:00
|
|
|
mutex_exit(proc_lock);
|
2003-01-18 11:51:40 +03:00
|
|
|
#if 0 /* XXX NJWLWP */
|
2005-12-11 15:16:03 +03:00
|
|
|
uiop->uio_lwp->l_proc->p_holdcnt++;
|
2003-01-18 11:51:40 +03:00
|
|
|
#endif
|
1994-06-08 15:33:09 +04:00
|
|
|
}
|
2006-03-01 15:38:10 +03:00
|
|
|
#endif
|
1994-06-08 15:33:09 +04:00
|
|
|
break;
|
2003-04-12 18:26:58 +04:00
|
|
|
case VLNK:
|
|
|
|
KASSERT(uiop->uio_offset == (off_t)0);
|
1994-06-08 15:33:09 +04:00
|
|
|
nfsstats.readlink_bios++;
|
2005-07-21 14:39:46 +04:00
|
|
|
error = nfs_readlinkrpc(vp, uiop, np->n_rcred);
|
1994-06-08 15:33:09 +04:00
|
|
|
break;
|
2003-04-12 18:26:58 +04:00
|
|
|
case VDIR:
|
1994-06-08 15:33:09 +04:00
|
|
|
nfsstats.readdir_bios++;
|
1997-10-10 05:53:17 +04:00
|
|
|
uiop->uio_offset = bp->b_dcookie;
|
2004-05-23 09:53:01 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
1996-02-18 14:53:36 +03:00
|
|
|
if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
|
2005-12-11 15:16:03 +03:00
|
|
|
error = nfs_readdirplusrpc(vp, uiop,
|
2006-07-24 02:06:03 +04:00
|
|
|
curlwp->l_cred);
|
2006-06-30 13:55:34 +04:00
|
|
|
/*
|
|
|
|
* nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
|
|
|
|
*/
|
|
|
|
if (error == ENOTSUP)
|
1996-02-18 14:53:36 +03:00
|
|
|
nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
|
|
|
|
}
|
2004-05-23 09:53:01 +04:00
|
|
|
#else
|
|
|
|
nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
|
|
|
|
#endif
|
1996-02-18 14:53:36 +03:00
|
|
|
if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
|
2005-12-11 15:16:03 +03:00
|
|
|
error = nfs_readdirrpc(vp, uiop,
|
2006-07-24 02:06:03 +04:00
|
|
|
curlwp->l_cred);
|
1997-10-10 05:53:17 +04:00
|
|
|
if (!error) {
|
|
|
|
bp->b_dcookie = uiop->uio_offset;
|
|
|
|
}
|
1996-02-18 14:53:36 +03:00
|
|
|
break;
|
2003-04-12 18:26:58 +04:00
|
|
|
default:
|
|
|
|
printf("nfs_doio: type %x unexpected\n", vp->v_type);
|
1994-06-08 15:33:09 +04:00
|
|
|
break;
|
2003-04-12 18:26:58 +04:00
|
|
|
}
|
2008-01-02 14:48:20 +03:00
|
|
|
bp->b_error = error;
|
2003-04-12 18:26:58 +04:00
|
|
|
return error;
|
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
/*
|
|
|
|
* nfs_doio for write.
|
|
|
|
*/
|
|
|
|
static int
|
2008-03-29 16:48:00 +03:00
|
|
|
nfs_doio_write(struct buf *bp, struct uio *uiop)
|
2003-04-12 18:26:58 +04:00
|
|
|
{
|
|
|
|
struct vnode *vp = bp->b_vp;
|
|
|
|
struct nfsnode *np = VTONFS(vp);
|
2003-05-03 20:28:57 +04:00
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
2003-04-12 18:26:58 +04:00
|
|
|
int iomode;
|
2007-02-22 09:05:00 +03:00
|
|
|
bool stalewriteverf = false;
|
2003-04-12 18:26:58 +04:00
|
|
|
int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
2008-10-31 23:42:41 +03:00
|
|
|
struct vm_page **pgs, *spgs[UBC_MAX_PAGES];
|
2004-05-23 09:53:01 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
2007-02-22 09:05:00 +03:00
|
|
|
bool needcommit = true; /* need only COMMIT RPC */
|
2004-05-23 09:53:01 +04:00
|
|
|
#else
|
2007-02-22 09:05:00 +03:00
|
|
|
bool needcommit = false; /* need only COMMIT RPC */
|
2004-05-23 09:53:01 +04:00
|
|
|
#endif
|
2007-02-22 01:59:35 +03:00
|
|
|
bool pageprotected;
|
2003-04-12 18:26:58 +04:00
|
|
|
struct uvm_object *uobj = &vp->v_uobj;
|
|
|
|
int error;
|
|
|
|
off_t off, cnt;
|
2002-01-26 05:52:19 +03:00
|
|
|
|
2008-10-17 18:24:43 +04:00
|
|
|
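/*
 * Use the on-stack page array if it is large enough;
 * otherwise allocate one, failing rather than sleeping.
 */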
if (npages < __arraycount(spgs))
|
|
|
|
pgs = spgs;
|
|
|
|
else {
|
|
|
|
if ((pgs = kmem_alloc(sizeof(*pgs) * npages, KM_NOSLEEP)) ==
|
|
|
|
NULL)
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
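/*
 * Asynchronous NFSv3 writes may go out UNSTABLE and be
 * committed later; everything else must be FILESYNC.
 */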
if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
|
|
|
|
iomode = NFSV3WRITE_UNSTABLE;
|
|
|
|
} else {
|
|
|
|
iomode = NFSV3WRITE_FILESYNC;
|
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
|
2004-05-23 09:53:01 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
2003-05-03 20:28:57 +04:00
|
|
|
again:
|
2004-05-23 09:53:01 +04:00
|
|
|
#endif
|
2007-02-15 19:01:51 +03:00
|
|
|
rw_enter(&nmp->nm_writeverflock, RW_READER);
|
2003-05-03 20:28:57 +04:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
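/*
 * Examine the pages backing this buffer to decide whether
 * a COMMIT RPC alone suffices (every page has already been
 * written unstably) or the data must be (re)written.
 */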
for (i = 0; i < npages; i++) {
|
2003-04-15 17:48:40 +04:00
|
|
|
pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
|
2003-05-15 18:34:06 +04:00
|
|
|
if (pgs[i]->uobject == uobj &&
|
|
|
|
pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
|
2003-05-16 21:16:05 +04:00
|
|
|
KASSERT(pgs[i]->flags & PG_BUSY);
|
2003-05-15 18:34:06 +04:00
|
|
|
/*
|
|
|
|
* this page belongs to our object.
|
|
|
|
*/
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uobj->vmobjlock);
|
2004-01-10 17:52:53 +03:00
|
|
|
/*
|
|
|
|
* write out the page stably if it's about to
|
|
|
|
* be released because we can't resend it
|
|
|
|
* if the server crashes.
|
|
|
|
*
|
|
|
|
* XXX assuming PG_RELEASED|PG_PAGEOUT won't be
|
|
|
|
* changed until the page is unbusied.
|
|
|
|
*/
|
2003-05-15 18:34:06 +04:00
|
|
|
if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
|
|
|
|
iomode = NFSV3WRITE_FILESYNC;
|
2004-01-10 17:52:53 +03:00
|
|
|
/*
|
|
|
|
* if we encounter a page which hasn't been sent yet,
|
|
|
|
* we need to do a WRITE RPC.
|
|
|
|
*/
|
2003-05-15 18:34:06 +04:00
|
|
|
if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
|
2007-02-22 09:05:00 +03:00
|
|
|
needcommit = false;
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uobj->vmobjlock);
|
2003-05-15 18:34:06 +04:00
|
|
|
} else {
|
2003-04-12 18:26:58 +04:00
|
|
|
iomode = NFSV3WRITE_FILESYNC;
|
2007-02-22 09:05:00 +03:00
|
|
|
needcommit = false;
|
2003-04-12 18:26:58 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
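/*
 * For an unstable write, write-protect the pages so that
 * their contents cannot change while the data may still
 * have to be resent to the server.
 */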
if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uobj->vmobjlock);
|
2003-04-12 18:26:58 +04:00
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
|
|
|
|
pmap_page_protect(pgs[i], VM_PROT_READ);
|
|
|
|
}
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uobj->vmobjlock);
|
2007-02-22 09:05:00 +03:00
|
|
|
pageprotected = true; /* pages can't be modified during i/o. */
|
2003-05-21 17:27:19 +04:00
|
|
|
} else
|
2007-02-22 09:05:00 +03:00
|
|
|
pageprotected = false;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2003-04-12 18:26:58 +04:00
|
|
|
/*
|
|
|
|
* Send the data to the server if necessary,
|
|
|
|
* otherwise just send a commit rpc.
|
|
|
|
*/
|
2004-05-23 09:53:01 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
2003-04-12 18:26:58 +04:00
|
|
|
if (needcommit) {
|
2002-01-26 05:52:19 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the buffer is in the range that we already committed,
|
|
|
|
* there's nothing to do.
|
|
|
|
*
|
|
|
|
* If it's in the range that we need to commit, push the
|
|
|
|
* whole range at once, otherwise only push the buffer.
|
|
|
|
* In both these cases, acquire the commit lock to avoid
|
|
|
|
* other processes modifying the range.
|
|
|
|
*/
|
|
|
|
|
2003-04-09 18:27:58 +04:00
|
|
|
off = uiop->uio_offset;
|
|
|
|
cnt = bp->b_bcount;
|
2007-02-15 19:01:51 +03:00
|
|
|
mutex_enter(&np->n_commitlock);
|
2002-01-26 05:52:19 +03:00
|
|
|
if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
|
2007-02-22 01:59:35 +03:00
|
|
|
bool pushedrange;
|
2002-01-26 05:52:19 +03:00
|
|
|
if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
|
2007-02-22 09:05:00 +03:00
|
|
|
pushedrange = true;
|
2002-01-26 05:52:19 +03:00
|
|
|
off = np->n_pushlo;
|
|
|
|
cnt = np->n_pushhi - np->n_pushlo;
|
|
|
|
} else {
|
2007-02-22 09:05:00 +03:00
|
|
|
pushedrange = false;
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2005-12-11 15:16:03 +03:00
|
|
|
error = nfs_commit(vp, off, cnt, curlwp);
|
2002-01-26 05:52:19 +03:00
|
|
|
if (error == 0) {
|
|
|
|
if (pushedrange) {
|
|
|
|
nfs_merge_commit_ranges(vp);
|
|
|
|
} else {
|
|
|
|
nfs_add_committed_range(vp, off, cnt);
|
|
|
|
}
|
|
|
|
}
|
2003-04-18 19:19:02 +04:00
|
|
|
} else {
|
|
|
|
error = 0;
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2007-02-15 19:01:51 +03:00
|
|
|
mutex_exit(&np->n_commitlock);
|
|
|
|
rw_exit(&nmp->nm_writeverflock);
|
2002-01-26 05:52:19 +03:00
|
|
|
if (!error) {
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* pages are now on stable storage.
|
|
|
|
*/
|
2003-04-12 18:41:28 +04:00
|
|
|
uiop->uio_resid = 0;
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uobj->vmobjlock);
|
2002-01-26 05:52:19 +03:00
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
|
|
|
|
}
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uobj->vmobjlock);
|
2003-04-12 18:26:58 +04:00
|
|
|
return 0;
|
2002-01-26 05:52:19 +03:00
|
|
|
} else if (error == NFSERR_STALEWRITEVERF) {
|
2003-05-03 20:28:57 +04:00
|
|
|
nfs_clearcommit(vp->v_mount);
|
|
|
|
goto again;
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2003-05-03 20:28:57 +04:00
|
|
|
if (error) {
|
|
|
|
bp->b_error = np->n_error = error;
|
|
|
|
np->n_flag |= NWRITEERR;
|
|
|
|
}
|
2008-10-16 23:33:48 +04:00
|
|
|
goto out;
|
2003-04-12 18:26:58 +04:00
|
|
|
}
|
2004-05-23 09:53:01 +04:00
|
|
|
#endif
|
2003-04-12 18:26:58 +04:00
|
|
|
off = uiop->uio_offset;
|
|
|
|
cnt = bp->b_bcount;
|
|
|
|
uiop->uio_rw = UIO_WRITE;
|
|
|
|
nfsstats.write_bios++;
|
2003-05-21 17:27:19 +04:00
|
|
|
error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
|
2004-05-23 09:53:01 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
2003-04-12 18:26:58 +04:00
|
|
|
if (!error && iomode == NFSV3WRITE_UNSTABLE) {
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* we need to commit pages later.
|
|
|
|
*/
|
2007-02-15 19:01:51 +03:00
|
|
|
mutex_enter(&np->n_commitlock);
|
2002-01-26 05:52:19 +03:00
|
|
|
nfs_add_tobecommitted_range(vp, off, cnt);
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* if there are too many uncommitted pages, commit them now.
|
|
|
|
*/
|
2002-01-26 05:52:19 +03:00
|
|
|
if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
|
|
|
|
off = np->n_pushlo;
|
|
|
|
cnt = nfs_commitsize >> 1;
|
2005-12-11 15:16:03 +03:00
|
|
|
error = nfs_commit(vp, off, cnt, curlwp);
|
2002-01-26 05:52:19 +03:00
|
|
|
if (!error) {
|
|
|
|
nfs_add_committed_range(vp, off, cnt);
|
|
|
|
nfs_del_tobecommitted_range(vp, off, cnt);
|
|
|
|
}
|
2003-05-03 20:46:39 +04:00
|
|
|
if (error == NFSERR_STALEWRITEVERF) {
|
2007-02-22 09:05:00 +03:00
|
|
|
stalewriteverf = true;
|
2003-05-03 20:46:39 +04:00
|
|
|
error = 0; /* it isn't a real error */
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* re-dirty pages so that they will be passed
|
|
|
|
* to us again later.
|
|
|
|
*/
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uobj->vmobjlock);
|
2003-05-03 20:46:39 +04:00
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pgs[i]->flags &= ~PG_CLEAN;
|
|
|
|
}
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uobj->vmobjlock);
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2007-02-15 19:01:51 +03:00
|
|
|
mutex_exit(&np->n_commitlock);
|
2004-05-23 09:53:01 +04:00
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
if (!error) {
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* pages are now on stable storage.
|
|
|
|
*/
|
2007-02-15 19:01:51 +03:00
|
|
|
mutex_enter(&np->n_commitlock);
|
2002-01-26 05:52:19 +03:00
|
|
|
nfs_del_committed_range(vp, off, cnt);
|
2007-02-15 19:01:51 +03:00
|
|
|
mutex_exit(&np->n_commitlock);
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uobj->vmobjlock);
|
2002-01-26 05:52:19 +03:00
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
|
|
|
|
}
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uobj->vmobjlock);
|
2003-04-12 18:26:58 +04:00
|
|
|
} else {
|
2003-05-03 20:46:39 +04:00
|
|
|
/*
|
|
|
|
* we got an error.
|
|
|
|
*/
|
|
|
|
bp->b_error = np->n_error = error;
|
|
|
|
np->n_flag |= NWRITEERR;
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
2003-05-03 20:28:57 +04:00
|
|
|
|
2007-02-15 19:01:51 +03:00
|
|
|
rw_exit(&nmp->nm_writeverflock);
|
2003-05-03 20:28:57 +04:00
|
|
|
|
2008-10-17 10:40:21 +04:00
|
|
|
|
2003-05-03 20:28:57 +04:00
|
|
|
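/*
 * A changed write verifier means the server rebooted and may
 * have lost unstable writes, so all cached commit state for
 * this mount is now invalid.
 */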
if (stalewriteverf) {
|
2000-11-27 11:39:39 +03:00
|
|
|
nfs_clearcommit(vp->v_mount);
|
2002-01-26 05:52:19 +03:00
|
|
|
}
|
2008-10-17 10:40:21 +04:00
|
|
|
#ifndef NFS_V2_ONLY
|
2008-10-16 23:33:48 +04:00
|
|
|
out:
|
2008-10-17 10:40:21 +04:00
|
|
|
#endif
|
2008-10-17 18:24:43 +04:00
|
|
|
if (pgs != spgs)
|
|
|
|
kmem_free(pgs, sizeof(*pgs) * npages);
|
2003-04-12 18:26:58 +04:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nfs_doio for B_PHYS.
|
|
|
|
*/
|
|
|
|
static int
|
2008-03-29 16:48:00 +03:00
|
|
|
nfs_doio_phys(struct buf *bp, struct uio *uiop)
|
2003-04-12 18:26:58 +04:00
|
|
|
{
|
|
|
|
struct vnode *vp = bp->b_vp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
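/* b_blkno is in DEV_BSIZE units; convert it to a byte offset. */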
uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
|
|
|
|
if (bp->b_flags & B_READ) {
|
|
|
|
uiop->uio_rw = UIO_READ;
|
|
|
|
nfsstats.read_physios++;
|
|
|
|
error = nfs_readrpc(vp, uiop);
|
|
|
|
} else {
|
|
|
|
int iomode = NFSV3WRITE_DATASYNC;
|
2007-02-22 01:59:35 +03:00
|
|
|
bool stalewriteverf;
|
2003-05-03 20:28:57 +04:00
|
|
|
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
|
2003-04-12 18:26:58 +04:00
|
|
|
|
|
|
|
uiop->uio_rw = UIO_WRITE;
|
|
|
|
nfsstats.write_physios++;
|
2007-02-15 19:01:51 +03:00
|
|
|
rw_enter(&nmp->nm_writeverflock, RW_READER);
|
2007-02-22 09:05:00 +03:00
|
|
|
error = nfs_writerpc(vp, uiop, &iomode, false, &stalewriteverf);
|
2007-02-15 19:01:51 +03:00
|
|
|
rw_exit(&nmp->nm_writeverflock);
|
2003-04-12 18:26:58 +04:00
|
|
|
if (stalewriteverf) {
|
|
|
|
nfs_clearcommit(bp->b_vp->v_mount);
|
|
|
|
}
|
|
|
|
}
|
2008-01-02 14:48:20 +03:00
|
|
|
bp->b_error = error;
|
2003-04-12 18:26:58 +04:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do an I/O operation to/from a cache block. This may be called
|
|
|
|
* synchronously or from an nfsiod.
|
|
|
|
*/
|
|
|
|
int
|
2008-03-29 16:48:00 +03:00
|
|
|
nfs_doio(struct buf *bp)
|
2003-04-12 18:26:58 +04:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct uio uio;
|
|
|
|
struct uio *uiop = &uio;
|
|
|
|
struct iovec io;
|
|
|
|
UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);
|
|
|
|
|
|
|
|
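/* Describe the buffer's data with a single-element uio. */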
uiop->uio_iov = &io;
|
|
|
|
uiop->uio_iovcnt = 1;
|
|
|
|
uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
|
2006-03-01 15:38:10 +03:00
|
|
|
UIO_SETUP_SYSSPACE(uiop);
|
2003-04-12 18:26:58 +04:00
|
|
|
io.iov_base = bp->b_data;
|
|
|
|
io.iov_len = uiop->uio_resid = bp->b_bcount;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Historically, paging was done with physio, but no more...
|
|
|
|
*/
|
|
|
|
if (bp->b_flags & B_PHYS) {
|
|
|
|
/*
|
|
|
|
* ...though reading /dev/drum still gets us here.
|
|
|
|
*/
|
|
|
|
error = nfs_doio_phys(bp, uiop);
|
|
|
|
} else if (bp->b_flags & B_READ) {
|
|
|
|
error = nfs_doio_read(bp, uiop);
|
|
|
|
} else {
|
|
|
|
error = nfs_doio_write(bp, uiop);
|
|
|
|
}
|
|
|
|
bp->b_resid = uiop->uio_resid;
|
2000-11-27 11:39:39 +03:00
|
|
|
biodone(bp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode op for VM getpages.
|
|
|
|
*/
|
2001-09-16 00:36:31 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
int
|
2008-03-29 16:48:00 +03:00
|
|
|
nfs_getpages(void *v)
|
2000-11-27 11:39:39 +03:00
|
|
|
{
|
|
|
|
struct vop_getpages_args /* {
|
|
|
|
struct vnode *a_vp;
|
|
|
|
voff_t a_offset;
|
2001-05-27 01:27:10 +04:00
|
|
|
struct vm_page **a_m;
|
2000-11-27 11:39:39 +03:00
|
|
|
int *a_count;
|
|
|
|
int a_centeridx;
|
|
|
|
vm_prot_t a_access_type;
|
|
|
|
int a_advice;
|
|
|
|
int a_flags;
|
|
|
|
} */ *ap = v;
|
|
|
|
|
|
|
|
struct vnode *vp = ap->a_vp;
|
2001-09-16 00:36:31 +04:00
|
|
|
struct uvm_object *uobj = &vp->v_uobj;
|
2000-11-27 11:39:39 +03:00
|
|
|
struct nfsnode *np = VTONFS(vp);
|
2002-05-06 04:07:51 +04:00
|
|
|
const int npages = *ap->a_count;
|
2008-10-31 23:42:41 +03:00
|
|
|
struct vm_page *pg, **pgs, **opgs, *spgs[UBC_MAX_PAGES];
|
2002-01-26 05:52:19 +03:00
|
|
|
off_t origoffset, len;
|
2002-05-06 04:07:51 +04:00
|
|
|
int i, error;
|
2007-02-22 01:59:35 +03:00
|
|
|
bool v3 = NFS_ISV3(vp);
|
|
|
|
bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
|
|
|
|
bool locked = (ap->a_flags & PGO_LOCKED) != 0;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2008-10-17 18:24:43 +04:00
|
|
|
/*
|
|
|
|
* If we are not locked, we are not really using opgs,
|
|
|
|
* so just initialize it.
|
|
|
|
*/
|
|
|
|
if (!locked || npages < __arraycount(spgs))
|
|
|
|
opgs = spgs;
|
|
|
|
else {
|
|
|
|
if ((opgs = kmem_alloc(npages * sizeof(*opgs), KM_NOSLEEP)) ==
|
|
|
|
NULL)
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
|
2002-05-06 04:07:51 +04:00
|
|
|
* call the genfs code to get the pages. `pgs' may be NULL
|
|
|
|
* when doing read-ahead.
|
2000-11-27 11:39:39 +03:00
|
|
|
*/
|
2002-05-06 04:07:51 +04:00
|
|
|
pgs = ap->a_m;
|
2002-05-06 07:20:54 +04:00
|
|
|
if (write && locked && v3) {
|
2002-05-06 04:07:51 +04:00
|
|
|
KASSERT(pgs != NULL);
|
|
|
|
#ifdef DEBUG
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If PGO_LOCKED is set, real pages shouldn't exist
|
|
|
|
* in the array.
|
|
|
|
*/
|
|
|
|
|
|
|
|
for (i = 0; i < npages; i++)
|
|
|
|
KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
|
|
|
|
#endif
|
|
|
|
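/*
 * Save the original page array so that it can be restored
 * if we have to back out below.
 */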
memcpy(opgs, pgs, npages * sizeof(*opgs));
|
|
|
|
}
|
2001-09-16 00:36:31 +04:00
|
|
|
error = genfs_getpages(v);
|
2008-10-17 02:04:22 +04:00
|
|
|
if (error)
|
|
|
|
goto out;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
|
|
|
/*
|
2002-03-17 02:05:25 +03:00
|
|
|
* for read faults where the nfs node is not yet marked NMODIFIED,
|
|
|
|
* set PG_RDONLY on the pages so that we come back here if someone
|
|
|
|
* tries to modify later via the mapping that will be entered for
|
|
|
|
* this fault.
|
2000-11-27 11:39:39 +03:00
|
|
|
*/
|
|
|
|
|
2002-03-17 02:05:25 +03:00
|
|
|
if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
|
|
|
|
if (!locked) {
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uobj->vmobjlock);
|
2002-03-17 02:05:25 +03:00
|
|
|
}
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
pg = pgs[i];
|
|
|
|
if (pg == NULL || pg == PGO_DONTCARE) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
pg->flags |= PG_RDONLY;
|
|
|
|
}
|
|
|
|
if (!locked) {
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uobj->vmobjlock);
|
2002-03-17 02:05:25 +03:00
|
|
|
}
|
|
|
|
}
|
2008-10-17 02:04:22 +04:00
|
|
|
if (!write)
|
|
|
|
goto out;
|
2002-03-17 02:05:25 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* this is a write fault, update the commit info.
|
|
|
|
*/
|
|
|
|
|
2002-01-26 05:52:19 +03:00
|
|
|
origoffset = ap->a_offset;
|
|
|
|
len = npages << PAGE_SHIFT;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2002-03-17 02:05:25 +03:00
|
|
|
if (v3) {
|
2007-02-15 19:01:51 +03:00
|
|
|
if (!locked) {
|
|
|
|
mutex_enter(&np->n_commitlock);
|
|
|
|
} else {
|
2007-02-27 13:03:56 +03:00
|
|
|
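/*
 * With PGO_LOCKED set we may not sleep, so only try
 * the lock and back out if we cannot get it.
 */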
if (!mutex_tryenter(&np->n_commitlock)) {
|
2002-05-06 04:07:51 +04:00
|
|
|
|
2007-02-15 19:01:51 +03:00
|
|
|
/*
|
|
|
|
* Since PGO_LOCKED is set, we need to unbusy
|
|
|
|
* all pages fetched by genfs_getpages() above,
|
|
|
|
* tell the caller that there are no pages
|
|
|
|
* available and put back the original pgs array.
|
|
|
|
*/
|
2002-05-06 04:07:51 +04:00
|
|
|
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uvm_pageqlock);
|
2007-02-15 19:01:51 +03:00
|
|
|
uvm_page_unbusy(pgs, npages);
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uvm_pageqlock);
|
2007-02-15 19:01:51 +03:00
|
|
|
*ap->a_count = 0;
|
|
|
|
memcpy(pgs, opgs,
|
|
|
|
npages * sizeof(*opgs));
|
2008-10-17 02:04:22 +04:00
|
|
|
error = EBUSY;
|
|
|
|
goto out;
|
2007-02-15 19:01:51 +03:00
|
|
|
}
|
2002-05-06 04:07:51 +04:00
|
|
|
}
|
2002-03-17 02:05:25 +03:00
|
|
|
nfs_del_committed_range(vp, origoffset, len);
|
|
|
|
nfs_del_tobecommitted_range(vp, origoffset, len);
|
|
|
|
}
|
2002-05-06 04:07:51 +04:00
|
|
|
np->n_flag |= NMODIFIED;
|
2001-12-31 10:16:47 +03:00
|
|
|
if (!locked) {
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uobj->vmobjlock);
|
2001-12-31 10:16:47 +03:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
for (i = 0; i < npages; i++) {
|
2001-09-16 00:36:31 +04:00
|
|
|
pg = pgs[i];
|
|
|
|
if (pg == NULL || pg == PGO_DONTCARE) {
|
2000-11-27 11:39:39 +03:00
|
|
|
continue;
|
|
|
|
}
|
2002-01-26 05:52:19 +03:00
|
|
|
pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
2001-12-31 10:16:47 +03:00
|
|
|
if (!locked) {
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uobj->vmobjlock);
|
2001-12-31 10:16:47 +03:00
|
|
|
}
|
2002-03-17 02:05:25 +03:00
|
|
|
if (v3) {
|
2007-02-15 19:01:51 +03:00
|
|
|
mutex_exit(&np->n_commitlock);
|
2002-03-17 02:05:25 +03:00
|
|
|
}
|
2008-10-17 02:04:22 +04:00
|
|
|
out:
|
2008-10-17 18:24:43 +04:00
|
|
|
if (opgs != spgs)
|
|
|
|
kmem_free(opgs, sizeof(*opgs) * npages);
|
2008-10-17 02:04:22 +04:00
|
|
|
return error;
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|