/* $NetBSD: nfs_bio.c,v 1.185 2010/06/12 21:10:55 jakllsch Exp $ */
/*
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Rick Macklem at The University of Guelph.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.185 2010/06/12 21:10:55 jakllsch Exp $");
#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_ddb.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>
extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;
static int nfs_doio_read(struct buf *, struct uio *);
static int nfs_doio_write(struct buf *, struct uio *);
static int nfs_doio_phys(struct buf *, struct uio *);
/*
* Vnode op for read using bio
* Any similarity to readip() is purely coincidental
*/
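/*
 * Informal sketch of the three paths below (not authoritative):
 * VREG reads are served straight from the page cache via
 * ubc_uiomove(); VLNK reads fill a single NFS_MAXPATHLEN cache
 * block; VDIR reads walk the directory cookie cache
 * (nfs_searchdircache()/nfs_enterdircache()) so that server cookies
 * can be replayed, and optionally translated 32 <-> 64 bit.
 */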
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag,
kauth_cred_t cred, int cflag)
{
struct nfsnode *np = VTONFS(vp);
struct buf *bp = NULL, *rabp;
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
struct nfsdircache *ndp = NULL, *nndp = NULL;
void *baddr;
int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
int enough = 0;
struct dirent *dp, *pdp, *edp, *ep;
off_t curoff = 0;
int advice;
struct lwp *l = curlwp;
#ifdef DIAGNOSTIC
if (uio->uio_rw != UIO_READ)
panic("nfs_read mode");
#endif
if (uio->uio_resid == 0)
return (0);
if (vp->v_type != VDIR && uio->uio_offset < 0)
return (EINVAL);
#ifndef NFS_V2_ONLY
if ((nmp->nm_flag & NFSMNT_NFSV3) &&
!(nmp->nm_iflag & NFSMNT_GOTFSINFO))
(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
if (vp->v_type != VDIR &&
(uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
return (EFBIG);
/*
* For nfs, cache consistency can only be maintained approximately.
* Although RFC1094 does not specify the criteria, the following is
* believed to be compatible with the reference port.
*
* If the file's modify time on the server has changed since the
* last read rpc or you have written to the file,
* you may have lost data cache consistency with the
* server, so flush all of the file's data out of the cache.
* Then force a getattr rpc to ensure that you have up to date
* attributes.
* NB: This implies that cache data can be read when up to
* nfs_attrtimeo seconds out of date. If you find that you need current
* attributes, this could be forced by setting n_attrstamp to 0 before
* the VOP_GETATTR() call.
*/
if (vp->v_type != VLNK) {
error = nfs_flushstalebuf(vp, cred, l,
NFS_FLUSHSTALEBUF_MYWRITE);
if (error)
return error;
}
do {
/*
* Don't cache symlinks.
*/
if ((vp->v_vflag & VV_ROOT) && vp->v_type == VLNK) {
return (nfs_readlinkrpc(vp, uio, cred));
}
baddr = (void *)0;
switch (vp->v_type) {
case VREG:
nfsstats.biocache_reads++;
advice = IO_ADV_DECODE(ioflag);
error = 0;
while (uio->uio_resid > 0) {
vsize_t bytelen;
nfs_delayedtruncate(vp);
if (np->n_size <= uio->uio_offset) {
break;
}
bytelen =
MIN(np->n_size - uio->uio_offset, uio->uio_resid);
error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
advice, UBC_READ | UBC_PARTIALOK |
(UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
if (error) {
/*
* XXXkludge
* the file has been truncated on the server.
* there isn't much we can do.
*/
if (uio->uio_offset >= np->n_size) {
/* end of file */
error = 0;
} else {
break;
}
}
}
break;
case VLNK:
nfsstats.biocache_readlinks++;
bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, l);
if (!bp)
return (EINTR);
if ((bp->b_oflags & BO_DONE) == 0) {
bp->b_flags |= B_READ;
error = nfs_doio(bp);
if (error) {
brelse(bp, 0);
return (error);
}
}
n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
got_buf = 1;
on = 0;
break;
case VDIR:
diragain:
nfsstats.biocache_readdirs++;
ndp = nfs_searchdircache(vp, uio->uio_offset,
(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
if (!ndp) {
/*
* We've been handed a cookie that is not
* in the cache. If we're not translating
* 32 <-> 64, it may be a value that was
* flushed out of the cache because it grew
* too big. Let the server judge if it's
* valid or not. In the translation case,
* we have no way of validating this value,
* so punt.
*/
if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
return (EINVAL);
ndp = nfs_enterdircache(vp, uio->uio_offset,
uio->uio_offset, 0, 0);
}
if (NFS_EOFVALID(np) &&
ndp->dc_cookie == np->n_direofoffset) {
nfs_putdircache(np, ndp);
nfsstats.direofcache_hits++;
return (0);
}
bp = nfs_getcacheblk(vp, NFSDC_BLKNO(ndp), NFS_DIRBLKSIZ, l);
if (!bp)
return (EINTR);
if ((bp->b_oflags & BO_DONE) == 0) {
bp->b_flags |= B_READ;
bp->b_dcookie = ndp->dc_blkcookie;
error = nfs_doio(bp);
if (error) {
/*
* Yuck! The directory has been modified on the
* server. Punt and let the userland code
* deal with it.
*/
nfs_putdircache(np, ndp);
brelse(bp, 0);
/*
* nfs_request maps NFSERR_BAD_COOKIE to EINVAL.
*/
if (error == EINVAL) { /* NFSERR_BAD_COOKIE */
nfs_invaldircache(vp, 0);
nfs_vinvalbuf(vp, 0, cred, l, 1);
}
return (error);
}
}
/*
* Just return if we hit EOF right away with this
* block. Always check here, because direofoffset
* may have been set by an nfsiod since the last
* check.
*
* Also, an empty block implies EOF.
*/
if (bp->b_bcount == bp->b_resid ||
(NFS_EOFVALID(np) &&
ndp->dc_blkcookie == np->n_direofoffset)) {
KASSERT(bp->b_bcount != bp->b_resid ||
ndp->dc_blkcookie == bp->b_dcookie);
nfs_putdircache(np, ndp);
brelse(bp, BC_NOCACHE);
return 0;
}
/*
* Find the entry we were looking for in the block.
*/
en = ndp->dc_entry;
pdp = dp = (struct dirent *)bp->b_data;
edp = (struct dirent *)(void *)((char *)bp->b_data + bp->b_bcount -
bp->b_resid);
enn = 0;
while (enn < en && dp < edp) {
pdp = dp;
dp = _DIRENT_NEXT(dp);
enn++;
}
/*
* If the entry number was bigger than the number of
* entries in the block, or the cookie of the previous
* entry doesn't match, the directory cache is
* stale. Flush it and try again (i.e. go to
* the server).
*/
if (dp >= edp || (struct dirent *)_DIRENT_NEXT(dp) > edp ||
(en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
printf("invalid cache: %p %p %p off %jx %jx\n",
pdp, dp, edp,
(uintmax_t)uio->uio_offset,
(uintmax_t)NFS_GETCOOKIE(pdp));
#endif
nfs_putdircache(np, ndp);
brelse(bp, 0);
nfs_invaldircache(vp, 0);
nfs_vinvalbuf(vp, 0, cred, l, 0);
goto diragain;
}
on = (char *)dp - (char *)bp->b_data;
/*
* Cache all entries that may be exported to the
* user, as they may be thrown back at us. The
* NFSBIO_CACHECOOKIES flag indicates that all
* entries are being 'exported', so cache them all.
*/
if (en == 0 && pdp == dp) {
dp = _DIRENT_NEXT(dp);
enn++;
}
if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
n = uio->uio_resid;
enough = 1;
} else
n = bp->b_bcount - bp->b_resid - on;
ep = (struct dirent *)(void *)((char *)bp->b_data + on + n);
/*
* Find last complete entry to copy, caching entries
* (if requested) as we go.
*/
while (dp < ep && (struct dirent *)_DIRENT_NEXT(dp) <= ep) {
if (cflag & NFSBIO_CACHECOOKIES) {
nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
ndp->dc_blkcookie, enn, bp->b_lblkno);
if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
NFS_STASHCOOKIE32(pdp,
nndp->dc_cookie32);
}
nfs_putdircache(np, nndp);
}
pdp = dp;
dp = _DIRENT_NEXT(dp);
enn++;
}
nfs_putdircache(np, ndp);
/*
* If the last requested entry was not the last in the
* buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
* cache the cookie of the last requested one, and
* set the offset to it.
*/
if ((on + n) < bp->b_bcount - bp->b_resid) {
curoff = NFS_GETCOOKIE(pdp);
nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
enn, bp->b_lblkno);
if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
curoff = nndp->dc_cookie32;
}
nfs_putdircache(np, nndp);
} else
curoff = bp->b_dcookie;
/*
* Always cache the entry for the next block,
* so that readaheads can use it.
*/
nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
if (curoff == bp->b_dcookie) {
NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
curoff = nndp->dc_cookie32;
}
}
n = (char *)_DIRENT_NEXT(pdp) - ((char *)bp->b_data + on);
/*
* If not eof and read aheads are enabled, start one.
* (You need the current block first, so that you have the
* directory offset cookie of the next block.)
*/
if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
!NFS_EOFVALID(np)) {
rabp = nfs_getcacheblk(vp, NFSDC_BLKNO(nndp),
NFS_DIRBLKSIZ, l);
if (rabp) {
if ((rabp->b_oflags & (BO_DONE | BO_DELWRI)) == 0) {
rabp->b_dcookie = nndp->dc_cookie;
rabp->b_flags |= (B_READ | B_ASYNC);
if (nfs_asyncio(rabp)) {
brelse(rabp, BC_INVAL);
}
} else
brelse(rabp, 0);
}
}
nfs_putdircache(np, nndp);
got_buf = 1;
break;
default:
printf(" nfsbioread: type %x unexpected\n", vp->v_type);
break;
}
if (n > 0) {
if (!baddr)
baddr = bp->b_data;
error = uiomove((char *)baddr + on, (int)n, uio);
}
switch (vp->v_type) {
case VREG:
break;
case VLNK:
n = 0;
break;
case VDIR:
uio->uio_offset = curoff;
if (enough)
n = 0;
break;
default:
printf(" nfsbioread: type %x unexpected\n", vp->v_type);
}
if (got_buf)
brelse(bp, 0);
} while (error == 0 && uio->uio_resid > 0 && n > 0);
return (error);
}
/*
* Vnode op for write using bio
*/
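/*
 * Sketch of the strategy below (informal): each loop iteration copies
 * user data into the page cache with ubc_uiomove(), growing
 * np->n_size and the UVM write size first.  When the offset is page
 * aligned, multi-page writes to an unmapped vnode (and whole-page
 * writes at or past EOF) take the UBC_FAULTBUSY overwrite path so
 * new pages need not be zero-filled first.  Whenever the offset
 * crosses an nm_wsize boundary the finished window is pushed out
 * with VOP_PUTPAGES(PGO_CLEANIT), and IO_SYNC writes flush the whole
 * [origoff, uio_offset) range synchronously before returning.
 */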
int
nfs_write(void *v)
{
struct vop_write_args /* {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
kauth_cred_t a_cred;
} */ *ap = v;
struct uio *uio = ap->a_uio;
struct lwp *l = curlwp;
struct vnode *vp = ap->a_vp;
struct nfsnode *np = VTONFS(vp);
kauth_cred_t cred = ap->a_cred;
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
voff_t oldoff, origoff;
vsize_t bytelen;
int error = 0;
int ioflag = ap->a_ioflag;
int extended = 0, wrotedata = 0;
#ifdef DIAGNOSTIC
if (uio->uio_rw != UIO_WRITE)
panic("nfs_write mode");
#endif
if (vp->v_type != VREG)
return (EIO);
if (np->n_flag & NWRITEERR) {
np->n_flag &= ~NWRITEERR;
return (np->n_error);
}
#ifndef NFS_V2_ONLY
if ((nmp->nm_flag & NFSMNT_NFSV3) &&
!(nmp->nm_iflag & NFSMNT_GOTFSINFO))
(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
if (ioflag & IO_APPEND) {
NFS_INVALIDATE_ATTRCACHE(np);
error = nfs_flushstalebuf(vp, cred, l,
NFS_FLUSHSTALEBUF_MYWRITE);
if (error)
return (error);
uio->uio_offset = np->n_size;
/*
* This is already checked above VOP_WRITE, but recheck
* the append case here to make sure our idea of the
* file size is as fresh as possible.
*/
if (uio->uio_offset + uio->uio_resid >
l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
mutex_enter(proc_lock);
psignal(l->l_proc, SIGXFSZ);
mutex_exit(proc_lock);
return (EFBIG);
}
}
if (uio->uio_offset < 0)
return (EINVAL);
if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
return (EFBIG);
if (uio->uio_resid == 0)
return (0);
origoff = uio->uio_offset;
do {
bool overwrite; /* if we are overwriting whole pages */
u_quad_t oldsize;
oldoff = uio->uio_offset;
bytelen = uio->uio_resid;
nfsstats.biocache_writes++;
oldsize = np->n_size;
np->n_flag |= NMODIFIED;
if (np->n_size < uio->uio_offset + bytelen) {
np->n_size = uio->uio_offset + bytelen;
}
overwrite = false;
if ((uio->uio_offset & PAGE_MASK) == 0) {
if ((vp->v_vflag & VV_MAPPED) == 0 &&
bytelen > PAGE_SIZE) {
bytelen = trunc_page(bytelen);
overwrite = true;
} else if ((bytelen & PAGE_MASK) == 0 &&
uio->uio_offset >= vp->v_size) {
overwrite = true;
}
}
if (vp->v_size < uio->uio_offset + bytelen) {
uvm_vnp_setwritesize(vp, uio->uio_offset + bytelen);
}
error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
UVM_ADV_RANDOM, UBC_WRITE | UBC_PARTIALOK |
(overwrite ? UBC_FAULTBUSY : 0) |
(UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
if (error) {
uvm_vnp_setwritesize(vp, vp->v_size);
if (overwrite && np->n_size != oldsize) {
/*
* backout size and free pages past eof.
*/
np->n_size = oldsize;
mutex_enter(&vp->v_interlock);
(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
0, PGO_SYNCIO | PGO_FREE);
}
break;
}
wrotedata = 1;
/*
* update UVM's notion of the size now that we've
* copied the data into the vnode's pages.
*/
if (vp->v_size < uio->uio_offset) {
uvm_vnp_setsize(vp, uio->uio_offset);
extended = 1;
}
if ((oldoff & ~(nmp->nm_wsize - 1)) !=
(uio->uio_offset & ~(nmp->nm_wsize - 1))) {
mutex_enter(&vp->v_interlock);
error = VOP_PUTPAGES(vp,
trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
round_page((uio->uio_offset + nmp->nm_wsize - 1) &
~(nmp->nm_wsize - 1)), PGO_CLEANIT);
}
} while (uio->uio_resid > 0);
if (wrotedata)
VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
if (error == 0 && (ioflag & IO_SYNC) != 0) {
mutex_enter(&vp->v_interlock);
error = VOP_PUTPAGES(vp,
trunc_page(origoff & ~(nmp->nm_wsize - 1)),
round_page((uio->uio_offset + nmp->nm_wsize - 1) &
~(nmp->nm_wsize - 1)),
PGO_CLEANIT | PGO_SYNCIO);
}
return error;
}
/*
* Get an nfs cache block.
* Allocate a new one if the block isn't currently in the cache
* and return the block marked busy. If the calling process is
* interrupted by a signal for an interruptible mount point, return
* NULL.
*/
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct lwp *l)
{
struct buf *bp;
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
if (nmp->nm_flag & NFSMNT_INT) {
bp = getblk(vp, bn, size, PCATCH, 0);
while (bp == NULL) {
if (nfs_sigintr(nmp, NULL, l))
return (NULL);
bp = getblk(vp, bn, size, 0, 2 * hz);
}
} else
bp = getblk(vp, bn, size, 0, 0);
return (bp);
}
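/*
 * Typical caller pattern, a sketch mirroring nfs_bioread() above:
 *
 *	bp = nfs_getcacheblk(vp, bn, size, l);
 *	if (bp == NULL)
 *		return (EINTR);		(interruptible mount, signalled)
 *	if ((bp->b_oflags & BO_DONE) == 0) {
 *		bp->b_flags |= B_READ;
 *		error = nfs_doio(bp);	(fill the block from the server)
 *		if (error) {
 *			brelse(bp, 0);
 *			return (error);
 *		}
 *	}
 */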
/*
* Flush and invalidate all dirty buffers. If another process is already
* doing the flush, just wait for completion.
*/
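/*
 * Single-flusher handshake (informal): NFLUSHINPROG marks a flush in
 * progress; latecomers set NFLUSHWANT and mtsleep() on &np->n_flag
 * under v_interlock, and the flusher wakes them once NFLUSHINPROG is
 * cleared.
 */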
int
nfs_vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred,
struct lwp *l, int intrflg)
{
struct nfsnode *np = VTONFS(vp);
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
int error = 0, slptimeo;
bool catch;
if ((nmp->nm_flag & NFSMNT_INT) == 0)
intrflg = 0;
if (intrflg) {
catch = true;
slptimeo = 2 * hz;
} else {
catch = false;
slptimeo = 0;
}
/*
* First wait for any other process doing a flush to complete.
*/
mutex_enter(&vp->v_interlock);
while (np->n_flag & NFLUSHINPROG) {
np->n_flag |= NFLUSHWANT;
error = mtsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
slptimeo, &vp->v_interlock);
if (error && intrflg && nfs_sigintr(nmp, NULL, l)) {
mutex_exit(&vp->v_interlock);
return EINTR;
}
}
/*
* Now, flush as required.
*/
np->n_flag |= NFLUSHINPROG;
mutex_exit(&vp->v_interlock);
error = vinvalbuf(vp, flags, cred, l, catch, 0);
while (error) {
if (intrflg && nfs_sigintr(nmp, NULL, l)) {
error = EINTR;
break;
1993-03-21 12:45:37 +03:00
}
error = vinvalbuf(vp, flags, cred, l, 0, slptimeo);
}
mutex_enter(&vp->v_interlock);
if (error == 0)
np->n_flag &= ~NMODIFIED;
np->n_flag &= ~NFLUSHINPROG;
if (np->n_flag & NFLUSHWANT) {
np->n_flag &= ~NFLUSHWANT;
wakeup(&np->n_flag);
}
mutex_exit(&vp->v_interlock);
return error;
}
/*
* nfs_flushstalebuf: flush cache if it's stale.
*
* => caller shouldn't own any pages or buffers which belong to the vnode.
*/
int
nfs_flushstalebuf(struct vnode *vp, kauth_cred_t cred, struct lwp *l,
int flags)
{
struct nfsnode *np = VTONFS(vp);
struct vattr vattr;
int error;
if (np->n_flag & NMODIFIED) {
if ((flags & NFS_FLUSHSTALEBUF_MYWRITE) == 0
|| vp->v_type != VREG) {
error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
if (error)
return error;
if (vp->v_type == VDIR) {
nfs_invaldircache(vp, 0);
}
} else {
/*
* XXX assuming writes are ours.
*/
}
NFS_INVALIDATE_ATTRCACHE(np);
error = VOP_GETATTR(vp, &vattr, cred);
if (error)
return error;
np->n_mtime = vattr.va_mtime;
} else {
error = VOP_GETATTR(vp, &vattr, cred);
if (error)
return error;
if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
if (vp->v_type == VDIR) {
nfs_invaldircache(vp, 0);
}
error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
if (error)
return error;
np->n_mtime = vattr.va_mtime;
}
}
return error;
}
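/*
 * Informal decision summary for nfs_flushstalebuf():
 *
 *	NMODIFIED set, and either not NFS_FLUSHSTALEBUF_MYWRITE or not
 *	    a VREG: invalidate buffers (and the dircache for VDIR),
 *	    then refetch attributes and record the new mtime.
 *	NMODIFIED set, NFS_FLUSHSTALEBUF_MYWRITE on a VREG: assume the
 *	    dirty data is our own; just refetch attributes and record
 *	    the new mtime.
 *	NMODIFIED clear: refetch attributes and invalidate only if the
 *	    server-side mtime has moved under us.
 */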
/*
* Initiate asynchronous I/O. Return an error if no nfsiods are available.
* This is mainly to avoid queueing async I/O requests when the nfsiods
* are all hung on a dead server.
*/
int
nfs_asyncio(struct buf *bp)
{
struct nfs_iod *iod;
struct nfsmount *nmp;
int slptimeo = 0, error;
bool catch = false;
if (nfs_numasync == 0)
return (EIO);
nmp = VFSTONFS(bp->b_vp->v_mount);
again:
if (nmp->nm_flag & NFSMNT_INT)
catch = true;
/*
* Find a free iod to process this request.
*/
mutex_enter(&nfs_iodlist_lock);
iod = LIST_FIRST(&nfs_iodlist_idle);
if (iod) {
/*
* Found one, so wake it up and tell it which
* mount to process.
*/
LIST_REMOVE(iod, nid_idle);
mutex_enter(&iod->nid_lock);
mutex_exit(&nfs_iodlist_lock);
KASSERT(iod->nid_mount == NULL);
iod->nid_mount = nmp;
cv_signal(&iod->nid_cv);
mutex_enter(&nmp->nm_lock);
mutex_exit(&iod->nid_lock);
nmp->nm_bufqiods++;
if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
cv_broadcast(&nmp->nm_aiocv);
}
} else {
mutex_exit(&nfs_iodlist_lock);
mutex_enter(&nmp->nm_lock);
}
KASSERT(mutex_owned(&nmp->nm_lock));
/*
* If we have an iod which can process the request, then queue
* the buffer. However, even if we have an iod, do not initiate
* queue cleaning if curproc is the pageout daemon. If the NFS mount
* is via local loopback, we may put curproc (pagedaemon) to sleep
* waiting for the writes to complete. But the server (ourself)
* may block the write, waiting for its (i.e., our) pagedaemon
* to produce clean pages to handle the write: deadlock.
* XXX: start non-loopback mounts straight away? If "lots free",
* let pagedaemon start loopback writes anyway?
*/
if (nmp->nm_bufqiods > 0) {
/*
* Ensure that the queue never grows too large.
*/
if (curlwp == uvm.pagedaemon_lwp) {
/* Enqueue for later, to avoid free-page deadlock */
} else while (nmp->nm_bufqlen >= 2 * nmp->nm_bufqiods) {
if (catch) {
error = cv_timedwait_sig(&nmp->nm_aiocv,
&nmp->nm_lock, slptimeo);
} else {
error = cv_timedwait(&nmp->nm_aiocv,
&nmp->nm_lock, slptimeo);
}
if (error) {
if (nfs_sigintr(nmp, NULL, curlwp)) {
mutex_exit(&nmp->nm_lock);
return (EINTR);
}
if (catch) {
catch = false;
slptimeo = 2 * hz;
}
}
/*
* We might have lost our iod while sleeping,
* so check and loop if necessary.
*/
if (nmp->nm_bufqiods == 0) {
mutex_exit(&nmp->nm_lock);
goto again;
}
}
TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
nmp->nm_bufqlen++;
mutex_exit(&nmp->nm_lock);
return (0);
}
mutex_exit(&nmp->nm_lock);
/*
* All the iods are busy on other mounts, so return EIO to
* force the caller to process the i/o synchronously.
*/
return (EIO);
}
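/*
 * Callers treat a nonzero return from nfs_asyncio() as "fall back to
 * synchronous i/o"; e.g. the readahead path in nfs_bioread() above
 * simply throws the buffer away:
 *
 *	if (nfs_asyncio(rabp))
 *		brelse(rabp, BC_INVAL);
 */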
/*
* nfs_doio for read.
*/
static int
nfs_doio_read(struct buf *bp, struct uio *uiop)
{
struct vnode *vp = bp->b_vp;
struct nfsnode *np = VTONFS(vp);
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
int error = 0;
uiop->uio_rw = UIO_READ;
switch (vp->v_type) {
case VREG:
nfsstats.read_bios++;
error = nfs_readrpc(vp, uiop);
if (!error && uiop->uio_resid) {
int diff, len;
/*
* If uio_resid > 0, there is a hole in the file and
* no writes after the hole have been pushed to
* the server yet or the file has been truncated
* on the server.
* Just zero fill the rest of the valid area.
*/
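/*
 * Worked example (hypothetical numbers): with b_bcount == 8192 and
 * the server returning only 4096 bytes, uio_resid == 4096, so
 * diff == 4096 and the second half of the buffer is zero filled.
 */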
KASSERT(vp->v_size >=
uiop->uio_offset + uiop->uio_resid);
diff = bp->b_bcount - uiop->uio_resid;
len = uiop->uio_resid;
memset((char *)bp->b_data + diff, 0, len);
uiop->uio_resid = 0;
}
#if 0
if (uiop->uio_lwp && (vp->v_iflag & VI_TEXT) &&
timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)) {
mutex_enter(proc_lock);
killproc(uiop->uio_lwp->l_proc, "process text file was modified");
mutex_exit(proc_lock);
#if 0 /* XXX NJWLWP */
uiop->uio_lwp->l_proc->p_holdcnt++;
#endif
}
#endif
break;
case VLNK:
KASSERT(uiop->uio_offset == (off_t)0);
nfsstats.readlink_bios++;
error = nfs_readlinkrpc(vp, uiop, np->n_rcred);
break;
case VDIR:
nfsstats.readdir_bios++;
uiop->uio_offset = bp->b_dcookie;
#ifndef NFS_V2_ONLY
if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
error = nfs_readdirplusrpc(vp, uiop,
curlwp->l_cred);
/*
* nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
*/
if (error == ENOTSUP)
nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
}
#else
nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
#endif
if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
error = nfs_readdirrpc(vp, uiop,
curlwp->l_cred);
if (!error) {
bp->b_dcookie = uiop->uio_offset;
}
break;
default:
printf("nfs_doio: type %x unexpected\n", vp->v_type);
break;
}
bp->b_error = error;
return error;
}
/*
* nfs_doio for write.
*/
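/*
 * Informal outline of the logic below: pages already marked
 * PG_NEEDCOMMIT were written UNSTABLE earlier and only need a COMMIT
 * RPC; everything else goes out via nfs_writerpc(), UNSTABLE for
 * async NFSv3 buffers (whose pages are marked PG_NEEDCOMMIT|PG_RDONLY
 * so they cannot be dirtied while in flight) and FILESYNC otherwise.
 * A stale write verifier at any point forces nfs_clearcommit() and a
 * retry.
 */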
static int
nfs_doio_write(struct buf *bp, struct uio *uiop)
{
struct vnode *vp = bp->b_vp;
struct nfsnode *np = VTONFS(vp);
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
int iomode;
bool stalewriteverf = false;
int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct vm_page **pgs, *spgs[UBC_MAX_PAGES];
#ifndef NFS_V2_ONLY
bool needcommit = true; /* need only COMMIT RPC */
#else
bool needcommit = false; /* need only COMMIT RPC */
#endif
bool pageprotected;
struct uvm_object *uobj = &vp->v_uobj;
int error;
off_t off, cnt;
if (npages < __arraycount(spgs))
pgs = spgs;
else {
if ((pgs = kmem_alloc(sizeof(*pgs) * npages, KM_NOSLEEP)) ==
NULL)
return ENOMEM;
}
if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
iomode = NFSV3WRITE_UNSTABLE;
} else {
iomode = NFSV3WRITE_FILESYNC;
}
#ifndef NFS_V2_ONLY
again:
#endif
rw_enter(&nmp->nm_writeverflock, RW_READER);
for (i = 0; i < npages; i++) {
pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
if (pgs[i]->uobject == uobj &&
pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
KASSERT(pgs[i]->flags & PG_BUSY);
/*
* this page belongs to our object.
*/
mutex_enter(&uobj->vmobjlock);
/*
* write out the page stably if it's about to
* be released, because we can't resend it
* if the server crashes.
*
* XXX assuming PG_RELEASED|PG_PAGEOUT won't be
* changed until we unbusy the page.
*/
if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
iomode = NFSV3WRITE_FILESYNC;
2004-01-10 17:52:53 +03:00
/*
* if we met a page which hasn't been sent yet,
* we need do WRITE RPC.
*/
if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
2007-02-22 09:05:00 +03:00
needcommit = false;
2008-01-02 14:48:20 +03:00
mutex_exit(&uobj->vmobjlock);
} else {
iomode = NFSV3WRITE_FILESYNC;
2007-02-22 09:05:00 +03:00
needcommit = false;
}
}
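	/*
	 * at this point needcommit is still true only if every page
	 * already reached the server via an earlier UNSTABLE write
	 * (PG_NEEDCOMMIT), in which case a COMMIT RPC alone suffices.
	 * for a fresh UNSTABLE write, write-protect the pages first:
	 * as long as they can't be modified during the i/o, the same
	 * data can safely be retransmitted if the server's write
	 * verifier goes stale, which is what the pageprotected flag
	 * passed to nfs_writerpc() indicates.
	 */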
	if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
			pmap_page_protect(pgs[i], VM_PROT_READ);
		}
		mutex_exit(&uobj->vmobjlock);
		pageprotected = true; /* pages can't be modified during i/o. */
	} else
		pageprotected = false;
/*
* Send the data to the server if necessary,
* otherwise just send a commit rpc.
*/
#ifndef NFS_V2_ONLY
if (needcommit) {
/*
* If the buffer is in the range that we already committed,
* there's nothing to do.
*
* If it's in the range that we need to commit, push the
* whole range at once, otherwise only push the buffer.
* In both these cases, acquire the commit lock to avoid
* other processes modifying the range.
*/
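		/*
		 * e.g. if this buffer lies inside the pending range
		 * [n_pushlo, n_pushhi), one COMMIT RPC covering the
		 * whole range is sent instead of one per buffer.
		 */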
off = uiop->uio_offset;
cnt = bp->b_bcount;
mutex_enter(&np->n_commitlock);
if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
bool pushedrange;
		if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
			pushedrange = true;
			off = np->n_pushlo;
			cnt = np->n_pushhi - np->n_pushlo;
		} else {
			pushedrange = false;
		}
		error = nfs_commit(vp, off, cnt, curlwp);
if (error == 0) {
if (pushedrange) {
nfs_merge_commit_ranges(vp);
} else {
nfs_add_committed_range(vp, off, cnt);
}
}
} else {
error = 0;
}
mutex_exit(&np->n_commitlock);
rw_exit(&nmp->nm_writeverflock);
if (!error) {
/*
* pages are now on stable storage.
*/
			uiop->uio_resid = 0;
			mutex_enter(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
			}
			mutex_exit(&uobj->vmobjlock);
goto out;
} else if (error == NFSERR_STALEWRITEVERF) {
nfs_clearcommit(vp->v_mount);
goto again;
}
if (error) {
bp->b_error = np->n_error = error;
np->n_flag |= NWRITEERR;
}
goto out;
}
#endif
off = uiop->uio_offset;
cnt = bp->b_bcount;
uiop->uio_rw = UIO_WRITE;
nfsstats.write_bios++;
error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
#ifndef NFS_V2_ONLY
if (!error && iomode == NFSV3WRITE_UNSTABLE) {
/*
* we need to commit pages later.
*/
mutex_enter(&np->n_commitlock);
nfs_add_tobecommitted_range(vp, off, cnt);
		/*
		 * if too many uncommitted pages have accumulated,
		 * commit some of them now.
		 */
		if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
			off = np->n_pushlo;
			cnt = nfs_commitsize >> 1;
			error = nfs_commit(vp, off, cnt, curlwp);
			if (!error) {
				nfs_add_committed_range(vp, off, cnt);
				nfs_del_tobecommitted_range(vp, off, cnt);
			}
			if (error == NFSERR_STALEWRITEVERF) {
				stalewriteverf = true;
				error = 0; /* it isn't a real error */
			}
} else {
			/*
			 * re-dirty pages so that they will be passed
			 * to us again later.
			 */
			mutex_enter(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~PG_CLEAN;
			}
			mutex_exit(&uobj->vmobjlock);
}
mutex_exit(&np->n_commitlock);
} else
#endif
if (!error) {
/*
* pages are now on stable storage.
*/
mutex_enter(&np->n_commitlock);
nfs_del_committed_range(vp, off, cnt);
mutex_exit(&np->n_commitlock);
		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
		}
		mutex_exit(&uobj->vmobjlock);
} else {
/*
* we got an error.
*/
bp->b_error = np->n_error = error;
np->n_flag |= NWRITEERR;
}
rw_exit(&nmp->nm_writeverflock);
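	/*
	 * the v3 write verifier is returned by the server on every
	 * WRITE and COMMIT, and it changes when the server loses its
	 * uncommitted data (typically a reboot).  if it went stale,
	 * throw away all cached commit state so the affected pages
	 * get written to the server again.
	 */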
if (stalewriteverf) {
nfs_clearcommit(vp->v_mount);
}
#ifndef NFS_V2_ONLY
out:
#endif
if (pgs != spgs)
kmem_free(pgs, sizeof(*pgs) * npages);
return error;
}
/*
* nfs_doio for B_PHYS.
*/
static int
nfs_doio_phys(struct buf *bp, struct uio *uiop)
{
struct vnode *vp = bp->b_vp;
int error;
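	/*
	 * B_PHYS i/o bypasses the page cache; b_blkno is in DEV_BSIZE
	 * units, so scale it to a byte offset for the RPC.
	 */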
uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
if (bp->b_flags & B_READ) {
uiop->uio_rw = UIO_READ;
nfsstats.read_physios++;
error = nfs_readrpc(vp, uiop);
} else {
int iomode = NFSV3WRITE_DATASYNC;
bool stalewriteverf;
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
uiop->uio_rw = UIO_WRITE;
nfsstats.write_physios++;
rw_enter(&nmp->nm_writeverflock, RW_READER);
error = nfs_writerpc(vp, uiop, &iomode, false, &stalewriteverf);
rw_exit(&nmp->nm_writeverflock);
if (stalewriteverf) {
nfs_clearcommit(bp->b_vp->v_mount);
}
}
bp->b_error = error;
return error;
}
/*
* Do an I/O operation to/from a cache block. This may be called
* synchronously or from an nfsiod.
*/
int
nfs_doio(struct buf *bp)
{
int error;
struct uio uio;
struct uio *uiop = &uio;
struct iovec io;
UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);
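	/*
	 * wrap the buffer in a single-segment kernel-space uio that
	 * the RPC routines consume.
	 */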
uiop->uio_iov = &io;
uiop->uio_iovcnt = 1;
uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
UIO_SETUP_SYSSPACE(uiop);
io.iov_base = bp->b_data;
io.iov_len = uiop->uio_resid = bp->b_bcount;
/*
* Historically, paging was done with physio, but no more...
*/
if (bp->b_flags & B_PHYS) {
/*
* ...though reading /dev/drum still gets us here.
*/
error = nfs_doio_phys(bp, uiop);
} else if (bp->b_flags & B_READ) {
error = nfs_doio_read(bp, uiop);
} else {
error = nfs_doio_write(bp, uiop);
}
bp->b_resid = uiop->uio_resid;
biodone(bp);
return (error);
}
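/*
 * illustrative only -- a minimal sketch of what a caller (e.g. an
 * nfsiod or the strategy path) is assumed to have set up before
 * handing a buffer to nfs_doio():
 *
 *	bp->b_vp = vp;			// nfs vnode the i/o is for
 *	bp->b_data = kva;		// mapped buffer, b_bcount bytes
 *	bp->b_bcount = len;
 *	bp->b_blkno = off >> DEV_BSHIFT; // offset in DEV_BSIZE units
 *	bp->b_flags |= B_READ;		// or a write, possibly B_ASYNC
 *	error = nfs_doio(bp);		// calls biodone(bp) itself
 */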
/*
* Vnode op for VM getpages.
*/
int
nfs_getpages(void *v)
{
struct vop_getpages_args /* {
struct vnode *a_vp;
voff_t a_offset;
struct vm_page **a_m;
int *a_count;
int a_centeridx;
vm_prot_t a_access_type;
int a_advice;
int a_flags;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct uvm_object *uobj = &vp->v_uobj;
struct nfsnode *np = VTONFS(vp);
const int npages = *ap->a_count;
struct vm_page *pg, **pgs, **opgs, *spgs[UBC_MAX_PAGES];
off_t origoffset, len;
int i, error;
bool v3 = NFS_ISV3(vp);
bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
bool locked = (ap->a_flags & PGO_LOCKED) != 0;
	/*
	 * If PGO_LOCKED is not set we don't actually use opgs,
	 * so just point it at the on-stack array.
	 */
if (!locked || npages < __arraycount(spgs))
opgs = spgs;
else {
if ((opgs = kmem_alloc(npages * sizeof(*opgs), KM_NOSLEEP)) ==
NULL)
return ENOMEM;
}
/*
* call the genfs code to get the pages. `pgs' may be NULL
* when doing read-ahead.
*/
pgs = ap->a_m;
if (write && locked && v3) {
KASSERT(pgs != NULL);
#ifdef DEBUG
	/*
	 * If PGO_LOCKED is set, real pages shouldn't exist
	 * in the array.
	 */
for (i = 0; i < npages; i++)
KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
#endif
		memcpy(opgs, pgs, npages * sizeof(*pgs));
}
error = genfs_getpages(v);
if (error)
goto out;
/*
* for read faults where the nfs node is not yet marked NMODIFIED,
* set PG_RDONLY on the pages so that we come back here if someone
* tries to modify later via the mapping that will be entered for
* this fault.
*/
if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
if (!locked) {
mutex_enter(&uobj->vmobjlock);
}
for (i = 0; i < npages; i++) {
pg = pgs[i];
if (pg == NULL || pg == PGO_DONTCARE) {
continue;
}
pg->flags |= PG_RDONLY;
}
if (!locked) {
mutex_exit(&uobj->vmobjlock);
}
}
if (!write)
goto out;
/*
* this is a write fault, update the commit info.
*/
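	/*
	 * the pages are about to be dirtied again, so any COMMIT
	 * state covering them is stale: drop them from both the
	 * committed and the to-be-committed ranges, and clear
	 * PG_NEEDCOMMIT/PG_RDONLY below.
	 */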
origoffset = ap->a_offset;
len = npages << PAGE_SHIFT;
if (v3) {
if (!locked) {
mutex_enter(&np->n_commitlock);
} else {
if (!mutex_tryenter(&np->n_commitlock)) {
				/*
				 * Since PGO_LOCKED is set, we need to unbusy
				 * all pages fetched by genfs_getpages() above,
				 * tell the caller that there are no pages
				 * available, and restore the original pgs
				 * array.
				 */
				mutex_enter(&uvm_pageqlock);
				uvm_page_unbusy(pgs, npages);
				mutex_exit(&uvm_pageqlock);
				*ap->a_count = 0;
				memcpy(pgs, opgs,
				    npages * sizeof(*pgs));
				error = EBUSY;
				goto out;
}
}
nfs_del_committed_range(vp, origoffset, len);
nfs_del_tobecommitted_range(vp, origoffset, len);
}
np->n_flag |= NMODIFIED;
	if (!locked) {
		mutex_enter(&uobj->vmobjlock);
}
for (i = 0; i < npages; i++) {
pg = pgs[i];
if (pg == NULL || pg == PGO_DONTCARE) {
continue;
}
pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
}
	if (!locked) {
		mutex_exit(&uobj->vmobjlock);
}
if (v3) {
mutex_exit(&np->n_commitlock);
}
out:
if (opgs != spgs)
kmem_free(opgs, sizeof(*opgs) * npages);
return error;
}