2005-12-27 07:06:45 +03:00
|
|
|
/* $NetBSD: vfs_subr.c,v 1.258 2005/12/27 04:06:46 chs Exp $ */
|
1997-10-05 22:37:01 +04:00
|
|
|
|
|
|
|
/*-
|
2005-03-02 14:05:34 +03:00
|
|
|
* Copyright (c) 1997, 1998, 2004, 2005 The NetBSD Foundation, Inc.
|
1997-10-05 22:37:01 +04:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to The NetBSD Foundation
|
|
|
|
* by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
|
|
|
|
* NASA Ames Research Center.
|
2005-03-02 14:05:34 +03:00
|
|
|
* This code is derived from software contributed to The NetBSD Foundation
|
|
|
|
* by Charles M. Hannum.
|
1997-10-05 22:37:01 +04:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the NetBSD
|
|
|
|
* Foundation, Inc. and its contributors.
|
|
|
|
* 4. Neither the name of The NetBSD Foundation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
|
|
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
|
|
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
1994-06-29 10:29:24 +04:00
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
1994-06-08 15:28:29 +04:00
|
|
|
* Copyright (c) 1989, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
1994-05-17 08:21:49 +04:00
|
|
|
* (c) UNIX System Laboratories, Inc.
|
|
|
|
* All or some portions of this file are derived from material licensed
|
|
|
|
* to the University of California by American Telephone and Telegraph
|
|
|
|
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
|
|
|
|
* the permission of UNIX System Laboratories, Inc.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2003-08-07 20:26:28 +04:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
1994-05-17 08:21:49 +04:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1994-06-29 10:29:24 +04:00
|
|
|
* @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* External virtual filesystem routines
|
|
|
|
*/
|
|
|
|
|
2001-11-12 18:25:01 +03:00
|
|
|
#include <sys/cdefs.h>
|
2005-12-27 07:06:45 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.258 2005/12/27 04:06:46 chs Exp $");
|
2001-11-12 18:25:01 +03:00
|
|
|
|
2003-05-16 18:01:56 +04:00
|
|
|
#include "opt_inet.h"
|
2000-04-10 06:22:13 +04:00
|
|
|
#include "opt_ddb.h"
|
1998-11-15 21:38:11 +03:00
|
|
|
#include "opt_compat_netbsd.h"
|
1998-12-10 18:07:01 +03:00
|
|
|
#include "opt_compat_43.h"
|
1998-02-10 17:08:44 +03:00
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
#include <sys/param.h>
|
1994-06-08 15:28:29 +04:00
|
|
|
#include <sys/systm.h>
|
1994-05-17 08:21:49 +04:00
|
|
|
#include <sys/proc.h>
|
2000-08-31 18:41:35 +04:00
|
|
|
#include <sys/kernel.h>
|
1994-05-17 08:21:49 +04:00
|
|
|
#include <sys/mount.h>
|
1995-07-03 20:58:38 +04:00
|
|
|
#include <sys/fcntl.h>
|
1994-05-17 08:21:49 +04:00
|
|
|
#include <sys/vnode.h>
|
1994-06-08 15:28:29 +04:00
|
|
|
#include <sys/stat.h>
|
1994-05-17 08:21:49 +04:00
|
|
|
#include <sys/namei.h>
|
|
|
|
#include <sys/ucred.h>
|
|
|
|
#include <sys/buf.h>
|
|
|
|
#include <sys/errno.h>
|
|
|
|
#include <sys/malloc.h>
|
1994-06-08 15:28:29 +04:00
|
|
|
#include <sys/domain.h>
|
|
|
|
#include <sys/mbuf.h>
|
2003-01-18 13:06:22 +03:00
|
|
|
#include <sys/sa.h>
|
1996-02-09 21:59:18 +03:00
|
|
|
#include <sys/syscallargs.h>
|
1997-01-31 05:50:36 +03:00
|
|
|
#include <sys/device.h>
|
2003-04-17 01:44:18 +04:00
|
|
|
#include <sys/filedesc.h>
|
1996-02-04 05:17:43 +03:00
|
|
|
|
1994-06-08 15:28:29 +04:00
|
|
|
#include <miscfs/specfs/specdev.h>
|
1999-11-15 21:49:07 +03:00
|
|
|
#include <miscfs/genfs/genfs.h>
|
|
|
|
#include <miscfs/syncfs/syncfs.h>
|
1994-06-08 15:28:29 +04:00
|
|
|
|
2000-04-10 06:22:13 +04:00
|
|
|
#include <uvm/uvm.h>
|
2005-11-30 01:52:02 +03:00
|
|
|
#include <uvm/uvm_readahead.h>
|
2000-04-10 06:22:13 +04:00
|
|
|
#include <uvm/uvm_ddb.h>
|
1998-02-05 10:59:28 +03:00
|
|
|
|
2000-06-27 21:41:07 +04:00
|
|
|
#include <sys/sysctl.h>
|
|
|
|
|
2003-02-26 02:01:39 +03:00
|
|
|
const enum vtype iftovt_tab[16] = {
|
1994-06-08 15:28:29 +04:00
|
|
|
VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
|
|
|
|
VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
|
|
|
|
};
|
2001-02-22 00:39:52 +03:00
|
|
|
const int vttoif_tab[9] = {
|
1994-06-08 15:28:29 +04:00
|
|
|
0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
|
|
|
|
S_IFSOCK, S_IFIFO, S_IFMT,
|
|
|
|
};
|
|
|
|
|
1994-06-13 19:37:55 +04:00
|
|
|
int doforce = 1; /* 1 => permit forcible unmounting */
|
|
|
|
int prtactive = 0; /* 1 => print out reclaim of active vnodes */
|
1994-05-17 08:21:49 +04:00
|
|
|
|
2000-02-16 14:57:45 +03:00
|
|
|
extern int dovfsusermount; /* 1 => permit any user to mount filesystems */
|
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* Insq/Remq for the vnode usage lists.
|
|
|
|
*/
|
|
|
|
#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
|
|
|
|
#define bufremvn(bp) { \
|
|
|
|
LIST_REMOVE(bp, b_vnbufs); \
|
|
|
|
(bp)->b_vnbufs.le_next = NOLIST; \
|
2004-03-23 16:22:32 +03:00
|
|
|
}
|
1999-11-15 21:49:07 +03:00
|
|
|
/* TAILQ_HEAD(freelst, vnode) vnode_free_list = vnode free list (in vnode.h) */
|
|
|
|
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
|
1999-11-18 08:50:25 +03:00
|
|
|
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);
|
1999-11-15 21:49:07 +03:00
|
|
|
|
1996-10-02 02:49:11 +04:00
|
|
|
struct mntlist mountlist = /* mounted filesystem list */
|
|
|
|
CIRCLEQ_HEAD_INITIALIZER(mountlist);
|
1998-02-18 10:16:41 +03:00
|
|
|
struct vfs_list_head vfs_list = /* vfs list */
|
2000-03-03 08:21:03 +03:00
|
|
|
LIST_HEAD_INITIALIZER(vfs_list);
|
1998-02-18 10:16:41 +03:00
|
|
|
|
2000-08-19 21:25:33 +04:00
|
|
|
struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
|
|
|
|
static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
|
|
|
|
struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
|
|
|
|
struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
|
|
|
|
struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;
|
1998-03-01 05:20:01 +03:00
|
|
|
|
2003-02-06 00:38:38 +03:00
|
|
|
/* XXX - gross; single global lock to protect v_numoutput */
|
|
|
|
struct simplelock global_v_numoutput_slock = SIMPLELOCK_INITIALIZER;
|
|
|
|
|
1998-02-18 10:16:41 +03:00
|
|
|
/*
|
|
|
|
* These define the root filesystem and device.
|
|
|
|
*/
|
|
|
|
struct mount *rootfs;
|
|
|
|
struct vnode *rootvnode;
|
1998-03-01 05:20:01 +03:00
|
|
|
struct device *root_device; /* root device */
|
1998-02-18 10:16:41 +03:00
|
|
|
|
2004-04-25 20:42:40 +04:00
|
|
|
POOL_INIT(vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
|
|
|
|
&pool_allocator_nointr);
|
1998-09-01 07:09:14 +04:00
|
|
|
|
2003-02-01 09:23:35 +03:00
|
|
|
MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes");
|
|
|
|
|
1998-06-08 19:52:07 +04:00
|
|
|
/*
|
|
|
|
* Local declarations.
|
|
|
|
*/
|
2004-03-23 16:22:32 +03:00
|
|
|
void insmntque(struct vnode *, struct mount *);
|
|
|
|
int getdevvp(dev_t, struct vnode **, enum vtype);
|
1996-02-04 05:17:43 +03:00
|
|
|
|
2005-12-11 15:16:03 +03:00
|
|
|
void vclean(struct vnode *, int, struct lwp *);
|
2002-10-23 10:45:49 +04:00
|
|
|
|
2005-12-11 15:16:03 +03:00
|
|
|
static struct vnode *getcleanvnode(struct lwp *);
|
1996-02-09 21:59:18 +03:00
|
|
|
|
|
|
|
#ifdef DEBUG
|
2004-03-23 16:22:32 +03:00
|
|
|
void printlockedvnodes(void);
|
1996-02-09 21:59:18 +03:00
|
|
|
#endif
|
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{

	/* The only setup needed here is to start the filesystem syncer. */
	vn_initialize_syncerd();
}
|
|
|
|
|
2003-07-30 16:09:46 +04:00
|
|
|
int
|
2005-12-11 15:16:03 +03:00
|
|
|
vfs_drainvnodes(long target, struct lwp *l)
|
2003-07-30 16:09:46 +04:00
|
|
|
{
|
|
|
|
|
|
|
|
simple_lock(&vnode_free_list_slock);
|
|
|
|
while (numvnodes > target) {
|
|
|
|
struct vnode *vp;
|
|
|
|
|
2005-12-11 15:16:03 +03:00
|
|
|
vp = getcleanvnode(l);
|
2003-07-30 16:09:46 +04:00
|
|
|
if (vp == NULL)
|
|
|
|
return EBUSY; /* give up */
|
|
|
|
pool_put(&vnode_pool, vp);
|
|
|
|
simple_lock(&vnode_free_list_slock);
|
|
|
|
numvnodes--;
|
|
|
|
}
|
|
|
|
simple_unlock(&vnode_free_list_slock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* grab a vnode from freelist and clean it.
|
|
|
|
*/
|
|
|
|
struct vnode *
|
2005-12-11 15:16:03 +03:00
|
|
|
getcleanvnode(struct lwp *l)
|
2003-07-30 16:09:46 +04:00
|
|
|
{
|
|
|
|
struct vnode *vp;
|
2003-10-15 15:28:59 +04:00
|
|
|
struct mount *mp;
|
2003-07-30 16:09:46 +04:00
|
|
|
struct freelst *listhd;
|
|
|
|
|
|
|
|
LOCK_ASSERT(simple_lock_held(&vnode_free_list_slock));
|
2004-06-19 10:20:02 +04:00
|
|
|
|
|
|
|
listhd = &vnode_free_list;
|
|
|
|
try_nextlist:
|
|
|
|
TAILQ_FOREACH(vp, listhd, v_freelist) {
|
2003-10-15 15:28:59 +04:00
|
|
|
if (!simple_lock_try(&vp->v_interlock))
|
|
|
|
continue;
|
2004-06-16 16:32:51 +04:00
|
|
|
/*
|
|
|
|
* as our lwp might hold the underlying vnode locked,
|
|
|
|
* don't try to reclaim the VLAYER vnode if it's locked.
|
|
|
|
*/
|
2004-06-16 16:35:51 +04:00
|
|
|
if ((vp->v_flag & VXLOCK) == 0 &&
|
|
|
|
((vp->v_flag & VLAYER) == 0 || VOP_ISLOCKED(vp) == 0)) {
|
2003-10-15 15:28:59 +04:00
|
|
|
if (vn_start_write(vp, &mp, V_NOWAIT) == 0)
|
2003-07-30 16:09:46 +04:00
|
|
|
break;
|
|
|
|
}
|
2003-10-15 15:28:59 +04:00
|
|
|
mp = NULL;
|
|
|
|
simple_unlock(&vp->v_interlock);
|
2003-07-30 16:09:46 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (vp == NULLVP) {
|
2004-06-19 10:20:02 +04:00
|
|
|
if (listhd == &vnode_free_list) {
|
|
|
|
listhd = &vnode_hold_list;
|
|
|
|
goto try_nextlist;
|
|
|
|
}
|
2003-07-30 16:09:46 +04:00
|
|
|
simple_unlock(&vnode_free_list_slock);
|
|
|
|
return NULLVP;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vp->v_usecount)
|
|
|
|
panic("free vnode isn't, vp %p", vp);
|
|
|
|
TAILQ_REMOVE(listhd, vp, v_freelist);
|
|
|
|
/* see comment on why 0xdeadb is set at end of vgone (below) */
|
|
|
|
vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
|
|
|
|
simple_unlock(&vnode_free_list_slock);
|
|
|
|
vp->v_lease = NULL;
|
|
|
|
|
|
|
|
if (vp->v_type != VBAD)
|
2005-12-11 15:16:03 +03:00
|
|
|
vgonel(vp, l);
|
2003-07-30 16:09:46 +04:00
|
|
|
else
|
|
|
|
simple_unlock(&vp->v_interlock);
|
2003-10-15 15:28:59 +04:00
|
|
|
vn_finished_write(mp, 0);
|
2003-07-30 16:09:46 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (vp->v_data || vp->v_uobj.uo_npages ||
|
|
|
|
TAILQ_FIRST(&vp->v_uobj.memq))
|
|
|
|
panic("cleaned vnode isn't, vp %p", vp);
|
|
|
|
if (vp->v_numoutput)
|
|
|
|
panic("clean vnode has pending I/O's, vp %p", vp);
|
|
|
|
#endif
|
|
|
|
KASSERT((vp->v_flag & VONWORKLST) == 0);
|
|
|
|
|
|
|
|
return vp;
|
|
|
|
}
|
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
1998-03-01 05:20:01 +03:00
|
|
|
* Mark a mount point as busy. Used to synchronize access and to delay
|
|
|
|
* unmounting. Interlock is not released on failure.
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
vfs_busy(struct mount *mp, int flags, struct simplelock *interlkp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
1998-03-01 05:20:01 +03:00
|
|
|
int lkflags;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
2003-10-14 18:02:56 +04:00
|
|
|
while (mp->mnt_iflag & IMNT_UNMOUNT) {
|
2004-05-02 16:21:02 +04:00
|
|
|
int gone, n;
|
2004-03-23 16:22:32 +03:00
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
if (flags & LK_NOWAIT)
|
|
|
|
return (ENOENT);
|
1999-11-15 21:49:07 +03:00
|
|
|
if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
|
2005-12-11 15:16:03 +03:00
|
|
|
&& mp->mnt_unmounter == curlwp)
|
1999-11-15 21:49:07 +03:00
|
|
|
return (EDEADLK);
|
1998-03-01 05:20:01 +03:00
|
|
|
if (interlkp)
|
|
|
|
simple_unlock(interlkp);
|
|
|
|
/*
|
|
|
|
* Since all busy locks are shared except the exclusive
|
|
|
|
* lock granted when unmounting, the only place that a
|
|
|
|
* wakeup needs to be done is at the release of the
|
|
|
|
* exclusive lock at the end of dounmount.
|
|
|
|
*/
|
2004-05-02 16:21:02 +04:00
|
|
|
simple_lock(&mp->mnt_slock);
|
1999-07-04 20:20:12 +04:00
|
|
|
mp->mnt_wcnt++;
|
2004-05-02 16:21:02 +04:00
|
|
|
ltsleep((caddr_t)mp, PVFS, "vfs_busy", 0, &mp->mnt_slock);
|
|
|
|
n = --mp->mnt_wcnt;
|
|
|
|
simple_unlock(&mp->mnt_slock);
|
2003-10-14 18:02:56 +04:00
|
|
|
gone = mp->mnt_iflag & IMNT_GONE;
|
2004-03-23 16:22:32 +03:00
|
|
|
|
2004-05-02 16:21:02 +04:00
|
|
|
if (n == 0)
|
1999-07-04 20:20:12 +04:00
|
|
|
wakeup(&mp->mnt_wcnt);
|
1998-03-01 05:20:01 +03:00
|
|
|
if (interlkp)
|
|
|
|
simple_lock(interlkp);
|
1999-07-04 20:20:12 +04:00
|
|
|
if (gone)
|
|
|
|
return (ENOENT);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
lkflags = LK_SHARED;
|
|
|
|
if (interlkp)
|
|
|
|
lkflags |= LK_INTERLOCK;
|
|
|
|
if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
|
|
|
|
panic("vfs_busy: unexpected lock failure");
|
1994-05-17 08:21:49 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1998-03-01 05:20:01 +03:00
|
|
|
* Free a busy filesystem.
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vfs_unbusy(struct mount *mp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1998-03-01 05:20:01 +03:00
|
|
|
* Lookup a filesystem type, and if found allocate and initialize
|
|
|
|
* a mount structure for it.
|
|
|
|
*
|
|
|
|
* Devname is usually updated by mount(8) after booting.
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
vfs_rootmountalloc(const char *fstypename, const char *devname,
|
|
|
|
struct mount **mpp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
1998-03-01 05:20:01 +03:00
|
|
|
struct vfsops *vfsp = NULL;
|
|
|
|
struct mount *mp;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
2001-06-26 23:14:25 +04:00
|
|
|
LIST_FOREACH(vfsp, &vfs_list, vfs_list)
|
1998-03-01 05:20:01 +03:00
|
|
|
if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
|
|
|
|
break;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
if (vfsp == NULL)
|
|
|
|
return (ENODEV);
|
|
|
|
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
|
Abolition of bcopy, ovbcopy, bcmp, and bzero, phase one.
bcopy(x, y, z) -> memcpy(y, x, z)
ovbcopy(x, y, z) -> memmove(y, x, z)
bcmp(x, y, z) -> memcmp(x, y, z)
bzero(x, y) -> memset(x, 0, y)
1998-08-04 08:03:10 +04:00
|
|
|
memset((char *)mp, 0, (u_long)sizeof(struct mount));
|
1998-03-01 05:20:01 +03:00
|
|
|
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
|
2004-05-02 16:21:02 +04:00
|
|
|
simple_lock_init(&mp->mnt_slock);
|
1998-03-01 05:20:01 +03:00
|
|
|
(void)vfs_busy(mp, LK_NOWAIT, 0);
|
|
|
|
LIST_INIT(&mp->mnt_vnodelist);
|
|
|
|
mp->mnt_op = vfsp;
|
|
|
|
mp->mnt_flag = MNT_RDONLY;
|
|
|
|
mp->mnt_vnodecovered = NULLVP;
|
2004-07-01 14:03:29 +04:00
|
|
|
mp->mnt_leaf = mp;
|
1998-03-01 05:20:01 +03:00
|
|
|
vfsp->vfs_refcount++;
|
|
|
|
strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
|
|
|
|
mp->mnt_stat.f_mntonname[0] = '/';
|
|
|
|
(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
|
|
|
|
*mpp = mp;
|
|
|
|
return (0);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lookup a mount point by filesystem identifier.
|
|
|
|
*/
|
|
|
|
struct mount *
|
2005-06-06 03:47:48 +04:00
|
|
|
vfs_getvfs(fsid_t *fsid)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2000-03-30 13:27:11 +04:00
|
|
|
struct mount *mp;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mountlist_slock);
|
2002-09-04 05:32:31 +04:00
|
|
|
CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
|
2004-04-21 05:05:31 +04:00
|
|
|
if (mp->mnt_stat.f_fsidx.__fsid_val[0] == fsid->__fsid_val[0] &&
|
|
|
|
mp->mnt_stat.f_fsidx.__fsid_val[1] == fsid->__fsid_val[1]) {
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mountlist_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
return (mp);
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
simple_unlock(&mountlist_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
return ((struct mount *)0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get a new unique fsid
|
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vfs_getnewfsid(struct mount *mp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
|
|
|
static u_short xxxfs_mntid;
|
|
|
|
fsid_t tfsid;
|
1998-03-01 05:20:01 +03:00
|
|
|
int mtype;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntid_slock);
|
2000-06-10 22:27:01 +04:00
|
|
|
mtype = makefstype(mp->mnt_op->vfs_name);
|
2004-04-21 05:05:31 +04:00
|
|
|
mp->mnt_stat.f_fsidx.__fsid_val[0] = makedev(mtype, 0);
|
|
|
|
mp->mnt_stat.f_fsidx.__fsid_val[1] = mtype;
|
|
|
|
mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
|
1994-05-17 08:21:49 +04:00
|
|
|
if (xxxfs_mntid == 0)
|
|
|
|
++xxxfs_mntid;
|
2004-04-21 05:05:31 +04:00
|
|
|
tfsid.__fsid_val[0] = makedev(mtype & 0xff, xxxfs_mntid);
|
|
|
|
tfsid.__fsid_val[1] = mtype;
|
2002-09-04 05:32:31 +04:00
|
|
|
if (!CIRCLEQ_EMPTY(&mountlist)) {
|
1998-03-01 05:20:01 +03:00
|
|
|
while (vfs_getvfs(&tfsid)) {
|
2004-04-21 05:05:31 +04:00
|
|
|
tfsid.__fsid_val[0]++;
|
1994-05-17 08:21:49 +04:00
|
|
|
xxxfs_mntid++;
|
|
|
|
}
|
|
|
|
}
|
2004-04-21 05:05:31 +04:00
|
|
|
mp->mnt_stat.f_fsidx.__fsid_val[0] = tfsid.__fsid_val[0];
|
|
|
|
mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mntid_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Make a 'unique' number from a mount type name: fold each character
 * of the name into a shift-and-xor hash.
 */
long
makefstype(const char *type)
{
	const char *p;
	long hash = 0;

	for (p = type; *p != '\0'; p++)
		hash = (hash << 2) ^ *p;
	return hash;
}
|
1994-06-08 15:28:29 +04:00
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
|
1994-06-08 15:28:29 +04:00
|
|
|
/*
|
|
|
|
* Set vnode attributes to VNOVAL
|
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vattr_null(struct vattr *vap)
|
1994-06-08 15:28:29 +04:00
|
|
|
{
|
|
|
|
|
|
|
|
vap->va_type = VNON;
|
1997-10-18 15:51:32 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Assign individually so that it is safe even if size and
|
|
|
|
* sign of each member are varied.
|
|
|
|
*/
|
|
|
|
vap->va_mode = VNOVAL;
|
|
|
|
vap->va_nlink = VNOVAL;
|
|
|
|
vap->va_uid = VNOVAL;
|
|
|
|
vap->va_gid = VNOVAL;
|
|
|
|
vap->va_fsid = VNOVAL;
|
|
|
|
vap->va_fileid = VNOVAL;
|
1994-06-08 15:28:29 +04:00
|
|
|
vap->va_size = VNOVAL;
|
1997-10-18 15:51:32 +04:00
|
|
|
vap->va_blocksize = VNOVAL;
|
1997-10-18 20:34:17 +04:00
|
|
|
vap->va_atime.tv_sec =
|
|
|
|
vap->va_mtime.tv_sec =
|
2003-04-03 13:13:10 +04:00
|
|
|
vap->va_ctime.tv_sec =
|
|
|
|
vap->va_birthtime.tv_sec = VNOVAL;
|
1997-10-18 20:34:17 +04:00
|
|
|
vap->va_atime.tv_nsec =
|
|
|
|
vap->va_mtime.tv_nsec =
|
2003-04-03 13:13:10 +04:00
|
|
|
vap->va_ctime.tv_nsec =
|
|
|
|
vap->va_birthtime.tv_nsec = VNOVAL;
|
1997-10-18 15:51:32 +04:00
|
|
|
vap->va_gen = VNOVAL;
|
|
|
|
vap->va_flags = VNOVAL;
|
|
|
|
vap->va_rdev = VNOVAL;
|
1994-06-08 15:28:29 +04:00
|
|
|
vap->va_bytes = VNOVAL;
|
|
|
|
vap->va_vaflags = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);	/* ops vector defined elsewhere */
long numvnodes;				/* current number of vnodes */
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* Return the next vnode from the free list.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
|
|
|
|
struct vnode **vpp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2000-11-27 11:39:39 +03:00
|
|
|
extern struct uvm_pagerops uvm_vnodeops;
|
|
|
|
struct uvm_object *uobj;
|
2005-12-11 15:16:03 +03:00
|
|
|
struct lwp *l = curlwp; /* XXX */
|
1999-11-15 21:49:07 +03:00
|
|
|
static int toggle;
|
1998-03-01 05:20:01 +03:00
|
|
|
struct vnode *vp;
|
2001-06-27 02:52:03 +04:00
|
|
|
int error = 0, tryalloc;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2001-09-26 04:59:57 +04:00
|
|
|
try_again:
|
1999-07-04 20:20:12 +04:00
|
|
|
if (mp) {
|
|
|
|
/*
|
1999-07-29 17:31:45 +04:00
|
|
|
* Mark filesystem busy while we're creating a vnode.
|
|
|
|
* If unmount is in progress, this will wait; if the
|
|
|
|
* unmount succeeds (only if umount -f), this will
|
|
|
|
* return an error. If the unmount fails, we'll keep
|
|
|
|
* going afterwards.
|
|
|
|
* (This puts the per-mount vnode list logically under
|
|
|
|
* the protection of the vfs_busy lock).
|
1999-07-04 20:20:12 +04:00
|
|
|
*/
|
1999-11-15 21:49:07 +03:00
|
|
|
error = vfs_busy(mp, LK_RECURSEFAIL, 0);
|
|
|
|
if (error && error != EDEADLK)
|
1999-07-04 20:20:12 +04:00
|
|
|
return error;
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1999-11-15 21:49:07 +03:00
|
|
|
/*
|
|
|
|
* We must choose whether to allocate a new vnode or recycle an
|
|
|
|
* existing one. The criterion for allocating a new one is that
|
|
|
|
* the total number of vnodes is less than the number desired or
|
|
|
|
* there are no vnodes on either free list. Generally we only
|
|
|
|
* want to recycle vnodes that have no buffers associated with
|
|
|
|
* them, so we look first on the vnode_free_list. If it is empty,
|
|
|
|
* we next consider vnodes with referencing buffers on the
|
|
|
|
* vnode_hold_list. The toggle ensures that half the time we
|
|
|
|
* will use a buffer from the vnode_hold_list, and half the time
|
|
|
|
* we will allocate a new one unless the list has grown to twice
|
|
|
|
* the desired size. We are reticent to recycle vnodes from the
|
|
|
|
* vnode_hold_list because we will lose the identity of all its
|
|
|
|
* referencing buffers.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2001-06-27 02:52:03 +04:00
|
|
|
vp = NULL;
|
|
|
|
|
|
|
|
simple_lock(&vnode_free_list_slock);
|
|
|
|
|
1999-11-15 21:49:07 +03:00
|
|
|
toggle ^= 1;
|
|
|
|
if (numvnodes > 2 * desiredvnodes)
|
|
|
|
toggle = 0;
|
|
|
|
|
2001-06-27 02:52:03 +04:00
|
|
|
tryalloc = numvnodes < desiredvnodes ||
|
2001-09-26 04:59:57 +04:00
|
|
|
(TAILQ_FIRST(&vnode_free_list) == NULL &&
|
|
|
|
(TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));
|
2001-06-27 02:52:03 +04:00
|
|
|
|
|
|
|
if (tryalloc &&
|
|
|
|
(vp = pool_get(&vnode_pool, PR_NOWAIT)) != NULL) {
|
2003-09-14 15:09:48 +04:00
|
|
|
numvnodes++;
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&vnode_free_list_slock);
|
2000-11-27 11:39:39 +03:00
|
|
|
memset(vp, 0, sizeof(*vp));
|
2005-06-06 16:09:19 +04:00
|
|
|
UVM_OBJ_INIT(&vp->v_uobj, &uvm_vnodeops, 1);
|
2004-05-07 02:01:14 +04:00
|
|
|
/*
|
|
|
|
* done by memset() above.
|
|
|
|
* LIST_INIT(&vp->v_nclist);
|
|
|
|
* LIST_INIT(&vp->v_dnclist);
|
|
|
|
*/
|
1994-05-17 08:21:49 +04:00
|
|
|
} else {
|
2005-12-11 15:16:03 +03:00
|
|
|
vp = getcleanvnode(l);
|
1998-03-01 05:20:01 +03:00
|
|
|
/*
|
|
|
|
* Unless this is a bad time of the month, at most
|
|
|
|
* the first NCPUS items on the free list are
|
|
|
|
* locked, so this is close enough to being empty.
|
|
|
|
*/
|
|
|
|
if (vp == NULLVP) {
|
1999-11-15 21:49:07 +03:00
|
|
|
if (mp && error != EDEADLK)
|
|
|
|
vfs_unbusy(mp);
|
2001-06-27 02:52:03 +04:00
|
|
|
if (tryalloc) {
|
|
|
|
printf("WARNING: unable to allocate new "
|
|
|
|
"vnode, retrying...\n");
|
|
|
|
(void) tsleep(&lbolt, PRIBIO, "newvn", hz);
|
|
|
|
goto try_again;
|
|
|
|
}
|
2000-07-04 19:33:28 +04:00
|
|
|
tablefull("vnode", "increase kern.maxvnodes or NVNODE");
|
1994-05-17 08:21:49 +04:00
|
|
|
*vpp = 0;
|
|
|
|
return (ENFILE);
|
|
|
|
}
|
2005-06-06 16:09:19 +04:00
|
|
|
vp->v_usecount = 1;
|
1994-05-17 08:21:49 +04:00
|
|
|
vp->v_flag = 0;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
vp->v_socket = NULL;
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
vp->v_type = VNON;
|
1999-07-08 05:05:58 +04:00
|
|
|
vp->v_vnlock = &vp->v_lock;
|
|
|
|
lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
|
2004-05-07 02:01:14 +04:00
|
|
|
KASSERT(LIST_EMPTY(&vp->v_nclist));
|
|
|
|
KASSERT(LIST_EMPTY(&vp->v_dnclist));
|
1994-05-17 08:21:49 +04:00
|
|
|
vp->v_tag = tag;
|
|
|
|
vp->v_op = vops;
|
|
|
|
insmntque(vp, mp);
|
|
|
|
*vpp = vp;
|
1994-06-08 15:28:29 +04:00
|
|
|
vp->v_data = 0;
|
2005-01-13 00:51:52 +03:00
|
|
|
simple_lock_init(&vp->v_interlock);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* initialize uvm_object within vnode.
|
|
|
|
*/
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uobj = &vp->v_uobj;
|
|
|
|
KASSERT(uobj->pgops == &uvm_vnodeops);
|
|
|
|
KASSERT(uobj->uo_npages == 0);
|
|
|
|
KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
|
|
|
|
vp->v_size = VSIZENOTSET;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-15 21:49:07 +03:00
|
|
|
if (mp && error != EDEADLK)
|
|
|
|
vfs_unbusy(mp);
|
1994-05-17 08:21:49 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2000-06-28 03:34:45 +04:00
|
|
|
/*
|
|
|
|
* This is really just the reverse of getnewvnode(). Needed for
|
|
|
|
* VFS_VGET functions who may need to push back a vnode in case
|
|
|
|
* of a locking race.
|
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
ungetnewvnode(struct vnode *vp)
|
2000-06-28 03:34:45 +04:00
|
|
|
{
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (vp->v_usecount != 1)
|
2000-06-28 03:51:51 +04:00
|
|
|
panic("ungetnewvnode: busy vnode");
|
2000-06-28 03:34:45 +04:00
|
|
|
#endif
|
|
|
|
vp->v_usecount--;
|
|
|
|
insmntque(vp, NULL);
|
|
|
|
vp->v_type = VBAD;
|
|
|
|
|
|
|
|
simple_lock(&vp->v_interlock);
|
2004-03-23 16:22:32 +03:00
|
|
|
/*
|
2000-06-28 03:34:45 +04:00
|
|
|
* Insert at head of LRU list
|
|
|
|
*/
|
|
|
|
simple_lock(&vnode_free_list_slock);
|
|
|
|
if (vp->v_holdcnt > 0)
|
|
|
|
TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
|
|
|
|
else
|
|
|
|
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
|
2004-03-23 16:22:32 +03:00
|
|
|
simple_unlock(&vnode_free_list_slock);
|
2000-06-28 03:34:45 +04:00
|
|
|
simple_unlock(&vp->v_interlock);
|
|
|
|
}
|
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* Move a vnode from one mount queue to another.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
insmntque(struct vnode *vp, struct mount *mp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
|
|
|
|
1999-07-04 20:20:12 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if ((mp != NULL) &&
|
2003-10-14 18:02:56 +04:00
|
|
|
(mp->mnt_iflag & IMNT_UNMOUNT) &&
|
1999-11-15 21:49:07 +03:00
|
|
|
!(mp->mnt_flag & MNT_SOFTDEP) &&
|
|
|
|
vp->v_tag != VT_VFS) {
|
1999-07-04 20:20:12 +04:00
|
|
|
panic("insmntque into dying filesystem");
|
|
|
|
}
|
|
|
|
#endif
|
2004-03-23 16:22:32 +03:00
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntvnode_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* Delete from old mount point vnode list, if on one.
|
|
|
|
*/
|
|
|
|
if (vp->v_mount != NULL)
|
|
|
|
LIST_REMOVE(vp, v_mntvnodes);
|
|
|
|
/*
|
|
|
|
* Insert into list of vnodes for the new mount point, if available.
|
|
|
|
*/
|
1998-03-01 05:20:01 +03:00
|
|
|
if ((vp->v_mount = mp) != NULL)
|
|
|
|
LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
|
|
|
|
simple_unlock(&mntvnode_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update outstanding I/O count and do wakeup if requested.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vwakeup(struct buf *bp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2000-03-30 13:27:11 +04:00
|
|
|
struct vnode *vp;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1996-02-04 05:17:43 +03:00
|
|
|
if ((vp = bp->b_vp) != NULL) {
|
2003-02-06 00:38:38 +03:00
|
|
|
/* XXX global lock hack
|
|
|
|
* can't use v_interlock here since this is called
|
|
|
|
* in interrupt context from biodone().
|
|
|
|
*/
|
|
|
|
simple_lock(&global_v_numoutput_slock);
|
1994-06-08 15:28:29 +04:00
|
|
|
if (--vp->v_numoutput < 0)
|
2000-04-10 06:22:13 +04:00
|
|
|
panic("vwakeup: neg numoutput, vp %p", vp);
|
1994-05-17 08:21:49 +04:00
|
|
|
if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
|
|
|
|
vp->v_flag &= ~VBWAIT;
|
|
|
|
wakeup((caddr_t)&vp->v_numoutput);
|
|
|
|
}
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_unlock(&global_v_numoutput_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush out and invalidate all buffers associated with a vnode.
|
2000-05-28 08:13:56 +04:00
|
|
|
* Called with the underlying vnode locked, which should prevent new dirty
|
|
|
|
* buffers from being queued.
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1994-06-08 15:28:29 +04:00
|
|
|
int
|
2005-12-11 15:16:03 +03:00
|
|
|
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct lwp *l,
|
2005-06-06 03:47:48 +04:00
|
|
|
int slpflag, int slptimeo)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2000-05-28 08:13:56 +04:00
|
|
|
struct buf *bp, *nbp;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
int s, error;
|
2001-12-06 07:34:33 +03:00
|
|
|
int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
|
2000-11-27 11:39:39 +03:00
|
|
|
(flags & V_SAVE ? PGO_CLEANIT : 0);
|
|
|
|
|
|
|
|
/* XXXUBC this doesn't look at flags or slp* */
|
2001-12-06 07:34:33 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
error = VOP_PUTPAGES(vp, 0, 0, flushflags);
|
|
|
|
if (error) {
|
|
|
|
return error;
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
2001-12-06 07:34:33 +03:00
|
|
|
|
1994-06-08 15:28:29 +04:00
|
|
|
if (flags & V_SAVE) {
|
2005-12-11 15:16:03 +03:00
|
|
|
error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, l);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (error)
|
2000-03-17 04:25:06 +03:00
|
|
|
return (error);
|
2000-05-28 08:13:56 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
2000-03-17 04:25:06 +03:00
|
|
|
s = splbio();
|
2000-05-28 08:13:56 +04:00
|
|
|
if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
|
2000-04-10 06:22:13 +04:00
|
|
|
panic("vinvalbuf: dirty bufs, vp %p", vp);
|
1999-11-15 21:49:07 +03:00
|
|
|
splx(s);
|
2000-05-28 08:13:56 +04:00
|
|
|
#endif
|
1994-06-08 15:28:29 +04:00
|
|
|
}
|
1999-11-15 21:49:07 +03:00
|
|
|
|
1999-11-24 02:52:40 +03:00
|
|
|
s = splbio();
|
|
|
|
|
2000-05-28 08:13:56 +04:00
|
|
|
restart:
|
|
|
|
for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
|
|
|
|
nbp = LIST_NEXT(bp, b_vnbufs);
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_lock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (bp->b_flags & B_BUSY) {
|
|
|
|
bp->b_flags |= B_WANTED;
|
2003-02-06 00:38:38 +03:00
|
|
|
error = ltsleep((caddr_t)bp,
|
|
|
|
slpflag | (PRIBIO + 1) | PNORELOCK,
|
|
|
|
"vinvalbuf", slptimeo, &bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (error) {
|
|
|
|
splx(s);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
goto restart;
|
1999-11-15 21:49:07 +03:00
|
|
|
}
|
2000-05-28 08:13:56 +04:00
|
|
|
bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_unlock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
brelse(bp);
|
|
|
|
}
|
1994-06-08 15:28:29 +04:00
|
|
|
|
2000-05-28 08:13:56 +04:00
|
|
|
for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
|
|
|
|
nbp = LIST_NEXT(bp, b_vnbufs);
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_lock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (bp->b_flags & B_BUSY) {
|
|
|
|
bp->b_flags |= B_WANTED;
|
2003-02-06 00:38:38 +03:00
|
|
|
error = ltsleep((caddr_t)bp,
|
|
|
|
slpflag | (PRIBIO + 1) | PNORELOCK,
|
|
|
|
"vinvalbuf", slptimeo, &bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (error) {
|
|
|
|
splx(s);
|
|
|
|
return (error);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
2000-05-28 08:13:56 +04:00
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* XXX Since there are no node locks for NFS, I believe
|
|
|
|
* there is a slight chance that a delayed write will
|
|
|
|
* occur while sleeping just above, so check for it.
|
|
|
|
*/
|
|
|
|
if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
|
1999-11-15 21:49:07 +03:00
|
|
|
#ifdef DEBUG
|
2000-05-28 08:13:56 +04:00
|
|
|
printf("buffer still DELWRI\n");
|
1999-11-15 21:49:07 +03:00
|
|
|
#endif
|
2000-05-28 08:13:56 +04:00
|
|
|
bp->b_flags |= B_BUSY | B_VFLUSH;
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_unlock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
VOP_BWRITE(bp);
|
|
|
|
goto restart;
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
2000-05-28 08:13:56 +04:00
|
|
|
bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_unlock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
brelse(bp);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
1999-11-24 02:52:40 +03:00
|
|
|
|
2000-05-28 08:13:56 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
|
2000-04-10 06:22:13 +04:00
|
|
|
panic("vinvalbuf: flush failed, vp %p", vp);
|
2000-05-28 08:13:56 +04:00
|
|
|
#endif
|
|
|
|
|
|
|
|
splx(s);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Destroy any in core blocks past the truncation length.
|
|
|
|
* Called with the underlying vnode locked, which should prevent new dirty
|
|
|
|
* buffers from being queued.
|
|
|
|
*/
|
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
vtruncbuf(struct vnode *vp, daddr_t lbn, int slpflag, int slptimeo)
|
2000-05-28 08:13:56 +04:00
|
|
|
{
|
|
|
|
struct buf *bp, *nbp;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
int s, error;
|
2001-12-06 07:34:33 +03:00
|
|
|
voff_t off;
|
2000-05-28 08:13:56 +04:00
|
|
|
|
2001-12-06 07:34:33 +03:00
|
|
|
off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
|
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
|
|
|
|
if (error) {
|
|
|
|
return error;
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
2000-05-28 08:13:56 +04:00
|
|
|
|
2001-12-06 07:34:33 +03:00
|
|
|
s = splbio();
|
|
|
|
|
2000-05-28 08:13:56 +04:00
|
|
|
restart:
|
|
|
|
for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
|
|
|
|
nbp = LIST_NEXT(bp, b_vnbufs);
|
|
|
|
if (bp->b_lblkno < lbn)
|
|
|
|
continue;
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_lock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (bp->b_flags & B_BUSY) {
|
|
|
|
bp->b_flags |= B_WANTED;
|
2003-02-06 00:38:38 +03:00
|
|
|
error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
|
|
|
|
"vtruncbuf", slptimeo, &bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (error) {
|
|
|
|
splx(s);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_unlock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
brelse(bp);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
|
|
|
|
nbp = LIST_NEXT(bp, b_vnbufs);
|
|
|
|
if (bp->b_lblkno < lbn)
|
|
|
|
continue;
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_lock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (bp->b_flags & B_BUSY) {
|
|
|
|
bp->b_flags |= B_WANTED;
|
2003-02-06 00:38:38 +03:00
|
|
|
error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
|
|
|
|
"vtruncbuf", slptimeo, &bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (error) {
|
|
|
|
splx(s);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_unlock(&bp->b_interlock);
|
2000-05-28 08:13:56 +04:00
|
|
|
brelse(bp);
|
|
|
|
}
|
1999-11-24 02:52:40 +03:00
|
|
|
|
|
|
|
splx(s);
|
|
|
|
|
1994-06-08 15:28:29 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vflushbuf(struct vnode *vp, int sync)
|
1994-06-08 15:28:29 +04:00
|
|
|
{
|
2000-03-30 13:27:11 +04:00
|
|
|
struct buf *bp, *nbp;
|
2001-12-06 07:34:33 +03:00
|
|
|
int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);
|
1994-06-08 15:28:29 +04:00
|
|
|
int s;
|
|
|
|
|
2001-12-06 07:34:33 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
(void) VOP_PUTPAGES(vp, 0, 0, flags);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1994-06-08 15:28:29 +04:00
|
|
|
loop:
|
|
|
|
s = splbio();
|
2000-05-28 08:13:56 +04:00
|
|
|
for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
|
|
|
|
nbp = LIST_NEXT(bp, b_vnbufs);
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_lock(&bp->b_interlock);
|
|
|
|
if ((bp->b_flags & B_BUSY)) {
|
|
|
|
simple_unlock(&bp->b_interlock);
|
1994-06-08 15:28:29 +04:00
|
|
|
continue;
|
2003-02-06 00:38:38 +03:00
|
|
|
}
|
1994-06-08 15:28:29 +04:00
|
|
|
if ((bp->b_flags & B_DELWRI) == 0)
|
2000-04-10 06:22:13 +04:00
|
|
|
panic("vflushbuf: not dirty, bp %p", bp);
|
1997-04-10 01:12:10 +04:00
|
|
|
bp->b_flags |= B_BUSY | B_VFLUSH;
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_unlock(&bp->b_interlock);
|
1994-06-08 15:28:29 +04:00
|
|
|
splx(s);
|
|
|
|
/*
|
|
|
|
* Wait for I/O associated with indirect blocks to complete,
|
|
|
|
* since there is no way to quickly wait for them below.
|
|
|
|
*/
|
|
|
|
if (bp->b_vp == vp || sync == 0)
|
|
|
|
(void) bawrite(bp);
|
|
|
|
else
|
|
|
|
(void) bwrite(bp);
|
|
|
|
goto loop;
|
|
|
|
}
|
|
|
|
if (sync == 0) {
|
|
|
|
splx(s);
|
|
|
|
return;
|
|
|
|
}
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_lock(&global_v_numoutput_slock);
|
1994-06-08 15:28:29 +04:00
|
|
|
while (vp->v_numoutput) {
|
|
|
|
vp->v_flag |= VBWAIT;
|
2003-02-06 00:38:38 +03:00
|
|
|
ltsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0,
|
|
|
|
&global_v_numoutput_slock);
|
1994-06-08 15:28:29 +04:00
|
|
|
}
|
2003-02-06 00:38:38 +03:00
|
|
|
simple_unlock(&global_v_numoutput_slock);
|
1994-06-08 15:28:29 +04:00
|
|
|
splx(s);
|
2000-05-28 08:13:56 +04:00
|
|
|
if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
|
1994-06-08 15:28:29 +04:00
|
|
|
vprint("vflushbuf: dirty", vp);
|
|
|
|
goto loop;
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Associate a buffer with a vnode.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
bgetvp(struct vnode *vp, struct buf *bp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
1999-11-24 02:52:40 +03:00
|
|
|
int s;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
|
|
|
if (bp->b_vp)
|
2000-04-10 06:22:13 +04:00
|
|
|
panic("bgetvp: not free, bp %p", bp);
|
1994-05-17 08:21:49 +04:00
|
|
|
VHOLD(vp);
|
1999-11-24 02:52:40 +03:00
|
|
|
s = splbio();
|
1994-05-17 08:21:49 +04:00
|
|
|
bp->b_vp = vp;
|
|
|
|
if (vp->v_type == VBLK || vp->v_type == VCHR)
|
|
|
|
bp->b_dev = vp->v_rdev;
|
|
|
|
else
|
|
|
|
bp->b_dev = NODEV;
|
|
|
|
/*
|
|
|
|
* Insert onto list for new vnode.
|
|
|
|
*/
|
|
|
|
bufinsvn(bp, &vp->v_cleanblkhd);
|
1999-11-24 02:52:40 +03:00
|
|
|
splx(s);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Disassociate a buffer from a vnode.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
brelvp(struct buf *bp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
|
|
|
struct vnode *vp;
|
1999-11-24 02:52:40 +03:00
|
|
|
int s;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
2000-04-10 06:22:13 +04:00
|
|
|
if (bp->b_vp == NULL)
|
|
|
|
panic("brelvp: vp NULL, bp %p", bp);
|
1999-11-24 02:52:40 +03:00
|
|
|
|
|
|
|
s = splbio();
|
1999-11-15 21:49:07 +03:00
|
|
|
vp = bp->b_vp;
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* Delete from old vnode list, if on one.
|
|
|
|
*/
|
2002-09-04 05:32:31 +04:00
|
|
|
if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
|
1994-05-17 08:21:49 +04:00
|
|
|
bufremvn(bp);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if (TAILQ_EMPTY(&vp->v_uobj.memq) && (vp->v_flag & VONWORKLST) &&
|
2000-11-27 11:39:39 +03:00
|
|
|
LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
|
2005-07-23 16:18:41 +04:00
|
|
|
vp->v_flag &= ~(VWRITEMAPDIRTY|VONWORKLST);
|
1999-11-15 21:49:07 +03:00
|
|
|
LIST_REMOVE(vp, v_synclist);
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
|
|
|
|
bp->b_vp = NULL;
|
1994-05-17 08:21:49 +04:00
|
|
|
HOLDRELE(vp);
|
1999-11-24 02:52:40 +03:00
|
|
|
splx(s);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reassign a buffer from one vnode to another.
|
|
|
|
* Used to assign file specific control information
|
|
|
|
* (indirect blocks) to the vnode to which they belong.
|
1999-11-24 02:52:40 +03:00
|
|
|
*
|
|
|
|
* This function must be called at splbio().
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
reassignbuf(struct buf *bp, struct vnode *newvp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
1999-11-15 21:49:07 +03:00
|
|
|
struct buflists *listheadp;
|
2005-05-31 02:15:38 +04:00
|
|
|
int delayx;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Delete from old vnode list, if on one.
|
|
|
|
*/
|
2002-09-04 05:32:31 +04:00
|
|
|
if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
|
1994-05-17 08:21:49 +04:00
|
|
|
bufremvn(bp);
|
|
|
|
/*
|
|
|
|
* If dirty, put on list of dirty buffers;
|
|
|
|
* otherwise insert onto list of clean buffers.
|
|
|
|
*/
|
1999-11-15 21:49:07 +03:00
|
|
|
if ((bp->b_flags & B_DELWRI) == 0) {
|
1994-05-17 08:21:49 +04:00
|
|
|
listheadp = &newvp->v_cleanblkhd;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if (TAILQ_EMPTY(&newvp->v_uobj.memq) &&
|
2000-11-27 11:39:39 +03:00
|
|
|
(newvp->v_flag & VONWORKLST) &&
|
1999-11-15 21:49:07 +03:00
|
|
|
LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
|
2005-07-23 16:18:41 +04:00
|
|
|
newvp->v_flag &= ~(VWRITEMAPDIRTY|VONWORKLST);
|
1999-11-15 21:49:07 +03:00
|
|
|
LIST_REMOVE(newvp, v_synclist);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
listheadp = &newvp->v_dirtyblkhd;
|
|
|
|
if ((newvp->v_flag & VONWORKLST) == 0) {
|
|
|
|
switch (newvp->v_type) {
|
|
|
|
case VDIR:
|
2005-05-31 02:15:38 +04:00
|
|
|
delayx = dirdelay;
|
1999-11-15 21:49:07 +03:00
|
|
|
break;
|
|
|
|
case VBLK:
|
|
|
|
if (newvp->v_specmountpoint != NULL) {
|
2005-05-31 02:15:38 +04:00
|
|
|
delayx = metadelay;
|
1999-11-15 21:49:07 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* fall through */
|
|
|
|
default:
|
2005-05-31 02:15:38 +04:00
|
|
|
delayx = filedelay;
|
2000-03-03 08:21:03 +03:00
|
|
|
break;
|
1999-11-15 21:49:07 +03:00
|
|
|
}
|
2000-03-03 08:21:03 +03:00
|
|
|
if (!newvp->v_mount ||
|
|
|
|
(newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
|
2005-05-31 02:15:38 +04:00
|
|
|
vn_syncer_add_to_worklist(newvp, delayx);
|
1999-11-15 21:49:07 +03:00
|
|
|
}
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
bufinsvn(bp, listheadp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a vnode for a block device.
|
1997-01-31 22:10:27 +03:00
|
|
|
* Used for root filesystem and swap areas.
|
1994-05-17 08:21:49 +04:00
|
|
|
* Also used for memory file system special devices.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
bdevvp(dev_t dev, struct vnode **vpp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
1994-06-08 15:28:29 +04:00
|
|
|
|
|
|
|
return (getdevvp(dev, vpp, VBLK));
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a vnode for a character device.
|
|
|
|
* Used for kernfs and some console handling.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
cdevvp(dev_t dev, struct vnode **vpp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
1994-06-08 15:28:29 +04:00
|
|
|
|
|
|
|
return (getdevvp(dev, vpp, VCHR));
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a vnode for a device.
|
|
|
|
* Used by bdevvp (block device) for root file system etc.,
|
|
|
|
* and by cdevvp (character device) for console and kernfs.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2000-03-30 13:27:11 +04:00
|
|
|
struct vnode *vp;
|
1994-05-17 08:21:49 +04:00
|
|
|
struct vnode *nvp;
|
|
|
|
int error;
|
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
if (dev == NODEV) {
|
|
|
|
*vpp = NULLVP;
|
1994-05-17 08:21:49 +04:00
|
|
|
return (0);
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
1996-02-04 05:17:43 +03:00
|
|
|
error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
|
1994-05-17 08:21:49 +04:00
|
|
|
if (error) {
|
|
|
|
*vpp = NULLVP;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
vp = nvp;
|
|
|
|
vp->v_type = type;
|
1996-02-04 05:17:43 +03:00
|
|
|
if ((nvp = checkalias(vp, dev, NULL)) != 0) {
|
1994-05-17 08:21:49 +04:00
|
|
|
vput(vp);
|
|
|
|
vp = nvp;
|
|
|
|
}
|
|
|
|
*vpp = vp;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check to see if the new vnode represents a special device
|
|
|
|
* for which we already have a vnode (either because of
|
|
|
|
* bdevvp() or because of a different vnode representing
|
|
|
|
* the same block device). If such an alias exists, deallocate
|
|
|
|
* the existing contents and return the aliased vnode. The
|
|
|
|
* caller is responsible for filling it with its new contents.
|
|
|
|
*/
|
|
|
|
struct vnode *
|
2005-06-06 03:47:48 +04:00
|
|
|
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2005-12-11 15:16:03 +03:00
|
|
|
struct lwp *l = curlwp; /* XXX */
|
2000-03-30 13:27:11 +04:00
|
|
|
struct vnode *vp;
|
1994-05-17 08:21:49 +04:00
|
|
|
struct vnode **vpp;
|
|
|
|
|
|
|
|
if (nvp->v_type != VBLK && nvp->v_type != VCHR)
|
|
|
|
return (NULLVP);
|
|
|
|
|
|
|
|
vpp = &speclisth[SPECHASH(nvp_rdev)];
|
|
|
|
loop:
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&spechash_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
for (vp = *vpp; vp; vp = vp->v_specnext) {
|
|
|
|
if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
* Alias, but not in use, so flush it out.
|
|
|
|
*/
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
There is an annoying deadlock that goes like this:
* Process A is closing one file descriptor belonging to a device. In doing so,
ffs_update() is called and starts writing a block synchronously. (Note: This
leaves the vnode locked. It also has other instances -- stdin, et al -- of
the same device open, so v_usecount is definitely non-zero.)
* Process B does a revoke() on the device. The revoke() has to wait for the
vnode to be unlocked because ffs_update() is still in progress.
* Process C tries to open() the device. It wedges in checkalias() repeatedly
calling vget() because it returns EBUSY immediately.
To fix, this:
* checkalias() now uses LK_SLEEPFAIL rather than LK_NOWAIT. Therefore it will
wait for the vnode to become unlocked, but it will recheck that it is on the
hash list, in case it was in the process of being revoke()d or was revoke()d
again before we were woken up.
* Since we're relying on the vnode lock to tell us that the vnode hasn't been
removed from the hash list *anyway*, I have moved the code to remove it into
the DOCLOSE section of vclean(), inside the vnode lock.
In the example at hand, process A was sh(1), process B was a child of init(8),
and process C was syslogd(8).
2004-08-14 02:48:06 +04:00
|
|
|
simple_unlock(&spechash_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
if (vp->v_usecount == 0) {
|
2005-12-11 15:16:03 +03:00
|
|
|
vgonel(vp, l);
|
1994-05-17 08:21:49 +04:00
|
|
|
goto loop;
|
|
|
|
}
|
There is an annoying deadlock that goes like this:
* Process A is closing one file descriptor belonging to a device. In doing so,
ffs_update() is called and starts writing a block synchronously. (Note: This
leaves the vnode locked. It also has other instances -- stdin, et al -- of
the same device open, so v_usecount is definitely non-zero.)
* Process B does a revoke() on the device. The revoke() has to wait for the
vnode to be unlocked because ffs_update() is still in progress.
* Process C tries to open() the device. It wedges in checkalias() repeatedly
calling vget() because it returns EBUSY immediately.
To fix, this:
* checkalias() now uses LK_SLEEPFAIL rather than LK_NOWAIT. Therefore it will
wait for the vnode to become unlocked, but it will recheck that it is on the
hash list, in case it was in the process of being revoke()d or was revoke()d
again before we were woken up.
* Since we're relying on the vnode lock to tell us that the vnode hasn't been
removed from the hash list *anyway*, I have moved the code to remove it into
the DOCLOSE section of vclean(), inside the vnode lock.
In the example at hand, process A was sh(1), process B was a child of init(8),
and process C was syslogd(8).
2004-08-14 02:48:06 +04:00
|
|
|
/*
|
|
|
|
* What we're interested to know here is if someone else has
|
|
|
|
* removed this vnode from the device hash list while we were
|
|
|
|
* waiting. This can only happen if vclean() did it, and
|
|
|
|
* this requires the vnode to be locked. Therefore, we use
|
|
|
|
* LK_SLEEPFAIL and retry.
|
|
|
|
*/
|
|
|
|
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL))
|
1994-05-17 08:21:49 +04:00
|
|
|
goto loop;
|
There is an annoying deadlock that goes like this:
* Process A is closing one file descriptor belonging to a device. In doing so,
ffs_update() is called and starts writing a block synchronously. (Note: This
leaves the vnode locked. It also has other instances -- stdin, et al -- of
the same device open, so v_usecount is definitely non-zero.)
* Process B does a revoke() on the device. The revoke() has to wait for the
vnode to be unlocked because ffs_update() is still in progress.
* Process C tries to open() the device. It wedges in checkalias() repeatedly
calling vget() because it returns EBUSY immediately.
To fix, this:
* checkalias() now uses LK_SLEEPFAIL rather than LK_NOWAIT. Therefore it will
wait for the vnode to become unlocked, but it will recheck that it is on the
hash list, in case it was in the process of being revoke()d or was revoke()d
again before we were woken up.
* Since we're relying on the vnode lock to tell us that the vnode hasn't been
removed from the hash list *anyway*, I have moved the code to remove it into
the DOCLOSE section of vclean(), inside the vnode lock.
In the example at hand, process A was sh(1), process B was a child of init(8),
and process C was syslogd(8).
2004-08-14 02:48:06 +04:00
|
|
|
simple_lock(&spechash_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
break;
|
|
|
|
}
|
1994-07-10 09:53:25 +04:00
|
|
|
if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
|
1994-05-17 08:21:49 +04:00
|
|
|
MALLOC(nvp->v_specinfo, struct specinfo *,
|
2001-06-05 08:42:05 +04:00
|
|
|
sizeof(struct specinfo), M_VNODE, M_NOWAIT);
|
|
|
|
/* XXX Erg. */
|
|
|
|
if (nvp->v_specinfo == NULL) {
|
|
|
|
simple_unlock(&spechash_slock);
|
|
|
|
uvm_wait("checkalias");
|
|
|
|
goto loop;
|
|
|
|
}
|
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
nvp->v_rdev = nvp_rdev;
|
|
|
|
nvp->v_hashchain = vpp;
|
|
|
|
nvp->v_specnext = *vpp;
|
1999-11-15 21:49:07 +03:00
|
|
|
nvp->v_specmountpoint = NULL;
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&spechash_slock);
|
1997-04-04 03:15:52 +04:00
|
|
|
nvp->v_speclockf = NULL;
|
2004-02-14 03:00:56 +03:00
|
|
|
simple_lock_init(&nvp->v_spec_cow_slock);
|
|
|
|
SLIST_INIT(&nvp->v_spec_cow_head);
|
|
|
|
nvp->v_spec_cow_req = 0;
|
|
|
|
nvp->v_spec_cow_count = 0;
|
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
*vpp = nvp;
|
1998-03-01 05:20:01 +03:00
|
|
|
if (vp != NULLVP) {
|
1994-05-17 08:21:49 +04:00
|
|
|
nvp->v_flag |= VALIASED;
|
|
|
|
vp->v_flag |= VALIASED;
|
|
|
|
vput(vp);
|
|
|
|
}
|
|
|
|
return (NULLVP);
|
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&spechash_slock);
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
simple_lock(&vp->v_interlock);
|
2005-12-11 15:16:03 +03:00
|
|
|
vclean(vp, 0, l);
|
1994-05-17 08:21:49 +04:00
|
|
|
vp->v_op = nvp->v_op;
|
|
|
|
vp->v_tag = nvp->v_tag;
|
1999-07-08 05:05:58 +04:00
|
|
|
vp->v_vnlock = &vp->v_lock;
|
|
|
|
lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
|
1994-05-17 08:21:49 +04:00
|
|
|
nvp->v_type = VNON;
|
|
|
|
insmntque(vp, mp);
|
|
|
|
return (vp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Grab a particular vnode from the free list, increment its
|
1998-03-04 12:13:48 +03:00
|
|
|
* reference count and lock it. If the vnode lock bit is set the
|
|
|
|
* vnode is being eliminated in vgone. In that case, we can not
|
|
|
|
* grab the vnode, so the process is awakened when the transition is
|
|
|
|
* completed, and an error returned to indicate that the vnode is no
|
|
|
|
* longer usable (possibly having been changed to a new file system type).
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1994-06-08 15:28:29 +04:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
vget(struct vnode *vp, int flags)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2002-05-24 03:05:25 +04:00
|
|
|
int error;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1994-06-08 15:28:29 +04:00
|
|
|
/*
|
|
|
|
* If the vnode is in the process of being cleaned out for
|
|
|
|
* another use, we wait for the cleaning to finish and then
|
1998-03-01 05:20:01 +03:00
|
|
|
* return failure. Cleaning is determined by checking that
|
|
|
|
* the VXLOCK flag is set.
|
1994-06-08 15:28:29 +04:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
if ((flags & LK_INTERLOCK) == 0)
|
|
|
|
simple_lock(&vp->v_interlock);
|
2005-12-23 18:31:40 +03:00
|
|
|
if ((vp->v_flag & (VXLOCK | VFREEING)) != 0) {
|
2000-11-27 11:39:39 +03:00
|
|
|
if (flags & LK_NOWAIT) {
|
2000-12-31 06:13:51 +03:00
|
|
|
simple_unlock(&vp->v_interlock);
|
2000-11-27 11:39:39 +03:00
|
|
|
return EBUSY;
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
vp->v_flag |= VXWANT;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
ltsleep(vp, PINOD|PNORELOCK, "vget", 0, &vp->v_interlock);
|
1998-03-01 05:20:01 +03:00
|
|
|
return (ENOENT);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
if (vp->v_usecount == 0) {
|
|
|
|
simple_lock(&vnode_free_list_slock);
|
1999-11-15 21:49:07 +03:00
|
|
|
if (vp->v_holdcnt > 0)
|
|
|
|
TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
|
|
|
|
else
|
|
|
|
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&vnode_free_list_slock);
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
vp->v_usecount++;
|
1999-10-02 02:03:17 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (vp->v_usecount == 0) {
|
|
|
|
vprint("vget", vp);
|
2000-04-10 06:22:13 +04:00
|
|
|
panic("vget: usecount overflow, vp %p", vp);
|
1999-10-02 02:03:17 +04:00
|
|
|
}
|
|
|
|
#endif
|
1998-03-01 05:20:01 +03:00
|
|
|
if (flags & LK_TYPE_MASK) {
|
1999-11-15 21:49:07 +03:00
|
|
|
if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
|
2005-12-23 18:31:40 +03:00
|
|
|
vrele(vp);
|
1999-11-15 21:49:07 +03:00
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
simple_unlock(&vp->v_interlock);
|
1994-05-17 08:21:49 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vput(), just unlock and vrele()
|
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vput(struct vnode *vp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2005-12-11 15:16:03 +03:00
|
|
|
struct lwp *l = curlwp; /* XXX */
|
1994-06-08 15:28:29 +04:00
|
|
|
|
1999-10-02 01:57:42 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
1998-03-01 05:20:01 +03:00
|
|
|
if (vp == NULL)
|
|
|
|
panic("vput: null vp");
|
|
|
|
#endif
|
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
vp->v_usecount--;
|
|
|
|
if (vp->v_usecount > 0) {
|
|
|
|
simple_unlock(&vp->v_interlock);
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
|
|
|
|
vprint("vput: bad ref count", vp);
|
|
|
|
panic("vput: ref cnt");
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
/*
|
1998-05-18 18:59:49 +04:00
|
|
|
* Insert at tail of LRU list.
|
1998-03-01 05:20:01 +03:00
|
|
|
*/
|
|
|
|
simple_lock(&vnode_free_list_slock);
|
1999-11-15 21:49:07 +03:00
|
|
|
if (vp->v_holdcnt > 0)
|
|
|
|
TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
|
|
|
|
else
|
|
|
|
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&vnode_free_list_slock);
|
2001-10-30 18:32:01 +03:00
|
|
|
if (vp->v_flag & VEXECMAP) {
|
2001-12-09 06:07:43 +03:00
|
|
|
uvmexp.execpages -= vp->v_uobj.uo_npages;
|
|
|
|
uvmexp.filepages += vp->v_uobj.uo_npages;
|
2001-03-09 04:02:10 +03:00
|
|
|
}
|
2005-07-23 16:18:41 +04:00
|
|
|
vp->v_flag &= ~(VTEXT|VEXECMAP|VWRITEMAP);
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&vp->v_interlock);
|
2005-12-11 15:16:03 +03:00
|
|
|
VOP_INACTIVE(vp, l);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode release.
|
|
|
|
* If count drops to zero, call inactive routine and return to freelist.
|
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vrele(struct vnode *vp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2005-12-11 15:16:03 +03:00
|
|
|
struct lwp *l = curlwp; /* XXX */
|
1994-05-17 08:21:49 +04:00
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (vp == NULL)
|
|
|
|
panic("vrele: null vp");
|
|
|
|
#endif
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
1994-05-17 08:21:49 +04:00
|
|
|
vp->v_usecount--;
|
1998-03-01 05:20:01 +03:00
|
|
|
if (vp->v_usecount > 0) {
|
|
|
|
simple_unlock(&vp->v_interlock);
|
1994-05-17 08:21:49 +04:00
|
|
|
return;
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
1998-03-01 05:20:01 +03:00
|
|
|
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
|
1994-05-17 08:21:49 +04:00
|
|
|
vprint("vrele: bad ref count", vp);
|
2000-11-27 11:39:39 +03:00
|
|
|
panic("vrele: ref cnt vp %p", vp);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
#endif
|
1994-06-08 15:28:29 +04:00
|
|
|
/*
|
1998-05-18 18:59:49 +04:00
|
|
|
* Insert at tail of LRU list.
|
1994-06-08 15:28:29 +04:00
|
|
|
*/
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&vnode_free_list_slock);
|
1999-11-15 21:49:07 +03:00
|
|
|
if (vp->v_holdcnt > 0)
|
|
|
|
TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
|
|
|
|
else
|
|
|
|
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&vnode_free_list_slock);
|
2001-10-30 18:32:01 +03:00
|
|
|
if (vp->v_flag & VEXECMAP) {
|
2001-12-09 06:07:43 +03:00
|
|
|
uvmexp.execpages -= vp->v_uobj.uo_npages;
|
|
|
|
uvmexp.filepages += vp->v_uobj.uo_npages;
|
2001-03-09 04:02:10 +03:00
|
|
|
}
|
2005-07-23 16:18:41 +04:00
|
|
|
vp->v_flag &= ~(VTEXT|VEXECMAP|VWRITEMAP);
|
1998-03-01 05:20:01 +03:00
|
|
|
if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
|
2005-12-11 15:16:03 +03:00
|
|
|
VOP_INACTIVE(vp, l);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Page or buffer structure gets a reference.
|
2005-12-27 07:06:45 +03:00
|
|
|
* Called with v_interlock held.
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1994-06-08 15:28:29 +04:00
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vholdl(struct vnode *vp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
|
|
|
|
1999-11-15 21:49:07 +03:00
|
|
|
/*
|
|
|
|
* If it is on the freelist and the hold count is currently
|
|
|
|
* zero, move it to the hold list. The test of the back
|
|
|
|
* pointer and the use reference count of zero is because
|
|
|
|
* it will be removed from a free list by getnewvnode,
|
|
|
|
* but will not have its reference count incremented until
|
|
|
|
* after calling vgone. If the reference count were
|
|
|
|
* incremented first, vgone would (incorrectly) try to
|
|
|
|
* close the previous instance of the underlying object.
|
|
|
|
* So, the back pointer is explicitly set to `0xdeadb' in
|
|
|
|
* getnewvnode after removing it from a freelist to ensure
|
|
|
|
* that we do not try to move it here.
|
|
|
|
*/
|
|
|
|
if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
|
|
|
|
vp->v_holdcnt == 0 && vp->v_usecount == 0) {
|
|
|
|
simple_lock(&vnode_free_list_slock);
|
|
|
|
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
|
|
|
|
TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
|
|
|
|
simple_unlock(&vnode_free_list_slock);
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
vp->v_holdcnt++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Page or buffer structure frees a reference.
|
2005-12-27 07:06:45 +03:00
|
|
|
* Called with v_interlock held.
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1994-06-08 15:28:29 +04:00
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
holdrelel(struct vnode *vp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
|
|
|
|
|
|
|
if (vp->v_holdcnt <= 0)
|
2004-01-14 14:28:04 +03:00
|
|
|
panic("holdrelel: holdcnt vp %p", vp);
|
1994-05-17 08:21:49 +04:00
|
|
|
vp->v_holdcnt--;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-15 21:49:07 +03:00
|
|
|
/*
|
|
|
|
* If it is on the holdlist and the hold count drops to
|
|
|
|
* zero, move it to the free list. The test of the back
|
|
|
|
* pointer and the use reference count of zero is because
|
|
|
|
* it will be removed from a free list by getnewvnode,
|
|
|
|
* but will not have its reference count incremented until
|
|
|
|
* after calling vgone. If the reference count were
|
|
|
|
* incremented first, vgone would (incorrectly) try to
|
|
|
|
* close the previous instance of the underlying object.
|
|
|
|
* So, the back pointer is explicitly set to `0xdeadb' in
|
|
|
|
* getnewvnode after removing it from a freelist to ensure
|
|
|
|
* that we do not try to move it here.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-15 21:49:07 +03:00
|
|
|
if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
|
|
|
|
vp->v_holdcnt == 0 && vp->v_usecount == 0) {
|
|
|
|
simple_lock(&vnode_free_list_slock);
|
|
|
|
TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
|
|
|
|
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
|
|
|
|
simple_unlock(&vnode_free_list_slock);
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
1998-03-01 12:51:29 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode reference.
|
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vref(struct vnode *vp)
|
1998-03-01 12:51:29 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
if (vp->v_usecount <= 0)
|
2000-04-10 06:22:13 +04:00
|
|
|
panic("vref used where vget required, vp %p", vp);
|
1998-03-01 12:51:29 +03:00
|
|
|
vp->v_usecount++;
|
1999-10-02 02:03:17 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (vp->v_usecount == 0) {
|
|
|
|
vprint("vref", vp);
|
2000-04-10 06:22:13 +04:00
|
|
|
panic("vref: usecount overflow, vp %p", vp);
|
1999-10-02 02:03:17 +04:00
|
|
|
}
|
|
|
|
#endif
|
1998-03-01 12:51:29 +03:00
|
|
|
simple_unlock(&vp->v_interlock);
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove any vnodes in the vnode table belonging to mount point mp.
|
|
|
|
*
|
2002-12-29 09:47:57 +03:00
|
|
|
* If FORCECLOSE is not specified, there should not be any active ones,
|
1994-05-17 08:21:49 +04:00
|
|
|
* return error if any are found (nb: this is a user error, not a
|
2002-12-29 09:47:57 +03:00
|
|
|
* system error). If FORCECLOSE is specified, detach any active vnodes
|
1994-05-17 08:21:49 +04:00
|
|
|
* that are found.
|
2002-12-29 09:47:57 +03:00
|
|
|
*
|
|
|
|
* If WRITECLOSE is set, only flush out regular file vnodes open for
|
|
|
|
* writing.
|
|
|
|
*
|
|
|
|
* SKIPSYSTEM causes any vnodes marked V_SYSTEM to be skipped.
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1994-06-08 15:28:29 +04:00
|
|
|
#ifdef DEBUG
|
|
|
|
int busyprt = 0; /* print out busy vnodes */
|
|
|
|
struct ctldebug debug1 = { "busyprt", &busyprt };
|
|
|
|
#endif
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1996-02-04 05:17:43 +03:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
vflush(struct mount *mp, struct vnode *skipvp, int flags)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2005-12-11 15:16:03 +03:00
|
|
|
struct lwp *l = curlwp; /* XXX */
|
2000-03-30 13:27:11 +04:00
|
|
|
struct vnode *vp, *nvp;
|
1994-05-17 08:21:49 +04:00
|
|
|
int busy = 0;
|
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntvnode_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
loop:
|
2002-09-04 05:32:31 +04:00
|
|
|
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
|
1994-05-17 08:21:49 +04:00
|
|
|
if (vp->v_mount != mp)
|
|
|
|
goto loop;
|
2002-09-04 05:32:31 +04:00
|
|
|
nvp = LIST_NEXT(vp, v_mntvnodes);
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* Skip over a selected vnode.
|
|
|
|
*/
|
|
|
|
if (vp == skipvp)
|
|
|
|
continue;
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* Skip over a vnodes marked VSYSTEM.
|
|
|
|
*/
|
1998-03-01 05:20:01 +03:00
|
|
|
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
|
|
|
|
simple_unlock(&vp->v_interlock);
|
1994-05-17 08:21:49 +04:00
|
|
|
continue;
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
1994-06-08 15:28:29 +04:00
|
|
|
/*
|
|
|
|
* If WRITECLOSE is set, only flush out regular file
|
|
|
|
* vnodes open for writing.
|
|
|
|
*/
|
|
|
|
if ((flags & WRITECLOSE) &&
|
1998-08-17 21:29:20 +04:00
|
|
|
(vp->v_writecount == 0 || vp->v_type != VREG)) {
|
|
|
|
simple_unlock(&vp->v_interlock);
|
1994-06-08 15:28:29 +04:00
|
|
|
continue;
|
1998-08-17 21:29:20 +04:00
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* With v_usecount == 0, all we need to do is clear
|
|
|
|
* out the vnode data structures and we are done.
|
|
|
|
*/
|
|
|
|
if (vp->v_usecount == 0) {
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mntvnode_slock);
|
2005-12-11 15:16:03 +03:00
|
|
|
vgonel(vp, l);
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntvnode_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/*
|
1994-06-08 15:28:29 +04:00
|
|
|
* If FORCECLOSE is set, forcibly close the vnode.
|
1994-05-17 08:21:49 +04:00
|
|
|
* For block or character devices, revert to an
|
|
|
|
* anonymous device. For all other files, just kill them.
|
|
|
|
*/
|
|
|
|
if (flags & FORCECLOSE) {
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mntvnode_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
if (vp->v_type != VBLK && vp->v_type != VCHR) {
|
2005-12-11 15:16:03 +03:00
|
|
|
vgonel(vp, l);
|
1994-05-17 08:21:49 +04:00
|
|
|
} else {
|
2005-12-11 15:16:03 +03:00
|
|
|
vclean(vp, 0, l);
|
1994-06-08 15:28:29 +04:00
|
|
|
vp->v_op = spec_vnodeop_p;
|
1994-05-17 08:21:49 +04:00
|
|
|
insmntque(vp, (struct mount *)0);
|
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntvnode_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
continue;
|
|
|
|
}
|
1994-06-08 15:28:29 +04:00
|
|
|
#ifdef DEBUG
|
1994-05-17 08:21:49 +04:00
|
|
|
if (busyprt)
|
|
|
|
vprint("vflush: busy vnode", vp);
|
1994-06-08 15:28:29 +04:00
|
|
|
#endif
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&vp->v_interlock);
|
1994-05-17 08:21:49 +04:00
|
|
|
busy++;
|
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mntvnode_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
if (busy)
|
|
|
|
return (EBUSY);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be entered with vp->v_interlock held; the interlock is released
 * as a side effect of the VOP_LOCK(LK_DRAIN | LK_INTERLOCK) call below.
 * "flags" may contain DOCLOSE to force the vnode closed and its buffers
 * flushed; "l" is the lwp on whose behalf the cleaning is done.  On
 * return the vnode has been reclaimed, redirected to the dead vnodeops,
 * and any VXWANT sleepers have been woken.
 */
void
vclean(struct vnode *vp, int flags, struct lwp *l)
{
	struct mount *mp;
	int active;

	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0) {
		vp->v_usecount++;
#ifdef DIAGNOSTIC
		if (vp->v_usecount == 0) {
			/* v_usecount wrapped around to zero. */
			vprint("vclean", vp);
			panic("vclean: usecount overflow");
		}
#endif
	}

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock, vp %p", vp);
	vp->v_flag |= VXLOCK;
	if (vp->v_flag & VEXECMAP) {
		/* Re-account this vnode's pages from exec to file pages. */
		uvmexp.execpages -= vp->v_uobj.uo_npages;
		uvmexp.filepages += vp->v_uobj.uo_npages;
	}
	vp->v_flag &= ~(VTEXT|VEXECMAP);

	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 * Note: LK_INTERLOCK releases vp->v_interlock.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If special device, remove it from special device alias list.
	 * if it is on one.
	 */
	if (flags & DOCLOSE) {
		int error;
		struct vnode *vq, *vx;

		/*
		 * First try to flush buffers while saving dirty data
		 * (V_SAVE); on failure, retry discarding them.
		 */
		vn_start_write(vp, &mp, V_WAIT | V_LOWER);
		error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
		vn_finished_write(mp, V_LOWER);
		if (error)
			error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
		KASSERT(error == 0);
		KASSERT((vp->v_flag & VONWORKLST) == 0);

		if (active)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);

		/*
		 * Unhook a special device from its spechash alias
		 * chain.  Done inside the vnode lock so that revoke()
		 * and checkalias() observe a consistent hash list.
		 */
		if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
		    vp->v_specinfo != 0) {
			simple_lock(&spechash_slock);
			if (vp->v_hashchain != NULL) {
				if (*vp->v_hashchain == vp) {
					*vp->v_hashchain = vp->v_specnext;
				} else {
					for (vq = *vp->v_hashchain; vq;
					    vq = vq->v_specnext) {
						if (vq->v_specnext != vp)
							continue;
						vq->v_specnext = vp->v_specnext;
						break;
					}
					if (vq == NULL)
						panic("missing bdev");
				}
				if (vp->v_flag & VALIASED) {
					/*
					 * If exactly one alias of the same
					 * dev/type remains, it is no longer
					 * aliased.
					 */
					vx = NULL;
					for (vq = *vp->v_hashchain; vq;
					    vq = vq->v_specnext) {
						if (vq->v_rdev != vp->v_rdev ||
						    vq->v_type != vp->v_type)
							continue;
						if (vx)
							break;
						vx = vq;
					}
					if (vx == NULL)
						panic("missing alias");
					if (vq == NULL)
						vx->v_flag &= ~VALIASED;
					vp->v_flag &= ~VALIASED;
				}
			}
			simple_unlock(&spechash_slock);
			FREE(vp->v_specinfo, M_VNODE);
			vp->v_specinfo = NULL;
		}
	}
	LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		VOP_INACTIVE(vp, l);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, l))
		panic("vclean: cannot reclaim, vp %p", vp);
	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		simple_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			/*
			 * Insert at tail of LRU list.
			 */
			simple_unlock(&vp->v_interlock);
			simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean, vp %p", vp);
#endif
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			simple_unlock(&vnode_free_list_slock);
		} else
			simple_unlock(&vp->v_interlock);
	}

	KASSERT(vp->v_uobj.uo_npages == 0);
	/* Release any read-ahead context attached to a regular file. */
	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	simple_lock(&vp->v_interlock);
	VN_KNOTE(vp, NOTE_REVOKE);	/* FreeBSD has this in vn_pollgone() */
	vp->v_flag &= ~(VXLOCK|VLOCKSWORK);
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		simple_unlock(&vp->v_interlock);
		wakeup((caddr_t)vp);
	} else
		simple_unlock(&vp->v_interlock);
}
|
|
|
|
|
|
|
|
/*
|
1998-03-01 05:20:01 +03:00
|
|
|
* Recycle an unused vnode to the front of the free list.
|
|
|
|
* Release the passed interlock if the vnode will be recycled.
|
1994-05-17 08:21:49 +04:00
|
|
|
*/
|
1998-03-01 05:20:01 +03:00
|
|
|
int
|
2005-12-11 15:16:03 +03:00
|
|
|
vrecycle(struct vnode *vp, struct simplelock *inter_lkp, struct lwp *l)
|
2004-03-23 16:22:32 +03:00
|
|
|
{
|
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
if (vp->v_usecount == 0) {
|
|
|
|
if (inter_lkp)
|
|
|
|
simple_unlock(inter_lkp);
|
2005-12-11 15:16:03 +03:00
|
|
|
vgonel(vp, l);
|
1998-03-01 05:20:01 +03:00
|
|
|
return (1);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&vp->v_interlock);
|
|
|
|
return (0);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 *
 * Convenience wrapper around vgonel(): acquires vp->v_interlock on
 * behalf of the caller and uses the current lwp.
 */
void
vgone(struct vnode *vp)
{
	struct lwp *l = curlwp;		/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, l);
}
|
|
|
|
|
|
|
|
/*
 * vgone, with the vp interlock held.
 *
 * The interlock is consumed: it is passed to vclean() (released there
 * via LK_INTERLOCK) or dropped by ltsleep() if another cleaner is
 * already at work on this vnode.
 */
void
vgonel(struct vnode *vp, struct lwp *l)
{

	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, &vp->v_interlock);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, l);
	KASSERT((vp->v_flag & VONWORKLST) == 0);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);

	/*
	 * The test of the back pointer and the reference count of
	 * zero is because it will be removed from the free list by
	 * getcleanvnode, but will not have its reference count
	 * incremented until after calling vgone. If the reference
	 * count were incremented first, vgone would (incorrectly)
	 * try to close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */
	vp->v_type = VBAD;
	if (vp->v_usecount == 0) {
		boolean_t dofree;

		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean, vp %p", vp);
		/*
		 * if it isn't on the freelist, we're called by getcleanvnode
		 * and vnode is being re-used. otherwise, we'll free it.
		 */
		dofree = vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb;
		if (dofree) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			numvnodes--;
		}
		simple_unlock(&vnode_free_list_slock);
		if (dofree)
			pool_put(&vnode_pool, vp);
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lookup a vnode by device number.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
1998-03-01 05:20:01 +03:00
|
|
|
struct vnode *vp;
|
|
|
|
int rc = 0;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&spechash_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
|
|
|
|
if (dev != vp->v_rdev || type != vp->v_type)
|
|
|
|
continue;
|
|
|
|
*vpp = vp;
|
1998-03-01 05:20:01 +03:00
|
|
|
rc = 1;
|
|
|
|
break;
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&spechash_slock);
|
|
|
|
return (rc);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
1998-11-18 23:24:59 +03:00
|
|
|
/*
|
|
|
|
* Revoke all the vnodes corresponding to the specified minor number
|
|
|
|
* range (endpoints inclusive) of the specified major.
|
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vdevgone(int maj, int minl, int minh, enum vtype type)
|
1998-11-18 23:24:59 +03:00
|
|
|
{
|
|
|
|
struct vnode *vp;
|
|
|
|
int mn;
|
|
|
|
|
|
|
|
for (mn = minl; mn <= minh; mn++)
|
|
|
|
if (vfinddev(makedev(maj, mn), type, &vp))
|
|
|
|
VOP_REVOKE(vp, REVOKEALL);
|
|
|
|
}
|
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
 * Calculate the total number of references to a special device.
 *
 * For a non-aliased vnode this is just v_usecount.  For an aliased
 * device, sum v_usecount over every alias on the spechash chain with
 * the same rdev/type.  Unused aliases encountered along the way are
 * flushed with vgone(); since that drops spechash_slock, the whole
 * scan is restarted from "loop" after each flush.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	/* vnext is captured before any potential vgone() invalidates vq. */
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp &&
		    (vq->v_flag & VXLOCK) == 0) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}
|
|
|
|
|
2004-12-18 06:07:40 +03:00
|
|
|
/* Number of elements in a true array (not valid on pointers). */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
/*
 * Safely index a name table, yielding "UNKNOWN" for out-of-range
 * indices.  The unsigned cast rejects negative indices and — unlike
 * the previous "(idx) > 0" test — accepts index 0, so the table's
 * first entry (tag/type value 0) prints its real name instead of
 * "UNKNOWN".
 */
#define ARRAY_PRINT(idx, arr) \
    ((unsigned int)(idx) < ARRAY_SIZE(arr) ? (arr)[(idx)] : "UNKNOWN")
|
|
|
|
|
|
|
|
/*
 * Printable names for vnode tags, types and flag bits, indexed by the
 * corresponding enum value; consumed by vprint() via ARRAY_PRINT.
 * The initializer lists come from the VNODE_TAGS/VNODE_TYPES/
 * VNODE_FLAGBITS macros (presumably sys/vnode.h — not visible here).
 */
const char * const vnode_tags[] = { VNODE_TAGS };
const char * const vnode_types[] = { VNODE_TYPES };
const char vnode_flagbits[] = VNODE_FLAGBITS;
|
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
/*
|
|
|
|
* Print out a description of a vnode.
|
|
|
|
*/
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vprint(const char *label, struct vnode *vp)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
2005-05-30 02:24:14 +04:00
|
|
|
char bf[96];
|
1994-05-17 08:21:49 +04:00
|
|
|
|
|
|
|
if (label != NULL)
|
1996-10-13 06:32:29 +04:00
|
|
|
printf("%s: ", label);
|
2004-12-18 06:07:40 +03:00
|
|
|
printf("tag %s(%d) type %s(%d), usecount %d, writecount %ld, "
|
|
|
|
"refcount %ld,", ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
|
|
|
|
ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
|
2003-02-26 02:35:03 +03:00
|
|
|
vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
|
2005-05-30 02:24:14 +04:00
|
|
|
bitmask_snprintf(vp->v_flag, vnode_flagbits, bf, sizeof(bf));
|
|
|
|
if (bf[0] != '\0')
|
|
|
|
printf(" flags (%s)", &bf[1]);
|
1994-06-08 15:28:29 +04:00
|
|
|
if (vp->v_data == NULL) {
|
1996-10-13 06:32:29 +04:00
|
|
|
printf("\n");
|
1994-06-08 15:28:29 +04:00
|
|
|
} else {
|
1996-10-13 06:32:29 +04:00
|
|
|
printf("\n\t");
|
1994-06-08 15:28:29 +04:00
|
|
|
VOP_PRINT(vp);
|
|
|
|
}
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef DEBUG
|
|
|
|
/*
|
|
|
|
* List all of the locked vnodes in the system.
|
|
|
|
* Called when debugging the kernel.
|
|
|
|
*/
|
1996-02-09 21:59:18 +03:00
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
printlockedvnodes(void)
|
1994-05-17 08:21:49 +04:00
|
|
|
{
|
1998-03-01 05:20:01 +03:00
|
|
|
struct mount *mp, *nmp;
|
|
|
|
struct vnode *vp;
|
1994-05-17 08:21:49 +04:00
|
|
|
|
1996-10-13 06:32:29 +04:00
|
|
|
printf("Locked vnodes\n");
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mountlist_slock);
|
2002-09-04 05:32:31 +04:00
|
|
|
for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
|
|
|
|
mp = nmp) {
|
1998-03-01 05:20:01 +03:00
|
|
|
if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
|
2002-09-04 05:32:31 +04:00
|
|
|
nmp = CIRCLEQ_NEXT(mp, mnt_list);
|
1998-03-01 05:20:01 +03:00
|
|
|
continue;
|
|
|
|
}
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
|
1994-05-17 08:21:49 +04:00
|
|
|
if (VOP_ISLOCKED(vp))
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
vprint(NULL, vp);
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
|
|
|
simple_lock(&mountlist_slock);
|
2002-09-04 05:32:31 +04:00
|
|
|
nmp = CIRCLEQ_NEXT(mp, mnt_list);
|
1998-03-01 05:20:01 +03:00
|
|
|
vfs_unbusy(mp);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mountlist_slock);
|
1994-05-17 08:21:49 +04:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2004-04-19 04:15:55 +04:00
|
|
|
/*
 * sysctl helper routine to return list of supported fstypes
 *
 * Read-only: rejects writes (EPERM) and any trailing name components
 * (EINVAL).  With oldp == NULL it only computes the required buffer
 * size; otherwise it copies out a space-separated list of filesystem
 * names.  *oldlenp is always updated to the number of bytes needed.
 */
static int
sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS)
{
	char bf[MFSNAMELEN];
	char *where = oldp;
	struct vfsops *v;
	size_t needed, left, slen;
	int error, first;

	if (newp != NULL)
		return (EPERM);
	if (namelen != 0)
		return (EINVAL);

	first = 1;
	error = 0;
	needed = 0;
	left = *oldlenp;

	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (where == NULL)
			/* Size probe only: name plus separator/NUL. */
			needed += strlen(v->vfs_name) + 1;
		else {
			/*
			 * Build " name" (or bare "name" for the first
			 * entry) in bf; the memset plus explicit final
			 * NUL guarantee termination despite strncpy.
			 */
			memset(bf, 0, sizeof(bf));
			if (first) {
				strncpy(bf, v->vfs_name, sizeof(bf));
				first = 0;
			} else {
				bf[0] = ' ';
				strncpy(bf + 1, v->vfs_name, sizeof(bf) - 1);
			}
			bf[sizeof(bf)-1] = '\0';
			slen = strlen(bf);
			if (left < slen + 1)
				break;
			/* +1 to copy out the trailing NUL byte */
			error = copyout(bf, where, slen + 1);
			if (error)
				break;
			/*
			 * Advance past the name only (not the NUL) so
			 * the next entry's leading space overwrites it.
			 */
			where += slen;
			needed += slen;
			left -= slen;
		}
	}
	*oldlenp = needed;
	return (error);
}
|
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
/*
 * Top level filesystem related information gathering.
 *
 * Registers the vfs sysctl subtree: the CTL_VFS root node, the
 * vfs.generic node, the writable vfs.generic.usermount knob, and the
 * read-only vfs.generic.fstypes string served by
 * sysctl_vfs_generic_fstypes().
 */
SYSCTL_SETUP(sysctl_vfs_setup, "sysctl vfs subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "vfs", NULL,
		       NULL, 0, NULL, 0,
		       CTL_VFS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "generic",
		       SYSCTL_DESCR("Non-specific vfs related information"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, VFS_GENERIC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usermount",
		       SYSCTL_DESCR("Whether unprivileged users may mount "
				    "filesystems"),
		       NULL, 0, &dovfsusermount, 0,
		       CTL_VFS, VFS_GENERIC, VFS_USERMOUNT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "fstypes",
		       SYSCTL_DESCR("List of file systems present"),
		       sysctl_vfs_generic_fstypes, 0, NULL, 0,
		       CTL_VFS, VFS_GENERIC, CTL_CREATE, CTL_EOL);
}
|
|
|
|
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
|
1994-05-17 08:21:49 +04:00
|
|
|
/* Nonzero enables debug printfs in sysctl_kern_vnode() (see "kinfo: vp changed"). */
int kinfo_vdebug = 1;
/* NOTE(review): not updated anywhere in this chunk — presumably bumped elsewhere. */
int kinfo_vgetfailed;
/*
 * Slop added to the vnode-count size estimate so the dump still fits if a
 * few vnodes are created between the size query and the actual copyout.
 */
#define KINFO_VNODESLOP	10
|
|
|
|
/*
|
|
|
|
* Dump vnode list (via sysctl).
|
|
|
|
* Copyout address of vnode followed by vnode.
|
|
|
|
*/
|
|
|
|
/* ARGSUSED */
|
1996-02-04 05:17:43 +03:00
|
|
|
int
sysctl_kern_vnode(SYSCTLFN_ARGS)
{
	char *where = oldp;		/* user buffer, NULL = size probe */
	size_t *sizep = oldlenp;	/* in: buffer size, out: bytes written */
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

	/* This node takes no sub-names and is read-only. */
	if (namelen != 0)
		return (EOPNOTSUPP);
	if (newp != NULL)
		return (EPERM);

#define VPTRSZ	sizeof(struct vnode *)
#define VNODESZ	sizeof(struct vnode)
	if (where == NULL) {
		/* Size probe: estimate with slop for vnodes created meanwhile. */
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
	    mp = nmp) {
		/* Skip mounts we cannot busy without sleeping. */
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		/* Remember where this mount's output started for retry. */
		savebp = bp;
again:
		simple_lock(&mntvnode_slock);
		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
		    vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				/* Restart this mount's dump from scratch. */
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				/* User buffer exhausted; report partial size. */
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				return (ENOMEM);
			}
			/* Drop the lock around copyout, which may fault/sleep. */
			simple_unlock(&mntvnode_slock);
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			    (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
			simple_lock(&mntvnode_slock);
		}
		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}
|
1994-06-08 15:28:29 +04:00
|
|
|
|
|
|
|
/*
 * Check to see if a filesystem is mounted on a block device.
 *
 * Returns EBUSY if this vnode (or any alias of the same device) has a
 * mount recorded on it, 0 otherwise.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	/* Fast path: a mount is recorded directly on this special vnode. */
	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	/*
	 * The device may be known under other vnodes (aliases); walk the
	 * spechash chain looking for another vnode for the same device.
	 */
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			/* Only vnodes for the same device and type count. */
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}
|
|
|
|
|
1994-12-24 19:44:12 +03:00
|
|
|
/*
|
|
|
|
* Do the usual access checking.
|
|
|
|
* file_mode, uid and gid are from the vnode in question,
|
|
|
|
* while acc_mode and cred are from the VOP_ACCESS parameter list
|
|
|
|
*/
|
1995-04-22 02:03:24 +04:00
|
|
|
int
|
2005-06-06 03:47:48 +04:00
|
|
|
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
|
|
|
|
mode_t acc_mode, struct ucred *cred)
|
1994-12-24 19:44:12 +03:00
|
|
|
{
|
|
|
|
mode_t mask;
|
2004-03-23 16:22:32 +03:00
|
|
|
|
1997-04-24 00:18:16 +04:00
|
|
|
/*
|
|
|
|
* Super-user always gets read/write access, but execute access depends
|
|
|
|
* on at least one execute bit being set.
|
|
|
|
*/
|
|
|
|
if (cred->cr_uid == 0) {
|
1997-05-08 20:34:54 +04:00
|
|
|
if ((acc_mode & VEXEC) && type != VDIR &&
|
1997-05-08 20:19:43 +04:00
|
|
|
(file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
|
1997-04-24 00:18:16 +04:00
|
|
|
return (EACCES);
|
1997-05-08 20:19:43 +04:00
|
|
|
return (0);
|
1997-04-24 00:18:16 +04:00
|
|
|
}
|
2004-03-23 16:22:32 +03:00
|
|
|
|
1994-12-24 19:44:12 +03:00
|
|
|
mask = 0;
|
2004-03-23 16:22:32 +03:00
|
|
|
|
1994-12-24 19:44:12 +03:00
|
|
|
/* Otherwise, check the owner. */
|
|
|
|
if (cred->cr_uid == uid) {
|
1997-05-08 20:19:43 +04:00
|
|
|
if (acc_mode & VEXEC)
|
1994-12-24 19:44:12 +03:00
|
|
|
mask |= S_IXUSR;
|
|
|
|
if (acc_mode & VREAD)
|
|
|
|
mask |= S_IRUSR;
|
|
|
|
if (acc_mode & VWRITE)
|
|
|
|
mask |= S_IWUSR;
|
1997-04-24 00:18:16 +04:00
|
|
|
return ((file_mode & mask) == mask ? 0 : EACCES);
|
1994-12-24 19:44:12 +03:00
|
|
|
}
|
2004-03-23 16:22:32 +03:00
|
|
|
|
1994-12-24 19:44:12 +03:00
|
|
|
/* Otherwise, check the groups. */
|
1995-06-02 02:43:30 +04:00
|
|
|
if (cred->cr_gid == gid || groupmember(gid, cred)) {
|
1997-05-08 20:19:43 +04:00
|
|
|
if (acc_mode & VEXEC)
|
1994-12-24 19:44:12 +03:00
|
|
|
mask |= S_IXGRP;
|
|
|
|
if (acc_mode & VREAD)
|
|
|
|
mask |= S_IRGRP;
|
|
|
|
if (acc_mode & VWRITE)
|
|
|
|
mask |= S_IWGRP;
|
1997-04-24 00:18:16 +04:00
|
|
|
return ((file_mode & mask) == mask ? 0 : EACCES);
|
1994-12-24 19:44:12 +03:00
|
|
|
}
|
2004-03-23 16:22:32 +03:00
|
|
|
|
1994-12-24 19:44:12 +03:00
|
|
|
/* Otherwise, check everyone else. */
|
1997-05-08 20:19:43 +04:00
|
|
|
if (acc_mode & VEXEC)
|
1994-12-24 19:44:12 +03:00
|
|
|
mask |= S_IXOTH;
|
|
|
|
if (acc_mode & VREAD)
|
|
|
|
mask |= S_IROTH;
|
|
|
|
if (acc_mode & VWRITE)
|
|
|
|
mask |= S_IWOTH;
|
1997-04-24 00:18:16 +04:00
|
|
|
return ((file_mode & mask) == mask ? 0 : EACCES);
|
1994-12-24 19:44:12 +03:00
|
|
|
}
|
1995-04-10 23:46:56 +04:00
|
|
|
|
|
|
|
/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(struct lwp *l)
{
	struct mount *mp, *nmp;
	int allerror, error;

	printf("unmounting file systems...");
	/* Walk the mount list tail-to-head (innermost mounts first). */
	for (allerror = 0,
	    mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
		printf("\nunmounting %s (%s)...",
		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
		/*
		 * XXX Freeze syncer.  Must do this before locking the
		 * mount point.  See dounmount() for details.
		 */
		lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
		if (vfs_busy(mp, 0, 0)) {
			/* Couldn't busy this mount; release syncer and move on. */
			lockmgr(&syncer_lock, LK_RELEASE, NULL);
			continue;
		}
		if ((error = dounmount(mp, MNT_FORCE, l)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}
	printf(" done\n");
	if (allerror)
		printf("WARNING: some file systems would not unmount\n");
}
|
|
|
|
|
2003-09-11 19:34:26 +04:00
|
|
|
extern struct simplelock bqueue_slock; /* XXX */
|
|
|
|
|
1995-04-22 01:55:11 +04:00
|
|
|
/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
	struct lwp *l = curlwp;
	struct proc *p;

	/* XXX we're certainly not running in proc0's context! */
	/* NOTE(review): p is assigned but not otherwise used in this chunk. */
	if (l == NULL || (p = l->l_proc) == NULL)
		p = &proc0;

	printf("syncing disks... ");

	/* remove user process from run queue */
	suspendsched();
	(void) spl0();

	/* avoid coming back this way again if we panic. */
	doing_shutdown = 1;

	sys_sync(l, NULL, NULL);

	/* Wait for sync to finish. */
	if (buf_syncwait() != 0) {
#if defined(DDB) && defined(DEBUG_HALT_BUSY)
		Debugger();
#endif
		printf("giving up\n");
		return;
	} else
		printf("done\n");

	/*
	 * If we've panic'd, don't make the situation potentially
	 * worse by unmounting the file systems.
	 */
	if (panicstr != NULL)
		return;

	/* Release inodes held by texts before update. */
#ifdef notdef
	vnshutdown();
#endif
	/* Unmount file systems. */
	vfs_unmountall(l);
}
|
1997-01-31 05:50:36 +03:00
|
|
|
|
|
|
|
/*
 * Mount the root file system.  If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 *
 * Returns 0 on success, or an errno (ENODEV/EFTYPE/...) on failure.
 */
int
vfs_mountroot(void)
{
	struct vfsops *v;
	int error = ENODEV;

	if (root_device == NULL)
		panic("vfs_mountroot: root device unknown");

	switch (root_device->dv_class) {
	case DV_IFNET:
		/* Network root must not have a block device number set. */
		if (rootdev != NODEV)
			panic("vfs_mountroot: rootdev set for DV_IFNET "
			    "(0x%08x -> %d,%d)", rootdev,
			    major(rootdev), minor(rootdev));
		break;

	case DV_DISK:
		/* Disk root: get a vnode for the device and open it. */
		if (rootdev == NODEV)
			panic("vfs_mountroot: rootdev not set for DV_DISK");
		if (bdevvp(rootdev, &rootvp))
			panic("vfs_mountroot: can't get vnode for rootdev");
		error = VOP_OPEN(rootvp, FREAD, FSCRED, curlwp);
		if (error) {
			printf("vfs_mountroot: can't open root device\n");
			return (error);
		}
		break;

	default:
		printf("%s: inappropriate for root file system\n",
		    root_device->dv_xname);
		return (ENODEV);
	}

	/*
	 * If user specified a file system, use it.
	 */
	if (mountroot != NULL) {
		error = (*mountroot)();
		goto done;
	}

	/*
	 * Try each file system currently configured into the kernel.
	 */
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (v->vfs_mountroot == NULL)
			continue;
#ifdef DEBUG
		aprint_normal("mountroot: trying %s...\n", v->vfs_name);
#endif
		error = (*v->vfs_mountroot)();
		if (!error) {
			aprint_normal("root file system type: %s\n",
			    v->vfs_name);
			break;
		}
	}

	/* v == NULL means the list was exhausted without success. */
	if (v == NULL) {
		printf("no file system for %s", root_device->dv_xname);
		if (root_device->dv_class == DV_DISK)
			printf(" (dev 0x%x)", rootdev);
		printf("\n");
		error = EFTYPE;
	}

done:
	/* On failure, undo the VOP_OPEN/bdevvp done for a disk root. */
	if (error && root_device->dv_class == DV_DISK) {
		VOP_CLOSE(rootvp, FREAD, FSCRED, curlwp);
		vrele(rootvp);
	}
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given a file system name, look up the vfsops for that
|
|
|
|
* file system, or return NULL if file system isn't present
|
|
|
|
* in the kernel.
|
|
|
|
*/
|
|
|
|
struct vfsops *
|
2005-06-06 03:47:48 +04:00
|
|
|
vfs_getopsbyname(const char *name)
|
1997-01-31 05:50:36 +03:00
|
|
|
{
|
1998-02-18 10:16:41 +03:00
|
|
|
struct vfsops *v;
|
|
|
|
|
2004-04-19 04:15:55 +04:00
|
|
|
LIST_FOREACH(v, &vfs_list, vfs_list) {
|
1998-02-18 10:16:41 +03:00
|
|
|
if (strcmp(v->vfs_name, name) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (v);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Establish a file system and initialize it.
 *
 * Returns 0 on success or EEXIST if a file system with the same name
 * is already registered.
 */
int
vfs_attach(struct vfsops *vfs)
{
	struct vfsops *v;
	int error = 0;

	/*
	 * Make sure this file system doesn't already exist.
	 */
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
			error = EEXIST;
			goto out;
		}
	}

	/*
	 * Initialize the vnode operations for this file system.
	 */
	vfs_opv_init(vfs->vfs_opv_descs);

	/*
	 * Now initialize the file system itself.
	 */
	(*vfs->vfs_init)();

	/*
	 * ...and link it into the kernel's list.
	 */
	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);

	/*
	 * Sanity: make sure the reference count is 0.
	 */
	vfs->vfs_refcount = 0;

 out:
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Remove a file system from the kernel.
 *
 * Returns 0 on success, EBUSY if the file system is still referenced,
 * or ESRCH if it was never attached.
 */
int
vfs_detach(struct vfsops *vfs)
{
	struct vfsops *v;

	/*
	 * Make sure no one is using the filesystem.
	 */
	if (vfs->vfs_refcount != 0)
		return (EBUSY);

	/*
	 * ...and remove it from the kernel's list.
	 */
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (v == vfs) {
			LIST_REMOVE(v, vfs_list);
			break;
		}
	}

	/* If the loop ran off the end, the entry was never on the list. */
	if (v == NULL)
		return (ESRCH);

	/*
	 * Now run the file system-specific cleanups.
	 */
	(*vfs->vfs_done)();

	/*
	 * Free the vnode operations vector.
	 */
	vfs_opv_free(vfs->vfs_opv_descs);
	return (0);
}
|
2000-04-10 06:22:13 +04:00
|
|
|
|
2001-09-15 20:12:54 +04:00
|
|
|
void
|
|
|
|
vfs_reinit(void)
|
|
|
|
{
|
|
|
|
struct vfsops *vfs;
|
|
|
|
|
|
|
|
LIST_FOREACH(vfs, &vfs_list, vfs_list) {
|
|
|
|
if (vfs->vfs_reinit) {
|
|
|
|
(*vfs->vfs_reinit)();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-01-10 20:16:38 +03:00
|
|
|
/*
 * Request a filesystem to suspend write operations.
 *
 * slpflag/slptimeo control the wait when another suspension is already
 * in progress (slptimeo < 0 means fail immediately with EWOULDBLOCK).
 * Returns 0 once IMNT_SUSPENDED is set, or an errno from the wait/sync.
 */
int
vfs_write_suspend(struct mount *mp, int slpflag, int slptimeo)
{
	struct lwp *l = curlwp;	/* XXX */
	int error;

	/* Wait out (or refuse to wait out) any suspension already active. */
	while ((mp->mnt_iflag & IMNT_SUSPEND)) {
		if (slptimeo < 0)
			return EWOULDBLOCK;
		error = tsleep(&mp->mnt_flag, slpflag, "suspwt1", slptimeo);
		if (error)
			return error;
	}
	mp->mnt_iflag |= IMNT_SUSPEND;

	/* Phase 1: wait for in-flight upper-level write ops to drain. */
	simple_lock(&mp->mnt_slock);
	if (mp->mnt_writeopcountupper > 0)
		ltsleep(&mp->mnt_writeopcountupper, PUSER - 1, "suspwt",
		    0, &mp->mnt_slock);
	simple_unlock(&mp->mnt_slock);

	/* Flush everything to disk before fully suspending. */
	error = VFS_SYNC(mp, MNT_WAIT, l->l_proc->p_ucred, l);
	if (error) {
		/* Sync failed: back out the suspension entirely. */
		vfs_write_resume(mp);
		return error;
	}
	mp->mnt_iflag |= IMNT_SUSPENDLOW;

	/* Phase 2: wait for lower-level write ops, then mark suspended. */
	simple_lock(&mp->mnt_slock);
	if (mp->mnt_writeopcountlower > 0)
		ltsleep(&mp->mnt_writeopcountlower, PUSER - 1, "suspwt",
		    0, &mp->mnt_slock);
	mp->mnt_iflag |= IMNT_SUSPENDED;
	simple_unlock(&mp->mnt_slock);

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Request a filesystem to resume write operations.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
vfs_write_resume(struct mount *mp)
|
|
|
|
{
|
|
|
|
|
|
|
|
if ((mp->mnt_iflag & IMNT_SUSPEND) == 0)
|
|
|
|
return;
|
|
|
|
mp->mnt_iflag &= ~(IMNT_SUSPEND | IMNT_SUSPENDLOW | IMNT_SUSPENDED);
|
|
|
|
wakeup(&mp->mnt_flag);
|
|
|
|
}
|
|
|
|
|
2003-04-17 01:44:18 +04:00
|
|
|
void
|
2004-04-21 05:05:31 +04:00
|
|
|
copy_statvfs_info(struct statvfs *sbp, const struct mount *mp)
|
2003-04-17 01:44:18 +04:00
|
|
|
{
|
2004-04-21 05:05:31 +04:00
|
|
|
const struct statvfs *mbp;
|
2003-04-19 02:44:45 +04:00
|
|
|
|
|
|
|
if (sbp == (mbp = &mp->mnt_stat))
|
2003-04-17 01:44:18 +04:00
|
|
|
return;
|
2003-04-19 02:44:45 +04:00
|
|
|
|
2004-04-22 07:47:58 +04:00
|
|
|
(void)memcpy(&sbp->f_fsidx, &mbp->f_fsidx, sizeof(sbp->f_fsidx));
|
|
|
|
sbp->f_fsid = mbp->f_fsid;
|
2003-04-19 02:44:45 +04:00
|
|
|
sbp->f_owner = mbp->f_owner;
|
2004-04-21 05:05:31 +04:00
|
|
|
sbp->f_flag = mbp->f_flag;
|
2003-04-19 02:44:45 +04:00
|
|
|
sbp->f_syncwrites = mbp->f_syncwrites;
|
|
|
|
sbp->f_asyncwrites = mbp->f_asyncwrites;
|
2004-04-21 05:05:31 +04:00
|
|
|
sbp->f_syncreads = mbp->f_syncreads;
|
|
|
|
sbp->f_asyncreads = mbp->f_asyncreads;
|
|
|
|
(void)memcpy(sbp->f_spare, mbp->f_spare, sizeof(mbp->f_spare));
|
2003-04-19 02:44:45 +04:00
|
|
|
(void)memcpy(sbp->f_fstypename, mbp->f_fstypename,
|
2003-04-17 01:44:18 +04:00
|
|
|
sizeof(sbp->f_fstypename));
|
2003-04-19 02:44:45 +04:00
|
|
|
(void)memcpy(sbp->f_mntonname, mbp->f_mntonname,
|
2003-04-17 01:44:18 +04:00
|
|
|
sizeof(sbp->f_mntonname));
|
|
|
|
(void)memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname,
|
|
|
|
sizeof(sbp->f_mntfromname));
|
2004-09-13 23:45:21 +04:00
|
|
|
sbp->f_namemax = mbp->f_namemax;
|
2003-04-17 01:44:18 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fill in the mount-point (onp) and mounted-from (fromp) names of a
 * mount's statvfs.  ukon/ukfrom say whether each string lives in kernel
 * or user space.  When the calling process is chroot()ed, the mount
 * point is prefixed with the path of the root directory so the name is
 * meaningful inside the chroot.  Returns 0 or an errno.
 */
int
set_statvfs_info(const char *onp, int ukon, const char *fromp, int ukfrom,
    struct mount *mp, struct lwp *l)
{
	int error;
	size_t size;
	struct statvfs *sfs = &mp->mnt_stat;
	/* copystr for kernel-space source, copyinstr for user space. */
	int (*fun)(const void *, void *, size_t, size_t *);

	(void)strncpy(mp->mnt_stat.f_fstypename, mp->mnt_op->vfs_name,
	    sizeof(mp->mnt_stat.f_fstypename));

	if (onp) {
		struct cwdinfo *cwdi = l->l_proc->p_cwdi;
		fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr;
		if (cwdi->cwdi_rdir != NULL) {
			/* chroot()ed: build the root-dir path as a prefix. */
			size_t len;
			char *bp;
			char *path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);

			if (!path) /* XXX can't happen with M_WAITOK */
				return ENOMEM;

			/* getcwd_common fills the buffer back-to-front. */
			bp = path + MAXPATHLEN;
			*--bp = '\0';
			error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp,
			    path, MAXPATHLEN / 2, 0, l);
			if (error) {
				free(path, M_TEMP);
				return error;
			}

			/* Copy the (possibly truncated) prefix in place. */
			len = strlen(bp);
			if (len > sizeof(sfs->f_mntonname) - 1)
				len = sizeof(sfs->f_mntonname) - 1;
			(void)strncpy(sfs->f_mntonname, bp, len);
			free(path, M_TEMP);

			if (len < sizeof(sfs->f_mntonname) - 1) {
				/* Append onp after the prefix. */
				error = (*fun)(onp, &sfs->f_mntonname[len],
				    sizeof(sfs->f_mntonname) - len - 1, &size);
				if (error)
					return error;
				size += len;
			} else {
				size = len;
			}
		} else {
			/* Not chroot()ed: copy onp directly. */
			error = (*fun)(onp, &sfs->f_mntonname,
			    sizeof(sfs->f_mntonname) - 1, &size);
			if (error)
				return error;
		}
		/* Zero the tail so the field is NUL-terminated and clean. */
		(void)memset(sfs->f_mntonname + size, 0,
		    sizeof(sfs->f_mntonname) - size);
	}

	if (fromp) {
		fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr;
		error = (*fun)(fromp, sfs->f_mntfromname,
		    sizeof(sfs->f_mntfromname) - 1, &size);
		if (error)
			return error;
		(void)memset(sfs->f_mntfromname + size, 0,
		    sizeof(sfs->f_mntfromname) - size);
	}
	return 0;
}
|
|
|
|
|
2000-04-10 06:22:13 +04:00
|
|
|
#ifdef DDB
|
2005-06-06 03:47:48 +04:00
|
|
|
static const char buf_flagbits[] = BUF_FLAGBITS;
|
2000-04-10 06:22:13 +04:00
|
|
|
|
|
|
|
/*
 * DDB helper: print the interesting fields of a struct buf via *pr.
 * NOTE(review): `full` is accepted but not consulted in this chunk.
 */
void
vfs_buf_print(struct buf *bp, int full, void (*pr)(const char *, ...))
{
	char bf[1024];

	(*pr)(" vp %p lblkno 0x%"PRIx64" blkno 0x%"PRIx64" dev 0x%x\n",
	    bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);

	/* Decode b_flags into a human-readable bit-name string. */
	bitmask_snprintf(bp->b_flags, buf_flagbits, bf, sizeof(bf));
	(*pr)(" error %d flags 0x%s\n", bp->b_error, bf);

	(*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
	    bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)(" data %p saveaddr %p dep %p\n",
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
	(*pr)(" iodone %p\n", bp->b_iodone);
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vfs_vnode_print(struct vnode *vp, int full, void (*pr)(const char *, ...))
|
2000-04-10 06:22:13 +04:00
|
|
|
{
|
2005-05-30 02:24:14 +04:00
|
|
|
char bf[256];
|
2000-04-10 06:22:13 +04:00
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uvm_object_printit(&vp->v_uobj, full, pr);
|
2005-05-30 02:24:14 +04:00
|
|
|
bitmask_snprintf(vp->v_flag, vnode_flagbits, bf, sizeof(bf));
|
|
|
|
(*pr)("\nVNODE flags %s\n", bf);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
(*pr)("mp %p numoutput %d size 0x%llx\n",
|
|
|
|
vp->v_mount, vp->v_numoutput, vp->v_size);
|
2000-04-10 06:22:13 +04:00
|
|
|
|
2001-11-30 13:06:46 +03:00
|
|
|
(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
|
2000-04-10 06:22:13 +04:00
|
|
|
vp->v_data, vp->v_usecount, vp->v_writecount,
|
|
|
|
vp->v_holdcnt, vp->v_numoutput);
|
|
|
|
|
2004-12-18 06:07:40 +03:00
|
|
|
(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
|
|
|
|
ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
|
|
|
|
ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
|
2003-07-30 16:10:57 +04:00
|
|
|
vp->v_mount, vp->v_mountedhere);
|
2000-04-10 06:22:13 +04:00
|
|
|
|
|
|
|
if (full) {
|
|
|
|
struct buf *bp;
|
|
|
|
|
|
|
|
(*pr)("clean bufs:\n");
|
2000-11-27 11:39:39 +03:00
|
|
|
LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
|
|
|
|
(*pr)(" bp %p\n", bp);
|
2000-04-10 06:22:13 +04:00
|
|
|
vfs_buf_print(bp, full, pr);
|
|
|
|
}
|
|
|
|
|
|
|
|
(*pr)("dirty bufs:\n");
|
2000-11-27 11:39:39 +03:00
|
|
|
LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
|
|
|
|
(*pr)(" bp %p\n", bp);
|
2000-04-10 06:22:13 +04:00
|
|
|
vfs_buf_print(bp, full, pr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2003-11-18 21:26:18 +03:00
|
|
|
|
|
|
|
void
|
2005-06-06 03:47:48 +04:00
|
|
|
vfs_mount_print(struct mount *mp, int full, void (*pr)(const char *, ...))
|
2003-11-18 21:26:18 +03:00
|
|
|
{
|
|
|
|
char sbuf[256];
|
|
|
|
|
|
|
|
(*pr)("vnodecovered = %p syncer = %p data = %p\n",
|
|
|
|
mp->mnt_vnodecovered,mp->mnt_syncer,mp->mnt_data);
|
|
|
|
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
(*pr)("fs_bshift %d dev_bshift = %d\n",
|
|
|
|
mp->mnt_fs_bshift,mp->mnt_dev_bshift);
|
2003-11-18 21:26:18 +03:00
|
|
|
|
|
|
|
bitmask_snprintf(mp->mnt_flag, __MNT_FLAG_BITS, sbuf, sizeof(sbuf));
|
|
|
|
(*pr)("flag = %s\n", sbuf);
|
|
|
|
|
|
|
|
bitmask_snprintf(mp->mnt_iflag, __IMNT_FLAG_BITS, sbuf, sizeof(sbuf));
|
|
|
|
(*pr)("iflag = %s\n", sbuf);
|
|
|
|
|
|
|
|
/* XXX use lockmgr_printinfo */
|
|
|
|
if (mp->mnt_lock.lk_sharecount)
|
|
|
|
(*pr)(" lock type %s: SHARED (count %d)", mp->mnt_lock.lk_wmesg,
|
|
|
|
mp->mnt_lock.lk_sharecount);
|
|
|
|
else if (mp->mnt_lock.lk_flags & LK_HAVE_EXCL) {
|
|
|
|
(*pr)(" lock type %s: EXCL (count %d) by ",
|
|
|
|
mp->mnt_lock.lk_wmesg, mp->mnt_lock.lk_exclusivecount);
|
|
|
|
if (mp->mnt_lock.lk_flags & LK_SPIN)
|
|
|
|
(*pr)("processor %lu", mp->mnt_lock.lk_cpu);
|
|
|
|
else
|
|
|
|
(*pr)("pid %d.%d", mp->mnt_lock.lk_lockholder,
|
|
|
|
mp->mnt_lock.lk_locklwp);
|
|
|
|
} else
|
|
|
|
(*pr)(" not locked");
|
|
|
|
if ((mp->mnt_lock.lk_flags & LK_SPIN) == 0 && mp->mnt_lock.lk_waitcount > 0)
|
|
|
|
(*pr)(" with %d pending", mp->mnt_lock.lk_waitcount);
|
|
|
|
|
|
|
|
(*pr)("\n");
|
|
|
|
|
|
|
|
if (mp->mnt_unmounter) {
|
2005-12-11 15:16:03 +03:00
|
|
|
(*pr)("unmounter pid = %d ",mp->mnt_unmounter->l_proc);
|
2003-11-18 21:26:18 +03:00
|
|
|
}
|
|
|
|
(*pr)("wcnt = %d, writeopcountupper = %d, writeopcountupper = %d\n",
|
|
|
|
mp->mnt_wcnt,mp->mnt_writeopcountupper,mp->mnt_writeopcountlower);
|
|
|
|
|
2004-04-21 05:05:31 +04:00
|
|
|
(*pr)("statvfs cache:\n");
|
|
|
|
(*pr)("\tbsize = %lu\n",mp->mnt_stat.f_bsize);
|
|
|
|
(*pr)("\tfrsize = %lu\n",mp->mnt_stat.f_frsize);
|
|
|
|
(*pr)("\tiosize = %lu\n",mp->mnt_stat.f_iosize);
|
|
|
|
|
|
|
|
(*pr)("\tblocks = "PRIu64"\n",mp->mnt_stat.f_blocks);
|
|
|
|
(*pr)("\tbfree = "PRIu64"\n",mp->mnt_stat.f_bfree);
|
|
|
|
(*pr)("\tbavail = "PRIu64"\n",mp->mnt_stat.f_bavail);
|
|
|
|
(*pr)("\tbresvd = "PRIu64"\n",mp->mnt_stat.f_bresvd);
|
|
|
|
|
|
|
|
(*pr)("\tfiles = "PRIu64"\n",mp->mnt_stat.f_files);
|
|
|
|
(*pr)("\tffree = "PRIu64"\n",mp->mnt_stat.f_ffree);
|
|
|
|
(*pr)("\tfavail = "PRIu64"\n",mp->mnt_stat.f_favail);
|
|
|
|
(*pr)("\tfresvd = "PRIu64"\n",mp->mnt_stat.f_fresvd);
|
|
|
|
|
|
|
|
(*pr)("\tf_fsidx = { 0x%"PRIx32", 0x%"PRIx32" }\n",
|
|
|
|
mp->mnt_stat.f_fsidx.__fsid_val[0],
|
|
|
|
mp->mnt_stat.f_fsidx.__fsid_val[1]);
|
|
|
|
|
2003-11-18 21:26:18 +03:00
|
|
|
(*pr)("\towner = %"PRIu32"\n",mp->mnt_stat.f_owner);
|
2004-04-21 05:05:31 +04:00
|
|
|
(*pr)("\tnamemax = %lu\n",mp->mnt_stat.f_namemax);
|
|
|
|
|
|
|
|
bitmask_snprintf(mp->mnt_stat.f_flag, __MNT_FLAG_BITS, sbuf,
|
|
|
|
sizeof(sbuf));
|
|
|
|
(*pr)("\tflag = %s\n",sbuf);
|
|
|
|
(*pr)("\tsyncwrites = " PRIu64 "\n",mp->mnt_stat.f_syncwrites);
|
|
|
|
(*pr)("\tasyncwrites = " PRIu64 "\n",mp->mnt_stat.f_asyncwrites);
|
|
|
|
(*pr)("\tsyncreads = " PRIu64 "\n",mp->mnt_stat.f_syncreads);
|
|
|
|
(*pr)("\tasyncreads = " PRIu64 "\n",mp->mnt_stat.f_asyncreads);
|
2003-11-18 21:26:18 +03:00
|
|
|
(*pr)("\tfstypename = %s\n",mp->mnt_stat.f_fstypename);
|
|
|
|
(*pr)("\tmntonname = %s\n",mp->mnt_stat.f_mntonname);
|
|
|
|
(*pr)("\tmntfromname = %s\n",mp->mnt_stat.f_mntfromname);
|
|
|
|
|
|
|
|
{
|
|
|
|
int cnt = 0;
|
|
|
|
struct vnode *vp;
|
|
|
|
(*pr)("locked vnodes =");
|
|
|
|
/* XXX would take mountlist lock, except ddb may not have context */
|
|
|
|
LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
|
|
|
|
if (VOP_ISLOCKED(vp)) {
|
|
|
|
if ((++cnt % 6) == 0) {
|
|
|
|
(*pr)(" %p,\n\t", vp);
|
|
|
|
} else {
|
|
|
|
(*pr)(" %p,", vp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
(*pr)("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (full) {
|
|
|
|
int cnt = 0;
|
|
|
|
struct vnode *vp;
|
|
|
|
(*pr)("all vnodes =");
|
|
|
|
/* XXX would take mountlist lock, except ddb may not have context */
|
|
|
|
LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
|
|
|
|
if (!LIST_NEXT(vp, v_mntvnodes)) {
|
|
|
|
(*pr)(" %p", vp);
|
|
|
|
} else if ((++cnt % 6) == 0) {
|
|
|
|
(*pr)(" %p,\n\t", vp);
|
|
|
|
} else {
|
|
|
|
(*pr)(" %p,", vp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
(*pr)("\n", vp);
|
|
|
|
}
|
|
|
|
}
|
2005-06-06 03:47:48 +04:00
|
|
|
#endif /* DDB */
|