NetBSD/sys/kern/vfs_subr.c

2148 lines
53 KiB
C
Raw Normal View History

/* $NetBSD: vfs_subr.c,v 1.296 2007/07/29 14:44:08 pooka Exp $ */
/*-
2005-03-02 14:05:34 +03:00
* Copyright (c) 1997, 1998, 2004, 2005 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
* NASA Ames Research Center.
2005-03-02 14:05:34 +03:00
* This code is derived from software contributed to The NetBSD Foundation
* by Charles M. Hannum.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
1994-05-17 08:21:49 +04:00
/*
1994-06-08 15:28:29 +04:00
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
1994-05-17 08:21:49 +04:00
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
1994-05-17 08:21:49 +04:00
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
1994-05-17 08:21:49 +04:00
*/
/*
* External virtual filesystem routines.
*
* This file contains vfs subroutines which are heavily dependant on
* the kernel and are not suitable for standalone use. Examples include
* routines involved vnode and mountpoint management.
1994-05-17 08:21:49 +04:00
*/
2001-11-12 18:25:01 +03:00
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.296 2007/07/29 14:44:08 pooka Exp $");
2001-11-12 18:25:01 +03:00
#include "opt_inet.h"
#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
1998-12-10 18:07:01 +03:00
#include "opt_compat_43.h"
1994-05-17 08:21:49 +04:00
#include <sys/param.h>
1994-06-08 15:28:29 +04:00
#include <sys/systm.h>
1994-05-17 08:21:49 +04:00
#include <sys/proc.h>
#include <sys/kernel.h>
1994-05-17 08:21:49 +04:00
#include <sys/mount.h>
1995-07-03 20:58:38 +04:00
#include <sys/fcntl.h>
1994-05-17 08:21:49 +04:00
#include <sys/vnode.h>
1994-06-08 15:28:29 +04:00
#include <sys/stat.h>
1994-05-17 08:21:49 +04:00
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
1996-02-09 21:59:18 +03:00
#include <sys/syscallargs.h>
#include <sys/device.h>
#include <sys/filedesc.h>
2006-05-15 01:15:11 +04:00
#include <sys/kauth.h>
1996-02-04 05:17:43 +03:00
1994-06-08 15:28:29 +04:00
#include <miscfs/specfs/specdev.h>
#include <miscfs/syncfs/syncfs.h>
1994-06-08 15:28:29 +04:00
#include <uvm/uvm.h>
2005-11-30 01:52:02 +03:00
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_ddb.h>
2000-06-27 21:41:07 +04:00
#include <sys/sysctl.h>
extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */
extern int vfs_magiclinks;	/* 1 => expand "magic" symlinks */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}

/*
 * Global free-vnode lists: vnode_free_list holds vnodes with no
 * referencing buffers (preferred recycle candidates); vnode_hold_list
 * holds vnodes that still have buffers attached (v_holdcnt > 0).
 * Both are protected by vnode_free_list_slock.
 */
/* TAILQ_HEAD(freelst, vnode) vnode_free_list =	vnode free list (in vnode.h) */
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);
struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;

/* Backing pool for struct vnode allocations. */
POOL_INIT(vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
    &pool_allocator_nointr, IPL_NONE);

MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes");

/*
 * Local declarations.
 */

/* Domain for per-mount specificdata; created in vntblinit(). */
static specificdata_domain_t mount_specificdata_domain;

static void insmntque(struct vnode *, struct mount *);
static int getdevvp(dev_t, struct vnode **, enum vtype);
static void vclean(struct vnode *, int, struct lwp *);
static struct vnode *getcleanvnode(struct lwp *);
1996-02-09 21:59:18 +03:00
1994-05-17 08:21:49 +04:00
/*
1994-06-08 15:28:29 +04:00
* Initialize the vnode management data structures.
1994-05-17 08:21:49 +04:00
*/
1996-02-04 05:17:43 +03:00
void
2005-06-06 03:47:48 +04:00
vntblinit(void)
1994-05-17 08:21:49 +04:00
{
mount_specificdata_domain = specificdata_domain_create();
/*
* Initialize the filesystem syncer.
*/
vn_initialize_syncerd();
1994-05-17 08:21:49 +04:00
}
/*
 * vfs_drainvnodes: reduce the number of allocated vnodes to "target".
 *
 * Repeatedly pulls a vnode off a free list, cleans it and returns it
 * to the pool until numvnodes drops to the target.  Returns 0 on
 * success, EBUSY if no more clean candidates could be found.
 */
int
vfs_drainvnodes(long target, struct lwp *l)
{
	simple_lock(&vnode_free_list_slock);
	while (numvnodes > target) {
		struct vnode *vp;

		/*
		 * getcleanvnode() releases vnode_free_list_slock on
		 * every return path, so the lock must be retaken for
		 * each iteration (and is already dropped on failure).
		 */
		vp = getcleanvnode(l);
		if (vp == NULL)
			return EBUSY; /* give up */
		pool_put(&vnode_pool, vp);
		simple_lock(&vnode_free_list_slock);
		numvnodes--;
	}
	simple_unlock(&vnode_free_list_slock);
	return 0;
}
/*
 * grab a vnode from freelist and clean it.
 *
 * Scans vnode_free_list first, then vnode_hold_list, for a vnode whose
 * interlock can be acquired and which is not mid-reclaim (VXLOCK) and
 * not a locked layered (VLAYER) vnode.  The winner is removed from its
 * list and, unless already VBAD, vgone'd to discard its old identity.
 *
 * => Caller must hold vnode_free_list_slock; it is always released
 *    before returning.
 * => Returns the cleaned vnode, or NULLVP if no candidate was found.
 */
struct vnode *
getcleanvnode(struct lwp *l)
{
	struct vnode *vp;
	struct freelst *listhd;

	LOCK_ASSERT(simple_lock_held(&vnode_free_list_slock));

	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/* Skip vnodes whose interlock is already taken. */
		if (!simple_lock_try(&vp->v_interlock))
			continue;
		/*
		 * as our lwp might hold the underlying vnode locked,
		 * don't try to reclaim the VLAYER vnode if it's locked.
		 */
		if ((vp->v_flag & VXLOCK) == 0 &&
		    ((vp->v_flag & VLAYER) == 0 || VOP_ISLOCKED(vp) == 0)) {
			break;
		}
		simple_unlock(&vp->v_interlock);
	}

	if (vp == NULLVP) {
		if (listhd == &vnode_free_list) {
			/* Free list exhausted; fall back to hold list. */
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		simple_unlock(&vnode_free_list_slock);
		return NULLVP;
	}

	/* A vnode on a free list must have no active references. */
	if (vp->v_usecount)
		panic("free vnode isn't, vp %p", vp);
	TAILQ_REMOVE(listhd, vp, v_freelist);
	/* see comment on why 0xdeadb is set at end of vgone (below) */
	vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
	simple_unlock(&vnode_free_list_slock);

	if (vp->v_type != VBAD)
		/* vgonel() disassociates the vnode; v_interlock is
		 * consumed by it (note the explicit unlock in the
		 * VBAD branch below). */
		vgonel(vp, l);
	else
		simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_data || vp->v_uobj.uo_npages ||
	    TAILQ_FIRST(&vp->v_uobj.memq))
		panic("cleaned vnode isn't, vp %p", vp);
	if (vp->v_numoutput)
		panic("clean vnode has pending I/O's, vp %p", vp);
#endif
	KASSERT((vp->v_flag & VONWORKLST) == 0);

	return vp;
}
1994-05-17 08:21:49 +04:00
/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 *
 * => flags: LK_NOWAIT to fail instead of sleeping while an unmount is
 *    in progress; LK_RECURSEFAIL to fail with EDEADLK when the caller
 *    is itself the unmounting lwp.
 * => interlkp, if non-NULL, is held by the caller; it is dropped while
 *    sleeping and while taking mnt_lock, and reacquired on success.
 * => Returns 0 with a shared busy lock held, ENOENT if the mount is
 *    going or gone, or EDEADLK as described above.
 */
int
vfs_busy(struct mount *mp, int flags, struct simplelock *interlkp)
{
	int lkflags;

	while (mp->mnt_iflag & IMNT_UNMOUNT) {
		int gone, n;

		if (flags & LK_NOWAIT)
			return (ENOENT);
		if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
		    && mp->mnt_unmounter == curlwp)
			return (EDEADLK);
		if (interlkp)
			simple_unlock(interlkp);
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		simple_lock(&mp->mnt_slock);
		mp->mnt_wcnt++;
		ltsleep((void *)mp, PVFS, "vfs_busy", 0, &mp->mnt_slock);
		n = --mp->mnt_wcnt;
		simple_unlock(&mp->mnt_slock);
		gone = mp->mnt_iflag & IMNT_GONE;

		/* Last waiter out signals whoever waits on mnt_wcnt. */
		if (n == 0)
			wakeup(&mp->mnt_wcnt);
		if (interlkp)
			simple_lock(interlkp);
		if (gone)
			return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}
/*
 * Free a busy filesystem.
 *
 * Releases the shared busy lock taken by vfs_busy().
 */
void
vfs_unbusy(struct mount *mp)
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
}
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 *
 * => The new mount is returned busy (vfs_busy) and read-only, with a
 *    reference taken on the vfsops.  Returns ENODEV if no filesystem
 *    of the given name is registered.
 */
int
vfs_rootmountalloc(const char *fstypename, const char *devname,
    struct mount **mpp)
{
	struct vfsops *vfsp = NULL;
	struct mount *mp;

	LIST_FOREACH(vfsp, &vfs_list, vfs_list)
		if (!strncmp(vfsp->vfs_name, fstypename,
		    sizeof(mp->mnt_stat.f_fstypename)))
			break;

	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	memset((char *)mp, 0, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	simple_lock_init(&mp->mnt_slock);
	/* Busy the fresh mount before publishing it to the caller. */
	(void)vfs_busy(mp, LK_NOWAIT, 0);
	TAILQ_INIT(&mp->mnt_vnodelist);
	mp->mnt_op = vfsp;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfs_refcount++;
	(void)strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name,
	    sizeof(mp->mnt_stat.f_fstypename));
	/* Root mount point is always "/". */
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = '\0';
	mp->mnt_stat.f_mntfromname[sizeof(mp->mnt_stat.f_mntfromname) - 1] =
	    '\0';
	(void)copystr(devname, mp->mnt_stat.f_mntfromname,
	    sizeof(mp->mnt_stat.f_mntfromname) - 1, 0);
	mount_initspecific(mp);
	*mpp = mp;
	return (0);
}
1994-06-08 15:28:29 +04:00
/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);

/*
 * Return the next vnode from the free list.
 *
 * Allocates a fresh vnode from the pool, or recycles one from the
 * free/hold lists via getcleanvnode(), and initializes it with the
 * given tag, vnode operations vector and mount point.  Sleeps and
 * retries if neither allocation nor recycling succeeds while an
 * allocation was attempted; otherwise fails with ENFILE.
 *
 * => Returns 0 with *vpp pointing at the new vnode (usecount 1), or
 *    an error from vfs_busy() / ENFILE on table exhaustion.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    struct vnode **vpp)
{
	extern struct uvm_pagerops uvm_vnodeops;
	struct uvm_object *uobj;
	struct lwp *l = curlwp;		/* XXX */
	static int toggle;
	struct vnode *vp;
	int error = 0, tryalloc;

 try_again:
	if (mp) {
		/*
		 * Mark filesystem busy while we're creating a vnode.
		 * If unmount is in progress, this will wait; if the
		 * unmount succeeds (only if umount -f), this will
		 * return an error.  If the unmount fails, we'll keep
		 * going afterwards.
		 * (This puts the per-mount vnode list logically under
		 * the protection of the vfs_busy lock).
		 */
		error = vfs_busy(mp, LK_RECURSEFAIL, 0);
		/* EDEADLK (we are the unmounter) is tolerated; it is
		 * remembered below so vfs_unbusy() is skipped. */
		if (error && error != EDEADLK)
			return error;
	}

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one.  The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list.  Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list.  If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list.  The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size.  We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	vp = NULL;
	simple_lock(&vnode_free_list_slock);
	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;
	tryalloc = numvnodes < desiredvnodes ||
	    (TAILQ_FIRST(&vnode_free_list) == NULL &&
	     (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));

	if (tryalloc &&
	    (vp = pool_get(&vnode_pool, PR_NOWAIT)) != NULL) {
		numvnodes++;
		simple_unlock(&vnode_free_list_slock);
		memset(vp, 0, sizeof(*vp));
		/* Fresh vnode: uvm_object set up with usecount 1. */
		UVM_OBJ_INIT(&vp->v_uobj, &uvm_vnodeops, 1);
		/*
		 * done by memset() above.
		 *	LIST_INIT(&vp->v_nclist);
		 *	LIST_INIT(&vp->v_dnclist);
		 */
	} else {
		/* getcleanvnode() drops vnode_free_list_slock. */
		vp = getcleanvnode(l);
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULLVP) {
			if (mp && error != EDEADLK)
				vfs_unbusy(mp);
			if (tryalloc) {
				/* Pool allocation failed above; back
				 * off briefly and retry from scratch. */
				printf("WARNING: unable to allocate new "
				    "vnode, retrying...\n");
				(void) tsleep(&lbolt, PRIBIO, "newvn", hz);
				goto try_again;
			}
			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
			*vpp = 0;
			return (ENFILE);
		}
		vp->v_usecount = 1;
		vp->v_flag = 0;
		vp->v_socket = NULL;
	}
	/* Common (re)initialization for both fresh and recycled vnodes. */
	vp->v_type = VNON;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	KASSERT(LIST_EMPTY(&vp->v_nclist));
	KASSERT(LIST_EMPTY(&vp->v_dnclist));
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_data = 0;
	simple_lock_init(&vp->v_interlock);

	/*
	 * initialize uvm_object within vnode.
	 */
	uobj = &vp->v_uobj;
	KASSERT(uobj->pgops == &uvm_vnodeops);
	KASSERT(uobj->uo_npages == 0);
	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	if (mp && error != EDEADLK)
		vfs_unbusy(mp);
	return (0);
}
/*
* This is really just the reverse of getnewvnode(). Needed for
* VFS_VGET functions who may need to push back a vnode in case
* of a locking race.
*/
void
2005-06-06 03:47:48 +04:00
ungetnewvnode(struct vnode *vp)
{
#ifdef DIAGNOSTIC
if (vp->v_usecount != 1)
2000-06-28 03:51:51 +04:00
panic("ungetnewvnode: busy vnode");
#endif
vp->v_usecount--;
insmntque(vp, NULL);
vp->v_type = VBAD;
simple_lock(&vp->v_interlock);
2004-03-23 16:22:32 +03:00
/*
* Insert at head of LRU list
*/
simple_lock(&vnode_free_list_slock);
if (vp->v_holdcnt > 0)
TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
else
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2004-03-23 16:22:32 +03:00
simple_unlock(&vnode_free_list_slock);
simple_unlock(&vp->v_interlock);
}
1994-05-17 08:21:49 +04:00
/*
* Move a vnode from one mount queue to another.
*/
2006-01-17 00:44:46 +03:00
static void
2005-06-06 03:47:48 +04:00
insmntque(struct vnode *vp, struct mount *mp)
1994-05-17 08:21:49 +04:00
{
#ifdef DIAGNOSTIC
if ((mp != NULL) &&
(mp->mnt_iflag & IMNT_UNMOUNT) &&
!(mp->mnt_flag & MNT_SOFTDEP) &&
vp->v_tag != VT_VFS) {
panic("insmntque into dying filesystem");
}
#endif
2004-03-23 16:22:32 +03:00
1998-03-01 05:20:01 +03:00
simple_lock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
/*
* Delete from old mount point vnode list, if on one.
*/
if (vp->v_mount != NULL)
TAILQ_REMOVE(&vp->v_mount->mnt_vnodelist, vp, v_mntvnodes);
1994-05-17 08:21:49 +04:00
/*
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) != NULL)
TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
1998-03-01 05:20:01 +03:00
simple_unlock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
}
/*
* Update outstanding I/O count and do wakeup if requested.
*/
1996-02-04 05:17:43 +03:00
void
2005-06-06 03:47:48 +04:00
vwakeup(struct buf *bp)
1994-05-17 08:21:49 +04:00
{
2000-03-30 13:27:11 +04:00
struct vnode *vp;
1994-05-17 08:21:49 +04:00
1996-02-04 05:17:43 +03:00
if ((vp = bp->b_vp) != NULL) {
2003-02-06 00:38:38 +03:00
/* XXX global lock hack
* can't use v_interlock here since this is called
* in interrupt context from biodone().
*/
simple_lock(&global_v_numoutput_slock);
1994-06-08 15:28:29 +04:00
if (--vp->v_numoutput < 0)
panic("vwakeup: neg numoutput, vp %p", vp);
1994-05-17 08:21:49 +04:00
if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
vp->v_flag &= ~VBWAIT;
wakeup((void *)&vp->v_numoutput);
1994-05-17 08:21:49 +04:00
}
2003-02-06 00:38:38 +03:00
simple_unlock(&global_v_numoutput_slock);
1994-05-17 08:21:49 +04:00
}
}
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 *
 * => V_SAVE in flags: write out dirty data (pages and buffers) before
 *    invalidating; otherwise contents are discarded.
 * => slpflag/slptimeo are passed to ltsleep() when waiting for busy
 *    buffers (e.g. PCATCH for interruptible waits).
 * => Returns 0 on success or an error from the page flush, fsync, or
 *    an interrupted sleep.
 */
int
vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l,
    int slpflag, int slptimeo)
{
	struct buf *bp, *nbp;
	int s, error;
	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
	    (flags & V_SAVE ? PGO_CLEANIT | PGO_RECLAIM : 0);

	/* XXXUBC this doesn't look at flags or slp* */
	simple_lock(&vp->v_interlock);
	/* NOTE(review): VOP_PUTPAGES is entered with v_interlock held
	 * and there is no matching unlock here, so it presumably
	 * consumes the interlock — confirm against the VOP contract. */
	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
	if (error) {
		return error;
	}

	if (flags & V_SAVE) {
		/* Synchronously write out all dirty data first. */
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, l);
		if (error)
		        return (error);
#ifdef DIAGNOSTIC
		s = splbio();
		if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
		        panic("vinvalbuf: dirty bufs, vp %p", vp);
		splx(s);
#endif
	}

	s = splbio();

restart:
	/* First pass: invalidate everything on the clean list. */
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		simple_lock(&bp->b_interlock);
		if (bp->b_flags & B_BUSY) {
			/* Buffer in use elsewhere; sleep until it is
			 * released, then rescan from the top since the
			 * lists may have changed.  PNORELOCK drops
			 * b_interlock for us. */
			bp->b_flags |= B_WANTED;
			error = ltsleep((void *)bp,
			    slpflag | (PRIBIO + 1) | PNORELOCK,
			    "vinvalbuf", slptimeo, &bp->b_interlock);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		simple_unlock(&bp->b_interlock);
		brelse(bp);
	}

	/* Second pass: the dirty list. */
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		simple_lock(&bp->b_interlock);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = ltsleep((void *)bp,
			    slpflag | (PRIBIO + 1) | PNORELOCK,
			    "vinvalbuf", slptimeo, &bp->b_interlock);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_flags |= B_BUSY | B_VFLUSH;
			simple_unlock(&bp->b_interlock);
			VOP_BWRITE(bp);
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		simple_unlock(&bp->b_interlock);
		brelse(bp);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif
	splx(s);

	return (0);
}
/*
* Destroy any in core blocks past the truncation length.
* Called with the underlying vnode locked, which should prevent new dirty
* buffers from being queued.
*/
int
2005-06-06 03:47:48 +04:00
vtruncbuf(struct vnode *vp, daddr_t lbn, int slpflag, int slptimeo)
{
struct buf *bp, *nbp;
a whole bunch of changes to improve performance and robustness under load: - remove special treatment of pager_map mappings in pmaps. this is required now, since I've removed the globals that expose the address range. pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's no longer any need to special-case it. - eliminate struct uvm_vnode by moving its fields into struct vnode. - rewrite the pageout path. the pager is now responsible for handling the high-level requests instead of only getting control after a bunch of work has already been done on its behalf. this will allow us to UBCify LFS, which needs tighter control over its pages than other filesystems do. writing a page to disk no longer requires making it read-only, which allows us to write wired pages without causing all kinds of havoc. - use a new PG_PAGEOUT flag to indicate that a page should be freed on behalf of the pagedaemon when it's unlocked. this flag is very similar to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the pageout fails due to eg. an indirect-block buffer being locked. this allows us to remove the "version" field from struct vm_page, and together with shrinking "loan_count" from 32 bits to 16, struct vm_page is now 4 bytes smaller. - no longer use PG_RELEASED for swap-backed pages. if the page is busy because it's being paged out, we can't release the swap slot to be reallocated until that write is complete, but unlike with vnodes we don't keep a count of in-progress writes so there's no good way to know when the write is done. instead, when we need to free a busy swap-backed page, just sleep until we can get it busy ourselves. - implement a fast-path for extending writes which allows us to avoid zeroing new pages. this substantially reduces cpu usage. - encapsulate the data used by the genfs code in a struct genfs_node, which must be the first element of the filesystem-specific vnode data for filesystems which use genfs_{get,put}pages(). 
- eliminate many of the UVM pagerops, since they aren't needed anymore now that the pager "put" operation is a higher-level operation. - enhance the genfs code to allow NFS to use the genfs_{get,put}pages instead of a modified copy. - clean up struct vnode by removing all the fields that used to be used by the vfs_cluster.c code (which we don't use anymore with UBC). - remove kmem_object and mb_object since they were useless. instead of allocating pages to these objects, we now just allocate pages with no object. such pages are mapped in the kernel until they are freed, so we can use the mapping to find the page to free it. this allows us to remove splvm() protection in several places. The sum of all these changes improves write throughput on my decstation 5000/200 to within 1% of the rate of NetBSD 1.5 and reduces the elapsed time for "make release" of a NetBSD 1.5 source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
int s, error;
voff_t off;
off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
simple_lock(&vp->v_interlock);
error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
if (error) {
return error;
}
s = splbio();
restart:
for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
nbp = LIST_NEXT(bp, b_vnbufs);
if (bp->b_lblkno < lbn)
continue;
2003-02-06 00:38:38 +03:00
simple_lock(&bp->b_interlock);
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
2003-02-06 00:38:38 +03:00
error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
"vtruncbuf", slptimeo, &bp->b_interlock);
if (error) {
splx(s);
return (error);
}
goto restart;
}
bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
2003-02-06 00:38:38 +03:00
simple_unlock(&bp->b_interlock);
brelse(bp);
}
for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = LIST_NEXT(bp, b_vnbufs);
if (bp->b_lblkno < lbn)
continue;
2003-02-06 00:38:38 +03:00
simple_lock(&bp->b_interlock);
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
2003-02-06 00:38:38 +03:00
error = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
"vtruncbuf", slptimeo, &bp->b_interlock);
if (error) {
splx(s);
return (error);
}
goto restart;
}
bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
2003-02-06 00:38:38 +03:00
simple_unlock(&bp->b_interlock);
brelse(bp);
}
splx(s);
1994-06-08 15:28:29 +04:00
return (0);
}
void
2005-06-06 03:47:48 +04:00
vflushbuf(struct vnode *vp, int sync)
1994-06-08 15:28:29 +04:00
{
2000-03-30 13:27:11 +04:00
struct buf *bp, *nbp;
int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);
1994-06-08 15:28:29 +04:00
int s;
simple_lock(&vp->v_interlock);
(void) VOP_PUTPAGES(vp, 0, 0, flags);
1994-06-08 15:28:29 +04:00
loop:
s = splbio();
for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = LIST_NEXT(bp, b_vnbufs);
2003-02-06 00:38:38 +03:00
simple_lock(&bp->b_interlock);
if ((bp->b_flags & B_BUSY)) {
simple_unlock(&bp->b_interlock);
1994-06-08 15:28:29 +04:00
continue;
2003-02-06 00:38:38 +03:00
}
1994-06-08 15:28:29 +04:00
if ((bp->b_flags & B_DELWRI) == 0)
panic("vflushbuf: not dirty, bp %p", bp);
bp->b_flags |= B_BUSY | B_VFLUSH;
2003-02-06 00:38:38 +03:00
simple_unlock(&bp->b_interlock);
1994-06-08 15:28:29 +04:00
splx(s);
/*
* Wait for I/O associated with indirect blocks to complete,
* since there is no way to quickly wait for them below.
*/
if (bp->b_vp == vp || sync == 0)
(void) bawrite(bp);
else
(void) bwrite(bp);
goto loop;
}
if (sync == 0) {
splx(s);
return;
}
2003-02-06 00:38:38 +03:00
simple_lock(&global_v_numoutput_slock);
1994-06-08 15:28:29 +04:00
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
ltsleep((void *)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0,
2003-02-06 00:38:38 +03:00
&global_v_numoutput_slock);
1994-06-08 15:28:29 +04:00
}
2003-02-06 00:38:38 +03:00
simple_unlock(&global_v_numoutput_slock);
1994-06-08 15:28:29 +04:00
splx(s);
if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1994-06-08 15:28:29 +04:00
vprint("vflushbuf: dirty", vp);
goto loop;
}
1994-05-17 08:21:49 +04:00
}
/*
* Associate a buffer with a vnode.
*/
1996-02-04 05:17:43 +03:00
void
2005-06-06 03:47:48 +04:00
bgetvp(struct vnode *vp, struct buf *bp)
1994-05-17 08:21:49 +04:00
{
int s;
1994-05-17 08:21:49 +04:00
if (bp->b_vp)
panic("bgetvp: not free, bp %p", bp);
1994-05-17 08:21:49 +04:00
VHOLD(vp);
s = splbio();
1994-05-17 08:21:49 +04:00
bp->b_vp = vp;
if (vp->v_type == VBLK || vp->v_type == VCHR)
bp->b_dev = vp->v_rdev;
else
bp->b_dev = NODEV;
/*
* Insert onto list for new vnode.
*/
bufinsvn(bp, &vp->v_cleanblkhd);
splx(s);
1994-05-17 08:21:49 +04:00
}
/*
* Disassociate a buffer from a vnode.
*/
1996-02-04 05:17:43 +03:00
void
2005-06-06 03:47:48 +04:00
brelvp(struct buf *bp)
1994-05-17 08:21:49 +04:00
{
struct vnode *vp;
int s;
1994-05-17 08:21:49 +04:00
if (bp->b_vp == NULL)
panic("brelvp: vp NULL, bp %p", bp);
s = splbio();
vp = bp->b_vp;
1994-05-17 08:21:49 +04:00
/*
* Delete from old vnode list, if on one.
*/
if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1994-05-17 08:21:49 +04:00
bufremvn(bp);
a whole bunch of changes to improve performance and robustness under load: - remove special treatment of pager_map mappings in pmaps. this is required now, since I've removed the globals that expose the address range. pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's no longer any need to special-case it. - eliminate struct uvm_vnode by moving its fields into struct vnode. - rewrite the pageout path. the pager is now responsible for handling the high-level requests instead of only getting control after a bunch of work has already been done on its behalf. this will allow us to UBCify LFS, which needs tighter control over its pages than other filesystems do. writing a page to disk no longer requires making it read-only, which allows us to write wired pages without causing all kinds of havoc. - use a new PG_PAGEOUT flag to indicate that a page should be freed on behalf of the pagedaemon when it's unlocked. this flag is very similar to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the pageout fails due to eg. an indirect-block buffer being locked. this allows us to remove the "version" field from struct vm_page, and together with shrinking "loan_count" from 32 bits to 16, struct vm_page is now 4 bytes smaller. - no longer use PG_RELEASED for swap-backed pages. if the page is busy because it's being paged out, we can't release the swap slot to be reallocated until that write is complete, but unlike with vnodes we don't keep a count of in-progress writes so there's no good way to know when the write is done. instead, when we need to free a busy swap-backed page, just sleep until we can get it busy ourselves. - implement a fast-path for extending writes which allows us to avoid zeroing new pages. this substantially reduces cpu usage. - encapsulate the data used by the genfs code in a struct genfs_node, which must be the first element of the filesystem-specific vnode data for filesystems which use genfs_{get,put}pages(). 
- eliminate many of the UVM pagerops, since they aren't needed anymore now that the pager "put" operation is a higher-level operation. - enhance the genfs code to allow NFS to use the genfs_{get,put}pages instead of a modified copy. - clean up struct vnode by removing all the fields that used to be used by the vfs_cluster.c code (which we don't use anymore with UBC). - remove kmem_object and mb_object since they were useless. instead of allocating pages to these objects, we now just allocate pages with no object. such pages are mapped in the kernel until they are freed, so we can use the mapping to find the page to free it. this allows us to remove splvm() protection in several places. The sum of all these changes improves write throughput on my decstation 5000/200 to within 1% of the rate of NetBSD 1.5 and reduces the elapsed time for "make release" of a NetBSD 1.5 source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
if (TAILQ_EMPTY(&vp->v_uobj.memq) && (vp->v_flag & VONWORKLST) &&
LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
vp->v_flag &= ~VWRITEMAPDIRTY;
vn_syncer_remove_from_worklist(vp);
}
bp->b_vp = NULL;
1994-05-17 08:21:49 +04:00
HOLDRELE(vp);
splx(s);
1994-05-17 08:21:49 +04:00
}
/*
* Reassign a buffer from one vnode to another.
* Used to assign file specific control information
* (indirect blocks) to the vnode to which they belong.
*
* This function must be called at splbio().
1994-05-17 08:21:49 +04:00
*/
1996-02-04 05:17:43 +03:00
void
2005-06-06 03:47:48 +04:00
reassignbuf(struct buf *bp, struct vnode *newvp)
1994-05-17 08:21:49 +04:00
{
struct buflists *listheadp;
2005-05-31 02:15:38 +04:00
int delayx;
1994-05-17 08:21:49 +04:00
/*
* Delete from old vnode list, if on one.
*/
if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1994-05-17 08:21:49 +04:00
bufremvn(bp);
/*
* If dirty, put on list of dirty buffers;
* otherwise insert onto list of clean buffers.
*/
if ((bp->b_flags & B_DELWRI) == 0) {
1994-05-17 08:21:49 +04:00
listheadp = &newvp->v_cleanblkhd;
a whole bunch of changes to improve performance and robustness under load: - remove special treatment of pager_map mappings in pmaps. this is required now, since I've removed the globals that expose the address range. pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's no longer any need to special-case it. - eliminate struct uvm_vnode by moving its fields into struct vnode. - rewrite the pageout path. the pager is now responsible for handling the high-level requests instead of only getting control after a bunch of work has already been done on its behalf. this will allow us to UBCify LFS, which needs tighter control over its pages than other filesystems do. writing a page to disk no longer requires making it read-only, which allows us to write wired pages without causing all kinds of havoc. - use a new PG_PAGEOUT flag to indicate that a page should be freed on behalf of the pagedaemon when it's unlocked. this flag is very similar to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the pageout fails due to eg. an indirect-block buffer being locked. this allows us to remove the "version" field from struct vm_page, and together with shrinking "loan_count" from 32 bits to 16, struct vm_page is now 4 bytes smaller. - no longer use PG_RELEASED for swap-backed pages. if the page is busy because it's being paged out, we can't release the swap slot to be reallocated until that write is complete, but unlike with vnodes we don't keep a count of in-progress writes so there's no good way to know when the write is done. instead, when we need to free a busy swap-backed page, just sleep until we can get it busy ourselves. - implement a fast-path for extending writes which allows us to avoid zeroing new pages. this substantially reduces cpu usage. - encapsulate the data used by the genfs code in a struct genfs_node, which must be the first element of the filesystem-specific vnode data for filesystems which use genfs_{get,put}pages(). 
- eliminate many of the UVM pagerops, since they aren't needed anymore now that the pager "put" operation is a higher-level operation. - enhance the genfs code to allow NFS to use the genfs_{get,put}pages instead of a modified copy. - clean up struct vnode by removing all the fields that used to be used by the vfs_cluster.c code (which we don't use anymore with UBC). - remove kmem_object and mb_object since they were useless. instead of allocating pages to these objects, we now just allocate pages with no object. such pages are mapped in the kernel until they are freed, so we can use the mapping to find the page to free it. this allows us to remove splvm() protection in several places. The sum of all these changes improves write throughput on my decstation 5000/200 to within 1% of the rate of NetBSD 1.5 and reduces the elapsed time for "make release" of a NetBSD 1.5 source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
if (TAILQ_EMPTY(&newvp->v_uobj.memq) &&
(newvp->v_flag & VONWORKLST) &&
LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
newvp->v_flag &= ~VWRITEMAPDIRTY;
vn_syncer_remove_from_worklist(newvp);
}
} else {
listheadp = &newvp->v_dirtyblkhd;
if ((newvp->v_flag & VONWORKLST) == 0) {
switch (newvp->v_type) {
case VDIR:
2005-05-31 02:15:38 +04:00
delayx = dirdelay;
break;
case VBLK:
if (newvp->v_specmountpoint != NULL) {
2005-05-31 02:15:38 +04:00
delayx = metadelay;
break;
}
/* fall through */
default:
2005-05-31 02:15:38 +04:00
delayx = filedelay;
break;
}
if (!newvp->v_mount ||
(newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
2005-05-31 02:15:38 +04:00
vn_syncer_add_to_worklist(newvp, delayx);
}
}
1994-05-17 08:21:49 +04:00
bufinsvn(bp, listheadp);
}
/*
* Create a vnode for a block device.
1997-01-31 22:10:27 +03:00
* Used for root filesystem and swap areas.
1994-05-17 08:21:49 +04:00
* Also used for memory file system special devices.
*/
1996-02-04 05:17:43 +03:00
int
2005-06-06 03:47:48 +04:00
bdevvp(dev_t dev, struct vnode **vpp)
1994-05-17 08:21:49 +04:00
{
1994-06-08 15:28:29 +04:00
return (getdevvp(dev, vpp, VBLK));
1994-05-17 08:21:49 +04:00
}
/*
* Create a vnode for a character device.
* Used for kernfs and some console handling.
*/
1996-02-04 05:17:43 +03:00
int
2005-06-06 03:47:48 +04:00
cdevvp(dev_t dev, struct vnode **vpp)
1994-05-17 08:21:49 +04:00
{
1994-06-08 15:28:29 +04:00
return (getdevvp(dev, vpp, VCHR));
1994-05-17 08:21:49 +04:00
}
/*
* Create a vnode for a device.
* Used by bdevvp (block device) for root file system etc.,
* and by cdevvp (character device) for console and kernfs.
*/
2006-01-17 00:44:46 +03:00
static int
2005-06-06 03:47:48 +04:00
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
1994-05-17 08:21:49 +04:00
{
2000-03-30 13:27:11 +04:00
struct vnode *vp;
1994-05-17 08:21:49 +04:00
struct vnode *nvp;
int error;
1998-03-01 05:20:01 +03:00
if (dev == NODEV) {
*vpp = NULLVP;
1994-05-17 08:21:49 +04:00
return (0);
1998-03-01 05:20:01 +03:00
}
1996-02-04 05:17:43 +03:00
error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
1994-05-17 08:21:49 +04:00
if (error) {
*vpp = NULLVP;
return (error);
}
vp = nvp;
vp->v_type = type;
1996-02-04 05:17:43 +03:00
if ((nvp = checkalias(vp, dev, NULL)) != 0) {
1994-05-17 08:21:49 +04:00
vput(vp);
vp = nvp;
}
*vpp = vp;
return (0);
}
/*
* Check to see if the new vnode represents a special device
* for which we already have a vnode (either because of
* bdevvp() or because of a different vnode representing
* the same block device). If such an alias exists, deallocate
* the existing contents and return the aliased vnode. The
* caller is responsible for filling it with its new contents.
*/
struct vnode *
2005-06-06 03:47:48 +04:00
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
1994-05-17 08:21:49 +04:00
{
2005-12-11 15:16:03 +03:00
struct lwp *l = curlwp; /* XXX */
2000-03-30 13:27:11 +04:00
struct vnode *vp;
1994-05-17 08:21:49 +04:00
struct vnode **vpp;
if (nvp->v_type != VBLK && nvp->v_type != VCHR)
return (NULLVP);
vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
1998-03-01 05:20:01 +03:00
simple_lock(&spechash_slock);
1994-05-17 08:21:49 +04:00
for (vp = *vpp; vp; vp = vp->v_specnext) {
if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
continue;
/*
* Alias, but not in use, so flush it out.
*/
1998-03-01 05:20:01 +03:00
simple_lock(&vp->v_interlock);
simple_unlock(&spechash_slock);
1994-05-17 08:21:49 +04:00
if (vp->v_usecount == 0) {
2005-12-11 15:16:03 +03:00
vgonel(vp, l);
1994-05-17 08:21:49 +04:00
goto loop;
}
/*
* What we're interested to know here is if someone else has
* removed this vnode from the device hash list while we were
* waiting. This can only happen if vclean() did it, and
* this requires the vnode to be locked.
*/
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
1994-05-17 08:21:49 +04:00
goto loop;
if (vp->v_specinfo == NULL) {
vput(vp);
goto loop;
}
simple_lock(&spechash_slock);
1994-05-17 08:21:49 +04:00
break;
}
if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
1994-05-17 08:21:49 +04:00
MALLOC(nvp->v_specinfo, struct specinfo *,
sizeof(struct specinfo), M_VNODE, M_NOWAIT);
/* XXX Erg. */
if (nvp->v_specinfo == NULL) {
simple_unlock(&spechash_slock);
uvm_wait("checkalias");
goto loop;
}
1994-05-17 08:21:49 +04:00
nvp->v_rdev = nvp_rdev;
nvp->v_hashchain = vpp;
nvp->v_specnext = *vpp;
nvp->v_specmountpoint = NULL;
1998-03-01 05:20:01 +03:00
simple_unlock(&spechash_slock);
nvp->v_speclockf = NULL;
simple_lock_init(&nvp->v_spec_cow_slock);
SLIST_INIT(&nvp->v_spec_cow_head);
nvp->v_spec_cow_req = 0;
nvp->v_spec_cow_count = 0;
1994-05-17 08:21:49 +04:00
*vpp = nvp;
1998-03-01 05:20:01 +03:00
if (vp != NULLVP) {
1994-05-17 08:21:49 +04:00
nvp->v_flag |= VALIASED;
vp->v_flag |= VALIASED;
vput(vp);
}
return (NULLVP);
}
1998-03-01 05:20:01 +03:00
simple_unlock(&spechash_slock);
VOP_UNLOCK(vp, 0);
simple_lock(&vp->v_interlock);
2005-12-11 15:16:03 +03:00
vclean(vp, 0, l);
1994-05-17 08:21:49 +04:00
vp->v_op = nvp->v_op;
vp->v_tag = nvp->v_tag;
vp->v_vnlock = &vp->v_lock;
lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
1994-05-17 08:21:49 +04:00
nvp->v_type = VNON;
insmntque(vp, mp);
return (vp);
}
/*
* Grab a particular vnode from the free list, increment its
1998-03-04 12:13:48 +03:00
* reference count and lock it. If the vnode lock bit is set the
* vnode is being eliminated in vgone. In that case, we can not
* grab the vnode, so the process is awakened when the transition is
* completed, and an error returned to indicate that the vnode is no
* longer usable (possibly having been changed to a new file system type).
1994-05-17 08:21:49 +04:00
*/
1994-06-08 15:28:29 +04:00
int
2005-06-06 03:47:48 +04:00
vget(struct vnode *vp, int flags)
1994-05-17 08:21:49 +04:00
{
int error;
1994-05-17 08:21:49 +04:00
1994-06-08 15:28:29 +04:00
/*
* If the vnode is in the process of being cleaned out for
* another use, we wait for the cleaning to finish and then
1998-03-01 05:20:01 +03:00
* return failure. Cleaning is determined by checking that
* the VXLOCK flag is set.
1994-06-08 15:28:29 +04:00
*/
1998-03-01 05:20:01 +03:00
if ((flags & LK_INTERLOCK) == 0)
simple_lock(&vp->v_interlock);
if ((vp->v_flag & (VXLOCK | VFREEING)) != 0) {
if (flags & LK_NOWAIT) {
simple_unlock(&vp->v_interlock);
return EBUSY;
}
1994-05-17 08:21:49 +04:00
vp->v_flag |= VXWANT;
a whole bunch of changes to improve performance and robustness under load: - remove special treatment of pager_map mappings in pmaps. this is required now, since I've removed the globals that expose the address range. pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's no longer any need to special-case it. - eliminate struct uvm_vnode by moving its fields into struct vnode. - rewrite the pageout path. the pager is now responsible for handling the high-level requests instead of only getting control after a bunch of work has already been done on its behalf. this will allow us to UBCify LFS, which needs tighter control over its pages than other filesystems do. writing a page to disk no longer requires making it read-only, which allows us to write wired pages without causing all kinds of havoc. - use a new PG_PAGEOUT flag to indicate that a page should be freed on behalf of the pagedaemon when it's unlocked. this flag is very similar to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the pageout fails due to eg. an indirect-block buffer being locked. this allows us to remove the "version" field from struct vm_page, and together with shrinking "loan_count" from 32 bits to 16, struct vm_page is now 4 bytes smaller. - no longer use PG_RELEASED for swap-backed pages. if the page is busy because it's being paged out, we can't release the swap slot to be reallocated until that write is complete, but unlike with vnodes we don't keep a count of in-progress writes so there's no good way to know when the write is done. instead, when we need to free a busy swap-backed page, just sleep until we can get it busy ourselves. - implement a fast-path for extending writes which allows us to avoid zeroing new pages. this substantially reduces cpu usage. - encapsulate the data used by the genfs code in a struct genfs_node, which must be the first element of the filesystem-specific vnode data for filesystems which use genfs_{get,put}pages(). 
- eliminate many of the UVM pagerops, since they aren't needed anymore now that the pager "put" operation is a higher-level operation. - enhance the genfs code to allow NFS to use the genfs_{get,put}pages instead of a modified copy. - clean up struct vnode by removing all the fields that used to be used by the vfs_cluster.c code (which we don't use anymore with UBC). - remove kmem_object and mb_object since they were useless. instead of allocating pages to these objects, we now just allocate pages with no object. such pages are mapped in the kernel until they are freed, so we can use the mapping to find the page to free it. this allows us to remove splvm() protection in several places. The sum of all these changes improves write throughput on my decstation 5000/200 to within 1% of the rate of NetBSD 1.5 and reduces the elapsed time for "make release" of a NetBSD 1.5 source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
ltsleep(vp, PINOD|PNORELOCK, "vget", 0, &vp->v_interlock);
1998-03-01 05:20:01 +03:00
return (ENOENT);
1994-05-17 08:21:49 +04:00
}
1998-03-01 05:20:01 +03:00
if (vp->v_usecount == 0) {
simple_lock(&vnode_free_list_slock);
if (vp->v_holdcnt > 0)
TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
else
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1998-03-01 05:20:01 +03:00
simple_unlock(&vnode_free_list_slock);
}
1994-05-17 08:21:49 +04:00
vp->v_usecount++;
#ifdef DIAGNOSTIC
if (vp->v_usecount == 0) {
vprint("vget", vp);
panic("vget: usecount overflow, vp %p", vp);
}
#endif
1998-03-01 05:20:01 +03:00
if (flags & LK_TYPE_MASK) {
if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
vrele(vp);
}
1998-03-01 05:20:01 +03:00
return (error);
}
simple_unlock(&vp->v_interlock);
1994-05-17 08:21:49 +04:00
return (0);
}
/*
* vput(), just unlock and vrele()
*/
void
2005-06-06 03:47:48 +04:00
vput(struct vnode *vp)
1994-05-17 08:21:49 +04:00
{
2005-12-11 15:16:03 +03:00
struct lwp *l = curlwp; /* XXX */
1994-06-08 15:28:29 +04:00
1999-10-02 01:57:42 +04:00
#ifdef DIAGNOSTIC
1998-03-01 05:20:01 +03:00
if (vp == NULL)
panic("vput: null vp");
#endif
simple_lock(&vp->v_interlock);
vp->v_usecount--;
if (vp->v_usecount > 0) {
simple_unlock(&vp->v_interlock);
VOP_UNLOCK(vp, 0);
return;
}
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
vprint("vput: bad ref count", vp);
panic("vput: ref cnt");
}
#endif
/*
* Insert at tail of LRU list.
1998-03-01 05:20:01 +03:00
*/
simple_lock(&vnode_free_list_slock);
if (vp->v_holdcnt > 0)
TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
else
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1998-03-01 05:20:01 +03:00
simple_unlock(&vnode_free_list_slock);
if (vp->v_flag & VEXECMAP) {
uvmexp.execpages -= vp->v_uobj.uo_npages;
uvmexp.filepages += vp->v_uobj.uo_npages;
}
vp->v_flag &= ~(VTEXT|VEXECMAP|VWRITEMAP|VMAPPED);
1998-03-01 05:20:01 +03:00
simple_unlock(&vp->v_interlock);
2005-12-11 15:16:03 +03:00
VOP_INACTIVE(vp, l);
1994-05-17 08:21:49 +04:00
}
/*
* Vnode release.
* If count drops to zero, call inactive routine and return to freelist.
*/
void
2005-06-06 03:47:48 +04:00
vrele(struct vnode *vp)
1994-05-17 08:21:49 +04:00
{
2005-12-11 15:16:03 +03:00
struct lwp *l = curlwp; /* XXX */
1994-05-17 08:21:49 +04:00
#ifdef DIAGNOSTIC
if (vp == NULL)
panic("vrele: null vp");
#endif
1998-03-01 05:20:01 +03:00
simple_lock(&vp->v_interlock);
1994-05-17 08:21:49 +04:00
vp->v_usecount--;
1998-03-01 05:20:01 +03:00
if (vp->v_usecount > 0) {
simple_unlock(&vp->v_interlock);
1994-05-17 08:21:49 +04:00
return;
1998-03-01 05:20:01 +03:00
}
1994-05-17 08:21:49 +04:00
#ifdef DIAGNOSTIC
1998-03-01 05:20:01 +03:00
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1994-05-17 08:21:49 +04:00
vprint("vrele: bad ref count", vp);
panic("vrele: ref cnt vp %p", vp);
1994-05-17 08:21:49 +04:00
}
#endif
1994-06-08 15:28:29 +04:00
/*
* Insert at tail of LRU list.
1994-06-08 15:28:29 +04:00
*/
1998-03-01 05:20:01 +03:00
simple_lock(&vnode_free_list_slock);
if (vp->v_holdcnt > 0)
TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
else
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1998-03-01 05:20:01 +03:00
simple_unlock(&vnode_free_list_slock);
if (vp->v_flag & VEXECMAP) {
uvmexp.execpages -= vp->v_uobj.uo_npages;
uvmexp.filepages += vp->v_uobj.uo_npages;
}
vp->v_flag &= ~(VTEXT|VEXECMAP|VWRITEMAP|VMAPPED);
1998-03-01 05:20:01 +03:00
if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
2005-12-11 15:16:03 +03:00
VOP_INACTIVE(vp, l);
1994-05-17 08:21:49 +04:00
}
/*
* Page or buffer structure gets a reference.
* Called with v_interlock held.
1994-05-17 08:21:49 +04:00
*/
1994-06-08 15:28:29 +04:00
void
2005-06-06 03:47:48 +04:00
vholdl(struct vnode *vp)
1994-05-17 08:21:49 +04:00
{
/*
* If it is on the freelist and the hold count is currently
* zero, move it to the hold list. The test of the back
* pointer and the use reference count of zero is because
* it will be removed from a free list by getnewvnode,
* but will not have its reference count incremented until
* after calling vgone. If the reference count were
* incremented first, vgone would (incorrectly) try to
* close the previous instance of the underlying object.
* So, the back pointer is explicitly set to `0xdeadb' in
* getnewvnode after removing it from a freelist to ensure
* that we do not try to move it here.
*/
if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
vp->v_holdcnt == 0 && vp->v_usecount == 0) {
simple_lock(&vnode_free_list_slock);
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
}
1994-05-17 08:21:49 +04:00
vp->v_holdcnt++;
}
/*
* Page or buffer structure frees a reference.
* Called with v_interlock held.
1994-05-17 08:21:49 +04:00
*/
1994-06-08 15:28:29 +04:00
void
2005-06-06 03:47:48 +04:00
holdrelel(struct vnode *vp)
1994-05-17 08:21:49 +04:00
{
if (vp->v_holdcnt <= 0)
panic("holdrelel: holdcnt vp %p", vp);
1994-05-17 08:21:49 +04:00
vp->v_holdcnt--;
/*
* If it is on the holdlist and the hold count drops to
* zero, move it to the free list. The test of the back
* pointer and the use reference count of zero is because
* it will be removed from a free list by getnewvnode,
* but will not have its reference count incremented until
* after calling vgone. If the reference count were
* incremented first, vgone would (incorrectly) try to
* close the previous instance of the underlying object.
* So, the back pointer is explicitly set to `0xdeadb' in
* getnewvnode after removing it from a freelist to ensure
* that we do not try to move it here.
*/
if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
vp->v_holdcnt == 0 && vp->v_usecount == 0) {
simple_lock(&vnode_free_list_slock);
TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
}
1994-05-17 08:21:49 +04:00
}
/*
* Vnode reference.
*/
void
2005-06-06 03:47:48 +04:00
vref(struct vnode *vp)
{
simple_lock(&vp->v_interlock);
if (vp->v_usecount <= 0)
panic("vref used where vget required, vp %p", vp);
vp->v_usecount++;
#ifdef DIAGNOSTIC
if (vp->v_usecount == 0) {
vprint("vref", vp);
panic("vref: usecount overflow, vp %p", vp);
}
#endif
simple_unlock(&vp->v_interlock);
}
1994-05-17 08:21:49 +04:00
/*
* Remove any vnodes in the vnode table belonging to mount point mp.
*
* If FORCECLOSE is not specified, there should not be any active ones,
1994-05-17 08:21:49 +04:00
* return error if any are found (nb: this is a user error, not a
* system error). If FORCECLOSE is specified, detach any active vnodes
1994-05-17 08:21:49 +04:00
* that are found.
*
* If WRITECLOSE is set, only flush out regular file vnodes open for
* writing.
*
* SKIPSYSTEM causes any vnodes marked V_SYSTEM to be skipped.
1994-05-17 08:21:49 +04:00
*/
1994-06-08 15:28:29 +04:00
#ifdef DEBUG
int busyprt = 0; /* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif
1994-05-17 08:21:49 +04:00
1996-02-04 05:17:43 +03:00
int
2005-06-06 03:47:48 +04:00
vflush(struct mount *mp, struct vnode *skipvp, int flags)
1994-05-17 08:21:49 +04:00
{
2005-12-11 15:16:03 +03:00
struct lwp *l = curlwp; /* XXX */
struct vnode *vp, *nvp;
1994-05-17 08:21:49 +04:00
int busy = 0;
1998-03-01 05:20:01 +03:00
simple_lock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
loop:
/*
* NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
* and vclean() are called
*/
for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
1994-05-17 08:21:49 +04:00
if (vp->v_mount != mp)
goto loop;
nvp = TAILQ_NEXT(vp, v_mntvnodes);
1994-05-17 08:21:49 +04:00
/*
* Skip over a selected vnode.
*/
if (vp == skipvp)
continue;
1998-03-01 05:20:01 +03:00
simple_lock(&vp->v_interlock);
1994-05-17 08:21:49 +04:00
/*
* Skip over a vnodes marked VSYSTEM.
*/
1998-03-01 05:20:01 +03:00
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
simple_unlock(&vp->v_interlock);
1994-05-17 08:21:49 +04:00
continue;
1998-03-01 05:20:01 +03:00
}
1994-06-08 15:28:29 +04:00
/*
* If WRITECLOSE is set, only flush out regular file
* vnodes open for writing.
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
simple_unlock(&vp->v_interlock);
1994-06-08 15:28:29 +04:00
continue;
}
1994-05-17 08:21:49 +04:00
/*
* With v_usecount == 0, all we need to do is clear
* out the vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
1998-03-01 05:20:01 +03:00
simple_unlock(&mntvnode_slock);
2005-12-11 15:16:03 +03:00
vgonel(vp, l);
1998-03-01 05:20:01 +03:00
simple_lock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
continue;
}
/*
1994-06-08 15:28:29 +04:00
* If FORCECLOSE is set, forcibly close the vnode.
1994-05-17 08:21:49 +04:00
* For block or character devices, revert to an
* anonymous device. For all other files, just kill them.
*/
if (flags & FORCECLOSE) {
1998-03-01 05:20:01 +03:00
simple_unlock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
if (vp->v_type != VBLK && vp->v_type != VCHR) {
2005-12-11 15:16:03 +03:00
vgonel(vp, l);
1994-05-17 08:21:49 +04:00
} else {
2005-12-11 15:16:03 +03:00
vclean(vp, 0, l);
1994-06-08 15:28:29 +04:00
vp->v_op = spec_vnodeop_p;
1994-05-17 08:21:49 +04:00
insmntque(vp, (struct mount *)0);
}
1998-03-01 05:20:01 +03:00
simple_lock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
continue;
}
1994-06-08 15:28:29 +04:00
#ifdef DEBUG
1994-05-17 08:21:49 +04:00
if (busyprt)
vprint("vflush: busy vnode", vp);
1994-06-08 15:28:29 +04:00
#endif
1998-03-01 05:20:01 +03:00
simple_unlock(&vp->v_interlock);
1994-05-17 08:21:49 +04:00
busy++;
}
1998-03-01 05:20:01 +03:00
simple_unlock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
if (busy)
return (EBUSY);
return (0);
}
/*
* Disassociate the underlying file system from a vnode.
*/
2006-01-17 00:44:46 +03:00
static void
2005-12-11 15:16:03 +03:00
vclean(struct vnode *vp, int flags, struct lwp *l)
1994-05-17 08:21:49 +04:00
{
int active;
1994-05-17 08:21:49 +04:00
LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
1994-05-17 08:21:49 +04:00
/*
* Check to see if the vnode is in use.
* If so we have to reference it before we clean it out
* so that its count cannot fall to zero and generate a
* race against ourselves to recycle it.
*/
if ((active = vp->v_usecount) != 0) {
vp->v_usecount++;
#ifdef DIAGNOSTIC
if (vp->v_usecount == 0) {
vprint("vclean", vp);
panic("vclean: usecount overflow");
}
#endif
}
1994-05-17 08:21:49 +04:00
/*
* Prevent the vnode from being recycled or
* brought into use while we clean it out.
*/
if (vp->v_flag & VXLOCK)
panic("vclean: deadlock, vp %p", vp);
1994-05-17 08:21:49 +04:00
vp->v_flag |= VXLOCK;
if (vp->v_flag & VEXECMAP) {
uvmexp.execpages -= vp->v_uobj.uo_npages;
uvmexp.filepages += vp->v_uobj.uo_npages;
}
vp->v_flag &= ~(VTEXT|VEXECMAP);
1994-05-17 08:21:49 +04:00
/*
1998-03-01 05:20:01 +03:00
* Even if the count is zero, the VOP_INACTIVE routine may still
* have the object locked while it cleans it out. For
* active vnodes, it ensures that no other activity can
1998-03-01 05:20:01 +03:00
* occur while the underlying object is being cleaned out.
*
* We drain the lock to make sure we are the last one trying to
* get it and immediately resurrect the lock. Future accesses
* for locking this _vnode_ will be protected by VXLOCK. However,
* upper layers might be using the _lock_ in case the file system
* exported it and might access it while the vnode lingers in
* deadfs.
1994-05-17 08:21:49 +04:00
*/
VOP_LOCK(vp, LK_DRAIN | LK_RESURRECT | LK_INTERLOCK);
1998-03-01 05:20:01 +03:00
/*
* Clean out any cached data associated with the vnode.
* If special device, remove it from special device alias list.
* if it is on one.
1994-05-17 08:21:49 +04:00
*/
if (flags & DOCLOSE) {
int error;
struct vnode *vq, *vx;
2005-12-11 15:16:03 +03:00
error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
if (error)
2005-12-11 15:16:03 +03:00
error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
KASSERT(error == 0);
KASSERT((vp->v_flag & VONWORKLST) == 0);
if (active)
VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
vp->v_specinfo != 0) {
simple_lock(&spechash_slock);
if (vp->v_hashchain != NULL) {
if (*vp->v_hashchain == vp) {
*vp->v_hashchain = vp->v_specnext;
} else {
for (vq = *vp->v_hashchain; vq;
vq = vq->v_specnext) {
if (vq->v_specnext != vp)
continue;
vq->v_specnext = vp->v_specnext;
break;
}
if (vq == NULL)
panic("missing bdev");
}
if (vp->v_flag & VALIASED) {
vx = NULL;
for (vq = *vp->v_hashchain; vq;
vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
vq->v_type != vp->v_type)
continue;
if (vx)
break;
vx = vq;
}
if (vx == NULL)
panic("missing alias");
if (vq == NULL)
vx->v_flag &= ~VALIASED;
vp->v_flag &= ~VALIASED;
}
}
simple_unlock(&spechash_slock);
FREE(vp->v_specinfo, M_VNODE);
vp->v_specinfo = NULL;
}
}
LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
1998-03-01 05:20:01 +03:00
1994-05-17 08:21:49 +04:00
/*
1994-06-08 15:28:29 +04:00
* If purging an active vnode, it must be closed and
1998-03-01 05:20:01 +03:00
* deactivated before being reclaimed. Note that the
* VOP_INACTIVE will unlock the vnode.
1994-05-17 08:21:49 +04:00
*/
if (active) {
2005-12-11 15:16:03 +03:00
VOP_INACTIVE(vp, l);
1998-03-01 05:20:01 +03:00
} else {
/*
* Any other processes trying to obtain this lock must first
* wait for VXLOCK to clear, then call the new lock operation.
*/
VOP_UNLOCK(vp, 0);
1994-05-17 08:21:49 +04:00
}
/*
* Reclaim the vnode.
*/
2005-12-11 15:16:03 +03:00
if (VOP_RECLAIM(vp, l))
panic("vclean: cannot reclaim, vp %p", vp);
if (active) {
/*
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
simple_lock(&vp->v_interlock);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
vprint("vclean: bad ref count", vp);
panic("vclean: ref cnt");
}
#endif
/*
* Insert at tail of LRU list.
*/
simple_unlock(&vp->v_interlock);
simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
if (vp->v_holdcnt > 0)
panic("vclean: not clean, vp %p", vp);
#endif
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
} else
simple_unlock(&vp->v_interlock);
}
1994-06-08 15:28:29 +04:00
KASSERT(vp->v_uobj.uo_npages == 0);
2005-11-30 01:52:02 +03:00
if (vp->v_type == VREG && vp->v_ractx != NULL) {
uvm_ra_freectx(vp->v_ractx);
vp->v_ractx = NULL;
}
1998-03-01 05:20:01 +03:00
cache_purge(vp);
1994-05-17 08:21:49 +04:00
/*
1994-06-08 15:28:29 +04:00
* Done with purge, notify sleepers of the grim news.
1994-05-17 08:21:49 +04:00
*/
1994-06-08 15:28:29 +04:00
vp->v_op = dead_vnodeop_p;
vp->v_tag = VT_NON;
vp->v_vnlock = NULL;
simple_lock(&vp->v_interlock);
VN_KNOTE(vp, NOTE_REVOKE); /* FreeBSD has this in vn_pollgone() */
vp->v_flag &= ~(VXLOCK|VLOCKSWORK);
1994-05-17 08:21:49 +04:00
if (vp->v_flag & VXWANT) {
vp->v_flag &= ~VXWANT;
simple_unlock(&vp->v_interlock);
wakeup((void *)vp);
} else
simple_unlock(&vp->v_interlock);
1994-05-17 08:21:49 +04:00
}
/*
1998-03-01 05:20:01 +03:00
* Recycle an unused vnode to the front of the free list.
* Release the passed interlock if the vnode will be recycled.
1994-05-17 08:21:49 +04:00
*/
1998-03-01 05:20:01 +03:00
int
2005-12-11 15:16:03 +03:00
vrecycle(struct vnode *vp, struct simplelock *inter_lkp, struct lwp *l)
2004-03-23 16:22:32 +03:00
{
1998-03-01 05:20:01 +03:00
simple_lock(&vp->v_interlock);
if (vp->v_usecount == 0) {
if (inter_lkp)
simple_unlock(inter_lkp);
2005-12-11 15:16:03 +03:00
vgonel(vp, l);
1998-03-01 05:20:01 +03:00
return (1);
1994-05-17 08:21:49 +04:00
}
1998-03-01 05:20:01 +03:00
simple_unlock(&vp->v_interlock);
return (0);
1994-05-17 08:21:49 +04:00
}
/*
* Eliminate all activity associated with a vnode
* in preparation for reuse.
*/
void
2005-06-06 03:47:48 +04:00
vgone(struct vnode *vp)
1998-03-01 05:20:01 +03:00
{
2005-12-11 15:16:03 +03:00
struct lwp *l = curlwp; /* XXX */
1998-03-01 05:20:01 +03:00
simple_lock(&vp->v_interlock);
2005-12-11 15:16:03 +03:00
vgonel(vp, l);
1998-03-01 05:20:01 +03:00
}
/*
* vgone, with the vp interlock held.
*/
void
2005-12-11 15:16:03 +03:00
vgonel(struct vnode *vp, struct lwp *l)
1994-05-17 08:21:49 +04:00
{
LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
1994-05-17 08:21:49 +04:00
/*
* If a vgone (or vclean) is already in progress,
* wait until it is done and return.
*/
1994-05-17 08:21:49 +04:00
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, &vp->v_interlock);
1994-05-17 08:21:49 +04:00
return;
}
1994-05-17 08:21:49 +04:00
/*
* Clean out the filesystem specific data.
*/
2005-12-11 15:16:03 +03:00
vclean(vp, DOCLOSE, l);
KASSERT((vp->v_flag & VONWORKLST) == 0);
1994-05-17 08:21:49 +04:00
/*
* Delete from old mount point vnode list, if on one.
*/
1998-03-01 05:20:01 +03:00
if (vp->v_mount != NULL)
insmntque(vp, (struct mount *)0);
1994-05-17 08:21:49 +04:00
/*
* The test of the back pointer and the reference count of
* zero is because it will be removed from the free list by
* getcleanvnode, but will not have its reference count
* incremented until after calling vgone. If the reference
* count were incremented first, vgone would (incorrectly)
* try to close the previous instance of the underlying object.
1994-06-08 15:28:29 +04:00
* So, the back pointer is explicitly set to `0xdeadb' in
* getnewvnode after removing it from the freelist to ensure
* that we do not try to move it here.
1994-05-17 08:21:49 +04:00
*/
vp->v_type = VBAD;
1998-03-01 05:20:01 +03:00
if (vp->v_usecount == 0) {
bool dofree;
1998-03-01 05:20:01 +03:00
simple_lock(&vnode_free_list_slock);
if (vp->v_holdcnt > 0)
panic("vgonel: not clean, vp %p", vp);
/*
* if it isn't on the freelist, we're called by getcleanvnode
* and vnode is being re-used. otherwise, we'll free it.
*/
dofree = vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb;
if (dofree) {
1998-03-01 05:20:01 +03:00
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
numvnodes--;
1998-03-01 05:20:01 +03:00
}
simple_unlock(&vnode_free_list_slock);
if (dofree)
pool_put(&vnode_pool, vp);
1994-05-17 08:21:49 +04:00
}
}
/*
* Lookup a vnode by device number.
*/
1996-02-04 05:17:43 +03:00
int
2005-06-06 03:47:48 +04:00
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1994-05-17 08:21:49 +04:00
{
1998-03-01 05:20:01 +03:00
struct vnode *vp;
int rc = 0;
1994-05-17 08:21:49 +04:00
1998-03-01 05:20:01 +03:00
simple_lock(&spechash_slock);
1994-05-17 08:21:49 +04:00
for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
if (dev != vp->v_rdev || type != vp->v_type)
continue;
*vpp = vp;
1998-03-01 05:20:01 +03:00
rc = 1;
break;
1994-05-17 08:21:49 +04:00
}
1998-03-01 05:20:01 +03:00
simple_unlock(&spechash_slock);
return (rc);
1994-05-17 08:21:49 +04:00
}
/*
* Revoke all the vnodes corresponding to the specified minor number
* range (endpoints inclusive) of the specified major.
*/
void
2005-06-06 03:47:48 +04:00
vdevgone(int maj, int minl, int minh, enum vtype type)
{
struct vnode *vp;
int mn;
vp = NULL; /* XXX gcc */
for (mn = minl; mn <= minh; mn++)
if (vfinddev(makedev(maj, mn), type, &vp))
VOP_REVOKE(vp, REVOKEALL);
}
1994-05-17 08:21:49 +04:00
/*
* Calculate the total number of references to a special device.
*/
1994-06-08 15:28:29 +04:00
int
2005-06-06 03:47:48 +04:00
vcount(struct vnode *vp)
1994-05-17 08:21:49 +04:00
{
2000-03-30 13:27:11 +04:00
struct vnode *vq, *vnext;
1994-05-17 08:21:49 +04:00
int count;
loop:
if ((vp->v_flag & VALIASED) == 0)
return (vp->v_usecount);
1998-03-01 05:20:01 +03:00
simple_lock(&spechash_slock);
1994-06-08 15:28:29 +04:00
for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
vnext = vq->v_specnext;
1994-05-17 08:21:49 +04:00
if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
continue;
/*
* Alias, but not in use, so flush it out.
*/
if (vq->v_usecount == 0 && vq != vp &&
(vq->v_flag & VXLOCK) == 0) {
1998-03-01 05:20:01 +03:00
simple_unlock(&spechash_slock);
1994-05-17 08:21:49 +04:00
vgone(vq);
goto loop;
}
count += vq->v_usecount;
}
1998-03-01 05:20:01 +03:00
simple_unlock(&spechash_slock);
1994-05-17 08:21:49 +04:00
return (count);
}
/*
 * sysctl helper routine to return the list of supported fstypes
 * as a single space-separated string.
 */
static int
sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS)
{
	char buf[sizeof(((struct statvfs *)NULL)->f_fstypename)];
	char *dst = oldp;
	struct vfsops *vfsp;
	size_t needed, left, slen;
	int error, emitted;

	/* This node is read-only and takes no further name components. */
	if (newp != NULL)
		return (EPERM);
	if (namelen != 0)
		return (EINVAL);

	emitted = 0;
	error = 0;
	needed = 0;
	left = *oldlenp;

	LIST_FOREACH(vfsp, &vfs_list, vfs_list) {
		if (dst == NULL) {
			/* Size probe only: count name plus separator/NUL. */
			needed += strlen(vfsp->vfs_name) + 1;
			continue;
		}
		memset(buf, 0, sizeof(buf));
		if (!emitted) {
			strncpy(buf, vfsp->vfs_name, sizeof(buf));
			emitted = 1;
		} else {
			/* Subsequent names are prefixed with a space. */
			buf[0] = ' ';
			strncpy(buf + 1, vfsp->vfs_name, sizeof(buf) - 1);
		}
		buf[sizeof(buf) - 1] = '\0';
		slen = strlen(buf);
		if (left < slen + 1)
			break;
		/* +1 to copy out the trailing NUL byte */
		error = copyout(buf, dst, slen + 1);
		if (error)
			break;
		dst += slen;
		needed += slen;
		left -= slen;
	}
	*oldlenp = needed;
	return (error);
}
1998-03-01 05:20:01 +03:00
/*
* Top level filesystem related information gathering.
*/
SYSCTL_SETUP(sysctl_vfs_setup, "sysctl vfs subtree setup")
1998-03-01 05:20:01 +03:00
{
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT,
CTLTYPE_NODE, "vfs", NULL,
NULL, 0, NULL, 0,
CTL_VFS, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT,
2004-05-25 08:44:43 +04:00
CTLTYPE_NODE, "generic",
SYSCTL_DESCR("Non-specific vfs related information"),
NULL, 0, NULL, 0,
CTL_VFS, VFS_GENERIC, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
2004-05-25 08:44:43 +04:00
CTLTYPE_INT, "usermount",
SYSCTL_DESCR("Whether unprivileged users may mount "
"filesystems"),
NULL, 0, &dovfsusermount, 0,
CTL_VFS, VFS_GENERIC, VFS_USERMOUNT, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT,
CTLTYPE_STRING, "fstypes",
SYSCTL_DESCR("List of file systems present"),
sysctl_vfs_generic_fstypes, 0, NULL, 0,
CTL_VFS, VFS_GENERIC, CTL_CREATE, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "magiclinks",
SYSCTL_DESCR("Whether \"magic\" symlinks are expanded"),
NULL, 0, &vfs_magiclinks, 0,
CTL_VFS, VFS_GENERIC, VFS_MAGICLINKS, CTL_EOL);
1998-03-01 05:20:01 +03:00
}
1994-05-17 08:21:49 +04:00
int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP 10
/*
* Dump vnode list (via sysctl).
* Copyout address of vnode followed by vnode.
*/
/* ARGSUSED */
1996-02-04 05:17:43 +03:00
int
sysctl_kern_vnode(SYSCTLFN_ARGS)
1994-05-17 08:21:49 +04:00
{
char *where = oldp;
size_t *sizep = oldlenp;
1998-03-01 05:20:01 +03:00
struct mount *mp, *nmp;
struct vnode *vp;
1998-03-01 05:20:01 +03:00
char *bp = where, *savebp;
1994-05-17 08:21:49 +04:00
char *ewhere;
int error;
if (namelen != 0)
return (EOPNOTSUPP);
if (newp != NULL)
return (EPERM);
#define VPTRSZ sizeof(struct vnode *)
#define VNODESZ sizeof(struct vnode)
1994-05-17 08:21:49 +04:00
if (where == NULL) {
*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
return (0);
}
ewhere = where + *sizep;
1998-03-01 05:20:01 +03:00
simple_lock(&mountlist_slock);
for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
mp = nmp) {
1998-03-01 05:20:01 +03:00
if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
nmp = CIRCLEQ_NEXT(mp, mnt_list);
1994-05-17 08:21:49 +04:00
continue;
1998-03-01 05:20:01 +03:00
}
1994-05-17 08:21:49 +04:00
savebp = bp;
again:
1998-03-01 05:20:01 +03:00
simple_lock(&mntvnode_slock);
TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1994-05-17 08:21:49 +04:00
/*
* Check that the vp is still associated with
* this filesystem. RACE: could have been
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
1998-03-01 05:20:01 +03:00
simple_unlock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
if (kinfo_vdebug)
1996-10-13 06:32:29 +04:00
printf("kinfo: vp changed\n");
1994-05-17 08:21:49 +04:00
bp = savebp;
goto again;
}
if (bp + VPTRSZ + VNODESZ > ewhere) {
1998-03-01 05:20:01 +03:00
simple_unlock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
*sizep = bp - where;
return (ENOMEM);
}
1998-03-01 05:20:01 +03:00
simple_unlock(&mntvnode_slock);
if ((error = copyout((void *)&vp, bp, VPTRSZ)) ||
(error = copyout((void *)vp, bp + VPTRSZ, VNODESZ)))
1994-05-17 08:21:49 +04:00
return (error);
bp += VPTRSZ + VNODESZ;
1998-03-01 05:20:01 +03:00
simple_lock(&mntvnode_slock);
1994-05-17 08:21:49 +04:00
}
1998-03-01 05:20:01 +03:00
simple_unlock(&mntvnode_slock);
simple_lock(&mountlist_slock);
nmp = CIRCLEQ_NEXT(mp, mnt_list);
1994-05-17 08:21:49 +04:00
vfs_unbusy(mp);
}
1998-03-01 05:20:01 +03:00
simple_unlock(&mountlist_slock);
1994-05-17 08:21:49 +04:00
*sizep = bp - where;
return (0);
}
1994-06-08 15:28:29 +04:00
/*
* Check to see if a filesystem is mounted on a block device.
*/
int
2005-06-06 03:47:48 +04:00
vfs_mountedon(struct vnode *vp)
1994-06-08 15:28:29 +04:00
{
1998-03-01 05:20:01 +03:00
struct vnode *vq;
int error = 0;
1994-06-08 15:28:29 +04:00
if (vp->v_type != VBLK)
return ENOTBLK;
if (vp->v_specmountpoint != NULL)
1994-06-08 15:28:29 +04:00
return (EBUSY);
if (vp->v_flag & VALIASED) {
1998-03-01 05:20:01 +03:00
simple_lock(&spechash_slock);
1994-06-08 15:28:29 +04:00
for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
vq->v_type != vp->v_type)
continue;
if (vq->v_specmountpoint != NULL) {
1998-03-01 05:20:01 +03:00
error = EBUSY;
break;
}
1994-06-08 15:28:29 +04:00
}
1998-03-01 05:20:01 +03:00
simple_unlock(&spechash_slock);
1994-06-08 15:28:29 +04:00
}
1998-03-01 05:20:01 +03:00
return (error);
1994-06-08 15:28:29 +04:00
}
/*
* Unmount all file systems.
* We traverse the list in reverse order under the assumption that doing so
* will avoid needing to worry about dependencies.
*/
void
2005-12-11 15:16:03 +03:00
vfs_unmountall(struct lwp *l)
{
2000-03-30 13:27:11 +04:00
struct mount *mp, *nmp;
int allerror, error;
printf("unmounting file systems...");
for (allerror = 0,
mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
printf("\nunmounting %s (%s)...",
1996-10-11 02:46:11 +04:00
mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
/*
* XXX Freeze syncer. Must do this before locking the
* mount point. See dounmount() for details.
*/
2007-02-10 00:55:00 +03:00
mutex_enter(&syncer_mutex);
if (vfs_busy(mp, 0, 0)) {
2007-02-10 00:55:00 +03:00
mutex_exit(&syncer_mutex);
continue;
}
2005-12-11 15:16:03 +03:00
if ((error = dounmount(mp, MNT_FORCE, l)) != 0) {
1996-10-13 06:32:29 +04:00
printf("unmount of %s failed with error %d\n",
mp->mnt_stat.f_mntonname, error);
allerror = 1;
}
}
printf(" done\n");
if (allerror)
1996-10-13 06:32:29 +04:00
printf("WARNING: some file systems would not unmount\n");
}
extern struct simplelock bqueue_slock; /* XXX */
/*
* Sync and unmount file systems before shutting down.
*/
void
2005-06-06 03:47:48 +04:00
vfs_shutdown(void)
{
struct lwp *l;
/* XXX we're certainly not running in lwp0's context! */
l = curlwp;
if (l == NULL)
l = &lwp0;
printf("syncing disks... ");
/* remove user process from run queue */
suspendsched();
(void) spl0();
/* avoid coming back this way again if we panic. */
doing_shutdown = 1;
2003-01-18 13:06:22 +03:00
sys_sync(l, NULL, NULL);
/* Wait for sync to finish. */
if (buf_syncwait() != 0) {
#if defined(DDB) && defined(DEBUG_HALT_BUSY)
Debugger();
#endif
printf("giving up\n");
return;
} else
1996-10-13 06:32:29 +04:00
printf("done\n");
/*
* If we've panic'd, don't make the situation potentially
* worse by unmounting the file systems.
*/
if (panicstr != NULL)
return;
/* Release inodes held by texts before update. */
#ifdef notdef
vnshutdown();
#endif
/* Unmount file systems. */
2005-12-11 15:16:03 +03:00
vfs_unmountall(l);
}
/*
* Mount the root file system. If the operator didn't specify a
* file system to use, try all possible file systems until one
* succeeds.
*/
int
2005-06-06 03:47:48 +04:00
vfs_mountroot(void)
{
struct vfsops *v;
int error = ENODEV;
if (root_device == NULL)
panic("vfs_mountroot: root device unknown");
switch (device_class(root_device)) {
case DV_IFNET:
if (rootdev != NODEV)
panic("vfs_mountroot: rootdev set for DV_IFNET "
"(0x%08x -> %d,%d)", rootdev,
major(rootdev), minor(rootdev));
break;
case DV_DISK:
if (rootdev == NODEV)
panic("vfs_mountroot: rootdev not set for DV_DISK");
if (bdevvp(rootdev, &rootvp))
panic("vfs_mountroot: can't get vnode for rootdev");
2005-12-11 15:16:03 +03:00
error = VOP_OPEN(rootvp, FREAD, FSCRED, curlwp);
if (error) {
printf("vfs_mountroot: can't open root device\n");
return (error);
}
break;
default:
printf("%s: inappropriate for root file system\n",
root_device->dv_xname);
return (ENODEV);
}
/*
* If user specified a file system, use it.
*/
if (mountroot != NULL) {
error = (*mountroot)();
goto done;
}
/*
* Try each file system currently configured into the kernel.
*/
LIST_FOREACH(v, &vfs_list, vfs_list) {
if (v->vfs_mountroot == NULL)
continue;
#ifdef DEBUG
aprint_normal("mountroot: trying %s...\n", v->vfs_name);
#endif
error = (*v->vfs_mountroot)();
if (!error) {
aprint_normal("root file system type: %s\n",
v->vfs_name);
break;
}
}
if (v == NULL) {
printf("no file system for %s", root_device->dv_xname);
if (device_class(root_device) == DV_DISK)
printf(" (dev 0x%x)", rootdev);
printf("\n");
error = EFTYPE;
}
done:
if (error && device_class(root_device) == DV_DISK) {
2005-12-11 15:16:03 +03:00
VOP_CLOSE(rootvp, FREAD, FSCRED, curlwp);
vrele(rootvp);
}
return (error);
}
/*
 * mount_specific_key_create --
 *	Create a key for subsystem mount-specific data.
 */
int
mount_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{
	int error;

	error = specificdata_key_create(mount_specificdata_domain, keyp,
	    dtor);
	return error;
}
/*
 * mount_specific_key_delete --
 *	Delete a key for subsystem mount-specific data.
 */
void
mount_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(mount_specificdata_domain, key);
}
/*
 * mount_initspecific --
 *	Initialize a mount's specificdata container.
 */
void
mount_initspecific(struct mount *mp)
{
	int rv;

	rv = specificdata_init(mount_specificdata_domain,
	    &mp->mnt_specdataref);
	KASSERT(rv == 0);
}
/*
 * mount_finispecific --
 *	Finalize a mount's specificdata container.
 */
void
mount_finispecific(struct mount *mp)
{

	specificdata_fini(mount_specificdata_domain, &mp->mnt_specdataref);
}
/*
 * mount_getspecific --
 *	Return mount-specific data corresponding to the specified key.
 */
void *
mount_getspecific(struct mount *mp, specificdata_key_t key)
{

	return specificdata_getspecific(mount_specificdata_domain,
	    &mp->mnt_specdataref, key);
}
/*
 * mount_setspecific --
 *	Set mount-specific data corresponding to the specified key.
 */
void
mount_setspecific(struct mount *mp, specificdata_key_t key, void *data)
{

	specificdata_setspecific(mount_specificdata_domain,
	    &mp->mnt_specdataref, key, data);
}