NetBSD/sys/nfs/nfs_node.c
jdolecek 89015c4648 Add a new VFS op routine, vfs_done, and call it on filesystem detach
in vfs_detach(). vfs_done may free the filesystem's global resources,
typically those allocated in the respective filesystem's init function.
This is needed so that filesystems loaded as LKMs have a chance to
clean up after themselves before unloading. It fixes random panics
seen when an LKM for a filesystem using pools was loaded and unloaded
several times.

For each leaf filesystem, add an appropriate vfs_done routine.
2000-03-16 18:08:17 +00:00
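As a rough illustration of the pattern this commit describes: a filesystem registers an optional "done" hook alongside its init hook, and the generic detach path invokes it so global state can be torn down before an LKM is unloaded. This is only a sketch; the names example_vfsops and example_vfs_detach are hypothetical and not the actual sys/mount.h definitions.

#include <stddef.h>

/*
 * Minimal sketch only; the real vfsops layout lives in sys/mount.h.
 * The point: vfs_detach() gives a leaf filesystem one call-out in which
 * to undo whatever its init routine set up (pools, hash tables, ...).
 */
struct example_vfsops {
	void	(*vfs_init)(void);	/* allocate global state */
	void	(*vfs_done)(void);	/* release it again on detach */
};

static void
example_vfs_detach(struct example_vfsops *v)
{

	/*
	 * Without this hook, pools allocated at init time would leak (or be
	 * re-initialized) each time the LKM was unloaded and reloaded.
	 */
	if (v->vfs_done != NULL)
		(*v->vfs_done)();
}

In this file, nfs_nhdone() plays that role for NFS: it undoes the hashinit() and pool_init() calls made in nfs_nhinit().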


/* $NetBSD: nfs_node.c,v 1.31 2000/03/16 18:08:29 jdolecek Exp $ */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>

LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl;
u_long nfsnodehash;
struct lock nfs_hashlock;

struct pool nfs_node_pool;	/* memory pool for nfs nodes */
struct pool nfs_vattr_pool;	/* memory pool for nfs vattrs */

#define TRUE	1
#define	FALSE	0

/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
void
nfs_nhinit()
{

	nfsnodehashtbl = hashinit(desiredvnodes, M_NFSNODE, M_WAITOK, &nfsnodehash);
	lockinit(&nfs_hashlock, PINOD, "nfs_hashlock", 0, 0);
	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_NFSNODE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_NFSNODE);
}

/*
 * Free resources previously allocated in nfs_nhinit().
 */
void
nfs_nhdone()
{
	hashdone(nfsnodehashtbl, M_NFSNODE);
	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
}

/*
 * Compute an entry in the NFS hash table structure
 */
u_long
nfs_hash(fhp, fhsize)
	register nfsfh_t *fhp;
	int fhsize;
{
	register u_char *fhpp;
	register u_long fhsum;
	register int i;

	fhpp = &fhp->fh_bytes[0];
	fhsum = 0;
	for (i = 0; i < fhsize; i++)
		fhsum += *fhpp++;
	return (fhsum);
}

/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
nfs_nget(mntp, fhp, fhsize, npp)
	struct mount *mntp;
	register nfsfh_t *fhp;
	int fhsize;
	struct nfsnode **npp;
{
	register struct nfsnode *np;
	struct nfsnodehashhead *nhpp;
	register struct vnode *vp;
	extern int (**nfsv2_vnodeop_p)__P((void *));
	struct vnode *nvp;
	int error;

	nhpp = NFSNOHASH(nfs_hash(fhp, fhsize));
loop:
	for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
		if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize ||
		    memcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize))
			continue;
		vp = NFSTOV(np);
		if (vget(vp, LK_EXCLUSIVE))
			goto loop;
		*npp = np;
		return(0);
	}
	if (lockmgr(&nfs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0))
		goto loop;
	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &nvp);
	if (error) {
		*npp = 0;
		lockmgr(&nfs_hashlock, LK_RELEASE, 0);
		return (error);
	}
	nvp->v_vnlock = 0;	/* XXX At least until we do locking */
	vp = nvp;
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset((caddr_t)np, 0, sizeof *np);
	vp->v_data = np;
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */
	LIST_INSERT_HEAD(nhpp, np, n_hash);
	if (fhsize > NFS_SMALLFH) {
		MALLOC(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	memcpy((caddr_t)np->n_fhp, (caddr_t)fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);
	memset(np->n_vattr, 0, sizeof (struct vattr));
	lockmgr(&nfs_hashlock, LK_RELEASE, 0);
	*npp = np;
	return (0);
}

int
nfs_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	register struct nfsnode *np;
	register struct sillyrename *sp;
	struct proc *p = ap->a_p;
	extern int prtactive;

	np = VTONFS(ap->a_vp);
	if (prtactive && ap->a_vp->v_usecount != 0)
		vprint("nfs_inactive: pushing active", ap->a_vp);
	if (ap->a_vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = (struct sillyrename *)0;
	if (sp) {
		/*
		 * If the usecount is greater than zero, then we are
		 * being inactivated by a forcible unmount and do not
		 * have to get our own reference. In the normal case,
		 * we need a reference to keep the vnode from being
		 * recycled by getnewvnode while we do the I/O
		 * associated with discarding the buffers.
		 */
		if (ap->a_vp->v_usecount > 0)
			(void) nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, p, 1);
		else if (vget(ap->a_vp, 0))
			panic("nfs_inactive: lost vnode");
		else {
			(void) nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, p, 1);
			vrele(ap->a_vp);
		}
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		nfs_removeit(sp);
		crfree(sp->s_cred);
		vrele(sp->s_dvp);
		FREE((caddr_t)sp, M_NFSREQ);
	}
	np->n_flag &= (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NQNFSEVICTED |
		NQNFSNONCACHE | NQNFSWRITE);
	VOP_UNLOCK(ap->a_vp, 0);
	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);
	register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	extern int prtactive;

	if (prtactive && vp->v_usecount != 0)
		vprint("nfs_reclaim: pushing active", vp);

	LIST_REMOVE(np, n_hash);

	/*
	 * For nqnfs, take it off the timer queue as required.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) && np->n_timer.cqe_next != 0) {
		CIRCLEQ_REMOVE(&nmp->nm_timerhead, np, n_timer);
	}

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache) {
		nfs_invaldircache(vp, 1);
		FREE(np->n_dircache, M_NFSDIROFF);
	}
	if (np->n_fhsize > NFS_SMALLFH) {
		FREE((caddr_t)np->n_fhp, M_NFSBIGFH);
	}
	pool_put(&nfs_vattr_pool, np->n_vattr);

	cache_purge(vp);
	pool_put(&nfs_node_pool, vp->v_data);
	vp->v_data = (void *)0;
	return (0);
}