Introduce layer library in genfs. This set of files abstracts most of
the functionality of nullfs. The latter is now just a mount & unmount
routine, and a few tables. umapfs borrows most of this infrastructure.

Both fs's are now nfs-exportable.

All layered fs's share a common format for their private mount & private
vnode structs (which a particular fs can extend).

Also add genfs_eopnotsupp_rele(), a vnode op which will vrele/vput
operand vnodes appropriately.
Committed by wrstuden on 1999-07-08 01:18:59 +00:00
parent 379a26972f
commit 9866514df5
16 changed files with 2449 additions and 1978 deletions

sys/miscfs/genfs/Makefile

@@ -1,7 +1,7 @@
# $NetBSD: Makefile,v 1.1 1998/06/12 23:23:02 cgd Exp $
# $NetBSD: Makefile,v 1.2 1999/07/08 01:18:59 wrstuden Exp $
INCSDIR= /usr/include/miscfs/genfs
INCS= genfs.h
INCS= genfs.h layer.h
.include <bsd.kinc.mk>

sys/miscfs/genfs/genfs.h

@@ -1,18 +1,23 @@
/* $NetBSD: genfs.h,v 1.8 1998/08/13 09:59:52 kleink Exp $ */
/* $NetBSD: genfs.h,v 1.9 1999/07/08 01:18:59 wrstuden Exp $ */
int genfs_badop __P((void *));
int genfs_nullop __P((void *));
int genfs_enoioctl __P((void *));
int genfs_einval __P((void *));
int genfs_eopnotsupp __P((void *));
int genfs_ebadf __P((void *));
int genfs_nolock __P((void *));
int genfs_noislocked __P((void *));
int genfs_nounlock __P((void *));
int genfs_badop __P((void *));
int genfs_nullop __P((void *));
int genfs_enoioctl __P((void *));
int genfs_enoextops __P((void *));
int genfs_einval __P((void *));
int genfs_eopnotsupp __P((void *));
int genfs_eopnotsupp_rele __P((void *));
int genfs_ebadf __P((void *));
int genfs_nolock __P((void *));
int genfs_noislocked __P((void *));
int genfs_nounlock __P((void *));
int genfs_poll __P((void *));
int genfs_fsync __P((void *));
int genfs_seek __P((void *));
int genfs_abortop __P((void *));
int genfs_revoke __P((void *));
int genfs_lease_check __P((void *));
int genfs_poll __P((void *));
int genfs_fsync __P((void *));
int genfs_seek __P((void *));
int genfs_abortop __P((void *));
int genfs_revoke __P((void *));
int genfs_lease_check __P((void *));
int genfs_lock __P((void *));
int genfs_islocked __P((void *));
int genfs_unlock __P((void *));

sys/miscfs/genfs/genfs_vnops.c

@@ -1,4 +1,4 @@
/* $NetBSD: genfs_vnops.c,v 1.11 1999/03/05 21:09:49 mycroft Exp $ */
/* $NetBSD: genfs_vnops.c,v 1.12 1999/07/08 01:18:59 wrstuden Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -158,6 +158,45 @@ genfs_eopnotsupp(v)
return (EOPNOTSUPP);
}
/*
* Called when an fs doesn't support a particular vop but the vop needs to
* vrele, vput, or vunlock passed in vnodes.
*/
int
genfs_eopnotsupp_rele(v)
void *v;
{
struct vop_generic_args /*
struct vnodeop_desc *a_desc;
/ * other random data follows, presumably * /
} */ *ap = v;
struct vnodeop_desc *desc = ap->a_desc;
struct vnode *vp;
int flags, i, j, offset;
flags = desc->vdesc_flags;
for (i = 0; i < VDESC_MAX_VPS; flags >>=1, i++) {
if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
break; /* stop at end of list */
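/*
 * Editorial note (an assumption about sys/vnode.h, not part of this
 * diff): VDESC_VP0_WILLPUT combines the WILLRELE and WILLUNLOCK bits,
 * so this single mask catches all three dispositions switched on below.
 */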
if ((j = flags & VDESC_VP0_WILLPUT)) {
vp = *VOPARG_OFFSETTO(struct vnode**,offset,ap);
switch (j) {
case VDESC_VP0_WILLPUT:
vput(vp);
break;
case VDESC_VP0_WILLUNLOCK:
VOP_UNLOCK(vp, 0);
break;
case VDESC_VP0_WILLRELE:
vrele(vp);
break;
}
}
}
return (EOPNOTSUPP);
}
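/*
 * Hypothetical sketch (examplefs and its table are illustrative, not
 * part of this commit): an fs lists genfs_eopnotsupp_rele() for
 * unsupported operations that would otherwise leak their callers'
 * vnode references, and plain genfs_eopnotsupp() elsewhere.
 */
struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_default_desc, genfs_eopnotsupp },	/* no side effects */
	{ &vop_rename_desc, genfs_eopnotsupp_rele },	/* rename puts its vps */
	{ &vop_rmdir_desc, genfs_eopnotsupp_rele },	/* rmdir puts dvp, vp */
	{ (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL }
};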
/*ARGSUSED*/
int
genfs_ebadf(v)
@@ -242,12 +281,58 @@ genfs_revoke(v)
return (0);
}
/*
* Lock the node.
*/
int
genfs_lock(v)
void *v;
{
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
}
/*
* Unlock the node.
*/
int
genfs_unlock(v)
void *v;
{
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
&vp->v_interlock));
}
/*
* Return whether or not the node is locked.
*/
int
genfs_islocked(v)
void *v;
{
struct vop_islocked_args /* {
struct vnode *a_vp;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
return (lockstatus(&vp->v_lock));
}
/*
* Stubs to use when there is no locking to be done on the underlying object.
* A minimal shared lock is necessary to ensure that the underlying object
* is not revoked while an operation is in progress. So, an active shared
* count is maintained in an auxiliary vnode lock structure.
*/
int
genfs_nolock(v)
@@ -259,51 +344,6 @@ genfs_nolock(v)
struct proc *a_p;
} */ *ap = v;
#ifdef notyet
/*
* This code cannot be used until all the non-locking filesystems
* (notably NFS) are converted to properly lock and release nodes.
* Also, certain vnode operations change the locking state within
* the operation (create, mknod, remove, link, rename, mkdir, rmdir,
* and symlink). Ideally these operations should not change the
* lock state, but should be changed to let the caller of the
* function unlock them. Otherwise all intermediate vnode layers
* (such as union, umapfs, etc) must catch these functions to do
* the necessary locking at their layer. Note that the inactive
* and lookup operations also change their lock state, but this
* cannot be avoided, so these two operations will always need
* to be handled in intermediate layers.
*/
struct vnode *vp = ap->a_vp;
int vnflags, flags = ap->a_flags;
if (vp->v_vnlock == NULL) {
if ((flags & LK_TYPE_MASK) == LK_DRAIN)
return (0);
MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
M_VNODE, M_WAITOK);
lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
}
switch (flags & LK_TYPE_MASK) {
case LK_DRAIN:
vnflags = LK_DRAIN;
break;
case LK_EXCLUSIVE:
case LK_SHARED:
vnflags = LK_SHARED;
break;
case LK_UPGRADE:
case LK_EXCLUPGRADE:
case LK_DOWNGRADE:
return (0);
case LK_RELEASE:
default:
panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
}
if (flags & LK_INTERLOCK)
vnflags |= LK_INTERLOCK;
return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock));
#else /* for now */
/*
* Since we are not using the lock manager, we must clear
* the interlock here.
@@ -311,43 +351,20 @@ genfs_nolock(v)
if (ap->a_flags & LK_INTERLOCK)
simple_unlock(&ap->a_vp->v_interlock);
return (0);
#endif
}
/*
* Decrement the active use count.
*/
int
genfs_nounlock(v)
void *v;
{
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
if (vp->v_vnlock == NULL)
return (0);
return (lockmgr(vp->v_vnlock, LK_RELEASE, NULL));
return (0);
}
/*
* Return whether or not the node is in use.
*/
int
genfs_noislocked(v)
void *v;
{
struct vop_islocked_args /* {
struct vnode *a_vp;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
if (vp->v_vnlock == NULL)
return (0);
return (lockstatus(vp->v_vnlock));
return (0);
}
/*

sys/miscfs/genfs/layer.h (new file, 167 lines)

@@ -0,0 +1,167 @@
/* $NetBSD: layer.h,v 1.1 1999/07/08 01:18:59 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
* Jan-Simon Pendry.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Id: lofs.h,v 1.8 1992/05/30 10:05:43 jsp Exp
* @(#)null.h 8.2 (Berkeley) 1/21/94
*/
#ifndef _MISCFS_GENFS_LAYER_H_
#define _MISCFS_GENFS_LAYER_H_
struct layer_args {
char *target; /* Target of loopback */
struct export_args export; /* network export info */
};
#ifdef _KERNEL
struct layer_node;
LIST_HEAD(layer_node_hashhead, layer_node);
struct layer_mount {
struct mount *layerm_vfs;
struct vnode *layerm_rootvp; /* Ref to root layer_node */
struct netexport layerm_export; /* export info */
u_int layerm_flags; /* mount point layer flags */
u_int layerm_size; /* size of fs's struct node */
enum vtype layerm_tag; /* vtag of our vnodes */
int /* bypass routine for this mount */
(*layerm_bypass) __P((void *));
int (*layerm_alloc) /* alloc a new layer node */
__P((struct mount *, struct vnode *,
struct vnode **));
int (**layerm_vnodeop_p) /* ops for our nodes */
__P((void *));
struct layer_node_hashhead /* head of hash list for layer_nodes */
*layerm_node_hashtbl;
u_long layerm_node_hash; /* hash mask for hash chain */
struct simplelock layerm_hashlock; /* interlock for hash chain. */
};
#define LAYERFS_MFLAGS 0x00000fff /* reserved layer mount flags */
#define LAYERFS_MBYPASSDEBUG 0x00000001
/*
* A cache of vnode references
*/
struct layer_node {
LIST_ENTRY(layer_node) layer_hash; /* Hash list */
struct vnode *layer_lowervp; /* VREFed once */
struct vnode *layer_vnode; /* Back pointer */
unsigned int layer_flags; /* locking, etc. */
};
#define LAYERFS_RESFLAGS 0x00000fff /* flags reserved for layerfs */
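/*
 * Hypothetical sketch of the extension scheme described above
 * (examplefs and its fields are illustrative): a concrete fs embeds
 * the generic structs first, so MOUNTTOLAYERMOUNT()/VTOLAYER() casts
 * still work, and sets layerm_size to sizeof its node struct so
 * layer_node_alloc() (in layer_subr.c) allocates the extended size.
 */
struct examplefs_mount {
	struct layer_mount exm_lm;	/* generic part, must come first */
	int exm_private;		/* fs-specific state follows */
};

struct examplefs_node {
	struct layer_node exn_ln;	/* generic part, must come first */
	int exn_private;		/* fs-specific state follows */
};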
/*
* The following macros handle upperfs-specific locking. They are needed
* when the lowerfs does not export a struct lock for locking use by the
* upper layers. These macros are intended for adjusting the upperfs
* struct lock to reflect changes in the underlying vnode's lock state.
*/
#define LAYERFS_UPPERLOCK(v, f, r) do { \
if ((v)->v_vnlock == NULL) \
r = lockmgr(&(v)->v_lock, (f), &(v)->v_interlock); \
else \
r = 0; \
} while (0)
#define LAYERFS_UPPERUNLOCK(v, f, r) do { \
if ((v)->v_vnlock == NULL) \
r = lockmgr(&(v)->v_lock, (f) | LK_RELEASE, &(v)->v_interlock); \
else \
r = 0; \
} while (0)
#define LAYERFS_UPPERISLOCKED(v, r) do { \
if ((v)->v_vnlock == NULL) \
r = lockstatus(&(v)->v_lock); \
else \
r = -1; \
} while (0)
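/*
 * Hypothetical usage sketch (cf. layer_bypass and layer_lookup in
 * layer_vnops.c): a layer vnode op adjusts only the upper layer's lock
 * state. When the lower fs exports v_vnlock, the shared struct lock
 * already covers the whole stack and these macros do nothing.
 */
int r;

LAYERFS_UPPERLOCK(vp, LK_EXCLUSIVE, r);
if (r == 0) {
	/* ... layer-private work on VTOLAYER(vp) ... */
	LAYERFS_UPPERUNLOCK(vp, 0, r);
}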
#define LAYERFS_DO_BYPASS(vp, ap) \
(*MOUNTTOLAYERMOUNT((vp)->v_mount)->layerm_bypass)((ap))
extern int layer_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp));
extern struct vnode *layer_checkvp __P((struct vnode *vp, char *fil, int lno));
#define MOUNTTOLAYERMOUNT(mp) ((struct layer_mount *)((mp)->mnt_data))
#define VTOLAYER(vp) ((struct layer_node *)(vp)->v_data)
#define LAYERTOV(xp) ((xp)->layer_vnode)
#ifdef LAYERFS_DIAGNOSTIC
#define LAYERVPTOLOWERVP(vp) layer_checkvp((vp), __FILE__, __LINE__)
#else
#define LAYERVPTOLOWERVP(vp) (VTOLAYER(vp)->layer_lowervp)
#endif
#endif /* _KERNEL */
#endif /* _MISCFS_GENFS_LAYER_H_ */

sys/miscfs/genfs/layer_extern.h (new file)

@@ -0,0 +1,118 @@
/* $NetBSD: layer_extern.h,v 1.1 1999/07/08 01:19:00 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
* Jan-Simon Pendry.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
/*
* Routines defined by layerfs
*/
/* misc routines in layer_subr.c */
void layerfs_init __P((void));
int layer_node_alloc __P((struct mount *, struct vnode *, struct vnode **));
int layer_node_create __P((struct mount *, struct vnode *, struct vnode **));
struct vnode *
layer_node_find __P((struct mount *, struct vnode *));
#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */
#define LAYER_NHASH(lmp, vp) \
(&((lmp)->layerm_node_hashtbl[(((u_long)vp)>>LOG2_SIZEVNODE) & \
(lmp)->layerm_node_hash]))
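/*
 * Hypothetical sketch of the setup a concrete fs's mount routine is
 * expected to do before using LAYER_NHASH (assumes NetBSD's
 * four-argument hashinit() of this era; NLAYERNODECACHE is defined
 * in layer_subr.c):
 */
lmp->layerm_node_hashtbl = hashinit(NLAYERNODECACHE, M_CACHE, M_WAITOK,
	&lmp->layerm_node_hash);
simple_lock_init(&lmp->layerm_hashlock);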
/* vfs routines */
int layerfs_start __P((struct mount *, int, struct proc *));
int layerfs_root __P((struct mount *, struct vnode **));
int layerfs_quotactl __P((struct mount *, int, uid_t, caddr_t,
struct proc *));
int layerfs_statfs __P((struct mount *, struct statfs *, struct proc *));
int layerfs_sync __P((struct mount *, int, struct ucred *, struct proc *));
int layerfs_vget __P((struct mount *, ino_t, struct vnode **));
int layerfs_fhtovp __P((struct mount *, struct fid *, struct vnode **));
int layerfs_checkexp __P((struct mount *, struct mbuf *, int *,
struct ucred **));
int layerfs_vptofh __P((struct vnode *, struct fid *));
int layerfs_sysctl __P((int *, u_int, void *, size_t *, void *, size_t,
struct proc *));
/* VOP routines */
int layer_bypass __P((void *));
int layer_getattr __P((void *));
int layer_inactive __P((void *));
int layer_reclaim __P((void *));
int layer_print __P((void *));
int layer_strategy __P((void *));
int layer_bwrite __P((void *));
int layer_bmap __P((void *));
int layer_lock __P((void *));
int layer_unlock __P((void *));
int layer_islocked __P((void *));
int layer_fsync __P((void *));
int layer_lookup __P((void *));
int layer_setattr __P((void *));
int layer_access __P((void *));
int layer_open __P((void *));

sys/miscfs/genfs/layer_subr.c (new file)

@@ -0,0 +1,386 @@
/* $NetBSD: layer_subr.c,v 1.1 1999/07/08 01:19:00 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
* Jan-Simon Pendry.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Id: lofs_subr.c,v 1.11 1992/05/30 10:05:43 jsp Exp
* @(#)null_subr.c 8.7 (Berkeley) 5/14/95
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#define NLAYERNODECACHE 16
/*
* layer cache:
* Each cache entry holds a reference to the lower vnode
* along with a pointer to the alias vnode. When an
* entry is added the lower vnode is VREF'd. When the
* alias is removed the lower vnode is vrele'd.
*/
/*
* Initialise cache headers
*/
void
layerfs_init()
{
#ifdef LAYERFS_DIAGNOSTIC
printf("layerfs_init\n"); /* printed during system boot */
#endif
}
/*
* Return a locked, VREF'ed alias for the lower vnode if one already exists,
* else NULL.
*/
struct vnode *
layer_node_find(mp, lowervp)
struct mount *mp;
struct vnode *lowervp;
{
struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
struct layer_node_hashhead *hd;
struct layer_node *a;
struct vnode *vp;
/*
* Find hash base, and then search the (two-way) linked
* list looking for a layer_node structure which is referencing
* the lower vnode. If found, increment the layer_node
* reference count (but NOT the lower vnode's VREF counter)
* and return the vnode locked.
*/
hd = LAYER_NHASH(lmp, lowervp);
loop:
simple_lock(&lmp->layerm_hashlock);
for (a = hd->lh_first; a != 0; a = a->layer_hash.le_next) {
if (a->layer_lowervp == lowervp && LAYERTOV(a)->v_mount == mp) {
vp = LAYERTOV(a);
simple_unlock(&lmp->layerm_hashlock);
/*
* We must be careful here as the fact the lower
* vnode is locked will imply vp is locked unless
* someone has decided to start vclean'ing either
* vp or lowervp.
*
* So we try for an exclusive, recursive lock
* on the upper vnode. If it fails, vcleaning
* is in progress (so when we try again, we'll
* fail). If it succeeds, we now have double
* locked the bottom node. So we do an explicit
* VOP_UNLOCK on it to keep the counts right. Note
* that we will end up with the upper node and
* the lower node locked once.
*/
if (vget(vp, LK_EXCLUSIVE | LK_CANRECURSE)) {
printf ("layer_node_find: vget failed.\n");
goto loop;
};
VOP_UNLOCK(lowervp, 0);
return (vp);
}
}
simple_unlock(&lmp->layerm_hashlock);
return NULL;
}
/*
* Make a new layer_node node.
* Vp is the alias vnode, lowervp is the lower vnode.
* Maintain a reference to lowervp.
*/
int
layer_node_alloc(mp, lowervp, vpp)
struct mount *mp;
struct vnode *lowervp;
struct vnode **vpp;
{
struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
struct layer_node_hashhead *hd;
struct layer_node *xp;
struct vnode *vp, *nvp;
int error;
extern int (**dead_vnodeop_p) __P((void *));
if ((error = getnewvnode(lmp->layerm_tag, mp, lmp->layerm_vnodeop_p,
&vp)) != 0)
return (error);
vp->v_type = lowervp->v_type;
MALLOC(xp, struct layer_node *, lmp->layerm_size, M_TEMP, M_WAITOK);
if (vp->v_type == VBLK || vp->v_type == VCHR) {
MALLOC(vp->v_specinfo, struct specinfo *,
sizeof(struct specinfo), M_VNODE, M_WAITOK);
vp->v_rdev = lowervp->v_rdev;
}
vp->v_data = xp;
xp->layer_vnode = vp;
xp->layer_lowervp = lowervp;
xp->layer_flags = 0;
/*
* Before we insert our new node onto the hash chains,
* check to see if someone else has beaten us to it.
* (We could have slept in MALLOC.)
*/
if ((nvp = layer_node_find(mp, lowervp)) != NULL) {
*vpp = nvp;
/* free the substructures we've allocated. */
FREE(xp, M_TEMP);
if (vp->v_type == VBLK || vp->v_type == VCHR)
FREE(vp->v_specinfo, M_VNODE);
vp->v_type = VBAD; /* node is discarded */
vp->v_op = dead_vnodeop_p; /* so ops will still work */
vrele(vp); /* get rid of it. */
return (0);
}
simple_lock(&lmp->layerm_hashlock);
/*
* Now lock the new node. We rely on the fact that we were passed
* a locked vnode. If the lower node is exporting a struct lock
* (v_vnlock != NULL) then we just set the upper v_vnlock to the
* lower one, and both are now locked. If the lower node is exporting
* NULL, then we copy that up and manually lock the upper node.
*
* LAYERFS_UPPERLOCK already has the test, so we use it after copying
* up the v_vnlock from below.
*/
vp->v_vnlock = lowervp->v_vnlock;
LAYERFS_UPPERLOCK(vp, LK_EXCLUSIVE, error);
if (error) {
/*
* How did we get a locking error? The node just came off
* of the free list, and we're the only routine which
* knows it's there...
*/
vp->v_vnlock = &vp->v_lock;
*vpp = NULL;
/* free the substructures we've allocated. */
FREE(xp, M_TEMP);
if (vp->v_type == VBLK || vp->v_type == VCHR)
FREE(vp->v_specinfo, M_VNODE);
vp->v_type = VBAD; /* node is discarded */
vp->v_op = dead_vnodeop_p; /* so ops will still work */
vrele(vp); /* get rid of it. */
return (error);
}
/*
* NetBSD used to do an inlined checkalias here. We do not, as
* we never flag device nodes as being aliased. The lowervp
* node will, when appropriate, be flagged as an alias.
*/
*vpp = vp;
VREF(lowervp); /* Take into account reference held in layer_node */
hd = LAYER_NHASH(lmp, lowervp);
LIST_INSERT_HEAD(hd, xp, layer_hash);
simple_unlock(&lmp->layerm_hashlock);
return (0);
}
/*
* Try to find an existing layer_node vnode referring
* to the lower vnode; otherwise make a new layer_node vnode which
* contains a reference to the lower vnode.
*
* >>> we assume that the lower node is already locked upon entry, so we
* propagate the lock state to the upper node <<<
*/
int
layer_node_create(mp, lowervp, newvpp)
struct mount *mp;
struct vnode *lowervp;
struct vnode **newvpp;
{
struct vnode *aliasvp;
struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
if ((aliasvp = layer_node_find(mp, lowervp)) != NULL) {
/*
* layer_node_find has taken another reference
* to the alias vnode and moved the lock holding to
* aliasvp
*/
#ifdef LAYERFS_DIAGNOSTIC
vprint("layer_node_create: exists", aliasvp);
#endif
} else {
int error;
/*
* Get new vnode.
*/
#ifdef LAYERFS_DIAGNOSTIC
printf("layer_node_create: create new alias vnode\n");
#endif
/*
* Make new vnode reference the layer_node.
*/
if ((error = (lmp->layerm_alloc)(mp, lowervp, &aliasvp)) != 0)
return error;
/*
* aliasvp is already VREF'd by getnewvnode()
*/
}
/*
* Now that we have VREF'd the upper vnode, release the reference
* to the lower node. The existence of the layer_node retains one
* reference to the lower node.
*/
vrele(lowervp);
#ifdef DIAGNOSTIC
if (lowervp->v_usecount < 1) {
/* Should never happen... */
vprint("layer_node_create: alias", aliasvp);
vprint("layer_node_create: lower", lowervp);
panic("layer_node_create: lower has 0 usecount.");
};
#endif
#ifdef LAYERFS_DIAGNOSTIC
vprint("layer_node_create: alias", aliasvp);
#endif
*newvpp = aliasvp;
return (0);
}
struct vnode *
layer_checkvp(vp, fil, lno)
struct vnode *vp;
char *fil;
int lno;
{
struct layer_node *a = VTOLAYER(vp);
#ifdef notyet
/*
* Can't do this check because vop_reclaim runs
* with a funny vop vector.
*
* WRS - no it doesn't...
*/
if (vp->v_op != layer_vnodeop_p) {
printf ("layer_checkvp: on non-layer-node\n");
#ifdef notyet
while (layer_checkvp_barrier) /*WAIT*/ ;
#endif
panic("layer_checkvp");
};
#endif
if (a->layer_lowervp == NULL) {
/* Should never happen */
int i; u_long *p;
printf("vp = %p, ZERO ptr\n", vp);
for (p = (u_long *) a, i = 0; i < 8; i++)
printf(" %lx", p[i]);
printf("\n");
/* wait for debugger */
panic("layer_checkvp");
}
if (a->layer_lowervp->v_usecount < 1) {
int i; u_long *p;
printf("vp = %p, unref'ed lowervp\n", vp);
for (p = (u_long *) a, i = 0; i < 8; i++)
printf(" %lx", p[i]);
printf("\n");
/* wait for debugger */
panic ("layer with unref'ed lowervp");
};
#ifdef notnow
printf("layer %p/%d -> %p/%d [%s, %d]\n",
LAYERTOV(a), LAYERTOV(a)->v_usecount,
a->layer_lowervp, a->layer_lowervp->v_usecount,
fil, lno);
#endif
return a->layer_lowervp;
}

sys/miscfs/genfs/layer_vfsops.c (new file)

@@ -0,0 +1,285 @@
/* $NetBSD: layer_vfsops.c,v 1.1 1999/07/08 01:19:01 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
* Jan-Simon Pendry.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Id: lofs_vfsops.c,v 1.9 1992/05/30 10:26:24 jsp Exp
* from: @(#)lofs_vfsops.c 1.2 (Berkeley) 6/18/92
* @(#)null_vfsops.c 8.7 (Berkeley) 5/14/95
*/
/*
* generic layer vfs ops.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
/*
* VFS start. Nothing needed here - the start routine
* on the underlying filesystem will have been called
* when that filesystem was mounted.
*/
int
layerfs_start(mp, flags, p)
struct mount *mp;
int flags;
struct proc *p;
{
return (0);
/* return VFS_START(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, flags, p); */
}
int
layerfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct vnode *vp;
#ifdef LAYERFS_DIAGNOSTIC
printf("layerfs_root(mp = %p, vp = %p->%p)\n", mp,
MOUNTTOLAYERMOUNT(mp)->layerm_rootvp,
LAYERVPTOLOWERVP(MOUNTTOLAYERMOUNT(mp)->layerm_rootvp));
#endif
/*
* Return locked reference to root.
*/
vp = MOUNTTOLAYERMOUNT(mp)->layerm_rootvp;
if (vp == NULL) {
*vpp = NULL;
return (EINVAL);
}
VREF(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
*vpp = vp;
return 0;
}
int
layerfs_quotactl(mp, cmd, uid, arg, p)
struct mount *mp;
int cmd;
uid_t uid;
caddr_t arg;
struct proc *p;
{
return VFS_QUOTACTL(MOUNTTOLAYERMOUNT(mp)->layerm_vfs,
cmd, uid, arg, p);
}
int
layerfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
struct proc *p;
{
int error;
struct statfs mstat;
#ifdef LAYERFS_DIAGNOSTIC
printf("layerfs_statfs(mp = %p, vp = %p->%p)\n", mp,
MOUNTTOLAYERMOUNT(mp)->layerm_rootvp,
LAYERVPTOLOWERVP(MOUNTTOLAYERMOUNT(mp)->layerm_rootvp));
#endif
memset(&mstat, 0, sizeof(mstat));
error = VFS_STATFS(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, &mstat, p);
if (error)
return (error);
/* now copy across the "interesting" information and fake the rest */
sbp->f_type = mstat.f_type;
sbp->f_flags = mstat.f_flags;
sbp->f_bsize = mstat.f_bsize;
sbp->f_iosize = mstat.f_iosize;
sbp->f_blocks = mstat.f_blocks;
sbp->f_bfree = mstat.f_bfree;
sbp->f_bavail = mstat.f_bavail;
sbp->f_files = mstat.f_files;
sbp->f_ffree = mstat.f_ffree;
if (sbp != &mp->mnt_stat) {
memcpy(&sbp->f_fsid, &mp->mnt_stat.f_fsid, sizeof(sbp->f_fsid));
memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
}
strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
return (0);
}
int
layerfs_sync(mp, waitfor, cred, p)
struct mount *mp;
int waitfor;
struct ucred *cred;
struct proc *p;
{
/*
* XXX - Assumes no data cached at layer.
*/
return (0);
}
int
layerfs_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
struct vnode **vpp;
{
int error;
struct vnode *vp;
if ((error = VFS_VGET(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, ino, &vp))) {
*vpp = NULL;
return (error);
}
if ((error = layer_node_create(mp, vp, vpp))) {
vput(vp);
*vpp = NULL;
return (error);
}
return (0);
}
int
layerfs_fhtovp(mp, fidp, vpp)
struct mount *mp;
struct fid *fidp;
struct vnode **vpp;
{
int error;
struct vnode *vp;
if ((error = VFS_FHTOVP(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, fidp, &vp)))
return (error);
if ((error = layer_node_create(mp, vp, vpp))) {
vput(vp);
*vpp = NULL;
return (error);
}
return (0);
}
int
layerfs_checkexp(mp, nam, exflagsp, credanonp)
struct mount *mp;
struct mbuf *nam;
int *exflagsp;
struct ucred**credanonp;
{
struct netcred *np;
struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
/*
* get the export permission structure for this <mp, client> tuple.
*/
if ((np = vfs_export_lookup(mp, &lmp->layerm_export, nam)) == NULL)
return (EACCES);
*exflagsp = np->netc_exflags;
*credanonp = &np->netc_anon;
return (0);
}
int
layerfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
return (VFS_VPTOFH(LAYERVPTOLOWERVP(vp), fhp));
}
int
layerfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
int *name;
u_int namelen;
void *oldp;
size_t *oldlenp;
void *newp;
size_t newlen;
struct proc *p;
{
return (EOPNOTSUPP);
}

sys/miscfs/genfs/layer_vnops.c (new file)

@@ -0,0 +1,880 @@
/* $NetBSD: layer_vnops.c,v 1.1 1999/07/08 01:19:01 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* John Heidemann of the UCLA Ficus project.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)null_vnops.c 8.6 (Berkeley) 5/27/95
*
* Ancestors:
* @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92
* $Id: layer_vnops.c,v 1.1 1999/07/08 01:19:01 wrstuden Exp $
* ...and...
* @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
*/
/*
* Null Layer vnode routines.
*
* (See mount_null(8) for more information.)
*
* The layer.h, layer_extern.h, layer_vfs.c, and layer_vnops.c files provide
* the core implementation of the null file system and most other stacked
* fs's. The description below refers to the null file system, but the
* services provided by the layer* files are useful for all layered fs's.
*
* The null layer duplicates a portion of the file system
* name space under a new name. In this respect, it is
* similar to the loopback file system. It differs from
* the loopback fs in two respects: it is implemented using
* stackable layer techniques, and its null-nodes stack above
* all lower-layer vnodes, not just over directory vnodes.
*
* The null layer has two purposes. First, it serves as a demonstration
* of layering by providing a layer which does nothing. (It actually
* does everything the loopback file system does, which is slightly
* more than nothing.) Second, the null layer can serve as a prototype
* layer. Since it provides all necessary layer framework,
* new file system layers can be created very easily by starting
* with a null layer.
*
* The remainder of this comment examines the null layer as a basis
* for constructing new layers.
*
*
* INSTANTIATING NEW NULL LAYERS
*
* New null layers are created with mount_null(8).
* Mount_null(8) takes two arguments, the pathname
* of the lower vfs (target-pn) and the pathname where the null
* layer will appear in the namespace (alias-pn). After
* the null layer is put into place, the contents
* of target-pn subtree will be aliased under alias-pn.
*
* It is conceivable that other overlay filesystems will take different
* parameters. For instance, data migration or access control layers might
* only take one pathname which will serve both as the target-pn and
* alias-pn described above.
*
*
* OPERATION OF A NULL LAYER
*
* The null layer is the minimum file system layer,
* simply bypassing all possible operations to the lower layer
* for processing there. The majority of its activity centers
* on the bypass routine, through which nearly all vnode operations
* pass.
*
* The bypass routine accepts arbitrary vnode operations for
* handling by the lower layer. It begins by examining vnode
* operation arguments and replacing any layered nodes by their
* lower-layer equivalents. It then invokes the operation
* on the lower layer. Finally, it replaces the layered nodes
* in the arguments and, if a vnode is returned by the operation,
* stacks a layered node on top of the returned vnode.
*
* The bypass routine in this file, layer_bypass(), is suitable for use
* by many different layered filesystems. It can be used by multiple
* filesystems simultaneously. Alternatively, a layered fs may provide
* its own bypass routine, in which case layer_bypass() should be used as
* a model. For instance, the main functionality provided by umapfs, the user
* identity mapping file system, is handled by a custom bypass routine.
*
* Typically a layered fs registers its selected bypass routine as the
* default vnode operation in its vnodeopv_entry_desc table. Additionally
* the filesystem must store the bypass entry point in the layerm_bypass
* field of struct layer_mount. All other layer routines in this file will
* use the layerm_bypass routine.
*
* Although the bypass routine handles most operations outright, a number
* of operations are special cased, and handled by the layered fs. One
* group, layer_setattr, layer_getattr, layer_access, layer_open, and
* layer_fsync, perform layer-specific manipulation in addition to calling
* the bypass routine. The other group, vop_lookup, vop_lock, vop_unlock,
* vop_inactive, vop_reclaim, and vop_print, are not bypassed at all.
* Vop_getattr must change the fsid being returned. Vop_lock and
* vop_unlock must handle any locking for the current vnode as well as
* pass the lock request down. Vop_inactive and vop_reclaim are not
* bypassed so that they can handle freeing layer-specific data. Vop_print
* is not bypassed to avoid excessive debugging information.
* Also, certain vnode operations change the locking state within
* the operation (create, mknod, remove, link, rename, mkdir, rmdir,
* and symlink). Ideally these operations should not change the
* lock state, but should be changed to let the caller of the
* function unlock them. Otherwise all intermediate vnode layers
* (such as union, umapfs, etc) must catch these functions to do
* the necessary locking at their layer.
*
*
* INSTANTIATING VNODE STACKS
*
* Mounting associates the null layer with a lower layer,
* in effect stacking two VFSes. Vnode stacks are instead
* created on demand as files are accessed.
*
* The initial mount creates a single vnode stack for the
* root of the new null layer. All other vnode stacks
* are created as a result of vnode operations on
* this or other null vnode stacks.
*
* New vnode stacks come into existence as a result of
* an operation which returns a vnode.
* The bypass routine stacks a null-node above the new
* vnode before returning it to the caller.
*
* For example, imagine mounting a null layer with
* "mount_null /usr/include /dev/layer/null".
* Changing directory to /dev/layer/null will assign
* the root null-node (which was created when the null layer was mounted).
* Now consider opening "sys". A vop_lookup would be
* done on the root null-node. This operation would bypass through
* to the lower layer which would return a vnode representing
* the UFS "sys". layer_bypass then builds a null-node
* aliasing the UFS "sys" and returns this to the caller.
* Later operations on the null-node "sys" will repeat this
* process when constructing other vnode stacks.
*
*
* CREATING OTHER FILE SYSTEM LAYERS
*
* One of the easiest ways to construct new file system layers is to make
* a copy of the null layer, rename all files and variables, and
* then begin modifying the copy. Sed can be used to easily rename
* all variables.
*
* The umap layer is an example of a layer descended from the
* null layer.
*
*
* INVOKING OPERATIONS ON LOWER LAYERS
*
* There are two techniques to invoke operations on a lower layer
* when the operation cannot be completely bypassed. Each method
* is appropriate in different situations. In both cases,
* it is the responsibility of the aliasing layer to make
* the operation arguments "correct" for the lower layer
* by mapping any vnode arguments to the lower layer.
*
* The first approach is to call the aliasing layer's bypass routine.
* This method is most suitable when you wish to invoke the operation
* currently being handled on the lower layer. It has the advantage
* that the bypass routine already must do argument mapping.
* An example of this is layer_getattr below.
*
* A second approach is to directly invoke vnode operations on
* the lower layer with the VOP_OPERATIONNAME interface.
* The advantage of this method is that it is easy to invoke
* arbitrary operations on the lower layer. The disadvantage
* is that vnode arguments must be manually mapped.
*
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#include <miscfs/genfs/genfs.h>
/*
* This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
* routine by John Heidemann.
* The new element for this version is that the whole nullfs
* system gained the concept of locks on the lower node, and locks on
* our nodes. When returning from a call to the lower layer, we may
* need to update lock state ONLY on our layer. The LAYERFS_UPPER*LOCK()
* macros provide this functionality.
* The 10-Apr-92 version was optimized for speed, throwing away some
* safety checks. It should still always work, but it's not as
* robust to programmer errors.
* Define SAFETY to include some error checking code.
*
* In general, we map all vnodes going down and unmap them on the way back.
*
* Also, some BSD vnode operations have the side effect of vrele'ing
* their arguments. With stacking, the reference counts are held
* by the upper node, not the lower one, so we must handle these
* side-effects here. This is not of concern in Sun-derived systems
* since there are no such side-effects.
*
* New for the 08-June-99 version: we also handle operations which unlock
* the passed-in node (typically they vput the node).
*
* This makes the following assumptions:
* - only one returned vpp
* - no INOUT vpp's (Sun's vop_open has one of these)
* - the vnode operation vector of the first vnode should be used
* to determine what implementation of the op should be invoked
* - all mapped vnodes are of our vnode-type (NEEDSWORK:
* problems on rmdir'ing mount points and renaming?)
*/
int
layer_bypass(v)
void *v;
{
struct vop_generic_args /* {
struct vnodeop_desc *a_desc;
<other random data follows, presumably>
} */ *ap = v;
int (**our_vnodeop_p) __P((void *));
register struct vnode **this_vp_p;
int error, error1;
struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
struct vnode **vps_p[VDESC_MAX_VPS];
struct vnode ***vppp;
struct vnodeop_desc *descp = ap->a_desc;
int reles, i, flags;
#ifdef SAFETY
/*
* We require at least one vp.
*/
if (descp->vdesc_vp_offsets == NULL ||
descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
panic ("layer_bypass: no vp's in map.\n");
#endif
vps_p[0] = VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[0],ap);
vp0 = *vps_p[0];
flags = MOUNTTOLAYERMOUNT(vp0->v_mount)->layerm_flags;
our_vnodeop_p = vp0->v_op;
if (flags & LAYERFS_MBYPASSDEBUG)
printf ("layer_bypass: %s\n", descp->vdesc_name);
/*
* Map the vnodes going in.
* Later, we'll invoke the operation based on
* the first mapped vnode's operation vector.
*/
reles = descp->vdesc_flags;
for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
break; /* bail out at end of list */
vps_p[i] = this_vp_p =
VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
/*
* We're not guaranteed that any but the first vnode
* are of our type. Check for and don't map any
* that aren't. (We must always map first vp or vclean fails.)
*/
if (i && (*this_vp_p == NULL ||
(*this_vp_p)->v_op != our_vnodeop_p)) {
old_vps[i] = NULL;
} else {
old_vps[i] = *this_vp_p;
*(vps_p[i]) = LAYERVPTOLOWERVP(*this_vp_p);
/*
* XXX - Several operations have the side effect
* of vrele'ing their vp's. We must account for
* that. (This should go away in the future.)
*/
if (reles & VDESC_VP0_WILLRELE)
VREF(*this_vp_p);
}
}
/*
* Call the operation on the lower layer
* with the modified argument structure.
*/
error = VCALL(*vps_p[0], descp->vdesc_offset, ap);
/*
* Maintain the illusion of call-by-value
* by restoring vnodes in the argument structure
* to their original value.
*/
reles = descp->vdesc_flags;
for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
break; /* bail out at end of list */
if (old_vps[i]) {
*(vps_p[i]) = old_vps[i];
if (reles & VDESC_VP0_WILLUNLOCK)
LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
if (reles & VDESC_VP0_WILLRELE)
vrele(*(vps_p[i]));
}
}
/*
* Map the possible out-going vpp
* (Assumes that the lower layer always returns
* a VREF'ed vpp unless it gets an error.)
*/
if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
!(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
!error) {
/*
* XXX - even though some ops have vpp returned vp's,
* several ops actually vrele this before returning.
* We must avoid these ops.
* (This should go away when these ops are regularized.)
*/
if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
goto out;
vppp = VOPARG_OFFSETTO(struct vnode***,
descp->vdesc_vpp_offset,ap);
/*
* Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
* vop_mknod, and vop_symlink return vpp's. The latter
* two are VPP_WILLRELE, so we won't get here, and vop_bmap
* doesn't call bypass as the lower vpp is fine (we're just
* going to do i/o on it). vop_lookup doesn't call bypass
* as a lookup on "." would generate a locking error.
* So all the calls which get us here have a locked vpp. :-)
*/
error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
}
out:
return (error);
}
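/*
 * Hypothetical sketch of the registration described in the big comment
 * above (examplefs is illustrative): a concrete fs names its bypass
 * routine both as the default vnode op and in layerm_bypass, so the
 * layer_* ops in this file can reach it via LAYERFS_DO_BYPASS().
 */
struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_default_desc, layer_bypass },	/* everything not listed */
	{ &vop_lookup_desc, layer_lookup },	/* special-cased below */
	{ &vop_lock_desc, layer_lock },
	{ &vop_unlock_desc, layer_unlock },
	{ (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL }
};

/* ...and in its mount routine: */
lmp->layerm_bypass = layer_bypass;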
/*
* We have to carry on the locking protocol on the layer vnodes
* as we progress through the tree. We also have to enforce read-only
* if this layer is mounted read-only.
*/
int
layer_lookup(v)
void *v;
{
struct vop_lookup_args /* {
struct vnodeop_desc *a_desc;
struct vnode * a_dvp;
struct vnode ** a_vpp;
struct componentname * a_cnp;
} */ *ap = v;
struct componentname *cnp = ap->a_cnp;
int flags = cnp->cn_flags;
struct vnode *dvp, *vp, *ldvp;
int error, r;
dvp = ap->a_dvp;
if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
return (EROFS);
ldvp = LAYERVPTOLOWERVP(dvp);
ap->a_dvp = ldvp;
error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
vp = *ap->a_vpp;
if (error == EJUSTRETURN && (flags & ISLASTCN) &&
(dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
error = EROFS;
/*
* We must do the same locking and unlocking at this layer as
* is done in the layers below us. It used to be we would try
* to guess based on what was set with the flags and error codes.
*
* But that doesn't work. So now we have the underlying VOP_LOOKUP
* tell us if it released the parent vnode, and we adjust the
* upper node accordingly. We can't just look at the lock states
* of the lower nodes as someone else might have come along and
* locked the parent node after our call to VOP_LOOKUP locked it.
*/
if ((cnp->cn_flags & PDIRUNLOCK)) {
LAYERFS_UPPERUNLOCK(dvp, 0, r);
}
if (ldvp == vp) {
/*
* Did lookup on "." or ".." in the root node of a mount point.
* So we return dvp after a VREF.
*/
*ap->a_vpp = dvp;
VREF(dvp);
vrele(vp);
} else if (vp != NULL) {
error = layer_node_create(dvp->v_mount, vp, ap->a_vpp);
}
return (error);
}
/*
* Setattr call. Disallow write attempts if the layer is mounted read-only.
*/
int
layer_setattr(v)
void *v;
{
struct vop_setattr_args /* {
struct vnodeop_desc *a_desc;
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
(vp->v_mount->mnt_flag & MNT_RDONLY))
return (EROFS);
if (vap->va_size != VNOVAL) {
switch (vp->v_type) {
case VDIR:
return (EISDIR);
case VCHR:
case VBLK:
case VSOCK:
case VFIFO:
return (0);
case VREG:
case VLNK:
default:
/*
* Disallow write attempts if the filesystem is
* mounted read-only.
*/
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
}
}
return (LAYERFS_DO_BYPASS(vp, ap));
}
/*
* We handle getattr only to change the fsid.
*/
int
layer_getattr(v)
void *v;
{
struct vop_getattr_args /* {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
int error;
if ((error = LAYERFS_DO_BYPASS(vp, ap)) != 0)
return (error);
/* Requires that arguments be restored. */
ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
return (0);
}
int
layer_access(v)
void *v;
{
struct vop_access_args /* {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
mode_t mode = ap->a_mode;
/*
* Disallow write attempts on read-only layers;
* unless the file is a socket, fifo, or a block or
* character device resident on the file system.
*/
if (mode & VWRITE) {
switch (vp->v_type) {
case VDIR:
case VLNK:
case VREG:
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
break;
default:
break;
}
}
return (LAYERFS_DO_BYPASS(vp, ap));
}
/*
* We must handle open to be able to catch MNT_NODEV and friends.
*/
int
layer_open(v)
void *v;
{
struct vop_open_args *ap = v;
struct vnode *vp = ap->a_vp;
enum vtype lower_type = LAYERVPTOLOWERVP(vp)->v_type;
if (((lower_type == VBLK) || (lower_type == VCHR)) &&
(vp->v_mount->mnt_flag & MNT_NODEV))
return ENXIO;
return LAYERFS_DO_BYPASS(vp, ap);
}
/*
* We need to process our own vnode lock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
int
layer_lock(v)
void *v;
{
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp, *lowervp;
int flags = ap->a_flags, error;
if (vp->v_vnlock != NULL) {
/*
* The lower level has exported a struct lock to us. Use
* it so that all vnodes in the stack lock and unlock
* simultaneously. Note: we don't DRAIN the lock as DRAIN
* decommissions the lock - just because our vnode is
* going away doesn't mean the struct lock below us is.
* LK_EXCLUSIVE is fine.
*/
if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
return(lockmgr(vp->v_vnlock,
(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
&vp->v_interlock));
} else
return(lockmgr(vp->v_vnlock, flags, &vp->v_interlock));
} else {
/*
* Ahh well. It would be nice if the fs we're over would
* export a struct lock for us to use, but it doesn't.
*
* To prevent race conditions involving doing a lookup
* on "..", we have to lock the lower node, then lock our
* node. Most of the time it won't matter that we lock our
* node (as any locking would need the lower one locked
* first). But we can LK_DRAIN the upper lock as a step
* towards decommissioning it.
*/
lowervp = LAYERVPTOLOWERVP(vp);
if (flags & LK_INTERLOCK) {
simple_unlock(&vp->v_interlock);
flags &= ~LK_INTERLOCK;
}
if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
error = VOP_LOCK(lowervp,
(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE);
} else
error = VOP_LOCK(lowervp, flags);
if (error)
return (error);
if ((error = lockmgr(&vp->v_lock, flags, &vp->v_interlock))) {
VOP_UNLOCK(lowervp, 0);
}
return (error);
}
}
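The v_vnlock test above depends on the fs below exporting its vnode lock.
A minimal sketch of the lower fs's side -- hypothetical code, not part of
this commit -- is to point v_vnlock at the vnode's own struct lock when
the vnode is created, so every layer stacked above shares it:

/*
 * Hypothetical lower-fs vnode setup.  With v_vnlock exported, the
 * entire stack of layered vnodes locks and unlocks as a unit.
 */
if ((error = getnewvnode(VT_UFS, mp, ufs_vnodeop_p, &vp)) == 0) {
	lockinit(&vp->v_lock, PVFS, "vnlock", 0, 0);
	vp->v_vnlock = &vp->v_lock;	/* export the lock upward */
}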
/*
* We need to process our own vnode unlock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
int
layer_unlock(v)
void *v;
{
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
int flags = ap->a_flags;
if (vp->v_vnlock != NULL) {
return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
&vp->v_interlock));
} else {
if (flags & LK_INTERLOCK) {
simple_unlock(&vp->v_interlock);
flags &= ~LK_INTERLOCK;
}
VOP_UNLOCK(LAYERVPTOLOWERVP(vp), flags);
return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
&vp->v_interlock));
}
}
/*
* As long as genfs_nolock is in use, don't call VOP_ISLOCKED(lowervp)
* if vp->v_vnlock == NULL as genfs_noislocked will always report 0.
*/
int
layer_islocked(v)
void *v;
{
struct vop_islocked_args /* {
struct vnode *a_vp;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
if (vp->v_vnlock != NULL)
return (lockstatus(vp->v_vnlock));
else
return (lockstatus(&vp->v_lock));
}
/*
* If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
* syncing the underlying vnodes, since they'll be fsync'ed when
* reclaimed; otherwise, pass it through to the underlying layer.
*
* XXX Do we still need to worry about shallow fsync?
*/
int
layer_fsync(v)
void *v;
{
struct vop_fsync_args /* {
struct vnode *a_vp;
struct ucred *a_cred;
int a_flags;
struct proc *a_p;
} */ *ap = v;
if (ap->a_flags & FSYNC_RECLAIM) {
return 0;
}
return (LAYERFS_DO_BYPASS(ap->a_vp, ap));
}
int
layer_inactive(v)
void *v;
{
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap = v;
/*
* Do nothing (and _don't_ bypass).
* Wait to vrele lowervp until reclaim,
* so that until then our layer_node is in the
* cache and reusable.
*
* NEEDSWORK: Someday, consider inactive'ing
* the lowervp and then trying to reactivate it
* with capabilities (v_id)
* like they do in the name lookup cache code.
* That's too much work for now.
*/
VOP_UNLOCK(ap->a_vp, 0);
return (0);
}
int
layer_reclaim(v)
void *v;
{
struct vop_reclaim_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct layer_mount *lmp = MOUNTTOLAYERMOUNT(vp->v_mount);
struct layer_node *xp = VTOLAYER(vp);
struct vnode *lowervp = xp->layer_lowervp;
/*
* Note: in vop_reclaim, the node's struct lock has been
* decommissioned, so we have to be careful about calling
* VOP's on ourself. Even if we turned a LK_DRAIN into an
* LK_EXCLUSIVE in layer_lock, we still must be careful as VXLOCK is
* set.
*/
if (vp == lmp->layerm_rootvp) {
/*
* Oops! We no longer have a root node. The most likely reason is
* that someone forcibly unmounted the underlying fs.
*
* Now getting the root vnode will fail. We're dead. :-(
*/
lmp->layerm_rootvp = NULL;
}
/* After this assignment, this node will not be re-used. */
xp->layer_lowervp = NULL;
simple_lock(&lmp->layerm_hashlock);
LIST_REMOVE(xp, layer_hash);
simple_unlock(&lmp->layerm_hashlock);
FREE(vp->v_data, M_TEMP);
vp->v_data = NULL;
vrele (lowervp);
return (0);
}
/*
* We just feed the returned vnode up to the caller - there's no need
* to build a layer node on top of the node on which we're going to do
* i/o. :-)
*/
int
layer_bmap(v)
void *v;
{
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
} */ *ap = v;
struct vnode *vp;
ap->a_vp = vp = LAYERVPTOLOWERVP(ap->a_vp);
return (VCALL(vp, ap->a_desc->vdesc_offset, ap));
}
int
layer_print(v)
void *v;
{
struct vop_print_args /* {
struct vnode *a_vp;
} */ *ap = v;
register struct vnode *vp = ap->a_vp;
printf ("\ttag VT_LAYERFS, vp=%p, lowervp=%p\n", vp, LAYERVPTOLOWERVP(vp));
return (0);
}
/*
* XXX - vop_strategy must be hand coded because it has no
* vnode in its arguments.
* This goes away with a merged VM/buffer cache.
*/
int
layer_strategy(v)
void *v;
{
struct vop_strategy_args /* {
struct buf *a_bp;
} */ *ap = v;
struct buf *bp = ap->a_bp;
int error;
struct vnode *savedvp;
savedvp = bp->b_vp;
bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);
error = VOP_STRATEGY(bp);
bp->b_vp = savedvp;
return (error);
}
/*
* XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
* vnode in its arguments.
* This goes away with a merged VM/buffer cache.
*/
int
layer_bwrite(v)
void *v;
{
struct vop_bwrite_args /* {
struct buf *a_bp;
} */ *ap = v;
struct buf *bp = ap->a_bp;
int error;
struct vnode *savedvp;
savedvp = bp->b_vp;
bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);
error = VOP_BWRITE(bp);
bp->b_vp = savedvp;
return (error);
}

View File

@ -1,4 +1,37 @@
/* $NetBSD: null.h,v 1.9 1997/10/06 09:32:31 thorpej Exp $ */
/* $NetBSD: null.h,v 1.10 1999/07/08 01:19:03 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993
@ -39,50 +72,50 @@
* @(#)null.h 8.2 (Berkeley) 1/21/94
*/
struct null_args {
char *target; /* Target of loopback */
};
#include <miscfs/genfs/layer.h>
struct null_mount {
struct mount *nullm_vfs;
struct vnode *nullm_rootvp; /* Reference to root null_node */
struct null_args {
struct layer_args la; /* generic layerfs args */
};
#define nulla_target la.target
#define nulla_export la.export
#ifdef _KERNEL
struct null_mount {
struct layer_mount lm; /* generic layerfs mount stuff */
};
#define nullm_vfs lm.layerm_vfs
#define nullm_rootvp lm.layerm_rootvp
#define nullm_export lm.layerm_export
#define nullm_flags lm.layerm_flags
#define nullm_size lm.layerm_size
#define nullm_tag lm.layerm_tag
#define nullm_bypass lm.layerm_bypass
#define nullm_alloc lm.layerm_alloc
#define nullm_vnodeop_p lm.layerm_vnodeop_p
#define nullm_node_hashtbl lm.layerm_node_hashtbl
#define nullm_node_hash lm.layerm_node_hash
#define nullm_hashlock lm.layerm_hashlock
/*
* A cache of vnode references
*/
struct null_node {
LIST_ENTRY(null_node) null_hash; /* Hash list */
struct vnode *null_lowervp; /* VREFed once */
struct vnode *null_vnode; /* Back pointer */
unsigned int null_flags; /* locking, etc. */
#ifdef DIAGNOSTIC
pid_t null_pid; /* who's locking it? */
caddr_t null_lockpc; /* their return addr */
caddr_t null_lockpc2; /* their return addr^2 */
#endif
struct layer_node ln;
};
#define null_hash ln.layer_hash
#define null_lowervp ln.layer_lowervp
#define null_vnode ln.layer_vnode
#define null_flags ln.layer_flags
#if defined(__alpha__) || !defined(__GNUC__) || __GNUC__ < 2 || \
(__GNUC__ == 2 && __GNUC_MINOR__ < 5)
#define RETURN_PC(frameno) (void *)0
#else
#define RETURN_PC(frameno) __builtin_return_address(frameno)
#endif
#define NULL_WANTED 0x01
#define NULL_LOCKED 0x02
#define NULL_LLOCK 0x04
extern int null_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp, int lockit));
int null_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp));
#define MOUNTTONULLMOUNT(mp) ((struct null_mount *)((mp)->mnt_data))
#define VTONULL(vp) ((struct null_node *)(vp)->v_data)
#define NULLTOV(xp) ((xp)->null_vnode)
#ifdef NULLFS_DIAGNOSTIC
extern struct vnode *null_checkvp __P((struct vnode *vp, char *fil, int lno));
#define NULLVPTOLOWERVP(vp) null_checkvp((vp), __FILE__, __LINE__)
extern struct vnode *layer_checkvp __P((struct vnode *vp, char *fil, int lno));
#define NULLVPTOLOWERVP(vp) layer_checkvp((vp), __FILE__, __LINE__)
#else
#define NULLVPTOLOWERVP(vp) (VTONULL(vp)->null_lowervp)
#endif
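The recipe above -- embed the generic layerfs structure as the first
member and map the legacy field names onto it with #defines -- is what a
new layered fs would copy. A sketch for a hypothetical "foofs" (all foo_*
names are made up for illustration):

#include <miscfs/genfs/layer.h>

struct foo_mount {
	struct layer_mount lm;	/* must be first: the layer code treats
				 * mnt_data as a struct layer_mount * */
	/* foofs-specific mount state goes here */
};
#define foom_rootvp	lm.layerm_rootvp

struct foo_node {
	struct layer_node ln;	/* must be first, for the same reason */
	/* foofs-specific per-vnode state goes here */
};
#define foo_lowervp	ln.layer_lowervp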

View File

@ -1,369 +0,0 @@
/* $NetBSD: null_subr.c,v 1.14 1999/04/09 17:27:38 wrstuden Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
* Jan-Simon Pendry.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Id: lofs_subr.c,v 1.11 1992/05/30 10:05:43 jsp Exp
* @(#)null_subr.c 8.7 (Berkeley) 5/14/95
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/nullfs/null.h>
#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */
#define NNULLNODECACHE 16
/*
* Null layer cache:
* Each cache entry holds a reference to the lower vnode
* along with a pointer to the alias vnode. When an
* entry is added the lower vnode is VREF'd. When the
* alias is removed the lower vnode is vrele'd.
*/
#define NULL_NHASH(vp) \
(&null_node_hashtbl[(((u_long)vp)>>LOG2_SIZEVNODE) & null_node_hash])
LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
u_long null_node_hash;
void nullfs_init __P((void));
static struct vnode *
null_node_find __P((struct mount *, struct vnode *));
static int
null_node_alloc __P((struct mount *, struct vnode *, struct vnode **));
/*
* Initialise cache headers
*/
void
nullfs_init()
{
#ifdef NULLFS_DIAGNOSTIC
printf("nullfs_init\n"); /* printed during system boot */
#endif
null_node_hashtbl = hashinit(NNULLNODECACHE, M_CACHE, M_WAITOK, &null_node_hash);
}
/*
* Return a VREF'ed alias for lower vnode if already exists, else 0.
*/
static struct vnode *
null_node_find(mp, lowervp)
struct mount *mp;
struct vnode *lowervp;
{
struct null_node_hashhead *hd;
struct null_node *a;
struct vnode *vp;
/*
* Find hash base, and then search the (two-way) linked
* list looking for a null_node structure which is referencing
* the lower vnode. If found, increment the null_node
* reference count (but NOT the lower vnode's VREF counter).
*/
hd = NULL_NHASH(lowervp);
loop:
for (a = hd->lh_first; a != 0; a = a->null_hash.le_next) {
if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
vp = NULLTOV(a);
/*
* We need vget for the VXLOCK
* stuff, but we don't want to lock
* the lower node.
*/
if (vget(vp, 0)) {
printf ("null_node_find: vget failed.\n");
goto loop;
};
return (vp);
}
}
return NULL;
}
/*
* Make a new null_node node.
* Vp is the alias vnode, lowervp is the lower vnode.
* Maintain a reference to lowervp.
*/
static int
null_node_alloc(mp, lowervp, vpp)
struct mount *mp;
struct vnode *lowervp;
struct vnode **vpp;
{
struct null_node_hashhead *hd;
struct null_node *xp;
struct vnode *vp, *nvp;
int error;
extern int (**dead_vnodeop_p) __P((void *));
if ((error = getnewvnode(VT_NULL, mp, null_vnodeop_p, &vp)) != 0)
return (error);
vp->v_type = lowervp->v_type;
MALLOC(xp, struct null_node *, sizeof(struct null_node), M_TEMP,
M_WAITOK);
if (vp->v_type == VBLK || vp->v_type == VCHR) {
MALLOC(vp->v_specinfo, struct specinfo *,
sizeof(struct specinfo), M_VNODE, M_WAITOK);
vp->v_rdev = lowervp->v_rdev;
}
vp->v_data = xp;
xp->null_vnode = vp;
xp->null_lowervp = lowervp;
xp->null_flags = 0;
#ifdef DIAGNOSTIC
xp->null_pid = -1;
xp->null_lockpc = xp->null_lockpc2 = 0;
#endif
/*
* Before we insert our new node onto the hash chains,
* check to see if someone else has beaten us to it.
* (We could have slept in MALLOC.)
*/
if ((nvp = null_node_find(mp, lowervp)) != NULL) {
*vpp = nvp;
/* free the substructures we've allocated. */
FREE(xp, M_TEMP);
if (vp->v_type == VBLK || vp->v_type == VCHR)
FREE(vp->v_specinfo, M_VNODE);
vp->v_type = VBAD; /* node is discarded */
vp->v_op = dead_vnodeop_p; /* so ops will still work */
vrele(vp); /* get rid of it. */
return (0);
}
/*
* XXX if it's a device node, it needs to be checkalias()ed.
* however, for locking reasons, that's just not possible.
* so we have to do most of the dirty work inline. Note that
* this is a limited case; we know that there's going to be
* an alias, and we know that that alias will be a "real"
* device node, i.e. not tagged VT_NON.
*/
if (vp->v_type == VBLK || vp->v_type == VCHR) {
struct vnode *cvp, **cvpp;
cvpp = &speclisth[SPECHASH(vp->v_rdev)];
loop:
for (cvp = *cvpp; cvp; cvp = cvp->v_specnext) {
if (vp->v_rdev != cvp->v_rdev ||
vp->v_type != cvp->v_type)
continue;
/*
* Alias, but not in use, so flush it out.
*/
if (cvp->v_usecount == 0) {
vgone(cvp);
goto loop;
}
if (vget(cvp, 0))
/* can't lock; will die! */
goto loop;
break;
}
vp->v_hashchain = cvpp;
vp->v_specnext = *cvpp;
vp->v_specflags = 0;
*cvpp = vp;
#ifdef DIAGNOSTIC
if (cvp == NULLVP)
panic("null_node_alloc: no alias for device");
#endif
vp->v_flag |= VALIASED;
cvp->v_flag |= VALIASED;
vrele(cvp);
}
/* XXX end of transmogrified checkalias() */
*vpp = vp;
VREF(lowervp); /* Extra VREF will be vrele'd in null_node_create */
hd = NULL_NHASH(lowervp);
LIST_INSERT_HEAD(hd, xp, null_hash);
return (0);
}
/*
* Try to find an existing null_node vnode referring
* to the lower vnode; otherwise make a new null_node vnode which
* contains a reference to the lower vnode.
*
* >>> we assume that the lower node is already locked upon entry, so we mark
* the upper node as locked too (if caller requests it). <<<
*/
int
null_node_create(mp, lowervp, newvpp, takelock)
struct mount *mp;
struct vnode *lowervp;
struct vnode **newvpp;
int takelock;
{
struct vnode *aliasvp;
int locked = 0;
if ((aliasvp = null_node_find(mp, lowervp)) != NULL) {
/*
* null_node_find has taken another reference
* to the alias vnode.
*/
#ifdef NULLFS_DIAGNOSTIC
vprint("null_node_create: exists", aliasvp);
#endif
/* VREF(aliasvp); --- done in null_node_find */
} else {
int error;
/*
* Get new vnode.
*/
#ifdef NULLFS_DIAGNOSTIC
printf("null_node_create: create new alias vnode\n");
#endif
/*
* Make new vnode reference the null_node.
*/
if ((error = null_node_alloc(mp, lowervp, &aliasvp)) != 0)
return error;
/*
* aliasvp is already VREF'd by getnewvnode()
*/
locked = 1;
}
vrele(lowervp);
#ifdef DIAGNOSTIC
if (lowervp->v_usecount < 1) {
/* Should never happen... */
vprint("null_node_create: alias", aliasvp);
vprint("null_node_create: lower", lowervp);
panic("null_node_create: lower has 0 usecount.");
};
#endif
#ifdef NULLFS_DIAGNOSTIC
vprint("null_node_create: alias", aliasvp);
#endif
/* lower node was locked: mark it as locked and take
upper layer lock */
VTONULL(aliasvp)->null_flags |= NULL_LLOCK;
if (takelock) {
if (!locked)
vn_lock(aliasvp, LK_EXCLUSIVE | LK_RETRY);
#ifdef NULLFS_DIAGNOSTIC
else
printf ("null_node_create: already locked\n");
#endif
}
*newvpp = aliasvp;
return (0);
}
#ifdef NULLFS_DIAGNOSTIC
struct vnode *
null_checkvp(vp, fil, lno)
struct vnode *vp;
char *fil;
int lno;
{
struct null_node *a = VTONULL(vp);
#ifdef notyet
/*
* Can't do this check because vop_reclaim runs
* with a funny vop vector.
*/
if (vp->v_op != null_vnodeop_p) {
printf ("null_checkvp: on non-null-node\n");
#ifdef notyet
while (null_checkvp_barrier) /*WAIT*/ ;
#endif
panic("null_checkvp");
};
#endif
if (a->null_lowervp == NULL) {
/* Should never happen */
int i; u_long *p;
printf("vp = %p, ZERO ptr\n", vp);
for (p = (u_long *) a, i = 0; i < 8; i++)
printf(" %lx", p[i]);
printf("\n");
/* wait for debugger */
#ifdef notyet
while (null_checkvp_barrier) /*WAIT*/ ;
#endif
panic("null_checkvp");
}
if (a->null_lowervp->v_usecount < 1) {
int i; u_long *p;
printf("vp = %p, unref'ed lowervp\n", vp);
for (p = (u_long *) a, i = 0; i < 8; i++)
printf(" %lx", p[i]);
printf("\n");
/* wait for debugger */
#ifdef notyet
while (null_checkvp_barrier) /*WAIT*/ ;
#endif
panic ("null with unref'ed lowervp");
};
#ifdef notyet
printf("null %p/%d -> %p/%d [%s, %d]\n",
NULLTOV(a), NULLTOV(a)->v_usecount,
a->null_lowervp, a->null_lowervp->v_usecount,
fil, lno);
#endif
return a->null_lowervp;
}
#endif

View File

@ -1,5 +1,37 @@
/* $NetBSD: null_vfsops.c,v 1.23 1999/02/26 23:44:45 wrstuden Exp $ */
/* $NetBSD: null_vfsops.c,v 1.24 1999/07/08 01:19:05 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
@ -55,23 +87,14 @@
#include <sys/namei.h>
#include <sys/malloc.h>
#include <miscfs/nullfs/null.h>
#include <miscfs/genfs/layer_extern.h>
int nullfs_mount __P((struct mount *, const char *, void *,
struct nameidata *, struct proc *));
int nullfs_start __P((struct mount *, int, struct proc *));
int nullfs_unmount __P((struct mount *, int, struct proc *));
int nullfs_root __P((struct mount *, struct vnode **));
int nullfs_quotactl __P((struct mount *, int, uid_t, caddr_t,
struct proc *));
int nullfs_statfs __P((struct mount *, struct statfs *, struct proc *));
int nullfs_sync __P((struct mount *, int, struct ucred *, struct proc *));
int nullfs_vget __P((struct mount *, ino_t, struct vnode **));
int nullfs_fhtovp __P((struct mount *, struct fid *, struct vnode **));
int nullfs_checkexp __P((struct mount *, struct mbuf *, int *,
struct ucred **));
int nullfs_vptofh __P((struct vnode *, struct fid *));
int nullfs_sysctl __P((int *, u_int, void *, size_t *, void *, size_t,
struct proc *));
#define NNULLNODECACHE 16
/*
* Mount null layer
*/
@ -86,22 +109,14 @@ nullfs_mount(mp, path, data, ndp, p)
int error = 0;
struct null_args args;
struct vnode *lowerrootvp, *vp;
struct vnode *nullm_rootvp;
struct null_mount *xmp;
struct null_mount *nmp;
struct layer_mount *lmp;
size_t size;
#ifdef NULLFS_DIAGNOSTIC
printf("nullfs_mount(mp = %p)\n", mp);
#endif
/*
* Update is a no-op
*/
if (mp->mnt_flag & MNT_UPDATE) {
return (EOPNOTSUPP);
/* return VFS_MOUNT(MOUNTTONULLMOUNT(mp)->nullm_vfs, path, data, ndp, p);*/
}
/*
* Get argument
*/
@ -109,11 +124,23 @@ nullfs_mount(mp, path, data, ndp, p)
if (error)
return (error);
/*
* Update only does export updating.
*/
if (mp->mnt_flag & MNT_UPDATE) {
lmp = MOUNTTOLAYERMOUNT(mp);
if (args.nulla_target == 0)
return (vfs_export(mp, &lmp->layerm_export,
&args.la.export));
else
return (EOPNOTSUPP);
}
/*
* Find lower node
*/
NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT|LOCKLEAF,
UIO_USERSPACE, args.target, p);
UIO_USERSPACE, args.la.target, p);
if ((error = namei(ndp)) != 0)
return (error);
@ -125,29 +152,47 @@ nullfs_mount(mp, path, data, ndp, p)
vrele(ndp->ni_dvp);
ndp->ni_dvp = NULL;
xmp = (struct null_mount *) malloc(sizeof(struct null_mount),
/*
* Allocate and initialize the upper mount point
*/
nmp = (struct null_mount *) malloc(sizeof(struct null_mount),
M_UFSMNT, M_WAITOK); /* XXX */
memset((caddr_t)nmp, 0, sizeof(struct null_mount));
mp->mnt_data = (qaddr_t) nmp;
nmp->nullm_vfs = lowerrootvp->v_mount;
if (nmp->nullm_vfs->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;
/*
* Save reference to underlying FS
* Make sure that the mount point is sufficiently initialized
* that the node create call will work.
*/
xmp->nullm_vfs = lowerrootvp->v_mount;
vfs_getnewfsid(mp, MOUNT_NULL);
nmp->nullm_size = sizeof (struct null_node);
nmp->nullm_tag = VT_NULL;
nmp->nullm_bypass = layer_bypass;
nmp->nullm_alloc = layer_node_alloc; /* the default alloc is fine */
nmp->nullm_vnodeop_p = null_vnodeop_p;
simple_lock_init(&nmp->nullm_hashlock);
nmp->nullm_node_hashtbl = hashinit(NNULLNODECACHE, M_CACHE, M_WAITOK,
&nmp->nullm_node_hash);
/*
* Save reference. Each mount also holds
* a reference on the root vnode.
* Fix up null node for root vnode
*/
error = null_node_create(mp, lowerrootvp, &vp, 1);
error = layer_node_create(mp, lowerrootvp, &vp);
/*
* Make sure the node alias worked
* Make sure the fixup worked
*/
if (error) {
vrele(lowerrootvp);
free(xmp, M_UFSMNT); /* XXX */
vput(lowerrootvp);
free(nmp, M_UFSMNT); /* XXX */
return (error);
}
/*
* Unlock the node (either the lower or the alias)
* Unlock the node
*/
VOP_UNLOCK(vp, 0);
@ -155,17 +200,12 @@ nullfs_mount(mp, path, data, ndp, p)
* Keep a held reference to the root vnode.
* It is vrele'd in nullfs_unmount.
*/
nullm_rootvp = vp;
nullm_rootvp->v_flag |= VROOT;
xmp->nullm_rootvp = nullm_rootvp;
if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_data = (qaddr_t) xmp;
vfs_getnewfsid(mp, MOUNT_NULL);
vp->v_flag |= VROOT;
nmp->nullm_rootvp = vp;
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
memset(mp->mnt_stat.f_mntonname + size, 0, MNAMELEN - size);
(void) copyinstr(args.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
(void) copyinstr(args.la.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
&size);
memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
#ifdef NULLFS_DIAGNOSTIC
@ -175,22 +215,6 @@ nullfs_mount(mp, path, data, ndp, p)
return (0);
}
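To recap the initialization scattered through nullfs_mount() above: these
are the layer_mount fields (via their nullm_* aliases) that must be primed
before layer_node_create() will work. This restates the committed code as
a checklist for writers of new layers:

nmp->nullm_size = sizeof(struct null_node);	/* node allocation size */
nmp->nullm_tag = VT_NULL;			/* tag for getnewvnode() */
nmp->nullm_bypass = layer_bypass;		/* default op: pass through */
nmp->nullm_alloc = layer_node_alloc;		/* node constructor */
nmp->nullm_vnodeop_p = null_vnodeop_p;		/* this fs's op vector */
simple_lock_init(&nmp->nullm_hashlock);		/* node hash + its lock */
nmp->nullm_node_hashtbl = hashinit(NNULLNODECACHE, M_CACHE, M_WAITOK,
    &nmp->nullm_node_hash);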
/*
* VFS start. Nothing needed here - the start routine
* on the underlying filesystem will have been called
* when that filesystem was mounted.
*/
int
nullfs_start(mp, flags, p)
struct mount *mp;
int flags;
struct proc *p;
{
return (0);
/* return VFS_START(MOUNTTONULLMOUNT(mp)->nullm_vfs, flags, p); */
}
/*
* Free reference to null layer
*/
@ -200,7 +224,7 @@ nullfs_unmount(mp, mntflags, p)
int mntflags;
struct proc *p;
{
struct vnode *nullm_rootvp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
struct vnode *null_rootvp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
int error;
int flags = 0;
@ -221,22 +245,22 @@ nullfs_unmount(mp, mntflags, p)
if (mntinvalbuf(mp, 1))
return (EBUSY);
#endif
if (nullm_rootvp->v_usecount > 1)
if (null_rootvp->v_usecount > 1)
return (EBUSY);
if ((error = vflush(mp, nullm_rootvp, flags)) != 0)
if ((error = vflush(mp, null_rootvp, flags)) != 0)
return (error);
#ifdef NULLFS_DIAGNOSTIC
vprint("alias root of lower", nullm_rootvp);
vprint("alias root of lower", null_rootvp);
#endif
/*
* Release reference on underlying root vnode
*/
vrele(nullm_rootvp);
vrele(null_rootvp);
/*
* And blow it away for future re-use
*/
vgone(nullm_rootvp);
vgone(null_rootvp);
/*
* Finally, throw away the null_mount structure
*/
@ -245,148 +269,6 @@ nullfs_unmount(mp, mntflags, p)
return 0;
}
int
nullfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct vnode *vp;
#ifdef NULLFS_DIAGNOSTIC
printf("nullfs_root(mp = %p, vp = %p->%p)\n", mp,
MOUNTTONULLMOUNT(mp)->nullm_rootvp,
NULLVPTOLOWERVP(MOUNTTONULLMOUNT(mp)->nullm_rootvp));
#endif
/*
* Return locked reference to root.
*/
vp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
VREF(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
*vpp = vp;
return 0;
}
int
nullfs_quotactl(mp, cmd, uid, arg, p)
struct mount *mp;
int cmd;
uid_t uid;
caddr_t arg;
struct proc *p;
{
return VFS_QUOTACTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd, uid, arg, p);
}
int
nullfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
struct proc *p;
{
int error;
struct statfs mstat;
#ifdef NULLFS_DIAGNOSTIC
printf("nullfs_statfs(mp = %p, vp = %p->%p)\n", mp,
MOUNTTONULLMOUNT(mp)->nullm_rootvp,
NULLVPTOLOWERVP(MOUNTTONULLMOUNT(mp)->nullm_rootvp));
#endif
memset(&mstat, 0, sizeof(mstat));
error = VFS_STATFS(MOUNTTONULLMOUNT(mp)->nullm_vfs, &mstat, p);
if (error)
return (error);
/* now copy across the "interesting" information and fake the rest */
sbp->f_type = mstat.f_type;
sbp->f_flags = mstat.f_flags;
sbp->f_bsize = mstat.f_bsize;
sbp->f_iosize = mstat.f_iosize;
sbp->f_blocks = mstat.f_blocks;
sbp->f_bfree = mstat.f_bfree;
sbp->f_bavail = mstat.f_bavail;
sbp->f_files = mstat.f_files;
sbp->f_ffree = mstat.f_ffree;
if (sbp != &mp->mnt_stat) {
memcpy(&sbp->f_fsid, &mp->mnt_stat.f_fsid, sizeof(sbp->f_fsid));
memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
}
strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
return (0);
}
int
nullfs_sync(mp, waitfor, cred, p)
struct mount *mp;
int waitfor;
struct ucred *cred;
struct proc *p;
{
/*
* XXX - Assumes no data cached at null layer.
*/
return (0);
}
int
nullfs_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
struct vnode **vpp;
{
return VFS_VGET(MOUNTTONULLMOUNT(mp)->nullm_vfs, ino, vpp);
}
int
nullfs_fhtovp(mp, fidp, vpp)
struct mount *mp;
struct fid *fidp;
struct vnode **vpp;
{
return (EOPNOTSUPP);
}
int
nullfs_checkexp(mp, nam, exflagsp, credanonp)
struct mount *mp;
struct mbuf *nam;
int *exflagsp;
struct ucred**credanonp;
{
return (EOPNOTSUPP);
}
int
nullfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
return (EOPNOTSUPP);
}
int
nullfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
int *name;
u_int namelen;
void *oldp;
size_t *oldlenp;
void *newp;
size_t newlen;
struct proc *p;
{
return (EOPNOTSUPP);
}
extern struct vnodeopv_desc null_vnodeop_opv_desc;
struct vnodeopv_desc *nullfs_vnodeopv_descs[] = {
@ -397,18 +279,18 @@ struct vnodeopv_desc *nullfs_vnodeopv_descs[] = {
struct vfsops nullfs_vfsops = {
MOUNT_NULL,
nullfs_mount,
nullfs_start,
layerfs_start,
nullfs_unmount,
nullfs_root,
nullfs_quotactl,
nullfs_statfs,
nullfs_sync,
nullfs_vget,
nullfs_fhtovp,
nullfs_vptofh,
nullfs_init,
nullfs_sysctl,
layerfs_root,
layerfs_quotactl,
layerfs_statfs,
layerfs_sync,
layerfs_vget,
layerfs_fhtovp,
layerfs_vptofh,
layerfs_init,
layerfs_sysctl,
NULL, /* vfs_mountroot */
nullfs_checkexp,
layerfs_checkexp,
nullfs_vnodeopv_descs,
};
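With mount and unmount as the only fs-specific entries, a new layered
fs's vfsops table is mostly a row of layerfs_* generics. A hypothetical
foofs (MOUNT_FOO and the foofs_* names are illustrative, not committed):

struct vfsops foofs_vfsops = {
	MOUNT_FOO,		/* hypothetical fs name constant */
	foofs_mount,		/* fs-specific: parse args, prime layer_mount */
	layerfs_start,
	foofs_unmount,		/* fs-specific: release root, free mount */
	layerfs_root,
	layerfs_quotactl,
	layerfs_statfs,
	layerfs_sync,
	layerfs_vget,
	layerfs_fhtovp,
	layerfs_vptofh,
	layerfs_init,
	layerfs_sysctl,
	NULL,			/* vfs_mountroot */
	layerfs_checkexp,
	foofs_vnodeopv_descs,
};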

View File

@ -1,5 +1,37 @@
/* $NetBSD: null_vnops.c,v 1.15 1999/03/25 13:05:41 bouyer Exp $ */
/* $NetBSD: null_vnops.c,v 1.16 1999/07/08 01:19:05 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
@ -39,7 +71,7 @@
*
* Ancestors:
* @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92
* $Id: null_vnops.c,v 1.15 1999/03/25 13:05:41 bouyer Exp $
* $Id: null_vnops.c,v 1.16 1999/07/08 01:19:05 wrstuden Exp $
* ...and...
* @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
*/
@ -184,539 +216,34 @@
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>
#include <miscfs/genfs/genfs.h>
int null_bug_bypass = 0; /* for debugging: enables bypass printf'ing */
int null_bypass __P((void *));
int null_getattr __P((void *));
int null_inactive __P((void *));
int null_reclaim __P((void *));
int null_print __P((void *));
int null_strategy __P((void *));
int null_bwrite __P((void *));
int null_lock __P((void *));
int null_unlock __P((void *));
int null_fsync __P((void *));
int null_lookup __P((void *));
int null_setattr __P((void *));
int null_access __P((void *));
int null_open __P((void *));
/*
* This is the 10-Apr-92 bypass routine.
* This version has been optimized for speed, throwing away some
* safety checks. It should still always work, but it's not as
* robust to programmer errors.
* Define SAFETY to include some error checking code.
*
* In general, we map all vnodes going down and unmap them on the way back.
* As an exception to this, vnodes can be marked "unmapped" by setting
* the Nth bit in operation's vdesc_flags.
*
* Also, some BSD vnode operations have the side effect of vrele'ing
* their arguments. With stacking, the reference counts are held
* by the upper node, not the lower one, so we must handle these
* side-effects here. This is not of concern in Sun-derived systems
* since there are no such side-effects.
*
* This makes the following assumptions:
* - only one returned vpp
* - no INOUT vpp's (Sun's vop_open has one of these)
* - the vnode operation vector of the first vnode should be used
* to determine what implementation of the op should be invoked
* - all mapped vnodes are of our vnode-type (NEEDSWORK:
* problems on rmdir'ing mount points and renaming?)
*/
int
null_bypass(v)
void *v;
{
struct vop_generic_args /* {
struct vnodeop_desc *a_desc;
<other random data follows, presumably>
} */ *ap = v;
extern int (**null_vnodeop_p) __P((void *));
register struct vnode **this_vp_p;
int error;
struct vnode *old_vps[VDESC_MAX_VPS];
struct vnode **vps_p[VDESC_MAX_VPS];
struct vnode ***vppp;
struct vnodeop_desc *descp = ap->a_desc;
int reles, i;
if (null_bug_bypass)
printf ("null_bypass: %s\n", descp->vdesc_name);
#ifdef SAFETY
/*
* We require at least one vp.
*/
if (descp->vdesc_vp_offsets == NULL ||
descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
panic ("null_bypass: no vp's in map.\n");
#endif
/*
* Map the vnodes going in.
* Later, we'll invoke the operation based on
* the first mapped vnode's operation vector.
*/
reles = descp->vdesc_flags;
for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
break; /* bail out at end of list */
vps_p[i] = this_vp_p =
VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
/*
* We're not guaranteed that any but the first vnode
* are of our type. Check for and don't map any
* that aren't. (We must always map first vp or vclean fails.)
*/
if (i && (*this_vp_p == NULL ||
(*this_vp_p)->v_op != null_vnodeop_p)) {
old_vps[i] = NULL;
} else {
old_vps[i] = *this_vp_p;
*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
/*
* XXX - Several operations have the side effect
* of vrele'ing their vp's. We must account for
* that. (This should go away in the future.)
*/
if (reles & 1)
VREF(*this_vp_p);
}
}
/*
* Call the operation on the lower layer
* with the modified argument structure.
*/
error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);
/*
* Maintain the illusion of call-by-value
* by restoring vnodes in the argument structure
* to their original value.
*/
reles = descp->vdesc_flags;
for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
break; /* bail out at end of list */
if (old_vps[i]) {
*(vps_p[i]) = old_vps[i];
if (reles & 1)
vrele(*(vps_p[i]));
}
}
/*
* Map the possible out-going vpp
* (Assumes that the lower layer always returns
* a VREF'ed vpp unless it gets an error.)
*/
if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
!(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
!error) {
/*
* XXX - even though some ops have vpp returned vp's,
* several ops actually vrele this before returning.
* We must avoid these ops.
* (This should go away when these ops are regularized.)
*/
if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
goto out;
vppp = VOPARG_OFFSETTO(struct vnode***,
descp->vdesc_vpp_offset,ap);
error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp,
descp == &vop_lookup_desc ? 0 : 1);
}
out:
return (error);
}
/*
* We have to carry on the locking protocol on the null layer vnodes
* as we progress through the tree. We also have to enforce read-only
* if this layer is mounted read-only.
*/
int
null_lookup(v)
void *v;
{
struct vop_lookup_args /* {
struct vnode * a_dvp;
struct vnode ** a_vpp;
struct componentname * a_cnp;
} */ *ap = v;
struct componentname *cnp = ap->a_cnp;
int flags = cnp->cn_flags;
struct vop_lock_args lockargs;
struct vop_unlock_args unlockargs;
struct vnode *dvp, *vp;
int error;
if ((flags & ISLASTCN) && (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
return (EROFS);
error = null_bypass(ap);
if (error == EJUSTRETURN && (flags & ISLASTCN) &&
(ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
error = EROFS;
/*
* We must do the same locking and unlocking at this layer as
* is done in the layers below us. We could figure this out
* based on the error return and the LASTCN, LOCKPARENT, and
* LOCKLEAF flags. However, it is more expedient to just find
* out the state of the lower level vnodes and set ours to the
* same state.
*/
dvp = ap->a_dvp;
vp = *ap->a_vpp;
if (dvp == vp)
return (error);
if (!VOP_ISLOCKED(dvp)) {
unlockargs.a_vp = dvp;
unlockargs.a_flags = 0;
genfs_nounlock(&unlockargs);
}
if (vp != NULL && VOP_ISLOCKED(vp)) {
lockargs.a_vp = vp;
lockargs.a_flags = LK_SHARED;
genfs_nolock(&lockargs);
}
return (error);
}
/*
* Setattr call. Disallow write attempts if the layer is mounted read-only.
*/
int
null_setattr(v)
void *v;
{
struct vop_setattr_args /* {
struct vnodeop_desc *a_desc;
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
(vp->v_mount->mnt_flag & MNT_RDONLY))
return (EROFS);
if (vap->va_size != VNOVAL) {
switch (vp->v_type) {
case VDIR:
return (EISDIR);
case VCHR:
case VBLK:
case VSOCK:
case VFIFO:
return (0);
case VREG:
case VLNK:
default:
/*
* Disallow write attempts if the filesystem is
* mounted read-only.
*/
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
}
}
return (null_bypass(ap));
}
/*
* We handle getattr only to change the fsid.
*/
int
null_getattr(v)
void *v;
{
struct vop_getattr_args /* {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap = v;
int error;
if ((error = null_bypass(ap)) != 0)
return (error);
/* Requires that arguments be restored. */
ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
return (0);
}
int
null_access(v)
void *v;
{
struct vop_access_args /* {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
mode_t mode = ap->a_mode;
/*
* Disallow write attempts on read-only layers;
* unless the file is a socket, fifo, or a block or
* character device resident on the file system.
*/
if (mode & VWRITE) {
switch (vp->v_type) {
case VDIR:
case VLNK:
case VREG:
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
break;
default:
break;
}
}
return (null_bypass(ap));
}
/*
* We must handle open to be able to catch MNT_NODEV and friends.
*/
int
null_open(v)
void *v;
{
struct vop_open_args *ap = v;
struct vnode *vp = ap->a_vp;
enum vtype lower_type = NULLVPTOLOWERVP(vp)->v_type;
if (((lower_type == VBLK) || (lower_type == VCHR)) &&
(vp->v_mount->mnt_flag & MNT_NODEV))
return ENXIO;
return null_bypass(ap);
}
/*
* We need to process our own vnode lock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
int
null_lock(v)
void *v;
{
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
genfs_nolock(ap);
if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
return (0);
ap->a_flags &= ~LK_INTERLOCK;
return (null_bypass(ap));
}
/*
* We need to process our own vnode unlock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
int
null_unlock(v)
void *v;
{
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
genfs_nounlock(ap);
ap->a_flags &= ~LK_INTERLOCK;
return (null_bypass(ap));
}
/*
* If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
* syncing the underlying vnodes, since (a) they'll be fsync'ed when
* reclaimed and (b) we could deadlock if they're locked; otherwise,
* pass it through to the underlying layer.
*/
int
null_fsync(v)
void *v;
{
struct vop_fsync_args /* {
struct vnode *a_vp;
struct ucred *a_cred;
int a_flags;
struct proc *a_p;
} */ *ap = v;
if (ap->a_flags & FSYNC_RECLAIM) {
return 0;
}
return (null_bypass(ap));
}
int
null_inactive(v)
void *v;
{
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap = v;
/*
* Do nothing (and _don't_ bypass).
* Wait to vrele lowervp until reclaim,
* so that until then our null_node is in the
* cache and reusable.
*
* NEEDSWORK: Someday, consider inactive'ing
* the lowervp and then trying to reactivate it
* with capabilities (v_id)
* like they do in the name lookup cache code.
* That's too much work for now.
*/
VOP_UNLOCK(ap->a_vp, 0);
return (0);
}
int
null_reclaim(v)
void *v;
{
struct vop_reclaim_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct null_node *xp = VTONULL(vp);
struct vnode *lowervp = xp->null_lowervp;
/*
* Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
* so we can't call VOPs on ourself.
*/
/* After this assignment, this node will not be re-used. */
xp->null_lowervp = NULL;
LIST_REMOVE(xp, null_hash);
FREE(vp->v_data, M_TEMP);
vp->v_data = NULL;
vrele (lowervp);
return (0);
}
int
null_print(v)
void *v;
{
struct vop_print_args /* {
struct vnode *a_vp;
} */ *ap = v;
register struct vnode *vp = ap->a_vp;
printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
return (0);
}
/*
* XXX - vop_strategy must be hand coded because it has no
* vnode in its arguments.
* This goes away with a merged VM/buffer cache.
*/
int
null_strategy(v)
void *v;
{
struct vop_strategy_args /* {
struct buf *a_bp;
} */ *ap = v;
struct buf *bp = ap->a_bp;
int error;
struct vnode *savedvp;
savedvp = bp->b_vp;
bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);
error = VOP_STRATEGY(bp);
bp->b_vp = savedvp;
return (error);
}
/*
* XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
* vnode in its arguments.
* This goes away with a merged VM/buffer cache.
*/
int
null_bwrite(v)
void *v;
{
struct vop_bwrite_args /* {
struct buf *a_bp;
} */ *ap = v;
struct buf *bp = ap->a_bp;
int error;
struct vnode *savedvp;
savedvp = bp->b_vp;
bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);
error = VOP_BWRITE(bp);
bp->b_vp = savedvp;
return (error);
}
#include <miscfs/nullfs/null.h>
#include <miscfs/genfs/layer_extern.h>
/*
* Global vfs data structures
*/
int (**null_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
{ &vop_default_desc, null_bypass },
{ &vop_default_desc, layer_bypass },
{ &vop_lookup_desc, null_lookup },
{ &vop_setattr_desc, null_setattr },
{ &vop_getattr_desc, null_getattr },
{ &vop_access_desc, null_access },
{ &vop_lock_desc, null_lock },
{ &vop_unlock_desc, null_unlock },
{ &vop_fsync_desc, null_fsync },
{ &vop_inactive_desc, null_inactive },
{ &vop_reclaim_desc, null_reclaim },
{ &vop_print_desc, null_print },
{ &vop_lookup_desc, layer_lookup },
{ &vop_setattr_desc, layer_setattr },
{ &vop_getattr_desc, layer_getattr },
{ &vop_access_desc, layer_access },
{ &vop_lock_desc, layer_lock },
{ &vop_unlock_desc, layer_unlock },
{ &vop_islocked_desc, layer_islocked },
{ &vop_fsync_desc, layer_fsync },
{ &vop_inactive_desc, layer_inactive },
{ &vop_reclaim_desc, layer_reclaim },
{ &vop_print_desc, layer_print },
{ &vop_open_desc, null_open }, /* mount option handling */
{ &vop_open_desc, layer_open }, /* mount option handling */
{ &vop_strategy_desc, null_strategy },
{ &vop_bwrite_desc, null_bwrite },
{ &vop_strategy_desc, layer_strategy },
{ &vop_bwrite_desc, layer_bwrite },
{ &vop_bmap_desc, layer_bmap },
{ (struct vnodeop_desc*)NULL, (int(*)__P((void *)))NULL }
};
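The table works by exception: vop_default_desc routes every op without an
explicit row through layer_bypass. An illustrative trace (not committed
code) for an op with no row above:

/*
 * VOP_READ(nullvp, ...) on a null vnode:
 *   -> dispatches through null_vnodeop_p to layer_bypass(), since
 *      vop_read has no explicit entry;
 *   -> layer_bypass() swaps each null vnode argument for its lower
 *      vnode, re-issues the op on the fs below via VCALL(), then
 *      restores the arguments to preserve call-by-value semantics.
 */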

View File

@ -1,4 +1,4 @@
/* $NetBSD: umap.h,v 1.8 1998/03/01 02:21:51 fvdl Exp $ */
/* $NetBSD: umap.h,v 1.9 1999/07/08 01:19:06 wrstuden Exp $ */
/*
* Copyright (c) 1992, 1993
@ -39,22 +39,28 @@
* @(#)umap.h 8.4 (Berkeley) 8/20/94
*/
#include <miscfs/genfs/layer.h>
#define MAPFILEENTRIES 64
#define GMAPFILEENTRIES 16
#define NOBODY 32767
#define NULLGROUP 65534
struct umap_args {
char *target; /* Target of loopback */
struct layer_args la; /* generic layerfs args. Includes
* target and export info */
#define umap_target la.target
#define umap_export la.export
int nentries; /* # of entries in user map array */
int gnentries; /* # of entries in group map array */
u_long (*mapdata)[2]; /* pointer to array of user mappings */
u_long (*gmapdata)[2]; /* pointer to array of group mappings */
};
#ifdef _KERNEL
struct umap_mount {
struct mount *umapm_vfs;
struct vnode *umapm_rootvp; /* Reference to root umap_node */
struct layer_mount lm;
int info_nentries; /* number of uid mappings */
int info_gnentries; /* number of gid mappings */
u_long info_mapdata[MAPFILEENTRIES][2]; /* mapping data for
@ -62,27 +68,39 @@ struct umap_mount {
u_long info_gmapdata[GMAPFILEENTRIES][2]; /*mapping data for
group mapping in ficus */
};
#define umapm_vfs lm.layerm_vfs
#define umapm_rootvp lm.layerm_rootvp
#define umapm_export lm.layerm_export
#define umapm_flags lm.layerm_flags
#define umapm_size lm.layerm_size
#define umapm_tag lm.layerm_tag
#define umapm_bypass lm.layerm_bypass
#define umapm_alloc lm.layerm_alloc
#define umapm_vnodeop_p lm.layerm_vnodeop_p
#define umapm_node_hashtbl lm.layerm_node_hashtbl
#define umapm_node_hash lm.layerm_node_hash
#define umapm_hashlock lm.layerm_hashlock
#ifdef _KERNEL
/*
* A cache of vnode references
*/
struct umap_node {
LIST_ENTRY(umap_node) umap_hash; /* Hash list */
struct vnode *umap_lowervp; /* Aliased vnode - VREFed once */
struct vnode *umap_vnode; /* Back pointer to vnode/umap_node */
struct layer_node ln;
};
extern int umap_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp));
extern u_long umap_reverse_findid __P((u_long id, u_long map[][2], int nentries));
extern void umap_mapids __P((struct mount *v_mount, struct ucred *credp));
u_long umap_reverse_findid __P((u_long id, u_long map[][2], int nentries));
void umap_mapids __P((struct mount *v_mount, struct ucred *credp));
#define umap_hash ln.layer_hash
#define umap_lowervp ln.layer_lowervp
#define umap_vnode ln.layer_vnode
#define umap_flags ln.layer_flags
#define MOUNTTOUMAPMOUNT(mp) ((struct umap_mount *)((mp)->mnt_data))
#define VTOUMAP(vp) ((struct umap_node *)(vp)->v_data)
#define UMAPTOV(xp) ((xp)->umap_vnode)
#ifdef UMAPFS_DIAGNOSTIC
extern struct vnode *umap_checkvp __P((struct vnode *vp, char *fil, int lno));
#define UMAPVPTOLOWERVP(vp) umap_checkvp((vp), __FILE__, __LINE__)
#define UMAPVPTOLOWERVP(vp) layer_checkvp((vp), __FILE__, __LINE__)
#else
#define UMAPVPTOLOWERVP(vp) (VTOUMAP(vp)->umap_lowervp)
#endif
@ -90,6 +108,8 @@ extern struct vnode *umap_checkvp __P((struct vnode *vp, char *fil, int lno));
extern int (**umap_vnodeop_p) __P((void *));
extern struct vfsops umapfs_vfsops;
void umapfs_init __P((void));
int umap_bypass __P((void *));
#define NUMAPNODECACHE 16
#endif /* _KERNEL */
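Each row of mapdata/gmapdata is a two-element id pair. A hedged sketch of
building the mount arguments from userland -- the pair orientation (which
column is the id above the layer, which the id below) is an assumption
here, and the target path is hypothetical:

static u_long umap[2][2] = {
	{ 1000, 2000 },		/* assumed {id above, id below} */
	{ 1001, 2001 },
};
static u_long gmap[1][2] = {
	{ 100, 200 },
};

struct umap_args args;
args.umap_target = "/lower/dir";	/* hypothetical target path */
args.nentries = 2;
args.gnentries = 1;
args.mapdata = umap;
args.gmapdata = gmap;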

View File

@ -1,5 +1,37 @@
/* $NetBSD: umap_subr.c,v 1.14 1999/03/19 21:46:25 perseant Exp $ */
/* $NetBSD: umap_subr.c,v 1.15 1999/07/08 01:19:06 wrstuden Exp $ */
/*
* Copyright (c) 1999 National Aeronautics & Space Administration
* All rights reserved.
*
* This software was written by William Studenmund of the
* Numerical Aerospace Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the National Aeronautics & Space Administration
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
* UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
@ -51,45 +83,15 @@
#include <miscfs/specfs/specdev.h>
#include <miscfs/umapfs/umap.h>
#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */
#define NUMAPNODECACHE 16
/*
* Umap layer cache:
* Each cache entry holds a reference to the target vnode
* along with a pointer to the alias vnode. When an
* entry is added the target vnode is VREF'd. When the
* alias is removed the target vnode is vrele'd.
*/
#define UMAP_NHASH(vp) \
(&umap_node_hashtbl[(((u_long)vp)>>LOG2_SIZEVNODE) & umap_node_hash])
LIST_HEAD(umap_node_hashhead, umap_node) *umap_node_hashtbl;
u_long umap_node_hash;
static u_long umap_findid __P((u_long, u_long [][2], int));
static struct vnode *umap_node_find __P((struct mount *, struct vnode *));
static int umap_node_alloc __P((struct mount *, struct vnode *,
u_long umap_findid __P((u_long, u_long [][2], int));
int umap_node_alloc __P((struct mount *, struct vnode *,
struct vnode **));
/*
* Initialise cache headers
*/
void
umapfs_init()
{
#ifdef UMAPFS_DIAGNOSTIC
printf("umapfs_init\n"); /* printed during system boot */
#endif
umap_node_hashtbl = hashinit(NUMAPNODECACHE, M_CACHE, M_WAITOK, &umap_node_hash);
}
/*
* umap_findid is called by various routines in umap_vnodeops.c to
* find a user or group id in a map.
*/
static u_long
u_long
umap_findid(id, map, nentries)
u_long id;
u_long map[][2];
@ -133,269 +135,6 @@ umap_reverse_findid(id, map, nentries)
}
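umap_findid() and umap_reverse_findid() are the two directions over the
same table: a linear scan matching one column and returning the other. A
minimal sketch of the forward direction (the committed body is elided by
the hunk above; the column order is an assumption):

u_long
findid_sketch(id, map, nentries)
	u_long id;
	u_long map[][2];
	int nentries;
{
	int i;

	for (i = 0; i < nentries; i++)
		if (map[i][0] == id)	/* assumed: column 0 = source id */
			return (map[i][1]);
	return (-1);	/* caller substitutes NOBODY / NULLGROUP */
}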
/*
* Return alias for target vnode if already exists, else 0.
*/
static struct vnode *
umap_node_find(mp, targetvp)
struct mount *mp;
struct vnode *targetvp;
{
struct umap_node_hashhead *hd;
struct umap_node *a;
struct vnode *vp;
#ifdef UMAPFS_DIAGNOSTIC
printf("umap_node_find(mp = %p, target = %p)\n", mp, targetvp);
#endif
/*
* Find hash base, and then search the (two-way) linked
* list looking for a umap_node structure which is referencing
* the target vnode. If found, increment the umap_node
* reference count (but NOT the target vnode's VREF counter).
*/
hd = UMAP_NHASH(targetvp);
loop:
for (a = hd->lh_first; a != 0; a = a->umap_hash.le_next) {
if (a->umap_lowervp == targetvp &&
a->umap_vnode->v_mount == mp) {
vp = UMAPTOV(a);
/*
* We need vget for the VXLOCK
* stuff, but we don't want to lock
* the lower node.
*/
if (vget(vp, 0)) {
#ifdef UMAPFS_DIAGNOSTIC
printf ("umap_node_find: vget failed.\n");
#endif
goto loop;
}
return (vp);
}
}
#ifdef UMAPFS_DIAGNOSTIC
printf("umap_node_find(%p, %p): NOT found\n", mp, targetvp);
#endif
return (0);
}
/*
* Make a new umap_node node.
* Vp is the alias vnode, lowervp is the target vnode.
* Maintain a reference to lowervp.
*/
static int
umap_node_alloc(mp, lowervp, vpp)
struct mount *mp;
struct vnode *lowervp;
struct vnode **vpp;
{
struct umap_node_hashhead *hd;
struct umap_node *xp;
struct vnode *vp, *nvp;
struct proc *p = curproc; /* XXX */
int error;
extern int (**dead_vnodeop_p) __P((void *));
if ((error = getnewvnode(VT_UMAP, mp, umap_vnodeop_p, &vp)) != 0)
return (error);
vp->v_type = lowervp->v_type;
MALLOC(xp, struct umap_node *, sizeof(struct umap_node), M_TEMP,
M_WAITOK);
if (vp->v_type == VBLK || vp->v_type == VCHR) {
MALLOC(vp->v_specinfo, struct specinfo *,
sizeof(struct specinfo), M_VNODE, M_WAITOK);
vp->v_rdev = lowervp->v_rdev;
}
vp->v_data = xp;
xp->umap_vnode = vp;
xp->umap_lowervp = lowervp;
/*
* Before we insert our new node onto the hash chains,
* check to see if someone else has beaten us to it.
* (We could have slept in MALLOC.)
*/
if ((nvp = umap_node_find(mp, lowervp)) != NULL) {
*vpp = nvp;
/* free the substructures we've allocated. */
FREE(xp, M_TEMP);
if (vp->v_type == VBLK || vp->v_type == VCHR)
FREE(vp->v_specinfo, M_VNODE);
vp->v_type = VBAD; /* node is discarded */
vp->v_op = dead_vnodeop_p; /* so ops will still work */
vrele(vp); /* get rid of it. */
return (0);
}
/*
* XXX if it's a device node, it needs to be checkalias()ed.
* however, for locking reasons, that's just not possible.
* so we have to do most of the dirty work inline. Note that
* this is a limited case; we know that there's going to be
* an alias, and we know that that alias will be a "real"
* device node, i.e. not tagged VT_NON.
*/
if (vp->v_type == VBLK || vp->v_type == VCHR) {
struct vnode *cvp, **cvpp;
cvpp = &speclisth[SPECHASH(vp->v_rdev)];
loop:
simple_lock(&spechash_slock);
for (cvp = *cvpp; cvp; cvp = cvp->v_specnext) {
if (vp->v_rdev != cvp->v_rdev ||
vp->v_type != cvp->v_type)
continue;
/*
* Alias, but not in use, so flush it out.
*/
simple_lock(&cvp->v_interlock);
if (cvp->v_usecount == 0) {
simple_unlock(&spechash_slock);
vgonel(cvp, p);
goto loop;
}
if (vget(cvp, LK_EXCLUSIVE | LK_INTERLOCK)) {
simple_unlock(&spechash_slock);
goto loop;
}
break;
}
vp->v_hashchain = cvpp;
vp->v_specnext = *cvpp;
vp->v_specflags = 0;
*cvpp = vp;
#ifdef DIAGNOSTIC
if (cvp == NULLVP)
panic("umap_node_alloc: no alias for device");
#endif
vp->v_flag |= VALIASED;
cvp->v_flag |= VALIASED;
simple_unlock(&spechash_slock);
vrele(cvp);
}
/* XXX end of transmogrified checkalias() */
*vpp = vp;
VREF(lowervp); /* Extra VREF will be vrele'd in umap_node_create */
hd = UMAP_NHASH(lowervp);
LIST_INSERT_HEAD(hd, xp, umap_hash);
return (0);
}
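/*
 * The second umap_node_find call above is the classic check-after-
 * sleep pattern: MALLOC and getnewvnode may sleep, so another thread
 * can alias the same lower vnode in the meantime.  In miniature
 * (hypothetical helpers, illustration only):
 *
 *	new = alloc_node();			// may sleep
 *	if ((old = cache_find(key)) != NULL) {
 *		discard(new);			// lost the race
 *		return (old);
 *	}
 *	cache_insert(key, new);			// won the race; publish
 */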
/*
 * Try to find an existing umap_node vnode referring to the target
 * vnode; otherwise make a new umap_node vnode which
* contains a reference to the target vnode.
*/
int
umap_node_create(mp, targetvp, newvpp)
struct mount *mp;
struct vnode *targetvp;
struct vnode **newvpp;
{
struct vnode *aliasvp;
if ((aliasvp = umap_node_find(mp, targetvp)) != NULL) {
/*
* Take another reference to the alias vnode
*/
#ifdef UMAPFS_DIAGNOSTIC
vprint("umap_node_create: exists", aliasvp);
#endif
/* VREF(aliasvp); */
} else {
int error;
/*
* Get new vnode.
*/
#ifdef UMAPFS_DIAGNOSTIC
printf("umap_node_create: create new alias vnode\n");
#endif
/*
* Make new vnode reference the umap_node.
*/
if ((error = umap_node_alloc(mp, targetvp, &aliasvp)) != 0)
return (error);
/*
* aliasvp is already VREF'd by getnewvnode()
*/
}
vrele(targetvp);
#ifdef UMAPFS_DIAGNOSTIC
vprint("umap_node_create: alias", aliasvp);
vprint("umap_node_create: target", targetvp);
#endif
*newvpp = aliasvp;
return (0);
}
#ifdef UMAPFS_DIAGNOSTIC
int umap_checkvp_barrier = 1;
struct vnode *
umap_checkvp(vp, fil, lno)
struct vnode *vp;
char *fil;
int lno;
{
struct umap_node *a = VTOUMAP(vp);
#if 0
/*
* Can't do this check because vop_reclaim runs
* with funny vop vector.
*/
if (vp->v_op != umap_vnodeop_p) {
printf("umap_checkvp: on non-umap-node\n");
while (umap_checkvp_barrier) /*WAIT*/ ;
panic("umap_checkvp");
}
#endif
if (a->umap_lowervp == NULL) {
/* Should never happen */
int i; u_long *p;
printf("vp = %p, ZERO ptr\n", vp);
for (p = (u_long *) a, i = 0; i < 8; i++)
printf(" %lx", p[i]);
printf("\n");
/* wait for debugger */
while (umap_checkvp_barrier) /*WAIT*/ ;
panic("umap_checkvp");
}
if (a->umap_lowervp->v_usecount < 1) {
int i; u_long *p;
printf("vp = %p, unref'ed lowervp\n", vp);
for (p = (u_long *) a, i = 0; i < 8; i++)
printf(" %lx", p[i]);
printf("\n");
/* wait for debugger */
while (umap_checkvp_barrier) /*WAIT*/ ;
panic ("umap with unref'ed lowervp");
}
#if 0
printf("umap %p/%d -> %p/%d [%s, %d]\n",
a->umap_vnode, a->umap_vnode->v_usecount,
a->umap_lowervp, a->umap_lowervp->v_usecount,
fil, lno);
#endif
return (a->umap_lowervp);
}
#endif
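/*
 * umap_checkvp is the diagnostic form of the upper-to-lower
 * dereference: with UMAPFS_DIAGNOSTIC defined, UMAPVPTOLOWERVP(vp)
 * is presumably routed through it, passing file and line so the
 * report can say who asked.  The spin on umap_checkvp_barrier is
 * deliberate: it parks the thread so a debugger can attach and
 * inspect the node before the panic destroys the evidence.
 */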
/* umap_mapids maps all of the ids in a credential, both user and group. */
void


@@ -1,4 +1,4 @@
/* $NetBSD: umap_vfsops.c,v 1.22 1999/03/19 21:46:26 perseant Exp $ */
/* $NetBSD: umap_vfsops.c,v 1.23 1999/07/08 01:19:07 wrstuden Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -54,23 +54,11 @@
#include <sys/namei.h>
#include <sys/malloc.h>
#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/layer_extern.h>
int umapfs_mount __P((struct mount *, const char *, void *,
struct nameidata *, struct proc *));
int umapfs_start __P((struct mount *, int, struct proc *));
int umapfs_unmount __P((struct mount *, int, struct proc *));
int umapfs_root __P((struct mount *, struct vnode **));
int umapfs_quotactl __P((struct mount *, int, uid_t, caddr_t,
struct proc *));
int umapfs_statfs __P((struct mount *, struct statfs *, struct proc *));
int umapfs_sync __P((struct mount *, int, struct ucred *, struct proc *));
int umapfs_vget __P((struct mount *, ino_t, struct vnode **));
int umapfs_fhtovp __P((struct mount *, struct fid *, struct vnode **));
int umapfs_checkexp __P((struct mount *, struct mbuf *, int *,
struct ucred **));
int umapfs_vptofh __P((struct vnode *, struct fid *));
int umapfs_sysctl __P((int *, u_int, void *, size_t *, void *, size_t,
struct proc *));
/*
* Mount umap layer
@@ -85,7 +73,6 @@ umapfs_mount(mp, path, data, ndp, p)
{
struct umap_args args;
struct vnode *lowerrootvp, *vp;
struct vnode *umapm_rootvp;
struct umap_mount *amp;
size_t size;
int error;
@@ -101,14 +88,6 @@ umapfs_mount(mp, path, data, ndp, p)
printf("umapfs_mount(mp = %p)\n", mp);
#endif
/*
* Update is a no-op
*/
if (mp->mnt_flag & MNT_UPDATE) {
return (EOPNOTSUPP);
/* return (VFS_MOUNT(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, path, data, ndp, p));*/
}
/*
* Get argument
*/
@@ -116,11 +95,23 @@ umapfs_mount(mp, path, data, ndp, p)
if (error)
return (error);
/*
* Update only does export updating.
*/
if (mp->mnt_flag & MNT_UPDATE) {
amp = MOUNTTOUMAPMOUNT(mp);
if (args.umap_target == 0)
return (vfs_export(mp, &amp->umapm_export,
&args.umap_export));
else
return (EOPNOTSUPP);
}
/*
* Find lower node
*/
NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT|LOCKLEAF,
UIO_USERSPACE, args.target, p);
UIO_USERSPACE, args.umap_target, p);
if ((error = namei(ndp)) != 0)
return (error);
@@ -145,11 +136,12 @@ umapfs_mount(mp, path, data, ndp, p)
amp = (struct umap_mount *) malloc(sizeof(struct umap_mount),
M_UFSMNT, M_WAITOK); /* XXX */
memset((caddr_t)amp, 0, sizeof(struct umap_mount));
/*
* Save reference to underlying FS
*/
mp->mnt_data = (qaddr_t) amp;
amp->umapm_vfs = lowerrootvp->v_mount;
if (amp->umapm_vfs->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;
/*
* Now copy in the number of entries and maps for umap mapping.
@@ -158,8 +150,10 @@ umapfs_mount(mp, path, data, ndp, p)
amp->info_gnentries = args.gnentries;
error = copyin(args.mapdata, (caddr_t)amp->info_mapdata,
2*sizeof(u_long)*args.nentries);
if (error)
if (error) {
vput(lowerrootvp);
return (error);
}
#ifdef UMAPFS_DIAGNOSTIC
printf("umap_mount:nentries %d\n",args.nentries);
@@ -170,8 +164,10 @@ umapfs_mount(mp, path, data, ndp, p)
error = copyin(args.gmapdata, (caddr_t)amp->info_gmapdata,
2*sizeof(u_long)*args.gnentries);
if (error)
if (error) {
vput(lowerrootvp);
return (error);
}
#ifdef UMAPFS_DIAGNOSTIC
printf("umap_mount:gnentries %d\n",args.gnentries);
@@ -181,41 +177,49 @@ umapfs_mount(mp, path, data, ndp, p)
amp->info_gmapdata[i][1]);
#endif
/*
* Make sure the mount point's sufficiently initialized
* that the node create call will work.
*/
vfs_getnewfsid(mp, MOUNT_UMAP);
amp->umapm_size = sizeof(struct umap_node);
amp->umapm_tag = VT_UMAP;
amp->umapm_bypass = umap_bypass;
amp->umapm_alloc = layer_node_alloc; /* the default alloc is fine */
amp->umapm_vnodeop_p = umap_vnodeop_p;
simple_lock_init(&amp->umapm_hashlock);
amp->umapm_node_hashtbl = hashinit(NUMAPNODECACHE, M_CACHE, M_WAITOK,
&amp->umapm_node_hash);
/*
* Save reference. Each mount also holds
* a reference on the root vnode.
 * Fix up the umap node for the root vnode.
*/
error = umap_node_create(mp, lowerrootvp, &vp);
/*
* Unlock the node (either the lower or the alias)
*/
VOP_UNLOCK(vp, 0);
error = layer_node_create(mp, lowerrootvp, &vp);
/*
* Make sure the node alias worked
*/
if (error) {
vrele(lowerrootvp);
vput(lowerrootvp);
free(amp, M_UFSMNT); /* XXX */
return (error);
}
/*
* Unlock the node (either the lower or the alias)
*/
VOP_UNLOCK(vp, 0);
/*
* Keep a held reference to the root vnode.
* It is vrele'd in umapfs_unmount.
*/
umapm_rootvp = vp;
umapm_rootvp->v_flag |= VROOT;
amp->umapm_rootvp = umapm_rootvp;
if (UMAPVPTOLOWERVP(umapm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_data = (qaddr_t) amp;
vfs_getnewfsid(mp, MOUNT_UMAP);
vp->v_flag |= VROOT;
amp->umapm_rootvp = vp;
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
memset(mp->mnt_stat.f_mntonname + size, 0, MNAMELEN - size);
(void) copyinstr(args.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
&size);
(void) copyinstr(args.umap_target, mp->mnt_stat.f_mntfromname,
MNAMELEN - 1, &size);
memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
#ifdef UMAPFS_DIAGNOSTIC
printf("umapfs_mount: lower %s, alias at %s\n",
@@ -224,22 +228,6 @@ umapfs_mount(mp, path, data, ndp, p)
return (0);
}
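/*
 * For reference, a sketch of the mount-argument shape the routine
 * above consumes.  The field list is inferred from the references in
 * this diff (args.umap_target, args.umap_export, args.nentries, and
 * so on); miscfs/umapfs/umap.h is authoritative, and the target and
 * export members are expected to come from the argument prefix all
 * layered fs's now share:
 *
 *	struct umap_args_sketch {
 *		char	*umap_target;		// path of the lower fs
 *		struct export_args umap_export;	// NFS export information
 *		int	nentries;		// number of uid map entries
 *		u_long	(*mapdata)[2];		// uid mapping pairs
 *		int	gnentries;		// number of gid map entries
 *		u_long	(*gmapdata)[2];		// gid mapping pairs
 *	};
 */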
/*
* VFS start. Nothing needed here - the start routine
* on the underlying filesystem will have been called
* when that filesystem was mounted.
*/
int
umapfs_start(mp, flags, p)
struct mount *mp;
int flags;
struct proc *p;
{
return (0);
/* return (VFS_START(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, flags, p)); */
}
/*
* Free reference to umap layer
*/
@@ -249,7 +237,7 @@ umapfs_unmount(mp, mntflags, p)
int mntflags;
struct proc *p;
{
struct vnode *umapm_rootvp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp;
struct vnode *rootvp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp;
int error;
int flags = 0;
@@ -270,22 +258,22 @@ umapfs_unmount(mp, mntflags, p)
if (mntinvalbuf(mp, 1))
return (EBUSY);
#endif
if (umapm_rootvp->v_usecount > 1)
if (rootvp->v_usecount > 1)
return (EBUSY);
if ((error = vflush(mp, umapm_rootvp, flags)) != 0)
if ((error = vflush(mp, rootvp, flags)) != 0)
return (error);
#ifdef UMAPFS_DIAGNOSTIC
vprint("alias root of lower", umapm_rootvp);
vprint("alias root of lower", rootvp);
#endif
/*
 * Release the mount's reference on the root vnode
*/
vrele(umapm_rootvp);
vrele(rootvp);
/*
* And blow it away for future re-use
*/
vgone(umapm_rootvp);
vgone(rootvp);
/*
* Finally, throw away the umap_mount structure
*/
@@ -294,148 +282,6 @@ umapfs_unmount(mp, mntflags, p)
return (0);
}
int
umapfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct vnode *vp;
#ifdef UMAPFS_DIAGNOSTIC
printf("umapfs_root(mp = %p, vp = %p->%p)\n", mp,
MOUNTTOUMAPMOUNT(mp)->umapm_rootvp,
UMAPVPTOLOWERVP(MOUNTTOUMAPMOUNT(mp)->umapm_rootvp));
#endif
/*
* Return locked reference to root.
*/
vp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp;
VREF(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
*vpp = vp;
return (0);
}
int
umapfs_quotactl(mp, cmd, uid, arg, p)
struct mount *mp;
int cmd;
uid_t uid;
caddr_t arg;
struct proc *p;
{
return (VFS_QUOTACTL(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, cmd, uid, arg, p));
}
int
umapfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
struct proc *p;
{
int error;
struct statfs mstat;
#ifdef UMAPFS_DIAGNOSTIC
printf("umapfs_statfs(mp = %p, vp = %p->%p)\n", mp,
MOUNTTOUMAPMOUNT(mp)->umapm_rootvp,
UMAPVPTOLOWERVP(MOUNTTOUMAPMOUNT(mp)->umapm_rootvp));
#endif
memset(&mstat, 0, sizeof(mstat));
error = VFS_STATFS(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, &mstat, p);
if (error)
return (error);
/* now copy across the "interesting" information and fake the rest */
sbp->f_type = mstat.f_type;
sbp->f_flags = mstat.f_flags;
sbp->f_bsize = mstat.f_bsize;
sbp->f_iosize = mstat.f_iosize;
sbp->f_blocks = mstat.f_blocks;
sbp->f_bfree = mstat.f_bfree;
sbp->f_bavail = mstat.f_bavail;
sbp->f_files = mstat.f_files;
sbp->f_ffree = mstat.f_ffree;
if (sbp != &mp->mnt_stat) {
memcpy(&sbp->f_fsid, &mp->mnt_stat.f_fsid, sizeof(sbp->f_fsid));
memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
}
strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
return (0);
}
int
umapfs_sync(mp, waitfor, cred, p)
struct mount *mp;
int waitfor;
struct ucred *cred;
struct proc *p;
{
/*
* XXX - Assumes no data cached at umap layer.
*/
return (0);
}
int
umapfs_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
struct vnode **vpp;
{
return (VFS_VGET(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, ino, vpp));
}
int
umapfs_fhtovp(mp, fidp, vpp)
struct mount *mp;
struct fid *fidp;
struct vnode **vpp;
{
return (EOPNOTSUPP);
}
int
umapfs_checkexp(mp, nam, exflagsp, credanonp)
struct mount *mp;
struct mbuf *nam;
int *exflagsp;
	struct ucred **credanonp;
{
return (EOPNOTSUPP);
}
int
umapfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
return (EOPNOTSUPP);
}
int
umapfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
int *name;
u_int namelen;
void *oldp;
size_t *oldlenp;
void *newp;
size_t newlen;
struct proc *p;
{
return (EOPNOTSUPP);
}
extern struct vnodeopv_desc umapfs_vnodeop_opv_desc;
struct vnodeopv_desc *umapfs_vnodeopv_descs[] = {
@@ -446,18 +292,18 @@ struct vnodeopv_desc *umapfs_vnodeopv_descs[] = {
struct vfsops umapfs_vfsops = {
MOUNT_UMAP,
umapfs_mount,
umapfs_start,
layerfs_start,
umapfs_unmount,
umapfs_root,
umapfs_quotactl,
umapfs_statfs,
umapfs_sync,
umapfs_vget,
umapfs_fhtovp,
umapfs_vptofh,
umapfs_init,
umapfs_sysctl,
layerfs_root,
layerfs_quotactl,
layerfs_statfs,
layerfs_sync,
layerfs_vget,
layerfs_fhtovp,
layerfs_vptofh,
layerfs_init,
layerfs_sysctl,
NULL, /* vfs_mountroot */
umapfs_checkexp,
layerfs_checkexp,
umapfs_vnodeopv_descs,
};


@@ -1,4 +1,4 @@
/* $NetBSD: umap_vnops.c,v 1.14 1999/05/17 20:29:05 wrstuden Exp $ */
/* $NetBSD: umap_vnops.c,v 1.15 1999/07/08 01:19:07 wrstuden Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -53,22 +53,13 @@
#include <sys/buf.h>
#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/layer_extern.h>
int umap_bug_bypass = 0; /* for debugging: enables bypass printf'ing */
int umap_bypass __P((void *));
int umap_lookup __P((void *));
int umap_getattr __P((void *));
int umap_inactive __P((void *));
int umap_reclaim __P((void *));
int umap_print __P((void *));
int umap_rename __P((void *));
int umap_strategy __P((void *));
int umap_bwrite __P((void *));
int umap_lock __P((void *));
int umap_unlock __P((void *));
int umap_open __P((void *));
int umap_fsync __P((void *));
/*
* Global vfs data structures
@@ -80,22 +71,26 @@ int umap_fsync __P((void *));
*/
int (**umap_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
{ &vop_default_desc, umap_bypass },
{ &vop_default_desc, umap_bypass },
{ &vop_getattr_desc, umap_getattr },
{ &vop_lock_desc, umap_lock },
{ &vop_unlock_desc, umap_unlock },
{ &vop_fsync_desc, umap_fsync },
{ &vop_inactive_desc, umap_inactive },
{ &vop_reclaim_desc, umap_reclaim },
{ &vop_print_desc, umap_print },
{ &vop_lookup_desc, umap_lookup },
{ &vop_getattr_desc, umap_getattr },
{ &vop_print_desc, umap_print },
{ &vop_rename_desc, umap_rename },
{ &vop_open_desc, umap_open }, /* mount option handling */
{ &vop_lock_desc, layer_lock },
{ &vop_unlock_desc, layer_unlock },
{ &vop_islocked_desc, layer_islocked },
{ &vop_fsync_desc, layer_fsync },
{ &vop_inactive_desc, layer_inactive },
{ &vop_reclaim_desc, layer_reclaim },
{ &vop_open_desc, layer_open },
{ &vop_setattr_desc, layer_setattr },
{ &vop_access_desc, layer_access },
{ &vop_rename_desc, umap_rename },
{ &vop_strategy_desc, umap_strategy },
{ &vop_bwrite_desc, umap_bwrite },
{ &vop_strategy_desc, layer_strategy },
{ &vop_bwrite_desc, layer_bwrite },
{ &vop_bmap_desc, layer_bmap },
{ (struct vnodeop_desc*) NULL, (int(*) __P((void *))) NULL }
};
@@ -103,8 +98,8 @@ struct vnodeopv_desc umapfs_vnodeop_opv_desc =
{ &umap_vnodeop_p, umap_vnodeop_entries };
/*
* This is the 10-Apr-92 bypass routine.
* See null_vnops.c:null_bypass for more details.
* This is the 08-June-1999 bypass routine.
* See layer_vnops.c:layer_bypass for more details.
*/
int
umap_bypass(v)
@@ -118,18 +113,15 @@ umap_bypass(v)
struct ucred *savecredp = 0, *savecompcredp = 0;
struct ucred *compcredp = 0;
struct vnode **this_vp_p;
int error;
struct vnode *old_vps[VDESC_MAX_VPS];
struct vnode *vp1 = 0;
int error, error1;
int (**our_vnodeop_p) __P((void *));
struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
struct vnode **vps_p[VDESC_MAX_VPS];
struct vnode ***vppp;
struct vnodeop_desc *descp = ap->a_desc;
int reles, i;
int reles, i, flags;
struct componentname **compnamepp = 0;
if (umap_bug_bypass)
printf("umap_bypass: %s\n", descp->vdesc_name);
#ifdef SAFETY
/*
* We require at least one vp.
@@ -138,6 +130,14 @@ umap_bypass(v)
descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
panic ("umap_bypass: no vp's in map.\n");
#endif
vps_p[0] = VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[0],
ap);
vp0 = *vps_p[0];
flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags;
our_vnodeop_p = vp0->v_op;
if (flags & LAYERFS_MBYPASSDEBUG)
printf("umap_bypass: %s\n", descp->vdesc_name);
/*
* Map the vnodes going in.
@@ -151,17 +151,14 @@ umap_bypass(v)
vps_p[i] = this_vp_p =
VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap);
if (i == 0) {
vp1 = *vps_p[0];
}
/*
* We're not guaranteed that any but the first vnode
* are of our type. Check for and don't map any
* that aren't. (Must map first vp or vclean fails.)
*/
if (i && ((*this_vp_p)==NULL || (*this_vp_p)->v_op != umap_vnodeop_p)) {
if (i && ((*this_vp_p)==NULL ||
(*this_vp_p)->v_op != our_vnodeop_p)) {
old_vps[i] = NULL;
} else {
old_vps[i] = *this_vp_p;
@@ -188,15 +185,15 @@ umap_bypass(v)
*credpp = crdup(savecredp);
credp = *credpp;
if (umap_bug_bypass && credp->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && credp->cr_uid != 0)
printf("umap_bypass: user was %d, group %d\n",
credp->cr_uid, credp->cr_gid);
/* Map all ids in the credential structure. */
umap_mapids(vp1->v_mount, credp);
umap_mapids(vp0->v_mount, credp);
if (umap_bug_bypass && credp->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && credp->cr_uid != 0)
printf("umap_bypass: user now %d, group %d\n",
credp->cr_uid, credp->cr_gid);
}
@@ -215,15 +212,15 @@ umap_bypass(v)
(*compnamepp)->cn_cred = crdup(savecompcredp);
compcredp = (*compnamepp)->cn_cred;
if (umap_bug_bypass && compcredp->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
printf("umap_bypass: component credit user was %d, group %d\n",
compcredp->cr_uid, compcredp->cr_gid);
/* Map all ids in the credential structure. */
umap_mapids(vp1->v_mount, compcredp);
umap_mapids(vp0->v_mount, compcredp);
if (umap_bug_bypass && compcredp->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
printf("umap_bypass: component credit user now %d, group %d\n",
compcredp->cr_uid, compcredp->cr_gid);
}
@@ -245,7 +242,9 @@ umap_bypass(v)
break; /* bail out at end of list */
if (old_vps[i]) {
*(vps_p[i]) = old_vps[i];
if (reles & 1)
if (reles & VDESC_VP0_WILLUNLOCK)
LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
if (reles & VDESC_VP0_WILLRELE)
vrele(*(vps_p[i]));
};
};
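/*
 * A note on the release bookkeeping above (a sketch of the
 * convention; sys/vnode_if.h is authoritative): vdesc_flags carries
 * one group of will-release bits per vnode argument, shifted down a
 * slot each time around the loop so the VP0 masks always apply to
 * the current argument.  VDESC_VP0_WILLRELE asks for a vrele() of
 * the saved upper vnode, VDESC_VP0_WILLUNLOCK for an unlock (done
 * with LAYERFS_UPPERUNLOCK so only the upper layer's lock is
 * touched), and an op that will vput() sets both.
 */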
@@ -262,7 +261,7 @@ umap_bypass(v)
goto out;
vppp = VOPARG_OFFSETTO(struct vnode***,
descp->vdesc_vpp_offset, ap);
error = umap_node_create(old_vps[0]->v_mount, **vppp, *vppp);
error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
};
out:
@@ -270,28 +269,32 @@ umap_bypass(v)
* Free duplicate cred structure and restore old one.
*/
if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
if (umap_bug_bypass && credp && credp->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && credp &&
credp->cr_uid != 0)
printf("umap_bypass: returning-user was %d\n",
credp->cr_uid);
if (savecredp != NOCRED) {
crfree(credp);
*credpp = savecredp;
if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && credpp &&
(*credpp)->cr_uid != 0)
printf("umap_bypass: returning-user now %d\n\n",
savecredp->cr_uid);
}
}
if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
if (umap_bug_bypass && compcredp && compcredp->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
compcredp->cr_uid != 0)
printf("umap_bypass: returning-component-user was %d\n",
compcredp->cr_uid);
if (savecompcredp != NOCRED) {
crfree(compcredp);
(*compnamepp)->cn_cred = savecompcredp;
if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
savecompcredp->cr_uid != 0)
printf("umap_bypass: returning-component-user now %d\n",
savecompcredp->cr_uid);
}
@@ -301,69 +304,101 @@ umap_bypass(v)
}
/*
* We need to process our own vnode lock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
* This is based on the 08-June-1999 bypass routine.
* See layer_vnops.c:layer_bypass for more details.
*/
int
umap_lock(v)
umap_lookup(v)
void *v;
{
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
struct vop_lookup_args /* {
struct vnodeop_desc *a_desc;
struct vnode * a_dvp;
struct vnode ** a_vpp;
struct componentname * a_cnp;
} */ *ap = v;
struct componentname *cnp = ap->a_cnp;
struct ucred *savecompcredp = NULL;
struct ucred *compcredp = NULL;
struct vnode *dvp, *vp, *ldvp;
struct mount *mp;
int error;
int i, flags, cnf = cnp->cn_flags;
genfs_nolock(ap);
if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
return (0);
ap->a_flags &= ~LK_INTERLOCK;
return (umap_bypass(ap));
}
dvp = ap->a_dvp;
mp = dvp->v_mount;
/*
* We need to process our own vnode unlock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
int
umap_unlock(v)
void *v;
{
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
if ((cnf & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
return (EROFS);
genfs_nounlock(ap);
ap->a_flags &= ~LK_INTERLOCK;
return (umap_bypass(ap));
}
flags = MOUNTTOUMAPMOUNT(mp)->umapm_flags;
ldvp = UMAPVPTOLOWERVP(dvp);
/*
* If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
* syncing the underlying vnodes, since (a) they'll be fsync'ed when
* reclaimed and (b) we could deadlock if they're locked; otherwise,
* pass it through to the underlying layer.
*/
if (flags & LAYERFS_MBYPASSDEBUG)
printf("umap_lookup\n");
int
umap_fsync(v)
void *v;
{
struct vop_fsync_args /* {
struct vnode *a_vp;
struct ucred *a_cred;
int a_flags;
struct proc *a_p;
} */ *ap = v;
/*
* Fix the credentials. (That's the purpose of this layer.)
*
* BSD often keeps a credential in the componentname structure
 * for speed. If there is one, it had better get mapped, too.
*/
if (ap->a_flags & FSYNC_RECLAIM)
return 0;
if ((savecompcredp = cnp->cn_cred)) {
compcredp = crdup(savecompcredp);
cnp->cn_cred = compcredp;
return (umap_bypass(ap));
if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
printf("umap_lookup: component credit user was %d, group %d\n",
compcredp->cr_uid, compcredp->cr_gid);
/* Map all ids in the credential structure. */
umap_mapids(mp, compcredp);
}
	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    compcredp->cr_uid != 0)
printf("umap_lookup: component credit user now %d, group %d\n",
compcredp->cr_uid, compcredp->cr_gid);
ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;	/* lower vnode returned by the lookup */
if (error == EJUSTRETURN && (cnf & ISLASTCN) &&
(dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
error = EROFS;
/* Do locking fixup as appropriate. See layer_lookup() for info */
if ((cnp->cn_flags & PDIRUNLOCK)) {
LAYERFS_UPPERUNLOCK(dvp, 0, i);
}
if (ldvp == vp) {
*ap->a_vpp = dvp;
VREF(dvp);
vrele(vp);
} else if (vp != NULL) {
error = layer_node_create(mp, vp, ap->a_vpp);
}
/*
* Free duplicate cred structure and restore old one.
*/
if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
compcredp->cr_uid != 0)
printf("umap_lookup: returning-component-user was %d\n",
compcredp->cr_uid);
if (savecompcredp != NOCRED) {
crfree(compcredp);
cnp->cn_cred = savecompcredp;
if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
savecompcredp->cr_uid != 0)
printf("umap_lookup: returning-component-user now %d\n",
savecompcredp->cr_uid);
}
return (error);
}
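/*
 * The credential handling in umap_lookup, umap_bypass, and
 * umap_rename is one dup-map-restore pattern.  A minimal sketch
 * (the helper name is hypothetical; error paths and the debug
 * printfs are left out):
 */
static int
umap_call_mapped_sketch(mp, cnp, callit, ap)
	struct mount *mp;
	struct componentname *cnp;
	int (*callit) __P((void *));
	void *ap;
{
	struct ucred *savecredp = cnp->cn_cred;
	int error;

	if (savecredp == NULL)			/* nothing to map */
		return ((*callit)(ap));
	cnp->cn_cred = crdup(savecredp);	/* private, writable copy */
	umap_mapids(mp, cnp->cn_cred);		/* rewrite the uids/gids */
	error = (*callit)(ap);			/* run the op with mapped ids */
	crfree(cnp->cn_cred);			/* drop our copy... */
	cnp->cn_cred = savecredp;		/* ...and restore the caller's */
	return (error);
}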
/*
@@ -381,7 +416,7 @@ umap_getattr(v)
} */ *ap = v;
uid_t uid;
gid_t gid;
int error, tmpid, nentries, gnentries;
int error, tmpid, nentries, gnentries, flags;
u_long (*mapdata)[2];
u_long (*gmapdata)[2];
struct vnode **vp1p;
@@ -392,6 +427,7 @@ umap_getattr(v)
/* Requires that arguments be restored. */
ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
flags = MOUNTTOUMAPMOUNT(ap->a_vp->v_mount)->umapm_flags;
/*
* Umap needs to map the uid and gid returned by a stat
* into the proper values for this site. This involves
@@ -407,7 +443,7 @@ umap_getattr(v)
uid = ap->a_vap->va_uid;
gid = ap->a_vap->va_gid;
if (umap_bug_bypass)
if ((flags & LAYERFS_MBYPASSDEBUG))
printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
gid);
@@ -424,7 +460,7 @@ umap_getattr(v)
if (tmpid != -1) {
ap->a_vap->va_uid = (uid_t) tmpid;
if (umap_bug_bypass)
if ((flags & LAYERFS_MBYPASSDEBUG))
printf("umap_getattr: original uid = %d\n", uid);
} else
ap->a_vap->va_uid = (uid_t) NOBODY;
@@ -435,7 +471,7 @@ umap_getattr(v)
if (tmpid != -1) {
ap->a_vap->va_gid = (gid_t) tmpid;
if (umap_bug_bypass)
if ((flags & LAYERFS_MBYPASSDEBUG))
printf("umap_getattr: original gid = %d\n", gid);
} else
ap->a_vap->va_gid = (gid_t) NULLGROUP;
@@ -443,108 +479,6 @@ umap_getattr(v)
return (0);
}
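/*
 * The reverse mapping used above is the mirror image of umap_findid:
 * search the mapped-value column and recover the original id.  Again
 * only a sketch (the committed umap_reverse_findid body is elided
 * earlier in this diff; the _sketch name is hypothetical):
 */
u_long
umap_reverse_findid_sketch(id, map, nentries)
	u_long id;
	u_long map[][2];
	int nentries;
{
	int i;

	for (i = 0; i < nentries; i++)
		if (map[i][1] == id)
			return (map[i][0]);
	return (-1);
}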
/*
* We must handle open to be able to catch MNT_NODEV and friends.
*/
int
umap_open(v)
void *v;
{
struct vop_open_args *ap = v;
struct vnode *vp = ap->a_vp;
enum vtype lower_type = UMAPVPTOLOWERVP(vp)->v_type;
if (((lower_type == VBLK) || (lower_type == VCHR)) &&
(vp->v_mount->mnt_flag & MNT_NODEV))
return ENXIO;
return umap_bypass(ap);
}
/*ARGSUSED*/
int
umap_inactive(v)
void *v;
{
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap = v;
/*
* Do nothing (and _don't_ bypass).
* Wait to vrele lowervp until reclaim,
* so that until then our umap_node is in the
* cache and reusable.
*
*/
VOP_UNLOCK(ap->a_vp, 0);
return (0);
}
int
umap_reclaim(v)
void *v;
{
struct vop_reclaim_args /* {
struct vnode *a_vp;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct umap_node *xp = VTOUMAP(vp);
struct vnode *lowervp = xp->umap_lowervp;
/* After this assignment, this node will not be re-used. */
xp->umap_lowervp = NULL;
LIST_REMOVE(xp, umap_hash);
FREE(vp->v_data, M_TEMP);
vp->v_data = NULL;
vrele(lowervp);
return (0);
}
int
umap_strategy(v)
void *v;
{
struct vop_strategy_args /* {
struct buf *a_bp;
} */ *ap = v;
struct buf *bp = ap->a_bp;
int error;
struct vnode *savedvp;
savedvp = bp->b_vp;
bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp);
error = VOP_STRATEGY(ap->a_bp);
bp->b_vp = savedvp;
return (error);
}
int
umap_bwrite(v)
void *v;
{
struct vop_bwrite_args /* {
struct buf *a_bp;
} */ *ap = v;
struct buf *bp = ap->a_bp;
int error;
struct vnode *savedvp;
savedvp = bp->b_vp;
bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp);
error = VOP_BWRITE(ap->a_bp);
bp->b_vp = savedvp;
return (error);
}
int
umap_print(v)
void *v;
@@ -570,7 +504,7 @@ umap_rename(v)
struct vnode *a_tvp;
struct componentname *a_tcnp;
} */ *ap = v;
int error;
int error, flags;
struct componentname *compnamep;
struct ucred *compcredp, *savecompcredp;
struct vnode *vp;
@@ -582,13 +516,14 @@ umap_rename(v)
*/
vp = ap->a_fdvp;
flags = MOUNTTOUMAPMOUNT(vp->v_mount)->umapm_flags;
compnamep = ap->a_tcnp;
compcredp = compnamep->cn_cred;
savecompcredp = compcredp;
compcredp = compnamep->cn_cred = crdup(savecompcredp);
if (umap_bug_bypass && compcredp->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
printf("umap_rename: rename component credit user was %d, group %d\n",
compcredp->cr_uid, compcredp->cr_gid);
@@ -596,7 +531,7 @@ umap_rename(v)
umap_mapids(vp->v_mount, compcredp);
if (umap_bug_bypass && compcredp->cr_uid != 0)
if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
printf("umap_rename: rename component credit user now %d, group %d\n",
compcredp->cr_uid, compcredp->cr_gid);