Replace flag-based union node locking with the generic vnode lock, support
shared and nowait locks, and protect un_uppervp and un_*sz with a mutex. Mark the file system MPSAFE.
parent dd89f66f56
commit 4e825063cf
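In outline: a union vnode no longer carries its own UN_LOCKED/UN_ULOCK/UN_KLOCK lock state. VOP_LOCK is forwarded to the upper vnode when one exists (LOCKVP) and otherwise handled by the generic genfs lock, while the new un_lock mutex guards un_uppervp, un_cflags and the cached sizes. The fragment below is a simplified restatement of the retry loop in the new union_lock() from the union_vnops.c hunk further down, written as a standalone sketch for orientation; it is not the committed code, and error and flag handling are abridged.

/*
 * Simplified sketch of the new lock-forwarding loop (see union_lock()
 * in the union_vnops.c diff below); abridged, not the committed code.
 */
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <fs/union/union.h>
#include <miscfs/genfs/genfs.h>

static int
union_lock_sketch(struct vnode *unionvp, int flags)
{
	struct union_node *un = VTOUNION(unionvp);
	struct vop_lock_args lk;
	struct vop_unlock_args unlk;
	struct vnode *vp;
	int error;

	mutex_enter(&un->un_lock);
	for (;;) {
		/* Lock target: the upper vnode if present, else the union vnode. */
		vp = LOCKVP(unionvp);
		mutex_exit(&un->un_lock);
		if (vp == unionvp) {
			lk.a_vp = vp;
			lk.a_flags = flags;
			error = genfs_lock(&lk);	/* generic vnode lock */
		} else
			error = VOP_LOCK(vp, flags);	/* forward to the upper layer */
		if (error != 0)
			return error;
		/* A copy-up may have installed an upper vnode meanwhile; re-check. */
		mutex_enter(&un->un_lock);
		if (vp == LOCKVP(unionvp))
			break;
		if (vp == unionvp) {
			unlk.a_vp = vp;
			genfs_unlock(&unlk);
		} else
			VOP_UNLOCK(vp);
	}
	mutex_exit(&un->un_lock);
	return 0;
}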
union.h: revision 1.21 -> 1.22

@@ -1,4 +1,4 @@
/* $NetBSD: union.h,v 1.21 2011/08/23 07:39:37 hannken Exp $ */
/* $NetBSD: union.h,v 1.22 2011/11/21 18:29:22 hannken Exp $ */
/*
* Copyright (c) 1994 The Regents of the University of California.

@@ -105,28 +105,36 @@ struct union_mount {
#define UN_FILEMODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)
/*
* A cache of vnode references
* A cache of vnode references.
* Lock requirements are:
*
* : stable
* c unheadlock[hash]
* l un_lock
* m un_lock or vnode lock to read, un_lock and
*   exclusive vnode lock to write
* v vnode lock to read, exclusive vnode lock to write
*
* Lock order is vnode then un_lock.
*/
struct union_node {
LIST_ENTRY(union_node) un_cache; /* Hash chain */
struct vnode *un_vnode; /* Back pointer */
struct vnode *un_uppervp; /* overlaying object */
struct vnode *un_lowervp; /* underlying object */
struct vnode *un_dirvp; /* Parent dir of uppervp */
struct vnode *un_pvp; /* Parent vnode */
char *un_path; /* saved component name */
int un_hash; /* saved un_path hash value */
int un_openl; /* # of opens on lowervp */
unsigned int un_flags;
struct vnode **un_dircache; /* cached union stack */
off_t un_uppersz; /* size of upper object */
off_t un_lowersz; /* size of lower object */
lwp_t *un_lwp; /* DIAGNOSTIC only */
kmutex_t un_lock;
LIST_ENTRY(union_node) un_cache; /* c: Hash chain */
struct vnode *un_vnode; /* :: Back pointer */
struct vnode *un_uppervp; /* m: overlaying object */
struct vnode *un_lowervp; /* v: underlying object */
struct vnode *un_dirvp; /* v: Parent dir of uppervp */
struct vnode *un_pvp; /* v: Parent vnode */
char *un_path; /* v: saved component name */
int un_hash; /* v: saved un_path hash */
int un_openl; /* v: # of opens on lowervp */
unsigned int un_flags; /* v: node flags */
unsigned int un_cflags; /* c: cache flags */
struct vnode **un_dircache; /* v: cached union stack */
off_t un_uppersz; /* l: size of upper object */
off_t un_lowersz; /* l: size of lower object */
};
#define UN_WANTED 0x01
#define UN_LOCKED 0x02
#define UN_ULOCK 0x04 /* Upper node is locked */
#define UN_KLOCK 0x08 /* Keep upper node locked on vput */
#define UN_CACHED 0x10 /* In union cache */

@@ -162,6 +170,7 @@ int union_readdirhook(struct vnode **, struct file *, struct lwp *);
#define LOWERVP(vp) (VTOUNION(vp)->un_lowervp)
#define UPPERVP(vp) (VTOUNION(vp)->un_uppervp)
#define OTHERVP(vp) (UPPERVP(vp) ? UPPERVP(vp) : LOWERVP(vp))
#define LOCKVP(vp) (UPPERVP(vp) ? UPPERVP(vp) : (vp))
extern int (**union_vnodeop_p)(void *);
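The one-letter tags added to struct union_node above are a locking key: ':' fields are stable after creation, 'c' fields are guarded by the hash-chain lock, 'l' fields by the new un_lock mutex, 'm' fields may be read under either un_lock or the vnode lock but need both (with the vnode lock exclusive) to change, and 'v' fields follow the vnode lock. As a small, purely illustrative example of the 'm' rule (the helper name is hypothetical, not part of the commit):

/*
 * Hypothetical reader illustrating the "m" annotation on un_uppervp:
 * holding either un_lock or the vnode lock is enough to read it, so a
 * caller holding neither just takes the mutex briefly.
 */
static struct vnode *
union_peek_upper_sketch(struct vnode *unionvp)
{
	struct union_node *un = VTOUNION(unionvp);
	struct vnode *uvp;

	mutex_enter(&un->un_lock);
	uvp = un->un_uppervp;		/* stable only while un_lock is held */
	mutex_exit(&un->un_lock);
	return uvp;
}

Writers, by contrast, hold both un_lock and the exclusive vnode lock, as union_updatevp() does in the union_subr.c diff below.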
union_subr.c: revision 1.52 -> 1.53

@@ -1,4 +1,4 @@
/* $NetBSD: union_subr.c,v 1.52 2011/11/14 18:38:13 hannken Exp $ */
/* $NetBSD: union_subr.c,v 1.53 2011/11/21 18:29:22 hannken Exp $ */
/*
* Copyright (c) 1994

@@ -72,7 +72,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: union_subr.c,v 1.52 2011/11/14 18:38:13 hannken Exp $");
__KERNEL_RCSID(0, "$NetBSD: union_subr.c,v 1.53 2011/11/21 18:29:22 hannken Exp $");
#include <sys/param.h>
#include <sys/systm.h>

@@ -93,6 +93,7 @@ __KERNEL_RCSID(0, "$NetBSD: union_subr.c,v 1.52 2011/11/14 18:38:13 hannken Exp
#include <uvm/uvm_extern.h>
#include <fs/union/union.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>
/* must be power of two, otherwise change UNION_HASH() */

@@ -145,7 +146,9 @@ union_updatevp(struct union_node *un, struct vnode *uppervp,
int nhash = UNION_HASH(uppervp, lowervp);
int docache = (lowervp != NULLVP || uppervp != NULLVP);
int lhash, uhash;
bool un_unlock;
KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
/*
* Ensure locking is ordered from lower to higher
* to avoid deadlocks.

@@ -164,8 +167,8 @@ union_updatevp(struct union_node *un, struct vnode *uppervp,
mutex_enter(&unheadlock[uhash]);
if (ohash != nhash || !docache) {
if (un->un_flags & UN_CACHED) {
un->un_flags &= ~UN_CACHED;
if (un->un_cflags & UN_CACHED) {
un->un_cflags &= ~UN_CACHED;
LIST_REMOVE(un, un_cache);
}
}

@@ -186,15 +189,30 @@ union_updatevp(struct union_node *un, struct vnode *uppervp,
}
}
un->un_lowervp = lowervp;
mutex_enter(&un->un_lock);
un->un_lowersz = VNOVAL;
mutex_exit(&un->un_lock);
}
if (un->un_uppervp != uppervp) {
if (un->un_uppervp)
if (un->un_uppervp) {
un_unlock = false;
vrele(un->un_uppervp);
} else
un_unlock = true;
mutex_enter(&un->un_lock);
un->un_uppervp = uppervp;
mutex_exit(&un->un_lock);
if (un_unlock) {
struct vop_unlock_args ap;
ap.a_vp = UNIONTOV(un);
genfs_unlock(&ap);
}
mutex_enter(&un->un_lock);
un->un_uppersz = VNOVAL;
mutex_exit(&un->un_lock);
/* Update union vnode interlock. */
if (uppervp != NULL) {
mutex_obj_hold(uppervp->v_interlock);

@@ -205,7 +223,7 @@ union_updatevp(struct union_node *un, struct vnode *uppervp,
if (docache && (ohash != nhash)) {
LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
un->un_flags |= UN_CACHED;
un->un_cflags |= UN_CACHED;
}
mutex_exit(&unheadlock[nhash]);

@@ -229,20 +247,23 @@ union_newupper(struct union_node *un, struct vnode *uppervp)
* Keep track of size changes in the underlying vnodes.
* If the size changes, then callback to the vm layer
* giving priority to the upper layer size.
*
* Mutex un_lock hold on entry and released on return.
*/
void
union_newsize(struct vnode *vp, off_t uppersz, off_t lowersz)
{
struct union_node *un;
struct union_node *un = VTOUNION(vp);
off_t sz;
KASSERT(mutex_owned(&un->un_lock));
/* only interested in regular files */
if (vp->v_type != VREG) {
mutex_exit(&un->un_lock);
uvm_vnp_setsize(vp, 0);
return;
}
un = VTOUNION(vp);
sz = VNOVAL;
if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {

@@ -256,6 +277,7 @@ union_newsize(struct vnode *vp, off_t uppersz, off_t lowersz)
if (sz == VNOVAL)
sz = un->un_lowersz;
}
mutex_exit(&un->un_lock);
if (sz != VNOVAL) {
#ifdef UNION_DIAGNOSTIC

@@ -319,6 +341,9 @@ union_allocvp(
int vflag, iflag;
int try;
if (uppervp)
KASSERT(VOP_ISLOCKED(uppervp) == LK_EXCLUSIVE);
if (uppervp == NULLVP && lowervp == NULLVP)
panic("union: unidentifiable allocation");

@@ -374,61 +399,33 @@ loop:
(un->un_uppervp == uppervp ||
un->un_uppervp == NULLVP) &&
(UNIONTOV(un)->v_mount == mp)) {
int lflag;
if (uppervp != NULL && (uppervp == dvp ||
uppervp == un->un_uppervp))
/* "." or already locked. */
lflag = 0;
else
lflag = LK_EXCLUSIVE;
vp = UNIONTOV(un);
mutex_enter(vp->v_interlock);
if (vget(vp, 0)) {
mutex_exit(&unheadlock[hash]);
mutex_exit(&unheadlock[hash]);
if (vget(vp, lflag))
goto loop;
}
break;
}
}
mutex_exit(&unheadlock[hash]);
if (un)
break;
mutex_exit(&unheadlock[hash]);
}
if (un) {
/*
* Obtain a lock on the union_node.
* uppervp is locked, though un->un_uppervp
* may not be. this doesn't break the locking
* hierarchy since in the case that un->un_uppervp
* is not yet locked it will be vrele'd and replaced
* with uppervp.
*/
if ((dvp != NULLVP) && (uppervp == dvp)) {
/*
* Access ``.'', so (un) will already
* be locked. Since this process has
* the lock on (uppervp) no other
* process can hold the lock on (un).
*/
KASSERT((un->un_flags & UN_LOCKED) != 0);
KASSERT(curlwp == NULL || un->un_lwp == NULL ||
un->un_lwp == curlwp);
} else {
if (un->un_flags & UN_LOCKED) {
vrele(UNIONTOV(un));
un->un_flags |= UN_WANTED;
(void) tsleep(&un->un_flags, PINOD,
"unionalloc", 0);
goto loop;
}
un->un_flags |= UN_LOCKED;
un->un_lwp = curlwp;
}
/*
* At this point, the union_node is locked,
* un->un_uppervp may not be locked, and uppervp
* is locked or nil.
*/
KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
KASSERT(uppervp == NULL ||
VOP_ISLOCKED(uppervp) == LK_EXCLUSIVE);
/*
* Save information about the upper layer.
*/

@@ -438,10 +435,8 @@ loop:
vrele(uppervp);
}
if (un->un_uppervp) {
un->un_flags |= UN_ULOCK;
if (un->un_uppervp)
un->un_flags &= ~UN_KLOCK;
}
/*
* Save information about the lower layer.

@@ -536,6 +531,7 @@ loop:
spec_node_init(*vpp, rdev);
un = VTOUNION(*vpp);
mutex_init(&un->un_lock, MUTEX_DEFAULT, IPL_NONE);
un->un_vnode = *vpp;
un->un_uppervp = uppervp;
un->un_lowervp = lowervp;

@@ -544,15 +540,23 @@ loop:
vref(undvp);
un->un_dircache = 0;
un->un_openl = 0;
un->un_flags = UN_LOCKED;
un->un_flags = 0;
un->un_cflags = 0;
if (uppervp == NULL) {
struct vop_lock_args ap;
ap.a_vp = UNIONTOV(un);
ap.a_flags = LK_EXCLUSIVE;
error = genfs_lock(&ap);
KASSERT(error == 0);
}
mutex_enter(&un->un_lock);
un->un_uppersz = VNOVAL;
un->un_lowersz = VNOVAL;
union_newsize(*vpp, uppersz, lowersz);
if (un->un_uppervp)
un->un_flags |= UN_ULOCK;
un->un_lwp = curlwp;
if (dvp && cnp && (lowervp != NULLVP)) {
un->un_hash = cnp->cn_hash;
un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);

@@ -568,7 +572,7 @@ loop:
if (docache) {
LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
un->un_flags |= UN_CACHED;
un->un_cflags |= UN_CACHED;
}
if (xlowervp)

@@ -590,8 +594,8 @@ union_freevp(struct vnode *vp)
hash = UNION_HASH(un->un_uppervp, un->un_lowervp);
mutex_enter(&unheadlock[hash]);
if (un->un_flags & UN_CACHED) {
un->un_flags &= ~UN_CACHED;
if (un->un_cflags & UN_CACHED) {
un->un_cflags &= ~UN_CACHED;
LIST_REMOVE(un, un_cache);
}
mutex_exit(&unheadlock[hash]);

@@ -606,6 +610,7 @@ union_freevp(struct vnode *vp)
vrele(un->un_dirvp);
if (un->un_path)
free(un->un_path, M_TEMP);
mutex_destroy(&un->un_lock);
free(vp->v_data, M_TEMP);
vp->v_data = NULL;

@@ -691,9 +696,8 @@ union_copyup(struct union_node *un, int docopy, kauth_cred_t cred,
if (error)
return (error);
/* at this point, uppervp is locked */
KASSERT(VOP_ISLOCKED(uvp) == LK_EXCLUSIVE);
union_newupper(un, uvp);
un->un_flags |= UN_ULOCK;
lvp = un->un_lowervp;

@@ -954,9 +958,10 @@ union_vn_close(struct vnode *vp, int fmode, kauth_cred_t cred, struct lwp *l)
void
union_removed_upper(struct union_node *un)
{
struct vnode *vp = UNIONTOV(un);
int hash;
KASSERT((un->un_flags & UN_ULOCK) == 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#if 1
/*
* We do not set the uppervp to NULLVP here, because lowervp

@@ -973,10 +978,11 @@ union_removed_upper(struct union_node *un)
#endif
hash = UNION_HASH(un->un_uppervp, un->un_lowervp);
VOP_UNLOCK(vp);
mutex_enter(&unheadlock[hash]);
if (un->un_flags & UN_CACHED) {
un->un_flags &= ~UN_CACHED;
if (un->un_cflags & UN_CACHED) {
un->un_cflags &= ~UN_CACHED;
LIST_REMOVE(un, un_cache);
}
mutex_exit(&unheadlock[hash]);

@@ -1097,6 +1103,7 @@ union_diruncache(struct union_node *un)
{
struct vnode **vpp;
KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
if (un->un_dircache != 0) {
for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
vrele(*vpp);

@@ -1212,7 +1219,9 @@ union_readdirhook(struct vnode **vpp, struct file *fp, struct lwp *l)
* If the directory is opaque,
* then don't show lower entries
*/
vn_lock(vp, LK_SHARED | LK_RETRY);
error = VOP_GETATTR(vp, &va, fp->f_cred);
VOP_UNLOCK(vp);
if (error || (va.va_flags & OPAQUE))
return error;
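One consequence of the hunks above is a new calling convention for union_newsize(): the comment now states that un_lock is held on entry and released before the function returns, and the KASSERT enforces it. Call sites therefore look like the following sketch (the wrapper name is hypothetical; the real callers are union_getattr(), union_read() and union_write() in the union_vnops.c diff below):

/*
 * Hypothetical wrapper showing the new union_newsize() convention:
 * the caller enters un_lock, and union_newsize() drops it internally
 * before telling UVM about the new length.
 */
static void
union_note_upper_size_sketch(struct vnode *unionvp, off_t uppersz)
{
	struct union_node *un = VTOUNION(unionvp);

	mutex_enter(&un->un_lock);			/* held on entry... */
	union_newsize(unionvp, uppersz, VNOVAL);	/* ...released in here */
}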
union_vfsops.c: revision 1.64 -> 1.65

@@ -1,4 +1,4 @@
/* $NetBSD: union_vfsops.c,v 1.64 2011/08/28 08:27:57 hannken Exp $ */
/* $NetBSD: union_vfsops.c,v 1.65 2011/11/21 18:29:22 hannken Exp $ */
/*
* Copyright (c) 1994 The Regents of the University of California.

@@ -77,7 +77,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: union_vfsops.c,v 1.64 2011/08/28 08:27:57 hannken Exp $");
__KERNEL_RCSID(0, "$NetBSD: union_vfsops.c,v 1.65 2011/11/21 18:29:22 hannken Exp $");
#include <sys/param.h>
#include <sys/systm.h>

@@ -208,6 +208,8 @@ union_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
goto bad;
}
mp->mnt_iflag |= IMNT_MPSAFE;
/*
* Unless the mount is readonly, ensure that the top layer
* supports whiteout operations
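The only functional change to union_vfsops.c is the IMNT_MPSAFE flag, which tells the VFS layer that this file system's vnode and VFS operations may run without the kernel lock. A minimal sketch of the effect (simplified; the real code sets the flag inside union_mount() after the mount arguments have been validated):

/*
 * Sketch only: marking a mount MP-safe so the VFS layer stops taking
 * the kernel lock around its operations (simplified from union_mount()).
 */
static void
union_mark_mpsafe_sketch(struct mount *mp)
{
	mp->mnt_iflag |= IMNT_MPSAFE;
}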
union_vnops.c: revision 1.48 -> 1.49

@@ -1,4 +1,4 @@
/* $NetBSD: union_vnops.c,v 1.48 2011/11/14 18:42:57 hannken Exp $ */
/* $NetBSD: union_vnops.c,v 1.49 2011/11/21 18:29:22 hannken Exp $ */
/*
* Copyright (c) 1992, 1993, 1994, 1995

@@ -72,7 +72,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: union_vnops.c,v 1.48 2011/11/14 18:42:57 hannken Exp $");
__KERNEL_RCSID(0, "$NetBSD: union_vnops.c,v 1.49 2011/11/21 18:29:22 hannken Exp $");
#include <sys/param.h>
#include <sys/systm.h>

@@ -189,8 +189,6 @@ const struct vnodeopv_entry_desc union_vnodeop_entries[] = {
const struct vnodeopv_desc union_vnodeop_opv_desc =
{ &union_vnodeop_p, union_vnodeop_entries };
#define FIXUP(un) \
KASSERT(((un)->un_flags & UN_ULOCK) == UN_ULOCK)
#define NODE_IS_SPECIAL(vp) \
((vp)->v_type == VBLK || (vp)->v_type == VCHR || \
(vp)->v_type == VSOCK || (vp)->v_type == VFIFO)

@@ -309,41 +307,8 @@ start:
* on and just return that vnode.
*/
if (upperdvp != NULLVP) {
FIXUP(dun);
/*
* If we're doing `..' in the underlying filesystem,
* we must drop our lock on the union node before
* going up the tree in the lower file system--if we block
* on the lowervp lock, and that's held by someone else
* coming down the tree and who's waiting for our lock,
* we would be hosed.
*/
if (cnp->cn_flags & ISDOTDOT) {
/* retain lock on underlying VP */
dun->un_flags |= UN_KLOCK;
VOP_UNLOCK(dvp);
}
uerror = union_lookup1(um->um_uppervp, &upperdvp,
&uppervp, cnp);
if (cnp->cn_flags & ISDOTDOT) {
if (dun->un_uppervp == upperdvp) {
/*
* we got the underlying bugger back locked...
* now take back the union node lock. Since we
* hold the uppervp lock, we can diddle union
* locking flags at will. :)
*/
dun->un_flags |= UN_ULOCK;
}
/*
* if upperdvp got swapped out, it means we did
* some mount point magic, and we do not have
* dun->un_uppervp locked currently--so we get it
* locked here (don't set the UN_ULOCK flag).
*/
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
}
if (cnp->cn_consume != 0) {
*ap->a_vpp = uppervp;
return (uerror);

@@ -480,12 +445,10 @@ start:
* to get the componentname right.
*/
if (upperdvp) {
dun->un_flags &= ~UN_ULOCK;
VOP_UNLOCK(upperdvp);
uerror = union_mkshadow(um, upperdvp, cnp,
&uppervp);
vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY);
dun->un_flags |= UN_ULOCK;
if (uerror == 0 && cnp->cn_nameiop != LOOKUP) {
vput(uppervp);
if (lowervp != NULLVP)

@@ -537,8 +500,6 @@ union_create(void *v)
struct vnode *vp;
struct mount *mp;
FIXUP(un);
vref(dvp);
un->un_flags |= UN_KLOCK;
mp = ap->a_dvp->v_mount;

@@ -572,7 +533,6 @@ union_whiteout(void *v)
if (un->un_uppervp == NULLVP)
return (EOPNOTSUPP);
FIXUP(un);
return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
}

@@ -594,8 +554,6 @@ union_mknod(void *v)
struct vnode *vp;
struct mount *mp;
FIXUP(un);
vref(dvp);
un->un_flags |= UN_KLOCK;
mp = ap->a_dvp->v_mount;

@@ -669,8 +627,6 @@ union_open(void *v)
(ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
return ENXIO;
FIXUP(un);
error = VOP_OPEN(tvp, mode, cred);
return (error);

@@ -758,7 +714,6 @@ union_access(void *v)
if ((vp = un->un_uppervp) != NULLVP) {
FIXUP(un);
ap->a_vp = vp;
return (VCALL(vp, VOFFSET(vop_access), ap));
}

@@ -818,12 +773,10 @@ union_getattr(void *v)
vp = un->un_uppervp;
if (vp != NULLVP) {
if (un->un_flags & UN_LOCKED)
FIXUP(un);
error = VOP_GETATTR(vp, vap, ap->a_cred);
if (error)
return (error);
mutex_enter(&un->un_lock);
union_newsize(ap->a_vp, vap->va_size, VNOVAL);
}

@@ -845,6 +798,7 @@ union_getattr(void *v)
VOP_UNLOCK(vp);
if (error)
return (error);
mutex_enter(&un->un_lock);
union_newsize(ap->a_vp, VNOVAL, vap->va_size);
}

@@ -929,10 +883,11 @@ union_setattr(void *v)
* otherwise.
*/
if (un->un_uppervp != NULLVP) {
FIXUP(un);
error = VOP_SETATTR(un->un_uppervp, vap, ap->a_cred);
if ((error == 0) && (vap->va_size != VNOVAL))
if ((error == 0) && (vap->va_size != VNOVAL)) {
mutex_enter(&un->un_lock);
union_newsize(ap->a_vp, vap->va_size, VNOVAL);
}
} else {
KASSERT(un->un_lowervp != NULLVP);
if (NODE_IS_SPECIAL(un->un_lowervp)) {

@@ -964,8 +919,6 @@ union_read(void *v)
if (dolock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
else
FIXUP(VTOUNION(ap->a_vp));
error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
if (dolock)
VOP_UNLOCK(vp);

@@ -979,14 +932,21 @@ union_read(void *v)
if (error == 0) {
struct union_node *un = VTOUNION(ap->a_vp);
off_t cur = ap->a_uio->uio_offset;
off_t usz = VNOVAL, lsz = VNOVAL;
mutex_enter(&un->un_lock);
if (vp == un->un_uppervp) {
if (cur > un->un_uppersz)
union_newsize(ap->a_vp, cur, VNOVAL);
usz = cur;
} else {
if (cur > un->un_lowersz)
union_newsize(ap->a_vp, VNOVAL, cur);
lsz = cur;
}
if (usz != VNOVAL || lsz != VNOVAL)
union_newsize(ap->a_vp, usz, lsz);
else
mutex_exit(&un->un_lock);
}
return (error);

@@ -1018,7 +978,6 @@ union_write(void *v)
panic("union: missing upper layer in write");
}
FIXUP(un);
error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
/*

@@ -1028,8 +987,11 @@ union_write(void *v)
if (error == 0) {
off_t cur = ap->a_uio->uio_offset;
mutex_enter(&un->un_lock);
if (cur > un->un_uppersz)
union_newsize(ap->a_vp, cur, VNOVAL);
else
mutex_exit(&un->un_lock);
}
return (error);

@@ -1131,8 +1093,6 @@ union_fsync(void *v)
if (dolock)
vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY);
else
FIXUP(VTOUNION(ap->a_vp));
error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_flags,
ap->a_offlo, ap->a_offhi);
if (dolock)

@@ -1177,11 +1137,9 @@ union_remove(void *v)
struct vnode *dvp = dun->un_uppervp;
struct vnode *vp = un->un_uppervp;
FIXUP(dun);
vref(dvp);
dun->un_flags |= UN_KLOCK;
vput(ap->a_dvp);
FIXUP(un);
vref(vp);
un->un_flags |= UN_KLOCK;
vput(ap->a_vp);

@@ -1192,7 +1150,6 @@ union_remove(void *v)
if (!error)
union_removed_upper(un);
} else {
FIXUP(dun);
error = union_mkwhiteout(
MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
dun->un_uppervp, ap->a_cnp, un);

@@ -1226,19 +1183,18 @@ union_link(void *v)
} else {
struct union_node *un = VTOUNION(ap->a_vp);
if (un->un_uppervp == NULLVP) {
const bool droplock = (dun->un_uppervp == un->un_dirvp);
/*
* Needs to be copied before we can link it.
*/
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
if (dun->un_uppervp == un->un_dirvp) {
dun->un_flags &= ~UN_ULOCK;
if (droplock)
VOP_UNLOCK(dun->un_uppervp);
}
error = union_copyup(un, 1, cnp->cn_cred, curlwp);
if (dun->un_uppervp == un->un_dirvp) {
if (droplock) {
vn_lock(dun->un_uppervp,
LK_EXCLUSIVE | LK_RETRY);
dun->un_flags |= UN_ULOCK;
/*
* During copyup, we dropped the lock on the
* dir and invalidated any saved namei lookup

@@ -1283,7 +1239,6 @@ union_link(void *v)
return (error);
}
FIXUP(dun);
vref(dvp);
dun->un_flags |= UN_KLOCK;
vput(ap->a_dvp);

@@ -1408,7 +1363,6 @@ union_mkdir(void *v)
int error;
struct vnode *vp;
FIXUP(un);
vref(dvp);
un->un_flags |= UN_KLOCK;
VOP_UNLOCK(ap->a_dvp);

@@ -1457,11 +1411,9 @@ union_rmdir(void *v)
struct vnode *dvp = dun->un_uppervp;
struct vnode *vp = un->un_uppervp;
FIXUP(dun);
vref(dvp);
dun->un_flags |= UN_KLOCK;
vput(ap->a_dvp);
FIXUP(un);
vref(vp);
un->un_flags |= UN_KLOCK;
vput(ap->a_vp);

@@ -1472,7 +1424,6 @@ union_rmdir(void *v)
if (!error)
union_removed_upper(un);
} else {
FIXUP(dun);
error = union_mkwhiteout(
MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
dun->un_uppervp, ap->a_cnp, un);

@@ -1500,7 +1451,6 @@ union_symlink(void *v)
if (dvp != NULLVP) {
int error;
FIXUP(un);
vref(dvp);
un->un_flags |= UN_KLOCK;
vput(ap->a_dvp);

@@ -1538,7 +1488,6 @@ union_readdir(void *v)
if (uvp == NULLVP)
return (0);
FIXUP(un);
ap->a_vp = uvp;
return (VCALL(uvp, VOFFSET(vop_readdir), ap));
}

@@ -1557,8 +1506,6 @@ union_readlink(void *v)
if (dolock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
else
FIXUP(VTOUNION(ap->a_vp));
ap->a_vp = vp;
error = VCALL(vp, VOFFSET(vop_readlink), ap);
if (dolock)

@@ -1613,7 +1560,7 @@ union_inactive(void *v)
un->un_dircache = 0;
}
*ap->a_recycle = ((un->un_flags & UN_CACHED) == 0);
*ap->a_recycle = ((un->un_cflags & UN_CACHED) == 0);
VOP_UNLOCK(vp);
return (0);

@@ -1638,54 +1585,33 @@ union_lock(void *v)
struct vnode *a_vp;
int a_flags;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
int flags = ap->a_flags;
struct vnode *vp;
struct union_node *un;
int error;
/* XXX unionfs can't handle shared locks yet */
if ((flags & LK_SHARED) != 0) {
flags = (flags & ~LK_SHARED) | LK_EXCLUSIVE;
un = VTOUNION(ap->a_vp);
mutex_enter(&un->un_lock);
for (;;) {
vp = LOCKVP(ap->a_vp);
mutex_exit(&un->un_lock);
if (vp == ap->a_vp)
error = genfs_lock(ap);
else
error = VOP_LOCK(vp, ap->a_flags);
if (error != 0)
return error;
mutex_enter(&un->un_lock);
if (vp == LOCKVP(ap->a_vp))
break;
if (vp == ap->a_vp)
genfs_unlock(ap);
else
VOP_UNLOCK(vp);
}
KASSERT((un->un_flags & UN_KLOCK) == 0);
mutex_exit(&un->un_lock);
start:
un = VTOUNION(vp);
if (un->un_uppervp != NULLVP) {
if (((un->un_flags & UN_ULOCK) == 0) &&
(vp->v_usecount != 0)) {
/*
* We MUST always use the order of: take upper
* vp lock, manipulate union node flags, drop
* upper vp lock. This code must not be an
* exception.
*/
error = vn_lock(un->un_uppervp, flags);
if (error)
return (error);
un->un_flags |= UN_ULOCK;
}
#ifdef DIAGNOSTIC
if (un->un_flags & UN_KLOCK) {
vprint("union: dangling klock", vp);
panic("union: dangling upper lock (%p)", vp);
}
#endif
}
/* XXX ignores LK_NOWAIT */
if (un->un_flags & UN_LOCKED) {
KASSERT(curlwp == NULL || un->un_lwp == NULL ||
un->un_lwp != curlwp);
un->un_flags |= UN_WANTED;
tsleep(&un->un_flags, PINOD, "unionlk2", 0);
goto start;
}
un->un_lwp = curlwp;
un->un_flags |= UN_LOCKED;
return (0);
return error;
}
/*

@@ -1706,27 +1632,22 @@ union_unlock(void *v)
struct vnode *a_vp;
int a_flags;
} */ *ap = v;
struct union_node *un = VTOUNION(ap->a_vp);
struct vnode *vp;
struct union_node *un;
KASSERT((un->un_flags & UN_LOCKED) != 0);
KASSERT(curlwp == NULL || un->un_lwp == NULL ||
un->un_lwp == curlwp);
un->un_flags &= ~UN_LOCKED;
if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK)
VOP_UNLOCK(un->un_uppervp);
un->un_flags &= ~(UN_ULOCK|UN_KLOCK);
if (un->un_flags & UN_WANTED) {
un->un_flags &= ~UN_WANTED;
wakeup( &un->un_flags);
un = VTOUNION(ap->a_vp);
vp = LOCKVP(ap->a_vp);
if ((un->un_flags & UN_KLOCK) == UN_KLOCK) {
KASSERT(vp != ap->a_vp);
un->un_flags &= ~UN_KLOCK;
return 0;
}
if (vp == ap->a_vp)
genfs_unlock(ap);
else
VOP_UNLOCK(vp);
un->un_lwp = NULL;
return (0);
return 0;
}
int

@@ -1745,8 +1666,6 @@ union_bmap(void *v)
if (dolock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
else
FIXUP(VTOUNION(ap->a_vp));
ap->a_vp = vp;
error = VCALL(vp, VOFFSET(vop_bmap), ap);
if (dolock)

@@ -1784,8 +1703,18 @@ union_islocked(void *v)
struct vop_islocked_args /* {
struct vnode *a_vp;
} */ *ap = v;
struct vnode *vp;
struct union_node *un;
return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? LK_EXCLUSIVE : 0);
un = VTOUNION(ap->a_vp);
mutex_enter(&un->un_lock);
vp = LOCKVP(ap->a_vp);
mutex_exit(&un->un_lock);
if (vp == ap->a_vp)
return genfs_islocked(ap);
else
return VOP_ISLOCKED(vp);
}
int

@@ -1802,8 +1731,6 @@ union_pathconf(void *v)
if (dolock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
else
FIXUP(VTOUNION(ap->a_vp));
ap->a_vp = vp;
error = VCALL(vp, VOFFSET(vop_pathconf), ap);
if (dolock)
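With the flag-based lock gone, a shared lock request no longer has to be upgraded to exclusive: union_lock() simply forwards whatever flags it was given, which is what allows union_readdirhook() in the union_subr.c hunk above to take the vnode with LK_SHARED. A caller-side sketch of that pattern (the function name is hypothetical, modelled on that call site):

/*
 * Hypothetical caller using the new shared-lock path on a union vnode;
 * modelled on the vn_lock(..., LK_SHARED | LK_RETRY) call site added
 * in union_readdirhook().
 */
static int
union_getattr_shared_sketch(struct vnode *unionvp, struct vattr *vap,
    kauth_cred_t cred)
{
	int error;

	vn_lock(unionvp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(unionvp, vap, cred);
	VOP_UNLOCK(unionvp);
	return error;
}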