Take the vnode lock before the vnode is marked VI_CHANGING and fed
to vclean().  Prevents a deadlock with two null mounts on the same
physical mount where one thread tries to vclean() a layer node and
another thread tries to vget() a layer node pointing to the same
physical node.

Fixes PR kern/50375: layerfs (nullfs) locking problem leading to livelock.
hannken 2015-11-12 11:35:42 +00:00
parent 7017cd79aa
commit fa45966e6d
1 changed file with 27 additions and 19 deletions
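The ordering this commit establishes can be pictured with a small user-space
model: the "changing" flag is only ever set while the vnode lock is already
held, and a thread waiting for the flag to clear holds nothing the cleaning
thread still needs.  The sketch below is illustrative only; toy_vnode,
toy_vrecycle and toy_vget are invented stand-ins, and pthread mutexes and
condition variables substitute for vn_lock()/VOP_UNLOCK(), the vnode
interlock and vwait().

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_vnode {
	pthread_mutex_t lock;		/* stands in for the vnode lock */
	pthread_mutex_t interlock;	/* stands in for v_interlock */
	pthread_cond_t  changed;	/* wakeup channel, like vwait() */
	bool            changing;	/* stands in for VI_CHANGING */
};

/* Recycle path: take the vnode lock BEFORE marking the node changing. */
static bool
toy_vrecycle(struct toy_vnode *vp)
{
	if (pthread_mutex_trylock(&vp->lock) != 0)
		return false;		/* back off instead of deadlocking */
	pthread_mutex_lock(&vp->interlock);
	vp->changing = true;		/* safe: vnode lock already held */
	pthread_mutex_unlock(&vp->interlock);

	/* ... disassociate the file system (vclean() in the real code) ... */

	pthread_mutex_lock(&vp->interlock);
	vp->changing = false;
	pthread_cond_broadcast(&vp->changed);
	pthread_mutex_unlock(&vp->interlock);
	pthread_mutex_unlock(&vp->lock);
	return true;
}

/* Lookup path: wait out a concurrent recycle WITHOUT holding the vnode lock. */
static void *
toy_vget(void *arg)
{
	struct toy_vnode *vp = arg;

	pthread_mutex_lock(&vp->interlock);
	while (vp->changing)
		pthread_cond_wait(&vp->changed, &vp->interlock);
	pthread_mutex_unlock(&vp->interlock);

	pthread_mutex_lock(&vp->lock);	/* cleaner is finished or not started */
	/* ... use the vnode ... */
	pthread_mutex_unlock(&vp->lock);
	return NULL;
}

int
main(void)
{
	static struct toy_vnode vn = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.interlock = PTHREAD_MUTEX_INITIALIZER,
		.changed = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, toy_vget, &vn);
	while (!toy_vrecycle(&vn))
		sched_yield();		/* retry, as the freelist scan does */
	pthread_join(t, NULL);
	printf("no deadlock: recycle and lookup both completed\n");
	return 0;
}

Built with cc -pthread, the model runs to completion under either
interleaving.  Before this commit the kernel-side equivalent could mark the
node VI_CHANGING first and then sleep acquiring a lock held by a vget()ing
thread that was itself sleeping in vwait() on VI_CHANGING, which is the
layerfs deadlock described above.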

sys/kern/vfs_vnode.c

@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_vnode.c,v 1.45 2015/07/12 08:11:28 hannken Exp $ */
+/* $NetBSD: vfs_vnode.c,v 1.46 2015/11/12 11:35:42 hannken Exp $ */
 
 /*-
  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
@@ -116,7 +116,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.45 2015/07/12 08:11:28 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.46 2015/11/12 11:35:42 hannken Exp $");
 
 #define _VFS_VNODE_PRIVATE
 
@@ -325,15 +325,17 @@ try_nextlist:
 		KASSERT((vp->v_iflag & VI_CLEAN) == 0);
 		KASSERT(vp->v_freelisthd == listhd);
 
-		if (!mutex_tryenter(vp->v_interlock))
+		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
 			continue;
-		if ((vp->v_iflag & VI_XLOCK) != 0) {
-			mutex_exit(vp->v_interlock);
+		if (!mutex_tryenter(vp->v_interlock)) {
+			VOP_UNLOCK(vp);
 			continue;
 		}
+		KASSERT((vp->v_iflag & VI_XLOCK) == 0);
 		mp = vp->v_mount;
 		if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
 			mutex_exit(vp->v_interlock);
+			VOP_UNLOCK(vp);
 			continue;
 		}
 		break;
@@ -643,6 +645,11 @@ vrelel(vnode_t *vp, int flags)
 		 * Note that VOP_INACTIVE() will drop the vnode lock.
 		 */
 		VOP_INACTIVE(vp, &recycle);
+		if (recycle) {
+			/* vclean() below will drop the lock. */
+			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
+				recycle = false;
+		}
 		mutex_enter(vp->v_interlock);
 		if (!recycle) {
 			if (vtryrele(vp)) {
@@ -867,6 +874,7 @@ holdrelel(vnode_t *vp)
 /*
  * Disassociate the underlying file system from a vnode.
  *
+ * Must be called with vnode locked and will return unlocked.
  * Must be called with the interlock held, and will return with it held.
  */
 static void
@@ -876,26 +884,18 @@ vclean(vnode_t *vp)
 	bool recycle, active;
 	int error;
 
+	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
+	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
 	KASSERT(mutex_owned(vp->v_interlock));
 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
+	KASSERT((vp->v_iflag & (VI_XLOCK | VI_CLEAN)) == 0);
 	KASSERT(vp->v_usecount != 0);
 
-	/* If already clean, nothing to do. */
-	if ((vp->v_iflag & VI_CLEAN) != 0) {
-		return;
-	}
-
 	active = (vp->v_usecount > 1);
-	mutex_exit(vp->v_interlock);
-	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 
 	/*
 	 * Prevent the vnode from being recycled or brought into use
 	 * while we clean it out.
 	 */
-	mutex_enter(vp->v_interlock);
-	KASSERT((vp->v_iflag & (VI_XLOCK | VI_CLEAN)) == 0);
 	vp->v_iflag |= VI_XLOCK;
 	if (vp->v_iflag & VI_EXECMAP) {
 		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
@@ -972,23 +972,26 @@ bool
 vrecycle(vnode_t *vp)
 {
 
+	if (vn_lock(vp, LK_EXCLUSIVE) != 0)
+		return false;
+
 	mutex_enter(vp->v_interlock);
 
 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
 
 	if (vp->v_usecount != 1) {
 		mutex_exit(vp->v_interlock);
+		VOP_UNLOCK(vp);
 		return false;
 	}
 	if ((vp->v_iflag & VI_CHANGING) != 0)
 		vwait(vp, VI_CHANGING);
 	if (vp->v_usecount != 1) {
 		mutex_exit(vp->v_interlock);
+		VOP_UNLOCK(vp);
 		return false;
-	} else if ((vp->v_iflag & VI_CLEAN) != 0) {
-		mutex_exit(vp->v_interlock);
-		return true;
 	}
+	KASSERT((vp->v_iflag & VI_CLEAN) == 0);
 	vp->v_iflag |= VI_CHANGING;
 	vclean(vp);
 	vrelel(vp, VRELEL_CHANGING_SET);
@@ -1036,6 +1039,11 @@ void
 vgone(vnode_t *vp)
 {
 
+	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
+		KASSERT((vp->v_iflag & VI_CLEAN) != 0);
+		vrele(vp);
+		return;
+	}
 	mutex_enter(vp->v_interlock);
 	if ((vp->v_iflag & VI_CHANGING) != 0)
 		vwait(vp, VI_CHANGING);