/*	$NetBSD: genfs_vnops.c,v 1.10 1998/08/13 09:59:53 kleink Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/poll.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

int
genfs_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
genfs_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	struct timespec ts;

	vflushbuf(vp, (ap->a_flags & FSYNC_WAIT) != 0);
	if ((ap->a_flags & FSYNC_DATAONLY) != 0) {
		return (0);
	} else {
		TIMEVAL_TO_TIMESPEC(&time, &ts);
		return (VOP_UPDATE(ap->a_vp, &ts, &ts,
		    (ap->a_flags & FSYNC_WAIT) != 0));
	}
}

int
genfs_seek(v)
	void *v;
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_ucred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

int
genfs_abortop(v)
	void *v;
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		FREE(ap->a_cnp->cn_pnbuf, M_NAMEI);
	return (0);
}

/*ARGSUSED*/
int
genfs_badop(v)
	void *v;
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(v)
	void *v;
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(v)
	void *v;
{

	return (EINVAL);
}

/*ARGSUSED*/
int
genfs_eopnotsupp(v)
	void *v;
{

	return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_ebadf(v)
	void *v;
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(v)
	void *v;
{

	return (ENOTTY);
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(v)
	void *v;
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, p);
	return (0);
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
genfs_nolock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink).  Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them.  Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer.  Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Decrement the active use count.
 */
int
genfs_nounlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockmgr(vp->v_vnlock, LK_RELEASE, NULL));
}

/*
 * Return whether or not the node is in use.
 */
int
genfs_noislocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock));
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(v)
	void *v;
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}
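
/*
 * Illustrative sketch (kept under #if 0, not compiled): a filesystem with
 * no special handling for these operations typically points the matching
 * entries of its vnodeopv_entry_desc table at the genfs_*() stubs above.
 * The "examplefs" names here are hypothetical and stand in for a real
 * filesystem's vnode operation vector.
 */
#if 0
int (**examplefs_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_poll_desc, genfs_poll },			/* poll */
	{ &vop_fsync_desc, genfs_fsync },		/* fsync */
	{ &vop_seek_desc, genfs_seek },			/* seek */
	{ &vop_abortop_desc, genfs_abortop },		/* abortop */
	{ &vop_revoke_desc, genfs_revoke },		/* revoke */
	{ &vop_lock_desc, genfs_nolock },		/* lock */
	{ &vop_unlock_desc, genfs_nounlock },		/* unlock */
	{ &vop_islocked_desc, genfs_noislocked },	/* islocked */
	{ &vop_lease_desc, genfs_lease_check },		/* lease */
	{ (struct vnodeop_desc *)NULL, (int (*) __P((void *)))NULL }
};
struct vnodeopv_desc examplefs_vnodeop_opv_desc =
	{ &examplefs_vnodeop_p, examplefs_vnodeop_entries };
#endif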