Housecleaning time:

Fix and document naming convention for vnode variables (always use
lvp/lvpp and uvp/uvpp instead of a hash of cvp, vpp, dvpp, pvp, pvpp).

Delete old stale #if 0'ed code at the end.

Change error-path code in getcwd_getcache() slightly (merge common
cleanup code; this should not affect behavior).
This commit is contained in:
sommerfeld 1999-07-04 20:16:57 +00:00
parent 25b8d43788
commit 140b1ee075

View File

@@ -1,4 +1,4 @@
/* $NetBSD: vfs_getcwd.c,v 1.8 1999/06/21 05:11:09 sommerfeld Exp $ */ /* $NetBSD: vfs_getcwd.c,v 1.9 1999/07/04 20:16:57 sommerfeld Exp $ */
/*- /*-
* Copyright (c) 1999 The NetBSD Foundation, Inc. * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -67,6 +67,19 @@ int vn_isunder __P((struct vnode *, struct vnode *, struct proc *));
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4) #define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)
/*
* Vnode variable naming conventions in this file:
*
* rvp: the current root we're aiming towards.
* lvp, *lvpp: the "lower" vnode
* uvp, *uvpp: the "upper" vnode.
*
* Since all the vnodes we're dealing with are directories, and the
* lookups are going *up* in the filesystem rather than *down*, the
* usual "pvp" (parent) or "dvp" (directory) naming conventions are
* too confusing.
*/
/* /*
* XXX Will infinite loop in certain cases if a directory read reliably * XXX Will infinite loop in certain cases if a directory read reliably
* returns EINVAL on last block. * returns EINVAL on last block.
@@ -74,19 +87,25 @@ int vn_isunder __P((struct vnode *, struct vnode *, struct proc *));
*/ */
/* /*
* Find parent vnode of cvp, return in *pvpp * XXX Untested vs. mount -o union; probably does the wrong thing.
* Scan it looking for name of directory entry pointing at cvp. */
/*
* Find parent vnode of *lvpp, return in *uvpp
*
* If we care about the name, scan it looking for name of directory
* entry pointing at lvp.
* *
* Place the name in the buffer which starts at bufp, immediately * Place the name in the buffer which starts at bufp, immediately
* before *bpp, and move bpp backwards to point at the start of it. * before *bpp, and move bpp backwards to point at the start of it.
* *
* On entry, *cvpp is a locked vnode reference; on exit, it is vput and NULL'ed * On entry, *lvpp is a locked vnode reference; on exit, it is vput and NULL'ed
* On exit, *pvpp is either NULL or is a locked vnode reference. * On exit, *uvpp is either NULL or is a locked vnode reference.
*/ */
static int static int
getcwd_scandir(cvpp, pvpp, bpp, bufp, p) getcwd_scandir(lvpp, uvpp, bpp, bufp, p)
struct vnode **cvpp; struct vnode **lvpp;
struct vnode **pvpp; struct vnode **uvpp;
char **bpp; char **bpp;
char *bufp; char *bufp;
struct proc *p; struct proc *p;
@@ -101,8 +120,8 @@ getcwd_scandir(cvpp, pvpp, bpp, bufp, p)
int dirbuflen; int dirbuflen;
ino_t fileno; ino_t fileno;
struct vattr va; struct vattr va;
struct vnode *pvp = NULL; struct vnode *uvp = NULL;
struct vnode *cvp = *cvpp; struct vnode *lvp = *lvpp;
struct componentname cn; struct componentname cn;
int len, reclen; int len, reclen;
tries = 0; tries = 0;
@@ -112,11 +131,11 @@ getcwd_scandir(cvpp, pvpp, bpp, bufp, p)
* current directory is still locked. * current directory is still locked.
*/ */
if (bufp != NULL) { if (bufp != NULL) {
error = VOP_GETATTR(cvp, &va, p->p_ucred, p); error = VOP_GETATTR(lvp, &va, p->p_ucred, p);
if (error) { if (error) {
vput(cvp); vput(lvp);
*cvpp = NULL; *lvpp = NULL;
*pvpp = NULL; *uvpp = NULL;
return error; return error;
} }
} }
@@ -136,22 +155,22 @@ getcwd_scandir(cvpp, pvpp, bpp, bufp, p)
cn.cn_consume = 0; cn.cn_consume = 0;
/* /*
* At this point, cvp is locked and will be unlocked by the lookup. * At this point, lvp is locked and will be unlocked by the lookup.
* On successful return, *pvpp will be locked * On successful return, *uvpp will be locked
*/ */
error = VOP_LOOKUP(cvp, pvpp, &cn); error = VOP_LOOKUP(lvp, uvpp, &cn);
if (error) { if (error) {
vput(cvp); vput(lvp);
*cvpp = NULL; *lvpp = NULL;
*pvpp = NULL; *uvpp = NULL;
return error; return error;
} }
pvp = *pvpp; uvp = *uvpp;
/* If we don't care about the pathname, we're done */ /* If we don't care about the pathname, we're done */
if (bufp == NULL) { if (bufp == NULL) {
vrele(cvp); vrele(lvp);
*cvpp = NULL; *lvpp = NULL;
return 0; return 0;
} }
@@ -181,7 +200,7 @@ unionread:
eofflag = 0; eofflag = 0;
error = VOP_READDIR(pvp, &uio, p->p_ucred, &eofflag, 0, 0); error = VOP_READDIR(uvp, &uio, p->p_ucred, &eofflag, 0, 0);
off = uio.uio_offset; off = uio.uio_offset;
@@ -243,17 +262,17 @@ unionread:
* Deal with mount -o union, which unions only the * Deal with mount -o union, which unions only the
* root directory of the mount. * root directory of the mount.
*/ */
if ((pvp->v_flag & VROOT) && if ((uvp->v_flag & VROOT) &&
(pvp->v_mount->mnt_flag & MNT_UNION)) { (uvp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = pvp; struct vnode *tvp = uvp;
pvp = pvp->v_mount->mnt_vnodecovered; uvp = uvp->v_mount->mnt_vnodecovered;
vput(tvp); vput(tvp);
VREF(pvp); VREF(uvp);
*pvpp = pvp; *uvpp = uvp;
error = vn_lock(pvp, LK_EXCLUSIVE | LK_RETRY); error = vn_lock(uvp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0) { if (error != 0) {
vrele(pvp); vrele(uvp);
*pvpp = pvp = NULL; *uvpp = uvp = NULL;
goto out; goto out;
} }
goto unionread; goto unionread;
@@ -262,8 +281,8 @@ unionread:
error = ENOENT; error = ENOENT;
out: out:
vrele(cvp); vrele(lvp);
*cvpp = NULL; *lvpp = NULL;
free(dirbuf, M_TEMP); free(dirbuf, M_TEMP);
return error; return error;
} }
@@ -274,69 +293,75 @@ out:
* *
* XXX vget failure path is untested. * XXX vget failure path is untested.
* *
* On entry, *vpp is a locked vnode reference. * On entry, *lvpp is a locked vnode reference.
* On exit, one of the following is the case: * On exit, one of the following is the case:
* 0) Both *vpp and *vpp are NULL and failure is returned. * 0) Both *lvpp and *uvpp are NULL and failure is returned.
* 1) *dvpp is NULL, *vpp remains locked and -1 is returned (cache miss) * 1) *uvpp is NULL, *lvpp remains locked and -1 is returned (cache miss)
* 2) *dvpp is a locked vnode reference, *vpp is vput and NULL'ed * 2) *uvpp is a locked vnode reference, *lvpp is vput and NULL'ed
* and 0 is returned (cache hit) * and 0 is returned (cache hit)
*/ */
static int static int
getcwd_getcache(vpp, dvpp, bpp, bufp) getcwd_getcache(lvpp, uvpp, bpp, bufp)
struct vnode **vpp, **dvpp; struct vnode **lvpp, **uvpp;
char **bpp; char **bpp;
char *bufp; char *bufp;
{ {
struct vnode *cvp, *pvp = NULL; struct vnode *lvp, *uvp = NULL;
int error; int error;
int vpid; int vpid;
cvp = *vpp; lvp = *lvpp;
/* /*
* This returns 0 on a cache hit, -1 on a clean cache miss, * This returns 0 on a cache hit, -1 on a clean cache miss,
* or an errno on other failure. * or an errno on other failure.
*/ */
error = cache_revlookup(cvp, dvpp, bpp, bufp); error = cache_revlookup(lvp, uvpp, bpp, bufp);
if (error) { if (error) {
if (error != -1) { if (error != -1) {
vput(cvp); vput(lvp);
*vpp = NULL; *lvpp = NULL;
*dvpp = NULL; *uvpp = NULL;
} }
return error; return error;
} }
pvp = *dvpp; uvp = *uvpp;
vpid = pvp->v_id; vpid = uvp->v_id;
/* /*
* Since we're going up, we have to release the current lock * Since we're going up, we have to release the current lock
* before we take the parent lock. * before we take the parent lock.
*/ */
VOP_UNLOCK(cvp, 0); VOP_UNLOCK(lvp, 0);
error = vget(pvp, LK_EXCLUSIVE | LK_RETRY); error = vget(uvp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0) if (error != 0)
*dvpp = NULL; *uvpp = NULL;
/* /*
* Check that vnode capability didn't change while we were waiting * Verify that vget succeeded, and check that vnode capability
* for the lock. * didn't change while we were waiting for the lock.
*/ */
if (error || (vpid != pvp->v_id)) { if (error || (vpid != uvp->v_id)) {
/* /*
* oops, it did. do this the hard way. * Oops, we missed. If the vget failed, or the
* capability changed, try to get our lock back; if
* that works, tell caller to try things the hard way,
* otherwise give up.
*/ */
if (!error) vput(pvp); if (!error) vput(uvp);
error = vn_lock(cvp, LK_EXCLUSIVE | LK_RETRY); *uvpp = NULL;
*dvpp = NULL;
return -1; error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
}
vrele(cvp);
*vpp = NULL;
return 0; if (!error)
return -1;
}
vrele(lvp);
*lvpp = NULL;
return error;
} }
/* /*
@@ -345,8 +370,8 @@ getcwd_getcache(vpp, dvpp, bpp, bufp)
#define GETCWD_CHECK_ACCESS 0x0001 #define GETCWD_CHECK_ACCESS 0x0001
static int getcwd_common (dvp, rvp, bpp, bufp, limit, flags, p) static int getcwd_common (lvp, rvp, bpp, bufp, limit, flags, p)
struct vnode *dvp; struct vnode *lvp;
struct vnode *rvp; struct vnode *rvp;
char **bpp; char **bpp;
char *bufp; char *bufp;
@@ -355,7 +380,7 @@ static int getcwd_common (dvp, rvp, bpp, bufp, limit, flags, p)
struct proc *p; struct proc *p;
{ {
struct cwdinfo *cwdi = p->p_cwdi; struct cwdinfo *cwdi = p->p_cwdi;
struct vnode *pvp = NULL; struct vnode *uvp = NULL;
char *bp = NULL; char *bp = NULL;
int error; int error;
@@ -366,19 +391,19 @@ static int getcwd_common (dvp, rvp, bpp, bufp, limit, flags, p)
} }
VREF(rvp); VREF(rvp);
VREF(dvp); VREF(lvp);
/* /*
* Error handling invariant: * Error handling invariant:
* Before a `goto out': * Before a `goto out':
* dvp is either NULL, or locked and held. * lvp is either NULL, or locked and held.
* pvp is either NULL, or locked and held. * uvp is either NULL, or locked and held.
*/ */
error = vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
if (error) { if (error) {
vrele(dvp); vrele(lvp);
dvp = NULL; lvp = NULL;
goto out; goto out;
} }
if (bufp) if (bufp)
@@ -389,13 +414,13 @@ static int getcwd_common (dvp, rvp, bpp, bufp, limit, flags, p)
* - getdirentries or lookup fails * - getdirentries or lookup fails
* - we run out of space in the buffer. * - we run out of space in the buffer.
*/ */
if (dvp == rvp) { if (lvp == rvp) {
if (bp) if (bp)
*(--bp) = '/'; *(--bp) = '/';
goto out; goto out;
} }
do { do {
if (dvp->v_type != VDIR) { if (lvp->v_type != VDIR) {
error = ENOTDIR; error = ENOTDIR;
goto out; goto out;
} }
@@ -405,7 +430,7 @@ static int getcwd_common (dvp, rvp, bpp, bufp, limit, flags, p)
* whether or not caller cares. * whether or not caller cares.
*/ */
if (flags & GETCWD_CHECK_ACCESS) { if (flags & GETCWD_CHECK_ACCESS) {
error = VOP_ACCESS(dvp, VEXEC|VREAD, p->p_ucred, p); error = VOP_ACCESS(lvp, VEXEC|VREAD, p->p_ucred, p);
if (error) if (error)
goto out; goto out;
} }
@@ -413,27 +438,27 @@ static int getcwd_common (dvp, rvp, bpp, bufp, limit, flags, p)
/* /*
* step up if we're a covered vnode.. * step up if we're a covered vnode..
*/ */
while (dvp->v_flag & VROOT) { while (lvp->v_flag & VROOT) {
struct vnode *tvp; struct vnode *tvp;
if (dvp == rvp) if (lvp == rvp)
goto out; goto out;
tvp = dvp; tvp = lvp;
dvp = dvp->v_mount->mnt_vnodecovered; lvp = lvp->v_mount->mnt_vnodecovered;
vput(tvp); vput(tvp);
/* /*
* hodie natus est radici frater * hodie natus est radici frater
*/ */
if (dvp == NULL) { if (lvp == NULL) {
error = ENOENT; error = ENOENT;
goto out; goto out;
} }
VREF(dvp); VREF(lvp);
error = vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0) { if (error != 0) {
vrele(dvp); vrele(lvp);
dvp = NULL; lvp = NULL;
goto out; goto out;
} }
} }
@@ -441,32 +466,32 @@ static int getcwd_common (dvp, rvp, bpp, bufp, limit, flags, p)
* Look in the name cache; if that fails, look in the * Look in the name cache; if that fails, look in the
* directory.. * directory..
*/ */
error = getcwd_getcache(&dvp, &pvp, &bp, bufp); error = getcwd_getcache(&lvp, &uvp, &bp, bufp);
if (error == -1) if (error == -1)
error = getcwd_scandir(&dvp, &pvp, &bp, bufp, p); error = getcwd_scandir(&lvp, &uvp, &bp, bufp, p);
if (error) if (error)
goto out; goto out;
#if DIAGNOSTIC #if DIAGNOSTIC
if (dvp != NULL) if (lvp != NULL)
panic("getcwd: oops, forgot to null dvp"); panic("getcwd: oops, forgot to null lvp");
if (bufp && (bp <= bufp)) { if (bufp && (bp <= bufp)) {
panic("getcwd: oops, went back too far"); panic("getcwd: oops, went back too far");
} }
#endif #endif
if (bp) if (bp)
*(--bp) = '/'; *(--bp) = '/';
dvp = pvp; lvp = uvp;
pvp = NULL; uvp = NULL;
limit--; limit--;
} while ((dvp != rvp) && (limit > 0)); } while ((lvp != rvp) && (limit > 0));
out: out:
if (bpp) if (bpp)
*bpp = bp; *bpp = bp;
if (pvp) if (uvp)
vput(pvp); vput(uvp);
if (dvp) if (lvp)
vput(dvp); vput(lvp);
vrele(rvp); vrele(rvp);
return error; return error;
} }
@@ -478,14 +503,14 @@ out:
* Intended to be used in chroot, chdir, fchdir, etc., to ensure that * Intended to be used in chroot, chdir, fchdir, etc., to ensure that
* chroot() actually means something. * chroot() actually means something.
*/ */
int vn_isunder(dvp, rvp, p) int vn_isunder(lvp, rvp, p)
struct vnode *dvp; struct vnode *lvp;
struct vnode *rvp; struct vnode *rvp;
struct proc *p; struct proc *p;
{ {
int error; int error;
error = getcwd_common (dvp, rvp, NULL, NULL, MAXPATHLEN/2, 0, p); error = getcwd_common (lvp, rvp, NULL, NULL, MAXPATHLEN/2, 0, p);
if (!error) if (!error)
return 1; return 1;
@@ -515,6 +540,13 @@ int proc_isunder (p1, p2)
return vn_isunder(r1, r2, p2); return vn_isunder(r1, r2, p2);
} }
/*
* Find pathname of process's current directory.
*
* Use vfs vnode-to-name reverse cache; if that fails, fall back
* to reading directory contents.
*/
int sys___getcwd(p, v, retval) int sys___getcwd(p, v, retval)
struct proc *p; struct proc *p;
void *v; void *v;
@@ -566,146 +598,3 @@ out:
/*
* Find pathname of process's current directory.
*
* Use vfs vnode-to-name reverse cache; if that fails, fall back
* to reading directory contents.
*/
/*
* XXX Untested vs. mount -o union; probably does the wrong thing.
* XXX Untested vs chroot
* XXX most error paths probably work, but many locking-related ones
* aren't tested well.
*/
#if 0
/*
 * Old, pre-refactor implementation of sys___getcwd(), disabled under
 * "#if 0" and deleted by this commit.  The live version shares its
 * directory-walking loop with getcwd_common() instead.  Dead code:
 * nothing compiles or calls this.
 */
int
sys___getcwd(p, v, retval)
struct proc *p;
void *v;
register_t *retval;
{
register struct sys___getcwd_args /* {
syscallarg(char *) bufp;
syscallarg(size_t) length;
} */ *uap = v;
struct cwdinfo *cwdi = p->p_cwdi;
struct vnode *cvp = NULL, *pvp = NULL, *rootvp = NULL;
int error;
char *path;
char *bp, *bend;
int len = SCARG(uap, length);
int lenused;
/* Reject absurd user buffer sizes before allocating. */
if ((len < 2) || (len > MAXPATHLEN*4))
return ERANGE;
path = (char *)malloc(len, M_TEMP, M_WAITOK);
if (!path)
return ENOMEM;
/* The pathname is assembled right-to-left from the end of path[]. */
bp = &path[len];
bend = bp;
*(--bp) = '\0';
/* Honor the process's chroot, if any, as the root to stop at. */
rootvp = cwdi->cwdi_rdir;
if (rootvp == NULL)
rootvp = rootvnode;
cvp = cwdi->cwdi_cdir;
VREF(rootvp);
VREF(cvp);
/*
* Error handling invariant:
* Before a `goto out':
* cvp is either NULL, or locked and held.
* pvp is either NULL, or locked and held.
*/
error = vn_lock(cvp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
vrele(cvp);
cvp = NULL;
goto out;
}
/*
* this loop will terminate when one of the following happens:
* - we hit the root
* - getdirentries or lookup fails
* - we run out of space in the buffer.
*/
if (cvp == rootvp) {
*(--bp) = '/';
goto hitroot;
}
do {
/*
* so, are we even allowed to look at this directory?
*/
error = VOP_ACCESS(cvp, VEXEC|VREAD, p->p_ucred, p);
if (error)
goto out;
/*
* step up if we're a covered vnode..
*/
while (cvp->v_flag & VROOT) {
struct vnode *tvp;
if (cvp == rootvp)
goto hitroot;
tvp = cvp;
cvp = cvp->v_mount->mnt_vnodecovered;
vput(tvp);
/*
 * NOTE(review): unlike the live getcwd_common(), this version
 * does not check mnt_vnodecovered for NULL before VREF'ing it.
 */
VREF(cvp);
error = vn_lock(cvp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0) {
vrele(cvp);
cvp = NULL;
goto out;
}
}
/*
* Look in the name cache; if that fails, look in the directory..
*/
error = getcwd_getcache(&cvp, &pvp, &bp, path);
if (error == -1)
/*
 * NOTE(review): passes cvp where getcwd_scandir() takes a
 * struct vnode **; the live caller passes &cvp.  Presumably
 * stale vs. the current API -- moot since this is #if 0'd.
 */
error = getcwd_scandir(cvp, &pvp, &bp, path, p);
if (error)
goto out;
if (bp <= path) {
error = ERANGE;
goto out;
}
*(--bp) = '/';
/*
 * NOTE(review): getcwd_getcache() already releases and NULLs
 * *vpp on a cache hit, so this vput(cvp) looks inconsistent
 * with that contract -- confirm; likely why this is disabled.
 */
vput(cvp);
cvp = pvp;
pvp = NULL;
} while (cvp != rootvp);
hitroot:
/* lenused counts the assembled string including its trailing NUL. */
lenused = bend - bp;
*retval = lenused;
/* put the result into user buffer */
error = copyout(bp, SCARG(uap, bufp), lenused);
out:
if (pvp)
vput(pvp);
if (cvp)
vput(cvp);
vrele(rootvp);
free(path, M_TEMP);
return error;
}
#endif