/* $NetBSD: vfs_vnops.c,v 1.133 2007/02/16 17:24:00 hannken Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)vfs_vnops.c 8.14 (Berkeley) 6/15/95
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.133 2007/02/16 17:24:00 hannken Exp $");
#include "fs_union.h"
#include "veriexec.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <miscfs/specfs/specdev.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>
#ifdef UNION
#include <fs/union/union.h>
#endif
#if defined(LKM) || defined(UNION)
int (*vn_union_readdir_hook) (struct vnode **, struct file *, struct lwp *);
#endif
#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */
static int vn_read(struct file *fp, off_t *offset, struct uio *uio,
kauth_cred_t cred, int flags);
static int vn_write(struct file *fp, off_t *offset, struct uio *uio,
kauth_cred_t cred, int flags);
static int vn_closefile(struct file *fp, struct lwp *l);
static int vn_poll(struct file *fp, int events, struct lwp *l);
static int vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l);
static int vn_statfile(struct file *fp, struct stat *sb, struct lwp *l);
static int vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l);
const struct fileops vnops = {
vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
vn_statfile, vn_closefile, vn_kqfilter
};
/*
* Common code for vnode open operations.
* Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
*/
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
struct vnode *vp;
struct mount *mp = NULL; /* XXX: GCC */
struct lwp *l = ndp->ni_cnd.cn_lwp;
kauth_cred_t cred = l->l_cred;
struct vattr va;
int error;
pathname_t pn = NULL;
restart:
if (fmode & O_CREAT) {
ndp->ni_cnd.cn_nameiop = CREATE;
ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
if ((fmode & O_EXCL) == 0 &&
((fmode & O_NOFOLLOW) == 0))
ndp->ni_cnd.cn_flags |= FOLLOW;
} else {
ndp->ni_cnd.cn_nameiop = LOOKUP;
ndp->ni_cnd.cn_flags = LOCKLEAF;
if ((fmode & O_NOFOLLOW) == 0)
ndp->ni_cnd.cn_flags |= FOLLOW;
}
#if NVERIEXEC > 0
error = pathname_get(ndp->ni_dirp, ndp->ni_segflg, &pn);
if (error)
goto bad2;
ndp->ni_dirp = pathname_path(pn);
ndp->ni_segflg = UIO_SYSSPACE;
#endif /* NVERIEXEC > 0 */
error = namei(ndp);
if (error)
goto bad2;
vp = ndp->ni_vp;
#if NVERIEXEC > 0
error = veriexec_openchk(l, ndp->ni_vp, ndp->ni_dirp, fmode);
if (error)
goto bad;
#endif /* NVERIEXEC > 0 */
if (fmode & O_CREAT) {
if (ndp->ni_vp == NULL) {
VATTR_NULL(&va);
va.va_type = VREG;
va.va_mode = cmode;
if (fmode & O_EXCL)
va.va_vaflags |= VA_EXCLUSIVE;
if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
vput(ndp->ni_dvp);
if ((error = vn_start_write(NULL, &mp,
V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
goto bad2;
goto restart;
}
VOP_LEASE(ndp->ni_dvp, l, cred, LEASE_WRITE);
error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
&ndp->ni_cnd, &va);
vn_finished_write(mp, 0);
if (error)
goto bad2;
fmode &= ~O_TRUNC;
vp = ndp->ni_vp;
} else {
VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
if (ndp->ni_dvp == ndp->ni_vp)
vrele(ndp->ni_dvp);
else
vput(ndp->ni_dvp);
ndp->ni_dvp = NULL;
vp = ndp->ni_vp;
if (fmode & O_EXCL) {
error = EEXIST;
goto bad;
}
fmode &= ~O_CREAT;
}
} else {
vp = ndp->ni_vp;
}
if (vp->v_type == VSOCK) {
error = EOPNOTSUPP;
goto bad;
}
if (ndp->ni_vp->v_type == VLNK) {
error = EFTYPE;
goto bad;
}
if ((fmode & O_CREAT) == 0) {
if (fmode & FREAD) {
if ((error = VOP_ACCESS(vp, VREAD, cred, l)) != 0)
goto bad;
}
if (fmode & (FWRITE | O_TRUNC)) {
if (vp->v_type == VDIR) {
error = EISDIR;
goto bad;
}
if ((error = vn_writechk(vp)) != 0 ||
(error = VOP_ACCESS(vp, VWRITE, cred, l)) != 0)
goto bad;
}
}
if (fmode & O_TRUNC) {
VOP_UNLOCK(vp, 0); /* XXX */
if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0) {
vrele(vp);
goto bad2;
}
VOP_LEASE(vp, l, cred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
VATTR_NULL(&va);
va.va_size = 0;
error = VOP_SETATTR(vp, &va, cred, l);
vn_finished_write(mp, 0);
if (error != 0)
goto bad;
}
if ((error = VOP_OPEN(vp, fmode, cred, l)) != 0)
goto bad;
if (vp->v_type == VREG &&
uvn_attach(vp, fmode & FWRITE ? VM_PROT_WRITE : 0) == NULL) {
error = EIO;
goto bad;
}
if (fmode & FWRITE)
vp->v_writecount++;
bad:
if (error)
vput(vp);
bad2:
pathname_put(pn);
return (error);
}
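
/*
 * Illustrative sketch (not part of this file): a typical in-kernel
 * caller sets up namei state with NDINIT, opens by path, and later
 * closes.  The path and flags below are hypothetical.  Note that
 * vn_open() returns the vnode locked, while vn_close() expects it
 * unlocked and consumes the reference.
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/etc/example", l);
 *	if ((error = vn_open(&nd, FREAD, 0)) != 0)
 *		return error;
 *	VOP_UNLOCK(nd.ni_vp, 0);
 *	... read from nd.ni_vp ...
 *	error = vn_close(nd.ni_vp, FREAD, l->l_cred, l);
 */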
/*
* Check for write permissions on the specified vnode.
* Prototype text segments cannot be written.
*/
int
vn_writechk(struct vnode *vp)
{
/*
* If the vnode is in use as a process's text,
* we can't allow writing.
*/
if (vp->v_flag & VTEXT)
return (ETXTBSY);
return (0);
}
/*
* Mark a vnode as having executable mappings.
*/
void
vn_markexec(struct vnode *vp)
{
if ((vp->v_flag & VEXECMAP) == 0) {
uvmexp.filepages -= vp->v_uobj.uo_npages;
uvmexp.execpages += vp->v_uobj.uo_npages;
}
vp->v_flag |= VEXECMAP;
}
/*
* Mark a vnode as being the text of a process.
* Fail if the vnode is currently writable.
*/
int
vn_marktext(struct vnode *vp)
{
if (vp->v_writecount != 0) {
KASSERT((vp->v_flag & VTEXT) == 0);
return (ETXTBSY);
}
vp->v_flag |= VTEXT;
vn_markexec(vp);
return (0);
}
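
/*
 * Illustrative sketch (not part of this file): an exec-style path
 * would call vn_marktext() on the image vnode before mapping it, and
 * bail out with ETXTBSY while somebody still has it open for writing:
 *
 *	if ((error = vn_marktext(vp)) != 0)
 *		return error;
 */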
/*
* Vnode close call
*
* Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
*/
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l)
{
int error;
if (flags & FWRITE)
vp->v_writecount--;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_CLOSE(vp, flags, cred, l);
vput(vp);
return (error);
}
/*
* Package up an I/O request on a vnode into a uio and do it.
*/
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
struct lwp *l)
{
struct uio auio;
struct iovec aiov;
struct mount *mp = NULL;
int error;
if ((ioflg & IO_NODELOCKED) == 0) {
if (rw == UIO_READ) {
vn_lock(vp, LK_SHARED | LK_RETRY);
} else /* UIO_WRITE */ {
if (vp->v_type != VCHR &&
(error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH))
!= 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
}
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
aiov.iov_base = base;
aiov.iov_len = len;
auio.uio_resid = len;
auio.uio_offset = offset;
auio.uio_rw = rw;
if (segflg == UIO_SYSSPACE) {
UIO_SETUP_SYSSPACE(&auio);
} else {
auio.uio_vmspace = l->l_proc->p_vmspace;
}
if (rw == UIO_READ) {
error = VOP_READ(vp, &auio, ioflg, cred);
} else {
error = VOP_WRITE(vp, &auio, ioflg, cred);
}
if (aresid)
*aresid = auio.uio_resid;
else
if (auio.uio_resid && error == 0)
error = EIO;
if ((ioflg & IO_NODELOCKED) == 0) {
if (rw == UIO_WRITE)
vn_finished_write(mp, 0);
VOP_UNLOCK(vp, 0);
}
return (error);
}
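
/*
 * Illustrative sketch (not part of this file): reading the first
 * 512 bytes of a file into a kernel buffer.  With ioflg 0 the routine
 * locks the vnode itself, so vp must be unlocked here; because a
 * non-NULL aresid is passed, a short read is not an error and
 * sizeof(buf) - resid bytes are valid on return.
 *
 *	char buf[512];
 *	size_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, l->l_cred, &resid, l);
 */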
int
vn_readdir(struct file *fp, char *bf, int segflg, u_int count, int *done,
struct lwp *l, off_t **cookies, int *ncookies)
{
struct vnode *vp = (struct vnode *)fp->f_data;
struct iovec aiov;
struct uio auio;
int error, eofflag;
/* Limit the size on any kernel buffers used by VOP_READDIR */
count = min(MAXBSIZE, count);
unionread:
if (vp->v_type != VDIR)
return (EINVAL);
aiov.iov_base = bf;
aiov.iov_len = count;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
if (segflg == UIO_SYSSPACE) {
UIO_SETUP_SYSSPACE(&auio);
} else {
KASSERT(l == curlwp);
auio.uio_vmspace = l->l_proc->p_vmspace;
}
auio.uio_resid = count;
vn_lock(vp, LK_SHARED | LK_RETRY);
auio.uio_offset = fp->f_offset;
error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
ncookies);
fp->f_offset = auio.uio_offset;
VOP_UNLOCK(vp, 0);
if (error)
return (error);
#if defined(UNION) || defined(LKM)
if (count == auio.uio_resid && vn_union_readdir_hook) {
struct vnode *ovp = vp;
error = (*vn_union_readdir_hook)(&vp, fp, l);
if (error)
return (error);
if (vp != ovp)
goto unionread;
}
#endif /* UNION || LKM */
if (count == auio.uio_resid && (vp->v_flag & VROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
VREF(vp);
fp->f_data = vp;
fp->f_offset = 0;
vrele(tvp);
goto unionread;
}
*done = count - auio.uio_resid;
return error;
}
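
/*
 * Illustrative sketch (not part of this file): a getdents(2)-style
 * caller passes its user-space buffer straight through and does not
 * need cookies; ubuf and ucount are hypothetical syscall arguments.
 * For UIO_USERSPACE the lwp must be curlwp (see the KASSERT above).
 *
 *	int done, error;
 *
 *	error = vn_readdir(fp, ubuf, UIO_USERSPACE, ucount, &done,
 *	    curlwp, NULL, NULL);
 *	if (error == 0)
 *		*retval = done;
 */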
/*
* File table vnode read routine.
*/
static int
vn_read(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
int flags)
{
struct vnode *vp = (struct vnode *)fp->f_data;
int count, error, ioflag;
struct lwp *l = curlwp;
VOP_LEASE(vp, l, cred, LEASE_READ);
ioflag = IO_ADV_ENCODE(fp->f_advice);
if (fp->f_flag & FNONBLOCK)
ioflag |= IO_NDELAY;
if ((fp->f_flag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
ioflag |= IO_SYNC;
if (fp->f_flag & FALTIO)
ioflag |= IO_ALTSEMANTICS;
if (fp->f_flag & FDIRECT)
ioflag |= IO_DIRECT;
vn_lock(vp, LK_SHARED | LK_RETRY);
uio->uio_offset = *offset;
count = uio->uio_resid;
error = VOP_READ(vp, uio, ioflag, cred);
if (flags & FOF_UPDATE_OFFSET)
*offset += count - uio->uio_resid;
VOP_UNLOCK(vp, 0);
return (error);
}
/*
* File table vnode write routine.
*/
static int
vn_write(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
int flags)
{
struct vnode *vp = (struct vnode *)fp->f_data;
struct mount *mp;
int count, error, ioflag = IO_UNIT;
struct lwp *l = curlwp;
if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
ioflag |= IO_APPEND;
if (fp->f_flag & FNONBLOCK)
ioflag |= IO_NDELAY;
if (fp->f_flag & FFSYNC ||
(vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
ioflag |= IO_SYNC;
else if (fp->f_flag & FDSYNC)
ioflag |= IO_DSYNC;
if (fp->f_flag & FALTIO)
ioflag |= IO_ALTSEMANTICS;
if (fp->f_flag & FDIRECT)
ioflag |= IO_DIRECT;
mp = NULL;
if (vp->v_type != VCHR &&
(error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
return (error);
VOP_LEASE(vp, l, cred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
uio->uio_offset = *offset;
count = uio->uio_resid;
error = VOP_WRITE(vp, uio, ioflag, cred);
if (flags & FOF_UPDATE_OFFSET) {
if (ioflag & IO_APPEND)
*offset = uio->uio_offset;
else
*offset += count - uio->uio_resid;
}
VOP_UNLOCK(vp, 0);
vn_finished_write(mp, 0);
return (error);
}
/*
* File table vnode stat routine.
*/
static int
vn_statfile(struct file *fp, struct stat *sb, struct lwp *l)
{
struct vnode *vp = (struct vnode *)fp->f_data;
return vn_stat(vp, sb, l);
}
int
vn_stat(struct vnode *vp, struct stat *sb, struct lwp *l)
{
struct vattr va;
int error;
mode_t mode;
error = VOP_GETATTR(vp, &va, l->l_cred, l);
if (error)
return (error);
/*
* Copy from vattr table
*/
sb->st_dev = va.va_fsid;
sb->st_ino = va.va_fileid;
mode = va.va_mode;
switch (vp->v_type) {
case VREG:
mode |= S_IFREG;
break;
case VDIR:
mode |= S_IFDIR;
break;
case VBLK:
mode |= S_IFBLK;
break;
case VCHR:
mode |= S_IFCHR;
break;
case VLNK:
mode |= S_IFLNK;
break;
case VSOCK:
mode |= S_IFSOCK;
break;
case VFIFO:
mode |= S_IFIFO;
break;
default:
return (EBADF);
}
sb->st_mode = mode;
sb->st_nlink = va.va_nlink;
sb->st_uid = va.va_uid;
sb->st_gid = va.va_gid;
sb->st_rdev = va.va_rdev;
sb->st_size = va.va_size;
sb->st_atimespec = va.va_atime;
sb->st_mtimespec = va.va_mtime;
sb->st_ctimespec = va.va_ctime;
sb->st_birthtimespec = va.va_birthtime;
sb->st_blksize = va.va_blocksize;
sb->st_flags = va.va_flags;
sb->st_gen = 0;
sb->st_blocks = va.va_bytes / S_BLKSIZE;
return (0);
}
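
/*
 * Illustrative sketch (not part of this file): any holder of a vnode
 * reference can fill in a struct stat directly:
 *
 *	struct stat sb;
 *	int error;
 *
 *	error = vn_stat(vp, &sb, l);
 */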
/*
* File table vnode fcntl routine.
*/
static int
vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l)
{
struct vnode *vp = ((struct vnode *)fp->f_data);
int error;
error = VOP_FCNTL(vp, com, data, fp->f_flag, l->l_cred, l);
return (error);
}
/*
* File table vnode ioctl routine.
*/
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l)
1994-05-17 08:21:49 +04:00
{
struct vnode *vp = ((struct vnode *)fp->f_data), *ovp;
struct proc *p = l->l_proc;
struct vattr vattr;
int error;
switch (vp->v_type) {
case VREG:
case VDIR:
if (com == FIONREAD) {
error = VOP_GETATTR(vp, &vattr, l->l_cred, l);
if (error)
return (error);
*(int *)data = vattr.va_size - fp->f_offset;
return (0);
}
if ((com == FIONWRITE) || (com == FIONSPACE)) {
/*
* Files don't have send queues, so there never
* are any bytes in them, nor is there any
* open space in them.
*/
*(int *)data = 0;
return (0);
}
if (com == FIOGETBMAP) {
daddr_t *block;
if (*(daddr_t *)data < 0)
return (EINVAL);
block = (daddr_t *)data;
return (VOP_BMAP(vp, *block, NULL, block, NULL));
}
if (com == OFIOGETBMAP) {
daddr_t ibn, obn;
if (*(int32_t *)data < 0)
return (EINVAL);
ibn = (daddr_t)*(int32_t *)data;
error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
*(int32_t *)data = (int32_t)obn;
return error;
}
if (com == FIONBIO || com == FIOASYNC) /* XXX */
return (0); /* XXX */
/* fall into ... */
case VFIFO:
case VCHR:
case VBLK:
error = VOP_IOCTL(vp, com, data, fp->f_flag,
l->l_cred, l);
if (error == 0 && com == TIOCSCTTY) {
VREF(vp);
rw_enter(&proclist_lock, RW_WRITER);
ovp = p->p_session->s_ttyvp;
p->p_session->s_ttyvp = vp;
rw_exit(&proclist_lock);
if (ovp != NULL)
vrele(ovp);
}
return (error);
default:
return (EPASSTHROUGH);
}
}
/*
* File table vnode poll routine.
*/
static int
vn_poll(struct file *fp, int events, struct lwp *l)
{
return (VOP_POLL(((struct vnode *)fp->f_data), events, l));
}
/*
* File table vnode kqfilter routine.
*/
int
vn_kqfilter(struct file *fp, struct knote *kn)
{
return (VOP_KQFILTER((struct vnode *)fp->f_data, kn));
}
/*
* Check that the vnode is still valid, and if so
* acquire requested lock.
*/
int
vn_lock(struct vnode *vp, int flags)
{
int error;
#if 0
KASSERT(vp->v_usecount > 0 || (flags & LK_INTERLOCK) != 0
|| (vp->v_flag & VONWORKLST) != 0);
#endif
KASSERT((flags &
~(LK_INTERLOCK|LK_SHARED|LK_EXCLUSIVE|LK_DRAIN|LK_NOWAIT|LK_RETRY|
LK_SETRECURSE|LK_CANRECURSE))
== 0);
do {
if ((flags & LK_INTERLOCK) == 0)
simple_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
if (flags & LK_NOWAIT) {
simple_unlock(&vp->v_interlock);
return EBUSY;
}
vp->v_flag |= VXWANT;
ltsleep(vp, PINOD | PNORELOCK,
"vn_lock", 0, &vp->v_interlock);
error = ENOENT;
} else {
error = VOP_LOCK(vp,
(flags & ~LK_RETRY) | LK_INTERLOCK);
if (error == 0 || error == EDEADLK || error == EBUSY)
return (error);
}
flags &= ~LK_INTERLOCK;
} while (flags & LK_RETRY);
return (error);
}
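
/*
 * Illustrative sketch (not part of this file): the usual pairing.
 * LK_RETRY keeps retrying until a lock is obtained even if the vnode
 * is being revoked; without it, vn_lock() returns ENOENT when the
 * vnode is in the middle of being cleaned (VXLOCK set).
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... operate on vp ...
 *	VOP_UNLOCK(vp, 0);
 */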
/*
* File table vnode close routine.
*/
static int
vn_closefile(struct file *fp, struct lwp *l)
1994-05-17 08:21:49 +04:00
{
return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
fp->f_cred, l));
}
/*
* Enable LK_CANRECURSE on lock. Return prior status.
*/
u_int
vn_setrecurse(struct vnode *vp)
{
struct lock *lkp = &vp->v_lock;
u_int retval = lkp->lk_flags & LK_CANRECURSE;
lkp->lk_flags |= LK_CANRECURSE;
return retval;
}
/*
* Called when done with locksetrecurse.
*/
void
vn_restorerecurse(struct vnode *vp, u_int flags)
{
struct lock *lkp = &vp->v_lock;
lkp->lk_flags &= ~LK_CANRECURSE;
lkp->lk_flags |= flags;
}
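
/*
 * Illustrative sketch (not part of this file): bracketing a section
 * that may take the vnode lock again while it is already held:
 *
 *	u_int saved = vn_setrecurse(vp);
 *	... code that may call vn_lock() on vp recursively ...
 *	vn_restorerecurse(vp, saved);
 */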
int
vn_cow_establish(struct vnode *vp,
int (*func)(void *, struct buf *), void *cookie)
{
int s;
struct spec_cow_entry *e;
MALLOC(e, struct spec_cow_entry *, sizeof(struct spec_cow_entry),
M_DEVBUF, M_WAITOK);
e->ce_func = func;
e->ce_cookie = cookie;
SPEC_COW_LOCK(vp->v_specinfo, s);
vp->v_spec_cow_req++;
while (vp->v_spec_cow_count > 0)
ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
&vp->v_spec_cow_slock);
SLIST_INSERT_HEAD(&vp->v_spec_cow_head, e, ce_list);
vp->v_spec_cow_req--;
if (vp->v_spec_cow_req == 0)
wakeup(&vp->v_spec_cow_req);
SPEC_COW_UNLOCK(vp->v_specinfo, s);
return 0;
}
int
vn_cow_disestablish(struct vnode *vp,
int (*func)(void *, struct buf *), void *cookie)
{
int s;
struct spec_cow_entry *e;
SPEC_COW_LOCK(vp->v_specinfo, s);
vp->v_spec_cow_req++;
while (vp->v_spec_cow_count > 0)
ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
&vp->v_spec_cow_slock);
SLIST_FOREACH(e, &vp->v_spec_cow_head, ce_list)
if (e->ce_func == func && e->ce_cookie == cookie) {
SLIST_REMOVE(&vp->v_spec_cow_head, e,
spec_cow_entry, ce_list);
FREE(e, M_DEVBUF);
break;
}
vp->v_spec_cow_req--;
if (vp->v_spec_cow_req == 0)
wakeup(&vp->v_spec_cow_req);
SPEC_COW_UNLOCK(vp->v_specinfo, s);
return e ? 0 : EINVAL;
}
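
/*
 * Illustrative sketch (not part of this file): a copy-on-write client
 * such as a snapshot driver registers a hook on the device vnode and
 * removes it when it shuts down; mycowfunc and mycookie are
 * hypothetical.
 *
 *	vn_cow_establish(devvp, mycowfunc, mycookie);
 *	...
 *	vn_cow_disestablish(devvp, mycowfunc, mycookie);
 */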
/*
* Simplified in-kernel wrapper calls for extended attribute access.
* Both calls pass in a NULL credential, authorizing a "kernel" access.
* Set IO_NODELOCKED in ioflg if the vnode is already locked.
*/
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
struct uio auio;
struct iovec aiov;
int error;
aiov.iov_len = *buflen;
aiov.iov_base = bf;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
auio.uio_offset = 0;
auio.uio_resid = *buflen;
UIO_SETUP_SYSSPACE(&auio);
if ((ioflg & IO_NODELOCKED) == 0)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
l);
if ((ioflg & IO_NODELOCKED) == 0)
VOP_UNLOCK(vp, 0);
if (error == 0)
*buflen = *buflen - auio.uio_resid;
return (error);
}
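
/*
 * Illustrative sketch (not part of this file): fetching a named
 * attribute into a kernel buffer; the attribute name is hypothetical.
 * With ioflg 0 the routine locks the vnode itself.  On success, len
 * is updated to the number of bytes actually read.
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example", &len, buf, l);
 */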
/*
* XXX Failure mode if partially written?
*/
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
struct uio auio;
struct iovec aiov;
struct mount *mp = NULL; /* XXX: GCC */
int error;
aiov.iov_len = buflen;
aiov.iov_base = __UNCONST(bf); /* XXXUNCONST kills const */
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_WRITE;
auio.uio_offset = 0;
auio.uio_resid = buflen;
UIO_SETUP_SYSSPACE(&auio);
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, l);
if ((ioflg & IO_NODELOCKED) == 0) {
vn_finished_write(mp, 0);
VOP_UNLOCK(vp, 0);
}
return (error);
}
int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
const char *attrname, struct lwp *l)
{
struct mount *mp = NULL; /* XXX: GCC */
int error;
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, l);
if (error == EOPNOTSUPP)
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
NULL, l);
if ((ioflg & IO_NODELOCKED) == 0) {
vn_finished_write(mp, 0);
VOP_UNLOCK(vp, 0);
}
return (error);
}
/*
* OBSOLETE -- this function will be removed in the near future!
*
* Prepare to start a filesystem write operation. If the operation is
* permitted, then we bump the count of operations in progress and
* proceed. If a suspend request is in progress, we wait until the
* suspension is over, and then proceed.
* V_PCATCH adds PCATCH to the tsleep flags.
* V_WAIT waits until suspension is over. Otherwise returns EWOULDBLOCK.
* V_SLEEPONLY wait, but do not bump the operations count.
* V_LOWER this is a lower level operation. No further vnodes should be
* locked. Otherwise it is an upper level operation. No vnodes
* should be locked.
*/
int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
/*
* If a vnode is provided, get and return the mount point to which
* it will write.
*/
if (vp != NULL) {
*mpp = vp->v_mount;
}
return 0;
}
/*
* OBSOLETE -- this function will be removed in the near future!
*
* Filesystem write operation has completed. If we are suspending and this
* operation is the last one, notify the suspender that the suspension is
* now in effect.
*/
void
vn_finished_write(struct mount *mp, int flags)
{
}
void
vn_ra_allocctx(struct vnode *vp)
{
struct uvm_ractx *ra = NULL;
if (vp->v_type != VREG) {
return;
}
if (vp->v_ractx != NULL) {
return;
}
simple_lock(&vp->v_interlock);
if (vp->v_ractx == NULL) {
simple_unlock(&vp->v_interlock);
ra = uvm_ra_allocctx();
simple_lock(&vp->v_interlock);
if (ra != NULL && vp->v_ractx == NULL) {
vp->v_ractx = ra;
ra = NULL;
}
}
simple_unlock(&vp->v_interlock);
if (ra != NULL) {
uvm_ra_freectx(ra);
}
}