/* NetBSD: sys/kern/spec_vnops.c */

/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *        from: @(#)spec_vnops.c 7.37 (Berkeley) 5/30/91
 *        $Id: spec_vnops.c,v 1.4 1993/06/27 06:01:50 andrew Exp $
 */
#include "param.h"
#include "proc.h"
#include "systm.h"
#include "kernel.h"
#include "conf.h"
#include "buf.h"
#include "mount.h"
#include "namei.h"
#include "vnode.h"
#include "specdev.h"
#include "stat.h"
#include "errno.h"
#include "ioctl.h"
#include "file.h"
#include "dkbad.h" /* XXX */
#include "disklabel.h"

/* symbolic sleep message strings for devices */
char devopn[] = "devopn";
char devio[] = "devio";
char devwait[] = "devwait";
char devin[] = "devin";
char devout[] = "devout";
char devioc[] = "devioc";
char devcls[] = "devcls";

struct vnodeops spec_vnodeops = {
        spec_lookup,            /* lookup */
        spec_create,            /* create */
        spec_mknod,             /* mknod */
        spec_open,              /* open */
        spec_close,             /* close */
        spec_access,            /* access */
        spec_getattr,           /* getattr */
        spec_setattr,           /* setattr */
        spec_read,              /* read */
        spec_write,             /* write */
        spec_ioctl,             /* ioctl */
        spec_select,            /* select */
        spec_mmap,              /* mmap */
        spec_fsync,             /* fsync */
        spec_seek,              /* seek */
        spec_remove,            /* remove */
        spec_link,              /* link */
        spec_rename,            /* rename */
        spec_mkdir,             /* mkdir */
        spec_rmdir,             /* rmdir */
        spec_symlink,           /* symlink */
        spec_readdir,           /* readdir */
        spec_readlink,          /* readlink */
        spec_abortop,           /* abortop */
        spec_inactive,          /* inactive */
        spec_reclaim,           /* reclaim */
        spec_lock,              /* lock */
        spec_unlock,            /* unlock */
        spec_bmap,              /* bmap */
        spec_strategy,          /* strategy */
        spec_print,             /* print */
        spec_islocked,          /* islocked */
        spec_advlock,           /* advlock */
};
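
/*
 * Illustrative sketch (macro and field names approximate): the VFS layer
 * reaches these routines through the operations vector stored in each
 * vnode, so a call such as VOP_OPEN(vp, mode, cred, p) on a special-file
 * vnode resolves roughly to
 *
 *        (*vp->v_op->vop_open)(vp, mode, cred, p)
 *
 * and, with v_op pointing at spec_vnodeops, ends up in spec_open() below.
 */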

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(vp, ndp, p)
        struct vnode *vp;
        struct nameidata *ndp;
        struct proc *p;
{

        ndp->ni_dvp = vp;
        ndp->ni_vp = NULL;
        return (ENOTDIR);
}

/*
 * Open a special file: Don't allow open if fs is mounted -nodev,
 * and don't allow opens of block devices that are currently mounted.
 * Otherwise, call device driver open function.
 */
/* ARGSUSED */
int
spec_open(vp, mode, cred, p)
        register struct vnode *vp;
        int mode;
        struct ucred *cred;
        struct proc *p;
{
        dev_t dev = (dev_t)vp->v_rdev;
        register int maj = major(dev);
        int error;

        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
                return (ENXIO);

        switch (vp->v_type) {

        case VCHR:
                if ((u_int)maj >= nchrdev)
                        return (ENXIO);
                VOP_UNLOCK(vp);
                error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
                VOP_LOCK(vp);
                return (error);

        case VBLK:
                if ((u_int)maj >= nblkdev)
                        return (ENXIO);
                if (error = mountedon(vp))
                        return (error);
                return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));

        default:
                return (0);
        }
}
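
/*
 * Example with assumed numbers: opening a character special file whose
 * dev_t carries major number 3 becomes
 *
 *        (*cdevsw[3].d_open)(dev, mode, S_IFCHR, p);
 *
 * The vnode is unlocked around the driver call because a device open may
 * sleep indefinitely (a tty waiting for carrier, for instance), and the
 * vnode lock should not be held across such a wait.
 */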

/*
 * Vnode op for read
 */
/* ARGSUSED */
int
spec_read(vp, uio, ioflag, cred)
        register struct vnode *vp;
        register struct uio *uio;
        int ioflag;
        struct ucred *cred;
{
        struct proc *p = uio->uio_procp;
        struct buf *bp;
        daddr_t bn;
        long bsize, bscale;
        struct partinfo dpart;
        register int n, on;
        int error = 0;
        extern int mem_no;

#ifdef DIAGNOSTIC
        if (uio->uio_rw != UIO_READ)
                panic("spec_read mode");
        if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
                panic("spec_read proc");
#endif
        if (uio->uio_resid == 0)
                return (0);

        switch (vp->v_type) {

        case VCHR:
                /*
                 * Negative offsets allowed only for /dev/kmem
                 */
                if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
                        return (EINVAL);
                VOP_UNLOCK(vp);
                error = (*cdevsw[major(vp->v_rdev)].d_read)
                        (vp->v_rdev, uio, ioflag);
                VOP_LOCK(vp);
                return (error);

        case VBLK:
                if (uio->uio_offset < 0)
                        return (EINVAL);
                bsize = BLKDEV_IOSIZE;
                if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
                    (caddr_t)&dpart, FREAD, p) == 0) {
                        if (dpart.part->p_fstype == FS_BSDFFS &&
                            dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
                                bsize = dpart.part->p_frag *
                                    dpart.part->p_fsize;
                }
                bscale = bsize / DEV_BSIZE;
                do {
                        bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
                        on = uio->uio_offset % bsize;
                        n = MIN((unsigned)(bsize - on), uio->uio_resid);
                        if (vp->v_lastr + bscale == bn)
                                error = breada(vp, bn, (int)bsize, bn + bscale,
                                    (int)bsize, NOCRED, &bp);
                        else
                                error = bread(vp, bn, (int)bsize, NOCRED, &bp);
                        vp->v_lastr = bn;
                        n = MIN(n, bsize - bp->b_resid);
                        if (error) {
                                brelse(bp);
                                return (error);
                        }
                        error = uiomove(bp->b_un.b_addr + on, n, uio);
#ifdef OMIT     /* 20 Aug 92 */
                        if (n + on == bsize)
                                bp->b_flags |= B_AGE;
#endif /* OMIT */
                        brelse(bp);
                } while (error == 0 && uio->uio_resid > 0 && n != 0);
                return (error);

        default:
                panic("spec_read type");
        }
        /* NOTREACHED */
}
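
/*
 * Worked example for the block-device arithmetic above, with assumed
 * values: p_frag = 8 and p_fsize = 1024 give bsize = 8192 and bscale = 16.
 * For uio_offset = 10000 and uio_resid = 4096:
 *
 *        bn = (10000 / 512) &~ 15 = 16       (block starting at byte 8192)
 *        on = 10000 % 8192        = 1808     (offset within that block)
 *        n  = MIN(8192 - 1808, 4096) = 4096
 *
 * so this pass reads the 8192-byte block at bn and copies 4096 bytes out
 * starting 1808 bytes in.  If the previous read ended at the preceding
 * block (v_lastr + bscale == bn), the next block is read ahead via breada().
 */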

/*
 * Vnode op for write
 */
/* ARGSUSED */
int
spec_write(vp, uio, ioflag, cred)
        register struct vnode *vp;
        register struct uio *uio;
        int ioflag;
        struct ucred *cred;
{
        struct proc *p = uio->uio_procp;
        struct buf *bp;
        daddr_t bn;
        int bsize, blkmask;
        struct partinfo dpart;
        register int n, on;
        int error = 0;
        extern int mem_no;

#ifdef DIAGNOSTIC
        if (uio->uio_rw != UIO_WRITE)
                panic("spec_write mode");
        if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
                panic("spec_write proc");
#endif

        switch (vp->v_type) {

        case VCHR:
                /*
                 * Negative offsets allowed only for /dev/kmem
                 */
                if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
                        return (EINVAL);
                VOP_UNLOCK(vp);
                error = (*cdevsw[major(vp->v_rdev)].d_write)
                        (vp->v_rdev, uio, ioflag);
                VOP_LOCK(vp);
                return (error);

        case VBLK:
                if (uio->uio_resid == 0)
                        return (0);
                if (uio->uio_offset < 0)
                        return (EINVAL);
                bsize = BLKDEV_IOSIZE;
                if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
                    (caddr_t)&dpart, FREAD, p) == 0) {
                        if (dpart.part->p_fstype == FS_BSDFFS &&
                            dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
                                bsize = dpart.part->p_frag *
                                    dpart.part->p_fsize;
                }
                blkmask = (bsize / DEV_BSIZE) - 1;
                do {
                        bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
                        on = uio->uio_offset % bsize;
                        n = MIN((unsigned)(bsize - on), uio->uio_resid);
                        if (n == bsize)
                                bp = getblk(vp, bn, bsize);
                        else
                                error = bread(vp, bn, bsize, NOCRED, &bp);
                        n = MIN(n, bsize - bp->b_resid);
                        if (error) {
                                brelse(bp);
                                return (error);
                        }
                        error = uiomove(bp->b_un.b_addr + on, n, uio);
                        if (n + on == bsize) {
                                bp->b_flags |= B_AGE;
                                bawrite(bp);
                        } else
                                bdwrite(bp);
                } while (error == 0 && uio->uio_resid > 0 && n != 0);
                return (error);

        default:
                panic("spec_write type");
        }
        /* NOTREACHED */
}
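
/*
 * Illustration of the two write paths above (assumed bsize = 8192): a
 * block-aligned write of exactly 8192 bytes takes the getblk() path, since
 * the entire block is overwritten and need not be read first; it is then
 * pushed out asynchronously with bawrite().  A 512-byte write at offset
 * 10000 covers only part of a block, so the block is read with bread(),
 * the 512 bytes are copied in at on = 1808, and the buffer is marked for
 * a delayed write with bdwrite().
 */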

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(vp, com, data, fflag, cred, p)
        struct vnode *vp;
        int com;
        caddr_t data;
        int fflag;
        struct ucred *cred;
        struct proc *p;
{
        dev_t dev = vp->v_rdev;

        switch (vp->v_type) {

        case VCHR:
                return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
                    fflag, p));

        case VBLK:
                if (com == 0 && (int)data == B_TAPE)
                        if (bdevsw[major(dev)].d_flags & B_TAPE)
                                return (0);
                        else
                                return (1);
                return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
                    fflag, p));

        default:
                panic("spec_ioctl");
                /* NOTREACHED */
        }
}

/* ARGSUSED */
int
spec_select(vp, which, fflags, cred, p)
        struct vnode *vp;
        int which, fflags;
        struct ucred *cred;
        struct proc *p;
{
        register dev_t dev;

        switch (vp->v_type) {

        default:
                return (1);             /* XXX */

        case VCHR:
                dev = vp->v_rdev;
                return (*cdevsw[major(dev)].d_select)(dev, which, p);
        }
}

/*
 * Just call the device strategy routine
 */
int
spec_strategy(bp)
        register struct buf *bp;
{

        (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
        return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(vp, bn, vpp, bnp)
        struct vnode *vp;
        daddr_t bn;
        struct vnode **vpp;
        daddr_t *bnp;
{

        if (vpp != NULL)
                *vpp = vp;
        if (bnp != NULL)
                *bnp = bn;
        return (0);
}
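
/*
 * For a special file the logical block number already is the physical
 * block number on the underlying device, so a call such as
 * spec_bmap(vp, 37, &nvp, &nbn) (hypothetical values) simply yields
 * nvp == vp and nbn == 37.
 */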

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
int
spec_lock(vp)
        struct vnode *vp;
{

        return (0);
}

/* ARGSUSED */
int
spec_unlock(vp)
        struct vnode *vp;
{

        return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(vp, flag, cred, p)
        register struct vnode *vp;
        int flag;
        struct ucred *cred;
        struct proc *p;
{
        dev_t dev = vp->v_rdev;
        int (*devclose) __P((dev_t, int, int, struct proc *));
        int mode;

        switch (vp->v_type) {

        case VCHR:
                /*
                 * If the vnode is locked, then we are in the midst
                 * of forcibly closing the device, otherwise we only
                 * close on last reference.
                 */
                if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
                        return (0);
                devclose = cdevsw[major(dev)].d_close;
                mode = S_IFCHR;
                break;

        case VBLK:
                /*
                 * On last close of a block device (that isn't mounted)
                 * we must invalidate any in core blocks, so that
                 * we can, for instance, change floppy disks.
                 */
                vflushbuf(vp, 0);
                if (vinvalbuf(vp, 1))
                        return (0);
                /*
                 * We do not want to really close the device if it
                 * is still in use unless we are trying to close it
                 * forcibly.  Since every use (buffer, vnode, swap, cmap)
                 * holds a reference to the vnode, and because we mark
                 * any other vnodes that alias this device, when the
                 * sum of the reference counts on all the aliased
                 * vnodes descends to one, we are on last close.
                 */
                if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
                        return (0);
                devclose = bdevsw[major(dev)].d_close;
                mode = S_IFBLK;
                break;

        default:
                panic("spec_close: not special");
        }

        return ((*devclose)(dev, flag, mode, p));
}
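
/*
 * Example of the last-close logic above (hypothetical scenario): if two
 * device nodes carrying the same dev_t are open, their vnodes alias one
 * another and vcount() sums the references across all aliases.  The
 * driver's d_close is therefore deferred until that combined count drops
 * to one, unless the vnode is being revoked (VXLOCK is set), in which
 * case the close goes through immediately.
 */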

/*
 * Print out the contents of a special device vnode.
 */
void
spec_print(vp)
        struct vnode *vp;
{

        printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
            minor(vp->v_rdev));
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
int
spec_advlock(vp, id, op, fl, flags)
        struct vnode *vp;
        caddr_t id;
        int op;
        struct flock *fl;
        int flags;
{

        return (EOPNOTSUPP);
}

/*
 * Special device failed operation
 */
int
spec_ebadf()
{

        return (EBADF);
}

/*
 * Special device bad operation
 */
int
spec_badop()
{

        panic("spec_badop called");
        /* NOTREACHED */
}