Run all i/o from or to backing store through fss_bs_io().

Replace VOP_BMAP()/VOP_BALLOC() by vn_rdwr()/VOP_PUTPAGES().

Inspired by discussion on tech-kern@
This commit is contained in:
hannken 2005-10-24 14:25:06 +00:00
parent 0dc198a64d
commit 366a53ff0c
2 changed files with 15 additions and 215 deletions

View File

@@ -1,4 +1,4 @@
/* $NetBSD: fss.c,v 1.16 2005/10/15 17:29:11 yamt Exp $ */
/* $NetBSD: fss.c,v 1.17 2005/10/24 14:25:06 hannken Exp $ */
/*-
* Copyright (c) 2003 The NetBSD Foundation, Inc.
@@ -43,7 +43,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fss.c,v 1.16 2005/10/15 17:29:11 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: fss.c,v 1.17 2005/10/24 14:25:06 hannken Exp $");
#include "fss.h"
@@ -136,10 +136,7 @@ static int fss_softc_alloc(struct fss_softc *);
static void fss_softc_free(struct fss_softc *);
static void fss_cluster_iodone(struct buf *);
static void fss_read_cluster(struct fss_softc *, u_int32_t);
static int fss_write_cluster(struct fss_cache *, u_int32_t);
static void fss_bs_thread(void *);
static int fss_bmap(struct fss_softc *, off_t, int,
struct vnode **, daddr_t *, int *);
static int fss_bs_io(struct fss_softc *, fss_io_type,
u_int32_t, off_t, int, caddr_t);
static u_int32_t *fss_bs_indir(struct fss_softc *, u_int32_t);
@@ -659,11 +656,9 @@ fss_create_files(struct fss_softc *sc, struct fss_set *fss,
if (sc->sc_bs_bshift >= 32)
return EINVAL;
sc->sc_bs_bmask = FSS_FSBSIZE(sc)-1;
sc->sc_flags |= FSS_BS_ALLOC;
} else {
sc->sc_bs_bshift = DEV_BSHIFT;
sc->sc_bs_bmask = FSS_FSBSIZE(sc)-1;
sc->sc_flags &= ~FSS_BS_ALLOC;
}
/*
@@ -812,75 +807,13 @@ fss_delete_snapshot(struct fss_softc *sc, struct proc *p)
else
vn_close(sc->sc_bs_vp, FREAD|FWRITE, p->p_ucred, p);
sc->sc_bs_vp = NULL;
sc->sc_flags &= ~(FSS_PERSISTENT|FSS_BS_ALLOC);
sc->sc_flags &= ~FSS_PERSISTENT;
FSS_STAT_CLEAR(sc);
return 0;
}
/*
 * Get the block address and number of contiguous blocks.
 * If the file contains a hole, try to allocate.
 * Backing store is locked by caller.
 *
 * sc    - snapshot softc; its backing-store vnode (sc_bs_vp) is mapped
 * start - byte offset into the backing store
 * len   - number of bytes the caller wants mapped
 * vpp   - returns the underlying device vnode from VOP_BMAP()
 * bnp   - returns the device block number; (daddr_t)-1 means a hole
 * runp  - returns the number of contiguous blocks beyond the first
 *
 * Returns 0 on success, ENOSPC when the range cannot be backed by real
 * blocks, or an error from VOP_BMAP()/VOP_BALLOC().
 *
 * NOTE(review): this is the pre-change implementation removed by this
 * commit in favour of vn_rdwr()/VOP_PUTPAGES() in fss_bs_io().
 */
static int
fss_bmap(struct fss_softc *sc, off_t start, int len,
struct vnode **vpp, daddr_t *bnp, int *runp)
{
int l, s, error;
struct buf *bp, **bpp;
/*
 * On a softdep mount, ask VOP_BALLOC() to hand back the buffer so the
 * registered bioops hooks can be run and the buffer invalidated below;
 * otherwise no buffer is needed (bpp == NULL).
 */
if ((sc->sc_bs_vp->v_mount->mnt_flag & MNT_SOFTDEP) != 0)
bpp = &bp;
else
bpp = NULL;
error = VOP_BMAP(sc->sc_bs_vp, FSS_BTOFSB(sc, start), vpp, bnp, runp);
/* Already mapped to a real block, or allocation not permitted. */
if ((error == 0 && *bnp != (daddr_t)-1) ||
(sc->sc_flags & FSS_BS_ALLOC) == 0)
goto out;
/* Never allocate past the configured backing-store size. */
if (start+len >= sc->sc_bs_size) {
error = ENOSPC;
goto out;
}
/* Fill the hole one file system block at a time. */
for (l = 0; l < len; l += FSS_FSBSIZE(sc)) {
error = VOP_BALLOC(sc->sc_bs_vp, start+l, FSS_FSBSIZE(sc),
sc->sc_bs_proc->p_ucred, 0, bpp);
if (error)
goto out;
if (bpp == NULL)
continue;
/*
 * Softdep case: run the io_start/io_complete hooks while the
 * buffer has dependencies, then mark the buffer invalid before
 * releasing it so its contents are not kept in the cache.
 */
s = splbio();
simple_lock(&bp->b_interlock);
if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_start)
(*bioops.io_start)(bp);
if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
(*bioops.io_complete)(bp);
bp->b_flags |= B_INVAL;
simple_unlock(&bp->b_interlock);
splx(s);
brelse(bp);
}
/* Re-run the mapping now that the hole has been allocated. */
error = VOP_BMAP(sc->sc_bs_vp, FSS_BTOFSB(sc, start), vpp, bnp, runp);
out:
/* Only a persistent snapshot may legitimately map to a hole. */
if ((sc->sc_flags & FSS_PERSISTENT) == 0 &&
error == 0 && *bnp == (daddr_t)-1)
error = ENOSPC;
return error;
}
/*
* A read from the snapshotted block device has completed.
*/
@@ -1016,84 +949,6 @@ restart:
wakeup(&sc->sc_bs_proc);
}
/*
 * Write a cluster from the cache to the backing store.
 *
 * scp - cache slot holding the cluster data (fc_data) and owning softc
 * cl  - cluster number; converted to a backing-store byte offset
 *
 * Returns 0 on success or an error from fss_bmap()/VOP_PUTPAGES().
 * Locks the backing-store vnode for the duration of the call and issues
 * asynchronous writes via VOP_STRATEGY(), then waits for them to drain.
 *
 * NOTE(review): removed by this commit; callers now go through fss_bs_io().
 */
static int
fss_write_cluster(struct fss_cache *scp, u_int32_t cl)
{
int s, error, todo, len, nra;
daddr_t nbn;
caddr_t addr;
off_t pos;
struct buf *bp;
struct vnode *vp;
struct fss_softc *sc;
error = 0;
sc = scp->fc_softc;
pos = FSS_CLTOB(sc, cl);
addr = scp->fc_data;
todo = FSS_CLSIZE(sc);
vn_lock(sc->sc_bs_vp, LK_EXCLUSIVE|LK_RETRY);
/*
 * Synchronously clean and free any cached pages covering the target
 * byte range before writing the raw blocks underneath them.
 */
simple_lock(&sc->sc_bs_vp->v_interlock);
error = VOP_PUTPAGES(sc->sc_bs_vp, trunc_page(pos),
round_page(pos+todo), PGO_CLEANIT|PGO_SYNCIO|PGO_FREE);
/*
 * Map each contiguous run of blocks and fire off one async write
 * per run; fss_cluster_iodone() runs on completion of each buf.
 */
while (error == 0 && todo > 0) {
error = fss_bmap(sc, pos, todo, &vp, &nbn, &nra);
if (error)
break;
/* Bytes available in this contiguous run, clipped to the rest. */
len = FSS_FSBTOB(sc, nra+1)-FSS_FSBOFF(sc, pos);
if (len > todo)
len = todo;
s = splbio();
bp = pool_get(&bufpool, PR_WAITOK);
splx(s);
BUF_INIT(bp);
bp->b_flags = B_CALL;
bp->b_bcount = len;
bp->b_bufsize = bp->b_bcount;
bp->b_error = 0;
bp->b_data = addr;
bp->b_blkno = bp->b_rawblkno = nbn+btodb(FSS_FSBOFF(sc, pos));
bp->b_proc = NULL;
bp->b_vp = NULLVP;
bp->b_private = scp;
bp->b_iodone = fss_cluster_iodone;
bgetvp(vp, bp);
bp->b_vp->v_numoutput++;
BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
VOP_STRATEGY(vp, bp);
/* Account the in-flight write; fc_xfercount is decremented on iodone. */
FSS_LOCK(sc, s);
scp->fc_xfercount++;
FSS_UNLOCK(sc, s);
pos += len;
addr += len;
todo -= len;
}
/*
 * Wait for all write requests to complete.
 */
FSS_LOCK(sc, s);
while (scp->fc_xfercount > 0)
ltsleep(&scp->fc_data, PRIBIO, "bswwait", 0, &sc->sc_slock);
FSS_UNLOCK(sc, s);
VOP_UNLOCK(sc->sc_bs_vp, 0);
return error;
}
/*
* Read/write clusters from/to backing store.
* For persistent snapshots must be called with cl == 0. off is the
@@ -1103,74 +958,19 @@ static int
/*
 * NOTE(review): this hunk interleaves the REMOVED and the ADDED lines of
 * fss_bs_io() (the diff rendering dropped the +/- markers), so the text
 * below is not one coherent function body.  Old version: the long
 * declaration list, the fss_bmap()/VOP_STRATEGY()/biowait() loop.
 * New version: the single `int error;`, the `error = 0; off += ...`
 * pair, and the closing vn_rdwr()/VOP_PUTPAGES() sequence.
 */
fss_bs_io(struct fss_softc *sc, fss_io_type rw,
u_int32_t cl, off_t off, int len, caddr_t data)
{
/* --- removed: old per-block declarations --- */
int s, error, todo, count, nra;
off_t pos;
daddr_t nbn;
struct buf *bp;
struct vnode *vp;
/* --- added: the new implementation needs only an error code --- */
int error;
/* --- removed --- */
todo = len;
pos = FSS_CLTOB(sc, cl)+off;
/* --- added: fold the cluster base into the byte offset --- */
error = 0;
off += FSS_CLTOB(sc, cl);
vn_lock(sc->sc_bs_vp, LK_EXCLUSIVE|LK_RETRY);
/* --- removed: flush the page cache before raw block I/O --- */
simple_lock(&sc->sc_bs_vp->v_interlock);
error = VOP_PUTPAGES(sc->sc_bs_vp, trunc_page(pos),
round_page(pos+todo), PGO_CLEANIT|PGO_SYNCIO|PGO_FREE);
/*
 * --- removed: old loop mapping each run with fss_bmap() and doing
 * synchronous buffer-level I/O via VOP_STRATEGY()/biowait();
 * unmapped holes read back as zeros (bzero). ---
 */
while (error == 0 && todo > 0) {
error = fss_bmap(sc, pos, todo, &vp, &nbn, &nra);
if (error)
break;
count = FSS_FSBTOB(sc, nra+1)-FSS_FSBOFF(sc, pos);
if (count > todo)
count = todo;
if (nbn == (daddr_t)-1) {
bzero(data, count);
todo -= count;
data += count;
pos += count;
continue;
}
s = splbio();
bp = pool_get(&bufpool, PR_WAITOK);
splx(s);
BUF_INIT(bp);
bp->b_flags = (rw == FSS_READ ? B_READ : 0);
bp->b_bcount = count;
bp->b_bufsize = bp->b_bcount;
bp->b_error = 0;
bp->b_data = data;
bp->b_blkno = bp->b_rawblkno = nbn+btodb(FSS_FSBOFF(sc, pos));
bp->b_proc = NULL;
bp->b_vp = NULLVP;
bgetvp(vp, bp);
if ((bp->b_flags & B_READ) == 0)
bp->b_vp->v_numoutput++;
if ((bp->b_flags & B_READ) == 0 || cl < sc->sc_indir_size)
BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
VOP_STRATEGY(vp, bp);
error = biowait(bp);
if (bp->b_vp != NULL)
brelvp(bp);
s = splbio();
pool_put(&bufpool, bp);
splx(s);
if (error)
break;
todo -= count;
data += count;
pos += count;
/*
 * --- added: the whole transfer is now a single vn_rdwr() on the
 * backing-store vnode (vnode already locked, hence IO_NODELOCKED),
 * followed on success by a synchronous clean-and-free of the pages
 * covering the written range. ---
 */
error = vn_rdwr((rw == FSS_READ ? UIO_READ : UIO_WRITE), sc->sc_bs_vp,
data, len, off, UIO_SYSSPACE, IO_UNIT|IO_NODELOCKED,
sc->sc_bs_proc->p_ucred, NULL, sc->sc_bs_proc);
if (error == 0) {
simple_lock(&sc->sc_bs_vp->v_interlock);
error = VOP_PUTPAGES(sc->sc_bs_vp, trunc_page(off),
round_page(off+len), PGO_CLEANIT|PGO_SYNCIO|PGO_FREE);
}
VOP_UNLOCK(sc->sc_bs_vp, 0);
@@ -1321,7 +1121,8 @@ fss_bs_thread(void *arg)
indirp = fss_bs_indir(sc, scp->fc_cluster);
if (indirp != NULL) {
error = fss_write_cluster(scp, sc->sc_clnext);
error = fss_bs_io(sc, FSS_WRITE, sc->sc_clnext,
0, FSS_CLSIZE(sc), scp->fc_data);
} else
error = EIO;

View File

@@ -1,4 +1,4 @@
/* $NetBSD: fssvar.h,v 1.10 2005/10/15 17:29:11 yamt Exp $ */
/* $NetBSD: fssvar.h,v 1.11 2005/10/24 14:25:06 hannken Exp $ */
/*-
* Copyright (c) 2003 The NetBSD Foundation, Inc.
@@ -149,7 +149,6 @@ struct fss_softc {
#define FSS_ERROR 0x02 /* I/O error occurred */
#define FSS_BS_THREAD 0x04 /* Kernel thread is running */
#define FSS_EXCL 0x08 /* Exclusive access granted */
#define FSS_BS_ALLOC 0x10 /* Allocate backing store */
#define FSS_PERSISTENT 0x20 /* File system internal snapshot */
#define FSS_CDEV_OPEN 0x40 /* character device open */
#define FSS_BDEV_OPEN 0x80 /* block device open */