Make LFS work better (though still not "well") as an NFS-exported
filesystem, and fix other things that needed fixing before the tests
would complete, to wit:

* Include the fs ident in the filehandle; improve stale filehandle checks
  (a condensed sketch follows this list).

* Change the definition of blksize() to use the on-disk (dinode) size
  instead of the in-core inode's i_size, so that fsck_lfs will work
  properly again.

* Use b_interlock in lfs_vtruncbuf.

* Postpone dirop reclamation until after the seglock has been released,
  so that lfs_truncate is not called with the segment lock held.

* Don't loop in lfs_fsync(); just write everything and wait.

* Be more careful about the interlock/uobjlock in lfs_putpages: when we
  lose this lock, we have to resynchronize dirtiness of pages in each
  block.

* Be sure to always write indirect blocks and update metadata in
  lfs_putpages; fixes a bug that caused blocks to be accounted to the
  wrong segment.
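
For orientation, here is a condensed, stand-alone sketch of the staleness
checks the first item refers to; the real code is the struct lfid added to
lfs.h and the reworked lfs_fhtovp()/lfs_vptofh() in lfs_vfsops.c below.  The
struct and helper names in the sketch are invented for illustration only.

    #include <errno.h>
    #include <stdint.h>

    /* Stand-in for struct lfid: a ufid (inode, generation) plus the
     * filesystem ident, tying the handle to one incarnation of the fs. */
    struct fh     { uint32_t ino, gen, ident; };
    struct fsinfo { uint32_t ident, first_ino, max_ino; };

    static int
    check_handle(const struct fsinfo *fs, const struct fh *fh)
    {
            if (fh->ino < fs->first_ino)    /* e.g. below LFS_IFILE_INUM */
                    return ESTALE;
            if (fh->ident != fs->ident)     /* handle predates a newfs */
                    return ESTALE;
            if (fh->ino > fs->max_ino)      /* beyond what the ifile covers */
                    return ESTALE;
            return 0;   /* the generation number is still checked later,
                           in ufs_fhtovp(), as before */
    }
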
Author: perseant
Date:   2003-04-23 07:20:37 +00:00
Parent: 8ded03e5f2
Commit: ef3c60764c

6 changed files with 165 additions and 118 deletions

View File: lfs.h

@ -1,4 +1,4 @@
/* $NetBSD: lfs.h,v 1.63 2003/04/09 00:32:54 thorpej Exp $ */
/* $NetBSD: lfs.h,v 1.64 2003/04/23 07:20:37 perseant Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@ -252,6 +252,17 @@ extern struct lfs_log_entry lfs_log[LFS_LOGLENGTH];
# define LFS_BWRITE_LOG(bp) VOP_BWRITE((bp))
#endif /* _KERNEL */
#ifdef _KERNEL
/* Filehandle structure for exported LFSes */
struct lfid {
struct ufid lfid_ufid;
#define lfid_len lfid_ufid.ufid_len
#define lfid_ino lfid_ufid.ufid_ino
#define lfid_gen lfid_ufid.ufid_gen
uint32_t lfid_ident;
};
#endif /* _KERNEL */
/*
* "struct inode" associated definitions
*/
@ -728,6 +739,7 @@ struct lfs {
#define LFS_NOTYET 0x01
#define LFS_IFDIRTY 0x02
#define LFS_WARNED 0x04
#define LFS_UNDIROP 0x08
int8_t lfs_flags; /* currently unused flag */
u_int16_t lfs_activesb; /* toggle between superblocks */
daddr_t lfs_sbactive; /* disk address of current sb write */
@ -768,9 +780,9 @@ struct lfs {
#define INOPF(fs) ((fs)->lfs_inopf)
#define blksize(fs, ip, lbn) \
(((lbn) >= NDADDR || (ip)->i_size >= ((lbn) + 1) << (fs)->lfs_bshift) \
(((lbn) >= NDADDR || (ip)->i_ffs1_size >= ((lbn) + 1) << (fs)->lfs_bshift) \
? (fs)->lfs_bsize \
: (fragroundup(fs, blkoff(fs, (ip)->i_size))))
: (fragroundup(fs, blkoff(fs, (ip)->i_ffs1_size))))
#define blkoff(fs, loc) ((int)((loc) & (fs)->lfs_bmask))
#define fragoff(fs, loc) /* calculates (loc % fs->lfs_fsize) */ \
((int)((loc) & (fs)->lfs_ffmask))
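
The changed macro is dense, so here is a stand-alone reading of what it now
computes (a sketch, not the kernel macro): dsize is the on-disk dinode size
i_ffs1_size rather than the in-core i_size, which is the point of the change,
so that fsck_lfs and the kernel agree on block sizes.  SKETCH_NDADDR stands in
for NDADDR (the number of direct blocks), the function name is invented, and
block and fragment sizes are assumed to be powers of two, as they are in LFS.

    #include <stdint.h>

    #define SKETCH_NDADDR 12    /* stands in for NDADDR: direct blocks */

    static uint64_t
    blksize_sketch(uint64_t dsize, uint64_t lbn,
        unsigned bshift, uint64_t bsize, uint64_t fsize)
    {
            if (lbn >= SKETCH_NDADDR || dsize >= (lbn + 1) << bshift)
                    return bsize;           /* a whole block */
            /* lbn is the direct block holding EOF: round the tail of the
             * file up to a whole fragment, as fragroundup(blkoff()) does */
            return ((dsize & (bsize - 1)) + fsize - 1) & ~(fsize - 1);
    }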

View File: lfs_inode.c

@ -1,4 +1,4 @@
/* $NetBSD: lfs_inode.c,v 1.73 2003/04/10 04:15:38 simonb Exp $ */
/* $NetBSD: lfs_inode.c,v 1.74 2003/04/23 07:20:37 perseant Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_inode.c,v 1.73 2003/04/10 04:15:38 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_inode.c,v 1.74 2003/04/23 07:20:37 perseant Exp $");
#if defined(_KERNEL_OPT)
#include "opt_quota.h"
@ -794,10 +794,11 @@ restart:
nbp = LIST_NEXT(bp, b_vnbufs);
if (bp->b_lblkno < lbn)
continue;
simple_lock(&bp->b_interlock);
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
"lfs_vtruncbuf", slptimeo);
error = ltsleep(bp, slpflag | (PRIBIO + 1),
"lfs_vtruncbuf", slptimeo, &bp->b_interlock);
if (error) {
splx(s);
return (error);
@ -811,6 +812,7 @@ restart:
wakeup(&fs->lfs_avail);
}
LFS_UNLOCK_BUF(bp);
simple_unlock(&bp->b_interlock);
brelse(bp);
}
@ -818,10 +820,11 @@ restart:
nbp = LIST_NEXT(bp, b_vnbufs);
if (bp->b_lblkno < lbn)
continue;
simple_lock(&bp->b_interlock);
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
"lfs_vtruncbuf", slptimeo);
error = ltsleep(bp, slpflag | (PRIBIO + 1),
"lfs_vtruncbuf", slptimeo, &bp->b_interlock);
if (error) {
splx(s);
return (error);
@ -835,6 +838,7 @@ restart:
wakeup(&fs->lfs_avail);
}
LFS_UNLOCK_BUF(bp);
simple_unlock(&bp->b_interlock);
brelse(bp);
}
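
For context, the tsleep() to ltsleep() change in the two hunks above is the
usual NetBSD idiom for waiting on a busy buffer without losing a wakeup.  A
sketch of the pattern follows (a hypothetical helper, not the LFS code; it
assumes the buffer-cache and simple-lock declarations of this era):

    #include <sys/param.h>
    #include <sys/buf.h>
    #include <sys/lock.h>
    #include <sys/proc.h>

    /* Wait until bp is no longer B_BUSY.  Both the B_BUSY test and the
     * sleep happen under b_interlock; ltsleep() releases that lock
     * atomically with going to sleep (and re-takes it before returning,
     * since PNORELOCK is not set), so a biodone()/wakeup() cannot slip
     * in between the test and the sleep the way it could with tsleep(). */
    static int
    wait_while_busy(struct buf *bp, int slpflag, int slptimeo)
    {
            int error = 0;

            simple_lock(&bp->b_interlock);
            while (bp->b_flags & B_BUSY) {
                    bp->b_flags |= B_WANTED;
                    error = ltsleep(bp, slpflag | (PRIBIO + 1), "bufwait",
                        slptimeo, &bp->b_interlock);
                    if (error)
                            break;
            }
            simple_unlock(&bp->b_interlock);
            return error;
    }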

View File: lfs_segment.c

@ -1,4 +1,4 @@
/* $NetBSD: lfs_segment.c,v 1.119 2003/04/02 10:39:41 fvdl Exp $ */
/* $NetBSD: lfs_segment.c,v 1.120 2003/04/23 07:20:38 perseant Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.119 2003/04/02 10:39:41 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.120 2003/04/23 07:20:38 perseant Exp $");
#define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))
@ -415,6 +415,16 @@ lfs_vflush(struct vnode *vp)
lfs_segunlock(fs);
/* Wait for these buffers to be recovered by aiodoned */
s = splbio();
simple_lock(&global_v_numoutput_slock);
while (vp->v_numoutput > 0) {
ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vf2", 0,
&global_v_numoutput_slock);
}
simple_unlock(&global_v_numoutput_slock);
splx(s);
CLR_FLUSHING(fs,vp);
return (0);
}
@ -768,8 +778,7 @@ lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
if (!IS_FLUSHING(fs, vp)) {
simple_lock(&vp->v_interlock);
VOP_PUTPAGES(vp, 0, 0,
PGO_CLEANIT | PGO_ALLPAGES | PGO_LOCKED |
PGO_BUSYFAIL);
PGO_CLEANIT | PGO_ALLPAGES | PGO_LOCKED);
}
}
@ -1080,7 +1089,6 @@ lfs_gatherblock(struct segment *sp, struct buf *bp, int *sptr)
#endif
/* Insert into the buffer list, update the FINFO block. */
bp->b_flags |= B_GATHERED;
bp->b_flags &= ~B_DONE;
*sp->cbpp++ = bp;
for (j = 0; j < blksinblk; j++)
@ -1815,7 +1823,7 @@ lfs_writeseg(struct lfs *fs, struct segment *sp)
newbp);
#endif
*bpp = newbp;
bp->b_flags &= ~(B_ERROR | B_GATHERED | B_DONE);
bp->b_flags &= ~(B_ERROR | B_GATHERED);
if (bp->b_flags & B_CALL) {
printf("lfs_writeseg: indir bp should not be B_CALL\n");
s = splbio();

View File: lfs_subr.c

@ -1,4 +1,4 @@
/* $NetBSD: lfs_subr.c,v 1.39 2003/03/21 06:26:37 perseant Exp $ */
/* $NetBSD: lfs_subr.c,v 1.40 2003/04/23 07:20:38 perseant Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.39 2003/03/21 06:26:37 perseant Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.40 2003/04/23 07:20:38 perseant Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -366,8 +366,17 @@ lfs_unmark_dirop(struct lfs *fs)
{
struct inode *ip, *nip;
struct vnode *vp;
int doit;
extern int lfs_dirvcount;
simple_lock(&fs->lfs_interlock);
doit = !(fs->lfs_flags & LFS_UNDIROP);
if (doit)
fs->lfs_flags |= LFS_UNDIROP;
simple_unlock(&fs->lfs_interlock);
if (!doit)
return;
for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
nip = TAILQ_NEXT(ip, i_lfs_dchain);
vp = ITOV(ip);
@ -386,6 +395,10 @@ lfs_unmark_dirop(struct lfs *fs)
fs->lfs_unlockvp = NULL;
}
}
simple_lock(&fs->lfs_interlock);
fs->lfs_flags &= ~LFS_UNDIROP;
simple_unlock(&fs->lfs_interlock);
}
static void
@ -428,6 +441,7 @@ lfs_segunlock(struct lfs *fs)
struct segment *sp;
unsigned long sync, ckp;
struct buf *bp;
int do_unmark_dirop = 0;
extern int locked_queue_count;
extern long locked_queue_bytes;
@ -435,9 +449,9 @@ lfs_segunlock(struct lfs *fs)
simple_lock(&fs->lfs_interlock);
if (fs->lfs_seglock == 1) {
simple_unlock(&fs->lfs_interlock);
if ((sp->seg_flags & SEGM_PROT) == 0)
lfs_unmark_dirop(fs);
do_unmark_dirop = 1;
simple_unlock(&fs->lfs_interlock);
sync = sp->seg_flags & SEGM_SYNC;
ckp = sp->seg_flags & SEGM_CKP;
if (sp->bpp != sp->cbpp) {
@ -521,6 +535,8 @@ lfs_segunlock(struct lfs *fs)
}
/* Reenable fragment size changes */
lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
if (do_unmark_dirop)
lfs_unmark_dirop(fs);
} else if (fs->lfs_seglock == 0) {
simple_unlock(&fs->lfs_interlock);
panic ("Seglock not held");
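
The do_unmark_dirop changes above implement the "postpone dirop reclamation"
item from the commit message.  In outline (an ordering sketch only, with
finish_segment_write() and drop_seglock() invented as stand-ins for the
existing body of lfs_segunlock(); the real code is in the hunks above):

    if (fs->lfs_seglock == 1) {             /* last seglock reference */
            if ((sp->seg_flags & SEGM_PROT) == 0)
                    do_unmark_dirop = 1;    /* remember it, don't do it yet */
            finish_segment_write(fs, sp);   /* write and sync as before */
            drop_seglock(fs);
            lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
            if (do_unmark_dirop)
                    lfs_unmark_dirop(fs);   /* may call lfs_truncate(), so it
                                               must not run with the segment
                                               lock still held */
    }

lfs_unmark_dirop() itself now claims the new LFS_UNDIROP flag under
lfs_interlock, so only one thread performs the reclaim at a time and a
recursive call simply returns.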

View File: lfs_vfsops.c

@ -1,4 +1,4 @@
/* $NetBSD: lfs_vfsops.c,v 1.114 2003/04/16 21:44:27 christos Exp $ */
/* $NetBSD: lfs_vfsops.c,v 1.115 2003/04/23 07:20:38 perseant Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.114 2003/04/16 21:44:27 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.115 2003/04/23 07:20:38 perseant Exp $");
#if defined(_KERNEL_OPT)
#include "opt_quota.h"
@ -244,13 +244,6 @@ lfs_writerd(void *arg)
/* NOTREACHED */
}
#if 0
extern struct malloc_type *debug_malloc_type;
extern int debug_malloc_size;
extern int debug_malloc_size_lo;
extern int debug_malloc_size_hi;
#endif
/*
* Initialize the filesystem, most work done by ufs_init.
*/
@ -272,12 +265,6 @@ lfs_init()
memset(lfs_log, 0, sizeof(lfs_log));
#endif
simple_lock_init(&lfs_subsys_lock);
#if 0
debug_malloc_type = M_SEGMENT;
debug_malloc_size = 0;
debug_malloc_size_lo = 1;
debug_malloc_size_hi = 65536;
#endif
}
void
@ -364,12 +351,6 @@ lfs_mount(struct mount *mp, const char *path, void *data, struct nameidata *ndp,
if (error)
return (error);
#if 0
/* Until LFS can do NFS right. XXX */
if (args.export.ex_flags & MNT_EXPORTED)
return (EINVAL);
#endif
/*
* If updating, check whether changing from read-only to
* read/write; if there is no device name, that's all we do.
@ -1118,9 +1099,6 @@ lfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
TAILQ_INIT(&fs->lfs_dchainhd);
/* and paging tailq */
TAILQ_INIT(&fs->lfs_pchainhd);
#if 0 /* XXXDEBUG */
fs->lfs_lastwrit = dbtofsb(fs, fs->lfs_offset - 1);
#endif
/*
* We use the ifile vnode for almost every operation. Instead of
@ -1676,26 +1654,38 @@ lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
/*
* File handle to vnode
*
* Have to be really careful about stale file handles:
* - check that the inode number is valid
* - call lfs_vget() to get the locked inode
* - check for an unallocated inode (i_mode == 0)
*
* XXX
* use ifile to see if inode is allocated instead of reading off disk
* what is the relationship between my generational number and the NFS
* generational number.
*/
int
lfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
struct ufid *ufhp;
struct lfid *lfhp;
struct buf *bp;
IFILE *ifp;
int32_t daddr;
struct lfs *fs;
ufhp = (struct ufid *)fhp;
if (ufhp->ufid_ino < ROOTINO)
return (ESTALE);
return (ufs_fhtovp(mp, ufhp, vpp));
lfhp = (struct lfid *)fhp;
if (lfhp->lfid_ino < LFS_IFILE_INUM)
return ESTALE;
fs = VFSTOUFS(mp)->um_lfs;
if (lfhp->lfid_ident != fs->lfs_ident)
return ESTALE;
if (lfhp->lfid_ino >
((VTOI(fs->lfs_ivnode)->i_ffs1_size >> fs->lfs_bshift) -
fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb)
return ESTALE;
if (ufs_ihashlookup(VFSTOUFS(mp)->um_dev, lfhp->lfid_ino) == NULLVP) {
LFS_IENTRY(ifp, fs, lfhp->lfid_ino, bp);
daddr = ifp->if_daddr;
brelse(bp);
if (daddr == LFS_UNUSED_DADDR)
return ESTALE;
}
return (ufs_fhtovp(mp, &lfhp->lfid_ufid, vpp));
}
/*
@ -1706,13 +1696,14 @@ int
lfs_vptofh(struct vnode *vp, struct fid *fhp)
{
struct inode *ip;
struct ufid *ufhp;
struct lfid *lfhp;
ip = VTOI(vp);
ufhp = (struct ufid *)fhp;
ufhp->ufid_len = sizeof(struct ufid);
ufhp->ufid_ino = ip->i_number;
ufhp->ufid_gen = ip->i_gen;
lfhp = (struct lfid *)fhp;
lfhp->lfid_len = sizeof(struct lfid);
lfhp->lfid_ino = ip->i_number;
lfhp->lfid_gen = ip->i_gen;
lfhp->lfid_ident = ip->i_lfs->lfs_ident;
return (0);
}
@ -1841,7 +1832,10 @@ lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
skipbytes = 0;
KASSERT(bytes != 0);
/* KASSERT(bytes != 0); */
if (bytes == 0)
printf("ino %d bytes == 0 offset %" PRId64 "\n",
VTOI(vp)->i_number, pgs[0]->offset);
/* Swap PG_DELWRI for PG_PAGEOUT */
for (i = 0; i < npages; i++)
@ -1904,9 +1898,9 @@ lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
break;
}
iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
bytes);
iobytes = MIN(MAXPHYS, bytes);
if (blkno == (daddr_t)-1) {
iobytes = MIN(fs->lfs_bsize, bytes);
skipbytes += iobytes;
continue;
}
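
One of the new checks in lfs_fhtovp() above bounds the inode number by what
the ifile can describe.  Spelled out as a stand-alone helper (the function
name and parameters are invented for illustration; the kernel takes them from
fs->lfs_* and the ifile vnode's i_ffs1_size):

    #include <stdint.h>

    /* The ifile begins with lfs_cleansz blocks of cleaner info and
     * lfs_segtabsz blocks of segment-usage table; every block after that
     * holds lfs_ifpb per-inode entries.  A filehandle whose inode number
     * exceeds this bound cannot refer to anything the ifile knows about,
     * so lfs_fhtovp() returns ESTALE for it. */
    static uint32_t
    ifile_ino_bound(uint64_t ifile_size, unsigned bshift,
        uint32_t cleansz, uint32_t segtabsz, uint32_t ifpb)
    {
            uint64_t ifile_blocks = ifile_size >> bshift;

            return (uint32_t)((ifile_blocks - cleansz - segtabsz) * ifpb);
    }

For inode numbers inside the bound that are not already hashed, the code still
reads the IFILE entry and treats LFS_UNUSED_DADDR as stale.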

View File: lfs_vnops.c

@ -1,4 +1,4 @@
/* $NetBSD: lfs_vnops.c,v 1.102 2003/04/02 10:39:42 fvdl Exp $ */
/* $NetBSD: lfs_vnops.c,v 1.103 2003/04/23 07:20:39 perseant Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.102 2003/04/02 10:39:42 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.103 2003/04/23 07:20:39 perseant Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -315,33 +315,15 @@ lfs_fsync(void *v)
}
wait = (ap->a_flags & FSYNC_WAIT);
do {
#ifdef DEBUG
struct buf *bp;
#endif
simple_lock(&vp->v_interlock);
error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
round_page(ap->a_offhi),
PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
if (error)
return error;
error = VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);
if (wait && error == 0 && !VPISEMPTY(vp)) {
#ifdef DEBUG
printf("lfs_fsync: reflushing ino %d\n",
VTOI(vp)->i_number);
printf("vflags %x iflags %x npages %d\n",
vp->v_flag, VTOI(vp)->i_flag,
vp->v_uobj.uo_npages);
LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs)
printf("%" PRId64 " (%lx)", bp->b_lblkno,
bp->b_flags);
printf("\n");
#endif
LFS_SET_UINO(VTOI(vp), IN_MODIFIED);
}
} while (wait && error == 0 && !VPISEMPTY(vp));
simple_lock(&vp->v_interlock);
error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
round_page(ap->a_offhi),
PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
if (error)
return error;
error = VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);
if (wait && !VPISEMPTY(vp))
LFS_SET_UINO(VTOI(vp), IN_MODIFIED);
return error;
}
@ -1369,7 +1351,7 @@ lfs_getpages(void *v)
static int
check_dirty(struct lfs *fs, struct vnode *vp,
off_t startoffset, off_t endoffset, off_t blkeof,
int flags)
int flags, int checkfirst)
{
int by_list;
struct vm_page *curpg, *pgs[MAXBSIZE / PAGE_SIZE], *pg;
@ -1484,6 +1466,9 @@ check_dirty(struct lfs *fs, struct vnode *vp,
UVM_PAGE_OWN(pg, NULL);
}
if (checkfirst && any_dirty)
return any_dirty;
if (by_list) {
curpg = TAILQ_NEXT(curpg, listq);
} else {
@ -1657,16 +1642,27 @@ lfs_putpages(void *v)
return genfs_putpages(v);
/*
* Make sure that all pages in any given block are dirty, or
* none of them are. Find out if any of the pages we've been
* asked about are dirty. If none are dirty, send them on
* through genfs_putpages(), albeit with adjusted offsets.
* XXXUBC I am assuming here that they can't be dirtied in
* XXXUBC the meantime, but I bet that's wrong.
* If there are more than one page per block, we don't want
* to get caught locking them backwards; so set PGO_BUSYFAIL
* to avoid deadlocks.
*/
dirty = check_dirty(fs, vp, startoffset, endoffset, blkeof, ap->a_flags);
if (!dirty)
return genfs_putpages(v);
ap->a_flags |= PGO_BUSYFAIL;
do {
int r;
/* If no pages are dirty, we can just use genfs_putpages. */
if ((dirty = check_dirty(fs, vp, startoffset, endoffset, blkeof,
ap->a_flags, 1)) != 0)
break;
if ((r = genfs_putpages(v)) != EDEADLK)
return r;
/* Start over. */
preempt(NULL);
simple_lock(&vp->v_interlock);
} while(1);
/*
* Dirty and asked to clean.
@ -1716,19 +1712,13 @@ lfs_putpages(void *v)
/* XXX the flush should have taken care of this one too! */
}
/*
* This is it. We are going to write some pages. From here on
* down it's all just mechanics.
*
* If there are more than one page per block, we don't want to get
* caught locking them backwards; so set PGO_BUSYFAIL to avoid
* deadlocks. Also, don't let genfs_putpages wait;
* lfs_segunlock will wait for us, if need be.
* Don't let genfs_putpages wait; lfs_segunlock will wait for us.
*/
ap->a_flags &= ~PGO_SYNCIO;
if (pages_per_block > 1)
ap->a_flags |= PGO_BUSYFAIL;
/*
* If we've already got the seglock, flush the node and return.
@ -1741,7 +1731,15 @@ lfs_putpages(void *v)
sp = fs->lfs_sp;
sp->vp = vp;
while ((error = genfs_putpages(v)) == EDEADLK) {
/*
* Make sure that all pages in any given block are dirty, or
* none of them are.
*/
again:
check_dirty(fs, vp, startoffset, endoffset, blkeof,
ap->a_flags, 1);
if ((error = genfs_putpages(v)) == EDEADLK) {
#ifdef DEBUG_LFS
printf("lfs_putpages: genfs_putpages returned EDEADLK"
" ino %d off %x (seg %d)\n",
@ -1752,7 +1750,7 @@ lfs_putpages(void *v)
if (sp->cbpp - sp->bpp == 1) {
preempt(NULL);
simple_lock(&vp->v_interlock);
continue;
goto again;
}
/* Write gathered pages */
lfs_updatemeta(sp);
@ -1769,8 +1767,12 @@ lfs_putpages(void *v)
/* Give the write a chance to complete */
preempt(NULL);
/* We've lost the interlock. Start over. */
simple_lock(&vp->v_interlock);
goto again;
}
lfs_updatemeta(sp);
return error;
}
@ -1811,9 +1813,14 @@ lfs_putpages(void *v)
/*
* Loop through genfs_putpages until all pages are gathered.
* genfs_putpages() drops the interlock, so reacquire it if necessary.
* Whenever we lose the interlock we have to rerun check_dirty, as
* well.
*/
again2:
simple_lock(&vp->v_interlock);
while ((error = genfs_putpages(v)) == EDEADLK) {
check_dirty(fs, vp, startoffset, endoffset, blkeof, ap->a_flags, 1);
if ((error = genfs_putpages(v)) == EDEADLK) {
#ifdef DEBUG_LFS
printf("lfs_putpages: genfs_putpages returned EDEADLK [2]"
" ino %d off %x (seg %d)\n",
@ -1823,8 +1830,7 @@ lfs_putpages(void *v)
/* If nothing to write, short-circuit */
if (sp->cbpp - sp->bpp == 1) {
preempt(NULL);
simple_lock(&vp->v_interlock);
continue;
goto again2;
}
/* Write gathered pages */
lfs_updatemeta(sp);
@ -1844,9 +1850,16 @@ lfs_putpages(void *v)
/* Give the write a chance to complete */
preempt(NULL);
simple_lock(&vp->v_interlock);
/* We've lost the interlock. Start over. */
goto again2;
}
/* Write indirect blocks as well */
lfs_gather(fs, fs->lfs_sp, vp, lfs_match_indir);
lfs_gather(fs, fs->lfs_sp, vp, lfs_match_dindir);
lfs_gather(fs, fs->lfs_sp, vp, lfs_match_tindir);
/*
* Blocks are now gathered into a segment waiting to be written.
* All that's left to do is update metadata, and write them.
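
Finally, the shape of the new lfs_putpages() retry logic, reduced to its core
(a sketch that omits the "write what was already gathered" step; the real
code is in the hunks above): whenever v_interlock is dropped, page dirtiness
may have changed underneath us, so check_dirty() must be rerun to make every
block's pages uniformly dirty or clean before genfs_putpages() is retried.

    again:
            /* make all pages in each block dirty, or none of them */
            check_dirty(fs, vp, startoffset, endoffset, blkeof,
                ap->a_flags, 1);
            if ((error = genfs_putpages(v)) == EDEADLK) {
                    /* a page was busy (PGO_BUSYFAIL); let the pending
                     * write finish, then start over -- the interlock was
                     * lost, so dirtiness must be resynchronized */
                    preempt(NULL);
                    simple_lock(&vp->v_interlock);
                    goto again;
            }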