Protect various per-fs structures with fs->lfs_interlock simple_lock, to
improve behavior in the multiprocessor case. Add debugging segment-lock assertion statements.

parent 7c4b722858
commit 1ebfc508b6
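
In rough outline, the change wraps updates of shared per-filesystem fields
in the new interlock. A minimal sketch of the pattern (the helper name is
illustrative only, not from the source):

	/* Fields shared between processors are now touched under the
	 * per-filesystem simple lock rather than bare. */
	static void
	example_mark_ifile_dirty(struct lfs *fs)	/* hypothetical helper */
	{
		simple_lock(&fs->lfs_interlock);
		fs->lfs_flags |= LFS_IFDIRTY;
		simple_unlock(&fs->lfs_interlock);
	}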
@@ -1,15 +1,10 @@
# $NetBSD: TODO,v 1.8 2005/02/26 05:40:42 perseant Exp $

- Lock audit. Need to check locking for multiprocessor case in particular.

- Get rid of lfs_segclean(); the kernel should clean a dirty segment IFF it
  has passed two checkpoints containing zero live bytes.
# $NetBSD: TODO,v 1.9 2005/04/01 21:59:46 perseant Exp $

- Now that our cache is basically all of physical memory, we need to make
  sure that segwrite is not starving other important things. Need a way
  to prioritize which blocks are most important to write, and write only
  those before giving up the seglock to do the rest. How does this change
  our notion of what a checkpoint is?
  those, saving the rest for later. Does this change our notion of what
  a checkpoint is?

- Investigate alternate inode locking strategy: Inode locks are useful
  for locking against simultaneous changes to inode size (balloc,

@@ -1,4 +1,4 @@
/* $NetBSD: lfs.h,v 1.77 2005/03/08 00:18:19 perseant Exp $ */
/* $NetBSD: lfs.h,v 1.78 2005/04/01 21:59:46 perseant Exp $ */

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -213,6 +213,7 @@ typedef struct lfs_res_blk {
struct lfs_log_entry {
	char *op;
	char *file;
	int pid;
	int line;
	daddr_t block;
	unsigned long flags;
@@ -220,23 +221,26 @@ struct lfs_log_entry {
extern int lfs_lognum;
extern struct lfs_log_entry lfs_log[LFS_LOGLENGTH];
# define LFS_BWRITE_LOG(bp) lfs_bwrite_log((bp), __FILE__, __LINE__)
# define LFS_ENTER_LOG(theop, thefile, theline, lbn, theflags) do { \
# define LFS_ENTER_LOG(theop, thefile, theline, lbn, theflags, thepid) do {\
	int _s; \
	\
	simple_lock(&lfs_subsys_lock); \
	_s = splbio(); \
	lfs_log[lfs_lognum].op = theop; \
	lfs_log[lfs_lognum].file = thefile; \
	lfs_log[lfs_lognum].line = (theline); \
	lfs_log[lfs_lognum].pid = (thepid); \
	lfs_log[lfs_lognum].block = (lbn); \
	lfs_log[lfs_lognum].flags = (theflags); \
	lfs_lognum = (lfs_lognum + 1) % LFS_LOGLENGTH; \
	splx(_s); \
	simple_unlock(&lfs_subsys_lock); \
} while (0)

# define LFS_BCLEAN_LOG(fs, bp) do { \
	if ((bp)->b_vp == (fs)->lfs_ivnode) \
		LFS_ENTER_LOG("clear", __FILE__, __LINE__, \
			      bp->b_lblkno, bp->b_flags); \
			      bp->b_lblkno, bp->b_flags, curproc->p_pid);\
} while (0)

/* Must match list in lfs_vfsops.c ! */
@@ -329,7 +333,9 @@ struct lfid {
		_ifp->if_atime_sec = (acc)->tv_sec; \
		_ifp->if_atime_nsec = (acc)->tv_nsec; \
		LFS_BWRITE_LOG(_ibp); \
		simple_lock(&_fs->lfs_interlock); \
		_fs->lfs_flags |= LFS_IFDIRTY; \
		simple_unlock(&_fs->lfs_interlock); \
	} else { \
		LFS_SET_UINO(ip, IN_ACCESSED); \
	} \
@@ -502,21 +508,32 @@ typedef struct _cleanerinfo {
	(CP) = (CLEANERINFO *)(BP)->b_data; \
} while (0)

/* Synchronize the Ifile cleaner info with current avail and bfree */
/*
 * Synchronize the Ifile cleaner info with current avail and bfree.
 */
#define LFS_SYNC_CLEANERINFO(cip, fs, bp, w) do { \
	simple_lock(&(fs)->lfs_interlock); \
	if ((w) || (cip)->bfree != (fs)->lfs_bfree || \
	    (cip)->avail != (fs)->lfs_avail - (fs)->lfs_ravail - \
	    (fs)->lfs_favail) { \
		(cip)->bfree = (fs)->lfs_bfree; \
		(cip)->avail = (fs)->lfs_avail - (fs)->lfs_ravail - \
			(fs)->lfs_favail; \
		if (((bp)->b_flags & B_GATHERED) == 0) \
		if (((bp)->b_flags & B_GATHERED) == 0) { \
			(fs)->lfs_flags |= LFS_IFDIRTY; \
		} \
		simple_unlock(&(fs)->lfs_interlock); \
		(void) LFS_BWRITE_LOG(bp); /* Ifile */ \
	} else \
	} else { \
		simple_unlock(&(fs)->lfs_interlock); \
		brelse(bp); \
	} \
} while (0)

/*
 * Get the head of the inode free list.
 * Always caled with the segment lock held.
 */
#define LFS_GET_HEADFREE(FS, CIP, BP, FREEP) do { \
	if ((FS)->lfs_version > 1) { \
		LFS_CLEANERINFO((CIP), (FS), (BP)); \
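
The rewritten LFS_SYNC_CLEANERINFO above follows the usual simple-lock
rule: the interlock is dropped on every path before calling anything that
may sleep. A minimal sketch of that shape (illustrative only, not a
literal excerpt):

	simple_lock(&(fs)->lfs_interlock);
	/* ... read/update fields covered by lfs_interlock ... */
	simple_unlock(&(fs)->lfs_interlock);
	brelse(bp);	/* may sleep, so the interlock is already released */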
@@ -532,7 +549,9 @@ typedef struct _cleanerinfo {
		LFS_CLEANERINFO((CIP), (FS), (BP)); \
		(CIP)->free_head = (VAL); \
		LFS_BWRITE_LOG(BP); \
		simple_lock(&fs->lfs_interlock); \
		(FS)->lfs_flags |= LFS_IFDIRTY; \
		simple_unlock(&fs->lfs_interlock); \
	} \
} while (0)
@@ -546,7 +565,9 @@ typedef struct _cleanerinfo {
		LFS_CLEANERINFO((CIP), (FS), (BP)); \
		(CIP)->free_tail = (VAL); \
		LFS_BWRITE_LOG(BP); \
		simple_lock(&fs->lfs_interlock); \
		(FS)->lfs_flags |= LFS_IFDIRTY; \
		simple_unlock(&fs->lfs_interlock); \
} while (0)

/*
@@ -1031,4 +1052,27 @@ struct lfs_fcntl_markv {
	((fs)->lfs_seglock != 0 && (fs)->lfs_lockpid == curproc->p_pid)
#endif /* _KERNEL */

/* Debug segment lock */
#ifdef notyet
# define ASSERT_SEGLOCK(fs) KASSERT(LFS_SEGLOCK_HELD(fs))
# define ASSERT_NO_SEGLOCK(fs) KASSERT(!LFS_SEGLOCK_HELD(fs))
# define ASSERT_DUNNO_SEGLOCK(fs)
# define ASSERT_MAYBE_SEGLOCK(fs)
#else /* !notyet */
# define ASSERT_DUNNO_SEGLOCK(fs) \
	DLOG((DLOG_SEG, "lfs func %s seglock wrong (%d)\n", __func__, \
		LFS_SEGLOCK_HELD(fs)))
# define ASSERT_SEGLOCK(fs) do { \
	if (!LFS_SEGLOCK_HELD(fs)) { \
		DLOG((DLOG_SEG, "lfs func %s seglock wrong (0)\n", __func__)); \
	} \
} while(0)
# define ASSERT_NO_SEGLOCK(fs) do { \
	if (LFS_SEGLOCK_HELD(fs)) { \
		DLOG((DLOG_SEG, "lfs func %s seglock wrong (1)\n", __func__)); \
	} \
} while(0)
# define ASSERT_MAYBE_SEGLOCK(x)
#endif /* !notyet */

#endif /* !_UFS_LFS_LFS_H_ */

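The rest of the commit places these assertions at the top of the LFS
functions, recording whether the segment lock must, must not, or may be
held there. A minimal usage sketch (the function name is illustrative
only, not from the source):

	static void
	example_needs_seglock(struct lfs *fs)
	{
		ASSERT_SEGLOCK(fs);	/* caller must hold the segment lock */
		/* ... manipulate per-segment write state ... */
	}
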
@@ -1,4 +1,4 @@
/* $NetBSD: lfs_alloc.c,v 1.77 2005/03/23 00:12:51 perseant Exp $ */
/* $NetBSD: lfs_alloc.c,v 1.78 2005/04/01 21:59:46 perseant Exp $ */

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_alloc.c,v 1.77 2005/03/23 00:12:51 perseant Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_alloc.c,v 1.78 2005/04/01 21:59:46 perseant Exp $");

#if defined(_KERNEL_OPT)
#include "opt_quota.h"
@@ -120,6 +120,8 @@ lfs_rf_valloc(struct lfs *fs, ino_t ino, int version, struct proc *p,
	int error;
	CLEANERINFO *cip;

	ASSERT_SEGLOCK(fs); /* XXX it doesn't, really */

	/*
	 * First, just try a vget. If the version number is the one we want,
	 * we don't have to do anything else. If the version number is wrong,
@@ -201,9 +203,13 @@ lfs_rf_valloc(struct lfs *fs, ino_t ino, int version, struct proc *p,
		/* The dirop-nature of this vnode is past */
		lfs_unmark_vnode(vp);
		(void)lfs_vunref(vp);
		--lfs_dirvcount;
		vp->v_flag &= ~VDIROP;
		simple_lock(&fs->lfs_interlock);
		simple_lock(&lfs_subsys_lock);
		--lfs_dirvcount;
		simple_unlock(&lfs_subsys_lock);
		TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
		simple_unlock(&fs->lfs_interlock);
	}
	*vpp = vp;
	return error;
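
As in the hunk above, every place that clears VDIROP now takes the two
locks in the same order: fs->lfs_interlock first, then lfs_subsys_lock
around the global lfs_dirvcount, releasing them in reverse. A condensed
sketch of that ordering (not a literal excerpt):

	simple_lock(&fs->lfs_interlock);	/* per-fs state */
	simple_lock(&lfs_subsys_lock);		/* global LFS counters */
	--lfs_dirvcount;
	simple_unlock(&lfs_subsys_lock);
	TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
	simple_unlock(&fs->lfs_interlock);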
@@ -226,6 +232,8 @@ extend_ifile(struct lfs *fs, struct ucred *cred)
	ino_t oldlast;
	CLEANERINFO *cip;

	ASSERT_SEGLOCK(fs);

	vp = fs->lfs_ivnode;
	ip = VTOI(vp);
	blkno = lblkno(fs, ip->i_size);
@@ -295,7 +303,10 @@ lfs_valloc(void *v)
	if (fs->lfs_ronly)
		return EROFS;

	ASSERT_NO_SEGLOCK(fs);

	lfs_seglock(fs, SEGM_PROT);
	vn_lock(fs->lfs_ivnode, LK_EXCLUSIVE);

	/* Get the head of the freelist. */
	LFS_GET_HEADFREE(fs, cip, cbp, &new_ino);
@@ -328,6 +339,7 @@ lfs_valloc(void *v)
	if (fs->lfs_freehd == LFS_UNUSED_INUM) {
		if ((error = extend_ifile(fs, ap->a_cred)) != 0) {
			LFS_PUT_HEADFREE(fs, cip, cbp, new_ino);
			VOP_UNLOCK(fs->lfs_ivnode, 0);
			lfs_segunlock(fs);
			return error;
		}
@@ -337,6 +349,13 @@ lfs_valloc(void *v)
		panic("inode 0 allocated [3]");
#endif /* DIAGNOSTIC */

	/* Set superblock modified bit and increment file count. */
	simple_lock(&fs->lfs_interlock);
	fs->lfs_fmod = 1;
	simple_unlock(&fs->lfs_interlock);
	++fs->lfs_nfiles;

	VOP_UNLOCK(fs->lfs_ivnode, 0);
	lfs_segunlock(fs);

	return lfs_ialloc(fs, ap->a_pvp, new_ino, new_gen, ap->a_vpp);
@@ -352,6 +371,8 @@ lfs_ialloc(struct lfs *fs, struct vnode *pvp, ino_t new_ino, int new_gen,
	struct inode *ip;
	struct vnode *vp;

	ASSERT_NO_SEGLOCK(fs);

	vp = *vpp;
	lockmgr(&ufs_hashlock, LK_EXCLUSIVE, 0);
	/* Create an inode to associate with the vnode. */
@@ -382,9 +403,6 @@ lfs_ialloc(struct lfs *fs, struct vnode *pvp, ino_t new_ino, int new_gen,
	lfs_mark_vnode(vp);
	genfs_node_init(vp, &lfs_genfsops);
	VREF(ip->i_devvp);
	/* Set superblock modified bit and increment file count. */
	fs->lfs_fmod = 1;
	++fs->lfs_nfiles;
	return (0);
}

@@ -400,6 +418,8 @@ lfs_vcreate(struct mount *mp, ino_t ino, struct vnode *vp)
	/* Get a pointer to the private mount structure. */
	ump = VFSTOUFS(mp);

	ASSERT_NO_SEGLOCK(ump->um_lfs);

	/* Initialize the inode. */
	ip = pool_get(&lfs_inode_pool, PR_WAITOK);
	memset(ip, 0, sizeof(*ip));
@@ -456,19 +476,28 @@ lfs_vfree(void *v)
	fs = ip->i_lfs;
	ino = ip->i_number;

	ASSERT_NO_SEGLOCK(fs);

	/* Drain of pending writes */
	simple_lock(&vp->v_interlock);
	s = splbio();
	if (fs->lfs_version > 1 && WRITEINPROG(vp))
		tsleep(vp, (PRIBIO+1), "lfs_vfree", 0);
		ltsleep(vp, (PRIBIO+1), "lfs_vfree", 0, &vp->v_interlock);
	splx(s);
	simple_unlock(&vp->v_interlock);

	lfs_seglock(fs, SEGM_PROT);
	vn_lock(fs->lfs_ivnode, LK_EXCLUSIVE);

	lfs_unmark_vnode(vp);
	if (vp->v_flag & VDIROP) {
		--lfs_dirvcount;
		vp->v_flag &= ~VDIROP;
		simple_lock(&fs->lfs_interlock);
		simple_lock(&lfs_subsys_lock);
		--lfs_dirvcount;
		simple_unlock(&lfs_subsys_lock);
		TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
		simple_unlock(&fs->lfs_interlock);
		wakeup(&lfs_dirvcount);
		lfs_vunref(vp);
	}
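
This is the tsleep()-to-ltsleep() conversion the commit repeats in many
places: instead of sleeping with no lock held, the code now hands the
simple lock to ltsleep(), which releases it while asleep and reacquires
it before returning. A condensed sketch of the shape (the wmesg string is
illustrative; not a literal excerpt):

	simple_lock(&vp->v_interlock);
	while (WRITEINPROG(vp))
		ltsleep(vp, PRIBIO + 1, "wait", 0, &vp->v_interlock);
	simple_unlock(&vp->v_interlock);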
@@ -528,9 +557,12 @@ lfs_vfree(void *v)
	}

	/* Set superblock modified bit and decrement file count. */
	simple_lock(&fs->lfs_interlock);
	fs->lfs_fmod = 1;
	simple_unlock(&fs->lfs_interlock);
	--fs->lfs_nfiles;

	VOP_UNLOCK(fs->lfs_ivnode, 0);
	lfs_segunlock(fs);

	return (0);

@ -1,4 +1,4 @@
|
|||
/* $NetBSD: lfs_balloc.c,v 1.51 2005/03/02 21:16:09 perseant Exp $ */
|
||||
/* $NetBSD: lfs_balloc.c,v 1.52 2005/04/01 21:59:46 perseant Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
|
||||
|
@ -67,7 +67,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.51 2005/03/02 21:16:09 perseant Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.52 2005/04/01 21:59:46 perseant Exp $");
|
||||
|
||||
#if defined(_KERNEL_OPT)
|
||||
#include "opt_quota.h"
|
||||
|
@ -146,6 +146,8 @@ lfs_balloc(void *v)
|
|||
/* (void)lfs_check(vp, lbn, 0); */
|
||||
bpp = ap->a_bpp;
|
||||
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
|
||||
/*
|
||||
* Three cases: it's a block beyond the end of file, it's a block in
|
||||
* the file that may or may not have been assigned a disk address or
|
||||
|
@ -208,7 +210,9 @@ lfs_balloc(void *v)
|
|||
clrbuf(bp);
|
||||
}
|
||||
ip->i_lfs_effnblks += bb;
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
ip->i_lfs->lfs_bfree -= bb;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
ip->i_ffs1_db[lbn] = UNWRITTEN;
|
||||
} else {
|
||||
if (nsize <= osize) {
|
||||
|
@ -251,7 +255,9 @@ lfs_balloc(void *v)
|
|||
}
|
||||
}
|
||||
if (ISSPACE(fs, bcount, ap->a_cred)) {
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
ip->i_lfs->lfs_bfree -= bcount;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
ip->i_lfs_effnblks += bcount;
|
||||
} else {
|
||||
return ENOSPC;
|
||||
|
@ -290,6 +296,13 @@ lfs_balloc(void *v)
|
|||
UNWRITTEN;
|
||||
/* XXX ondisk32 */
|
||||
idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
|
||||
#ifdef DEBUG
|
||||
if (vp == fs->lfs_ivnode) {
|
||||
LFS_ENTER_LOG("balloc", __FILE__,
|
||||
__LINE__, indirs[i].in_lbn,
|
||||
ibp->b_flags, curproc->p_pid);
|
||||
}
|
||||
#endif
|
||||
if ((error = VOP_BWRITE(ibp)))
|
||||
return error;
|
||||
}
|
||||
|
@ -342,6 +355,13 @@ lfs_balloc(void *v)
|
|||
(long long)idp->in_lbn);
|
||||
/* XXX ondisk32 */
|
||||
((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
|
||||
#ifdef DEBUG
|
||||
if (vp == fs->lfs_ivnode) {
|
||||
LFS_ENTER_LOG("balloc", __FILE__,
|
||||
__LINE__, idp->in_lbn,
|
||||
ibp->b_flags, curproc->p_pid);
|
||||
}
|
||||
#endif
|
||||
VOP_BWRITE(ibp);
|
||||
}
|
||||
} else if (bpp && !(bp->b_flags & (B_DONE|B_DELWRI))) {
|
||||
|
@ -383,6 +403,8 @@ lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf *
|
|||
bb = (long)fragstofsb(fs, numfrags(fs, nsize - osize));
|
||||
error = 0;
|
||||
|
||||
ASSERT_DUNNO_SEGLOCK(fs);
|
||||
|
||||
/*
|
||||
* Get the seglock so we don't enlarge blocks while a segment
|
||||
* is being written. If we're called with bpp==NULL, though,
|
||||
|
@ -438,7 +460,9 @@ lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf *
|
|||
fs->lfs_avail -= bb;
|
||||
}
|
||||
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_bfree -= bb;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
ip->i_lfs_effnblks += bb;
|
||||
ip->i_flag |= IN_CHANGE | IN_UPDATE;
|
||||
|
||||
|
@ -447,8 +471,11 @@ lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf *
|
|||
allocbuf(*bpp, nsize, 1);
|
||||
|
||||
/* Adjust locked-list accounting */
|
||||
if (((*bpp)->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED)
|
||||
if (((*bpp)->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED) {
|
||||
simple_lock(&lfs_subsys_lock);
|
||||
locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
|
||||
simple_unlock(&lfs_subsys_lock);
|
||||
}
|
||||
|
||||
bzero((char *)((*bpp)->b_data) + osize, (u_int)(nsize - osize));
|
||||
}
|
||||
|
@ -465,7 +492,6 @@ lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf *
|
|||
* on the buffer headers, but since pages don't have buffer headers we
|
||||
* record it here instead.
|
||||
*/
|
||||
|
||||
void
|
||||
lfs_register_block(struct vnode *vp, daddr_t lbn)
|
||||
{
|
||||
|
@ -481,6 +507,8 @@ lfs_register_block(struct vnode *vp, daddr_t lbn)
|
|||
ip = VTOI(vp);
|
||||
fs = ip->i_lfs;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
|
||||
/* If no space, wait for the cleaner */
|
||||
lfs_availwait(fs, btofsb(fs, 1 << fs->lfs_bshift));
|
||||
|
||||
|
@ -493,19 +521,28 @@ lfs_register_block(struct vnode *vp, daddr_t lbn)
|
|||
lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
|
||||
lbp->lbn = lbn;
|
||||
LIST_INSERT_HEAD(&(ip->i_lfs_blist[hash]), lbp, entry);
|
||||
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_favail += btofsb(fs, (1 << fs->lfs_bshift));
|
||||
++locked_fakequeue_count;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
}
|
||||
|
||||
static void
|
||||
lfs_do_deregister(struct lfs *fs, struct lbnentry *lbp)
|
||||
{
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
|
||||
LIST_REMOVE(lbp, entry);
|
||||
pool_put(&lfs_lbnentry_pool, lbp);
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
if (fs->lfs_favail > btofsb(fs, (1 << fs->lfs_bshift)))
|
||||
fs->lfs_favail -= btofsb(fs, (1 << fs->lfs_bshift));
|
||||
simple_lock(&lfs_subsys_lock);
|
||||
if (locked_fakequeue_count > 0)
|
||||
--locked_fakequeue_count;
|
||||
simple_unlock(&lfs_subsys_lock);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: lfs_bio.c,v 1.81 2005/03/09 22:12:15 perseant Exp $ */
|
||||
/* $NetBSD: lfs_bio.c,v 1.82 2005/04/01 21:59:46 perseant Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
|
||||
|
@ -67,7 +67,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.81 2005/03/09 22:12:15 perseant Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.82 2005/04/01 21:59:46 perseant Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
|
@ -128,6 +128,7 @@ lfs_fits_buf(struct lfs *fs, int n, int bytes)
|
|||
{
|
||||
int count_fit, bytes_fit;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
|
||||
|
||||
count_fit =
|
||||
|
@ -156,6 +157,7 @@ int
|
|||
lfs_reservebuf(struct lfs *fs, struct vnode *vp, struct vnode *vp2,
|
||||
int n, int bytes)
|
||||
{
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
KASSERT(locked_queue_rcount >= 0);
|
||||
KASSERT(locked_queue_rbytes >= 0);
|
||||
|
||||
|
@ -207,8 +209,11 @@ lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
|
|||
struct buf *bp;
|
||||
int error, slept;
|
||||
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
slept = 0;
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
#if 0
|
||||
/*
|
||||
* XXX ideally, we should unlock vnodes here
|
||||
|
@ -226,14 +231,12 @@ lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
|
|||
*/
|
||||
#endif
|
||||
|
||||
#ifdef DEBUG
|
||||
if (!slept) {
|
||||
DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
|
||||
" est_bfree = %d)\n",
|
||||
fsb + fs->lfs_ravail + fs->lfs_favail,
|
||||
fs->lfs_bfree, LFS_EST_BFREE(fs)));
|
||||
}
|
||||
#endif
|
||||
++slept;
|
||||
|
||||
/* Wake up the cleaner */
|
||||
|
@ -242,8 +245,13 @@ lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
|
|||
wakeup(&lfs_allclean_wakeup);
|
||||
wakeup(&fs->lfs_nextseg);
|
||||
|
||||
error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
|
||||
0);
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
/* Cleaner might have run while we were reading, check again */
|
||||
if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
|
||||
break;
|
||||
|
||||
error = ltsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
|
||||
0, &fs->lfs_interlock);
|
||||
#if 0
|
||||
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
|
||||
vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
|
||||
|
@ -257,6 +265,7 @@ lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
|
|||
}
|
||||
#endif
|
||||
fs->lfs_ravail += fsb;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -272,6 +281,17 @@ lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
|
|||
int error;
|
||||
int cantwait;
|
||||
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
if (vp2) {
|
||||
/* Make sure we're not in the process of reclaiming vp2 */
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
while(fs->lfs_flags & LFS_UNDIROP) {
|
||||
ltsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
|
||||
&fs->lfs_interlock);
|
||||
}
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
}
|
||||
|
||||
KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
|
||||
KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
|
||||
KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
|
||||
|
@ -357,6 +377,7 @@ lfs_fits(struct lfs *fs, int fsb)
|
|||
{
|
||||
int needed;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
needed = fsb + btofsb(fs, fs->lfs_sumsize) +
|
||||
((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
|
||||
1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));
|
||||
|
@ -380,10 +401,10 @@ lfs_availwait(struct lfs *fs, int fsb)
|
|||
CLEANERINFO *cip;
|
||||
struct buf *cbp;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
/* Push cleaner blocks through regardless */
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
if (fs->lfs_seglock &&
|
||||
fs->lfs_lockpid == curproc->p_pid &&
|
||||
if (LFS_SEGLOCK_HELD(fs) &&
|
||||
fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
return 0;
|
||||
|
@ -408,7 +429,7 @@ lfs_availwait(struct lfs *fs, int fsb)
|
|||
wakeup(&lfs_allclean_wakeup);
|
||||
wakeup(&fs->lfs_nextseg);
|
||||
#ifdef DIAGNOSTIC
|
||||
if (fs->lfs_seglock && fs->lfs_lockpid == curproc->p_pid)
|
||||
if (LFS_SEGLOCK_HELD(fs))
|
||||
panic("lfs_availwait: deadlock");
|
||||
#endif
|
||||
error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
|
||||
|
@ -425,6 +446,7 @@ lfs_bwrite_ext(struct buf *bp, int flags)
|
|||
struct inode *ip;
|
||||
int fsb, s;
|
||||
|
||||
ASSERT_MAYBE_SEGLOCK(VFSTOUFS(bp->b_vp->v_mount)->um_lfs);
|
||||
KASSERT(bp->b_flags & B_BUSY);
|
||||
KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
|
||||
KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_DELWRI);
|
||||
|
@ -486,25 +508,34 @@ lfs_bwrite_ext(struct buf *bp, int flags)
|
|||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called and return with the lfs_interlock held, but the lfs_subsys_lock
|
||||
* not held.
|
||||
*/
|
||||
void
|
||||
lfs_flush_fs(struct lfs *fs, int flags)
|
||||
{
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
LOCK_ASSERT(simple_lock_held(&fs->lfs_interlock));
|
||||
LOCK_ASSERT(!simple_lock_held(&lfs_subsys_lock));
|
||||
if (fs->lfs_ronly)
|
||||
return;
|
||||
|
||||
simple_lock(&lfs_subsys_lock);
|
||||
lfs_subsys_pages -= fs->lfs_pages; /* XXXUBC */
|
||||
if (lfs_subsys_pages < 0) /* XXXUBC */
|
||||
lfs_subsys_pages = 0; /* XXXUBC */
|
||||
fs->lfs_pages = 0; /* XXXUBC need a better way to count this */
|
||||
|
||||
lfs_writer_enter(fs, "fldirop");
|
||||
|
||||
if (lfs_dostats)
|
||||
++lfs_stats.flush_invoked;
|
||||
lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
|
||||
fs->lfs_favail = 0; /* XXX */
|
||||
simple_unlock(&lfs_subsys_lock);
|
||||
fs->lfs_pages = 0; /* XXXUBC need a better way to count this */
|
||||
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
lfs_writer_enter(fs, "fldirop");
|
||||
lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
|
||||
lfs_writer_leave(fs);
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_favail = 0; /* XXX */
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -522,6 +553,7 @@ lfs_flush(struct lfs *fs, int flags, int only_onefs)
|
|||
extern u_int64_t locked_fakequeue_count;
|
||||
struct mount *mp, *nmp;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
|
||||
KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));
|
||||
|
||||
|
@ -534,7 +566,7 @@ lfs_flush(struct lfs *fs, int flags, int only_onefs)
|
|||
}
|
||||
while (lfs_writing)
|
||||
ltsleep(&lfs_writing, PRIBIO + 1, "lfsflush", 0,
|
||||
&lfs_subsys_lock);
|
||||
&lfs_subsys_lock);
|
||||
lfs_writing = 1;
|
||||
|
||||
simple_unlock(&lfs_subsys_lock);
|
||||
|
@ -601,10 +633,14 @@ lfs_check(struct vnode *vp, daddr_t blkno, int flags)
|
|||
|
||||
fs = ip->i_lfs;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
LOCK_ASSERT(!simple_lock_held(&fs->lfs_interlock));
|
||||
|
||||
/*
|
||||
* If we would flush below, but dirops are active, sleep.
|
||||
* Note that a dirop cannot ever reach this code!
|
||||
*/
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
simple_lock(&lfs_subsys_lock);
|
||||
while (fs->lfs_dirops > 0 &&
|
||||
(locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
|
||||
|
@ -612,10 +648,12 @@ lfs_check(struct vnode *vp, daddr_t blkno, int flags)
|
|||
lfs_subsys_pages > LFS_MAX_PAGES ||
|
||||
lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
|
||||
{
|
||||
simple_unlock(&lfs_subsys_lock);
|
||||
++fs->lfs_diropwait;
|
||||
ltsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
|
||||
&lfs_subsys_lock);
|
||||
&fs->lfs_interlock);
|
||||
--fs->lfs_diropwait;
|
||||
simple_lock(&lfs_subsys_lock);
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
|
@ -626,13 +664,17 @@ lfs_check(struct vnode *vp, daddr_t blkno, int flags)
|
|||
DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
|
||||
locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
|
||||
if (lfs_subsys_pages > LFS_MAX_PAGES)
|
||||
DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n", lfs_subsys_pages, LFS_MAX_PAGES));
|
||||
DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
|
||||
lfs_subsys_pages, LFS_MAX_PAGES));
|
||||
if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
|
||||
DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n", fs->lfs_pages, lfs_fs_pagetrip));
|
||||
DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
|
||||
fs->lfs_pages, lfs_fs_pagetrip));
|
||||
if (lfs_dirvcount > LFS_MAX_DIROP)
|
||||
DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n", lfs_dirvcount, LFS_MAX_DIROP));
|
||||
DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
|
||||
lfs_dirvcount, LFS_MAX_DIROP));
|
||||
if (fs->lfs_diropwait > 0)
|
||||
DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n", fs->lfs_diropwait));
|
||||
DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
|
||||
fs->lfs_diropwait));
|
||||
#endif
|
||||
|
||||
if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
|
||||
|
@ -653,30 +695,29 @@ lfs_check(struct vnode *vp, daddr_t blkno, int flags)
|
|||
locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
|
||||
lfs_subsys_pages > LFS_WAIT_PAGES ||
|
||||
lfs_dirvcount > LFS_MAX_DIROP) {
|
||||
simple_unlock(&lfs_subsys_lock);
|
||||
|
||||
if (lfs_dostats)
|
||||
++lfs_stats.wait_exceeded;
|
||||
DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
|
||||
locked_queue_count, locked_queue_bytes));
|
||||
error = tsleep(&locked_queue_count, PCATCH | PUSER,
|
||||
"buffers", hz * LFS_BUFWAIT);
|
||||
if (error != EWOULDBLOCK) {
|
||||
simple_lock(&lfs_subsys_lock);
|
||||
error = ltsleep(&locked_queue_count, PCATCH | PUSER,
|
||||
"buffers", hz * LFS_BUFWAIT, &lfs_subsys_lock);
|
||||
if (error != EWOULDBLOCK)
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* lfs_flush might not flush all the buffers, if some of the
|
||||
* inodes were locked or if most of them were Ifile blocks
|
||||
* and we weren't asked to checkpoint. Try flushing again
|
||||
* to keep us from blocking indefinitely.
|
||||
*/
|
||||
simple_lock(&lfs_subsys_lock);
|
||||
if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
|
||||
locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
|
||||
lfs_flush(fs, flags | SEGM_CKP, 0);
|
||||
}
|
||||
}
|
||||
simple_unlock(&lfs_subsys_lock);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
return (error);
|
||||
}
|
||||
|
||||
|
@ -690,6 +731,7 @@ lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int typ
|
|||
size_t nbytes;
|
||||
int s;
|
||||
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
nbytes = roundup(size, fsbtob(fs, 1));
|
||||
|
||||
s = splbio();
|
||||
|
|
|
@@ -1,4 +1,4 @@
/* $NetBSD: lfs_debug.c,v 1.29 2005/03/26 19:40:31 christos Exp $ */
/* $NetBSD: lfs_debug.c,v 1.30 2005/04/01 21:59:46 perseant Exp $ */

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -71,7 +71,7 @@
#include <machine/stdarg.h>

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_debug.c,v 1.29 2005/03/26 19:40:31 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_debug.c,v 1.30 2005/04/01 21:59:46 perseant Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
@@ -93,23 +93,35 @@ int lfs_bwrite_log(struct buf *bp, char *file, int line)
	a.a_desc = VDESC(vop_bwrite);
	a.a_bp = bp;

	if (!(bp->b_flags & (B_DELWRI | B_GATHERED)))
		LFS_ENTER_LOG("write", file, line, bp->b_lblkno, bp->b_flags);
	if (!(bp->b_flags & (B_DELWRI | B_GATHERED))) {
		LFS_ENTER_LOG("write", file, line, bp->b_lblkno, bp->b_flags,
			      curproc->p_pid);
	}
	return (VCALL(bp->b_vp, VOFFSET(vop_bwrite), &a));
}

void lfs_dumplog(void)
{
	int i;
	char *cp;

	for (i = lfs_lognum; i != (lfs_lognum - 1) % LFS_LOGLENGTH; i = (i + 1) % LFS_LOGLENGTH)
	for (i = lfs_lognum; i != (lfs_lognum - 1) % LFS_LOGLENGTH;
	     i = (i + 1) % LFS_LOGLENGTH)
		if (lfs_log[i].file) {
			printf("lbn %" PRId64 " %s %lx %d %s\n",
			/* Only print out basename, for readability */
			cp = lfs_log[i].file;
			while(*cp)
				++cp;
			while(*cp != '/' && cp > lfs_log[i].file)
				--cp;

			printf("lbn %" PRId64 " %s %lx %d, %d %s\n",
			    lfs_log[i].block,
			    lfs_log[i].op,
			    lfs_log[i].flags,
			    lfs_log[i].pid,
			    lfs_log[i].line,
			    lfs_log[i].file + 56);
			    cp);
		}
}

@ -1,4 +1,4 @@
|
|||
/* $NetBSD: lfs_inode.c,v 1.90 2005/03/08 00:18:19 perseant Exp $ */
|
||||
/* $NetBSD: lfs_inode.c,v 1.91 2005/04/01 21:59:46 perseant Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
|
||||
|
@ -67,7 +67,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_inode.c,v 1.90 2005/03/08 00:18:19 perseant Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_inode.c,v 1.91 2005/04/01 21:59:46 perseant Exp $");
|
||||
|
||||
#if defined(_KERNEL_OPT)
|
||||
#include "opt_quota.h"
|
||||
|
@ -107,6 +107,7 @@ lfs_ifind(struct lfs *fs, ino_t ino, struct buf *bp)
|
|||
struct ufs1_dinode *dip = (struct ufs1_dinode *)bp->b_data;
|
||||
struct ufs1_dinode *ldip, *fin;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
/*
|
||||
* Read the inode block backwards, since later versions of the
|
||||
* inode will supercede earlier ones. Though it is unlikely, it is
|
||||
|
@ -143,6 +144,7 @@ lfs_update(void *v)
|
|||
int s;
|
||||
int flags;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
if (vp->v_mount->mnt_flag & MNT_RDONLY)
|
||||
return (0);
|
||||
ip = VTOI(vp);
|
||||
|
@ -155,12 +157,14 @@ lfs_update(void *v)
|
|||
* for our inode completes, if we are called with UPDATE_WAIT set.
|
||||
*/
|
||||
s = splbio();
|
||||
simple_lock(&vp->v_interlock);
|
||||
while ((ap->a_flags & (UPDATE_WAIT|UPDATE_DIROP)) == UPDATE_WAIT &&
|
||||
WRITEINPROG(vp)) {
|
||||
DLOG((DLOG_SEG, "lfs_update: sleeping on ino %d"
|
||||
" (in progress)\n", ip->i_number));
|
||||
tsleep(vp, (PRIBIO+1), "lfs_update", 0);
|
||||
ltsleep(vp, (PRIBIO+1), "lfs_update", 0, &vp->v_interlock);
|
||||
}
|
||||
simple_unlock(&vp->v_interlock);
|
||||
splx(s);
|
||||
TIMEVAL_TO_TIMESPEC(&time, &ts);
|
||||
LFS_ITIMES(ip,
|
||||
|
@ -176,6 +180,7 @@ lfs_update(void *v)
|
|||
/* If sync, push back the vnode and any dirty blocks it may have. */
|
||||
if ((ap->a_flags & (UPDATE_WAIT|UPDATE_DIROP)) == UPDATE_WAIT) {
|
||||
/* Avoid flushing VDIROP. */
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
++fs->lfs_diropwait;
|
||||
while (vp->v_flag & VDIROP) {
|
||||
DLOG((DLOG_DIROP, "lfs_update: sleeping on inode %d"
|
||||
|
@ -185,12 +190,13 @@ lfs_update(void *v)
|
|||
if (fs->lfs_dirops == 0)
|
||||
lfs_flush_fs(fs, SEGM_SYNC);
|
||||
else
|
||||
tsleep(&fs->lfs_writer, PRIBIO+1, "lfs_fsync",
|
||||
0);
|
||||
ltsleep(&fs->lfs_writer, PRIBIO+1, "lfs_fsync",
|
||||
0, &fs->lfs_interlock);
|
||||
/* XXX KS - by falling out here, are we writing the vn
|
||||
twice? */
|
||||
}
|
||||
--fs->lfs_diropwait;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
return lfs_vflush(vp);
|
||||
}
|
||||
return 0;
|
||||
|
@ -272,6 +278,7 @@ lfs_truncate(void *v)
|
|||
ioflag = ap->a_flags;
|
||||
usepc = (ovp->v_type == VREG && ovp != fs->lfs_ivnode);
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
/*
|
||||
* Lengthen the size of the file. We must ensure that the
|
||||
* last byte of the file is allocated. Since the smallest
|
||||
|
@ -372,8 +379,11 @@ lfs_truncate(void *v)
|
|||
memset((char *)bp->b_data + offset, 0,
|
||||
(u_int)(size - offset));
|
||||
allocbuf(bp, size, 1);
|
||||
if ((bp->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED)
|
||||
if ((bp->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED) {
|
||||
simple_lock(&lfs_subsys_lock);
|
||||
locked_queue_bytes -= obufsize - bp->b_bufsize;
|
||||
simple_unlock(&lfs_subsys_lock);
|
||||
}
|
||||
if (bp->b_flags & B_DELWRI)
|
||||
fs->lfs_avail += odb - btofsb(fs, size);
|
||||
(void) VOP_BWRITE(bp);
|
||||
|
@ -563,7 +573,9 @@ done:
|
|||
oip->i_size = oip->i_ffs1_size = length;
|
||||
oip->i_lfs_effnblks -= blocksreleased;
|
||||
oip->i_ffs1_blocks -= real_released;
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_bfree += blocksreleased;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
#ifdef DIAGNOSTIC
|
||||
if (oip->i_size == 0 &&
|
||||
(oip->i_ffs1_blocks != 0 || oip->i_lfs_effnblks != 0)) {
|
||||
|
@ -592,6 +604,7 @@ lfs_blkfree(struct lfs *fs, daddr_t daddr, size_t bsize, long *lastseg,
|
|||
long seg;
|
||||
int error = 0;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
bsize = fragroundup(fs, bsize);
|
||||
if (daddr > 0) {
|
||||
if (*lastseg != (seg = dtosn(fs, daddr))) {
|
||||
|
@ -611,6 +624,7 @@ lfs_update_seguse(struct lfs *fs, long lastseg, size_t num)
|
|||
SEGUSE *sup;
|
||||
struct buf *bp;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
if (lastseg < 0 || num == 0)
|
||||
return 0;
|
||||
|
||||
|
@ -652,6 +666,7 @@ lfs_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
|
|||
int nblocks, blocksreleased = 0, real_released = 0;
|
||||
int error = 0, allerror = 0;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
/*
|
||||
* Calculate index in current block of last
|
||||
* block to be kept. -1 indicates the entire
|
||||
|
@ -787,6 +802,7 @@ lfs_vtruncbuf(struct vnode *vp, daddr_t lbn, int slpflag, int slptimeo)
|
|||
fs = VTOI(vp)->i_lfs;
|
||||
s = splbio();
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
restart:
|
||||
for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
|
||||
nbp = LIST_NEXT(bp, b_vnbufs);
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: lfs_segment.c,v 1.158 2005/03/08 00:18:19 perseant Exp $ */
|
||||
/* $NetBSD: lfs_segment.c,v 1.159 2005/04/01 21:59:46 perseant Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
|
||||
|
@ -67,7 +67,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.158 2005/03/08 00:18:19 perseant Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.159 2005/04/01 21:59:46 perseant Exp $");
|
||||
|
||||
#ifdef DEBUG
|
||||
# define vndebug(vp, str) do { \
|
||||
|
@ -167,6 +167,7 @@ lfs_imtime(struct lfs *fs)
|
|||
struct timespec ts;
|
||||
struct inode *ip;
|
||||
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
TIMEVAL_TO_TIMESPEC(&time, &ts);
|
||||
ip = VTOI(fs->lfs_ivnode);
|
||||
ip->i_ffs1_mtime = ts.tv_sec;
|
||||
|
@ -200,6 +201,7 @@ lfs_vflush(struct vnode *vp)
|
|||
ip = VTOI(vp);
|
||||
fs = VFSTOUFS(vp->v_mount)->um_lfs;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
if (ip->i_flag & IN_CLEANING) {
|
||||
ivndebug(vp,"vflush/in_cleaning");
|
||||
LFS_CLR_UINO(ip, IN_CLEANING);
|
||||
|
@ -267,12 +269,14 @@ lfs_vflush(struct vnode *vp)
|
|||
}
|
||||
|
||||
/* If the node is being written, wait until that is done */
|
||||
simple_lock(&vp->v_interlock);
|
||||
s = splbio();
|
||||
if (WRITEINPROG(vp)) {
|
||||
ivndebug(vp,"vflush/writeinprog");
|
||||
tsleep(vp, PRIBIO+1, "lfs_vw", 0);
|
||||
ltsleep(vp, (PRIBIO+1), "lfs_vw", 0, &vp->v_interlock);
|
||||
}
|
||||
splx(s);
|
||||
simple_unlock(&vp->v_interlock);
|
||||
|
||||
/* Protect against VXLOCK deadlock in vinvalbuf() */
|
||||
lfs_seglock(fs, SEGM_SYNC);
|
||||
|
@ -359,11 +363,15 @@ lfs_vflush(struct vnode *vp)
|
|||
if (flushed && vp != fs->lfs_ivnode)
|
||||
lfs_writeseg(fs, sp);
|
||||
else do {
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_flags &= ~LFS_IFDIRTY;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
lfs_writefile(fs, sp, vp);
|
||||
redo = lfs_writeinode(fs, sp, ip);
|
||||
redo += lfs_writeseg(fs, sp);
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
redo += (fs->lfs_flags & LFS_IFDIRTY);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
} while (redo && vp == fs->lfs_ivnode);
|
||||
#endif
|
||||
if (lfs_dostats) {
|
||||
|
@ -385,12 +393,11 @@ lfs_vflush(struct vnode *vp)
|
|||
*/
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
if (fs->lfs_seglock > 1) {
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
while (fs->lfs_iocount > 1)
|
||||
(void)tsleep(&fs->lfs_iocount, PRIBIO + 1,
|
||||
"lfs_vflush", 0);
|
||||
} else
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
(void)ltsleep(&fs->lfs_iocount, PRIBIO + 1,
|
||||
"lfs_vflush", 0, &fs->lfs_interlock);
|
||||
}
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
|
||||
lfs_segunlock(fs);
|
||||
|
||||
|
@ -416,6 +423,7 @@ lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
|
|||
struct vnode *vp, *nvp;
|
||||
int inodes_written = 0, only_cleaning;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
#ifndef LFS_NO_BACKVP_HACK
|
||||
/* BEGIN HACK */
|
||||
#define VN_OFFSET \
|
||||
|
@ -489,17 +497,18 @@ lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
|
|||
only_cleaning =
|
||||
((ip->i_flag & IN_ALLMOD) == IN_CLEANING);
|
||||
|
||||
if (ip->i_number != LFS_IFILE_INUM)
|
||||
if (ip->i_number != LFS_IFILE_INUM) {
|
||||
lfs_writefile(fs, sp, vp);
|
||||
if (!VPISEMPTY(vp)) {
|
||||
if (WRITEINPROG(vp)) {
|
||||
ivndebug(vp,"writevnodes/write2");
|
||||
} else if (!(ip->i_flag & IN_ALLMOD)) {
|
||||
LFS_SET_UINO(ip, IN_MODIFIED);
|
||||
if (!VPISEMPTY(vp)) {
|
||||
if (WRITEINPROG(vp)) {
|
||||
ivndebug(vp,"writevnodes/write2");
|
||||
} else if (!(ip->i_flag & IN_ALLMOD)) {
|
||||
LFS_SET_UINO(ip, IN_MODIFIED);
|
||||
}
|
||||
}
|
||||
(void) lfs_writeinode(fs, sp, ip);
|
||||
inodes_written++;
|
||||
}
|
||||
(void) lfs_writeinode(fs, sp, ip);
|
||||
inodes_written++;
|
||||
}
|
||||
|
||||
if (lfs_clean_vnhead && only_cleaning)
|
||||
|
@ -529,6 +538,7 @@ lfs_segwrite(struct mount *mp, int flags)
|
|||
int redo;
|
||||
|
||||
fs = VFSTOUFS(mp)->um_lfs;
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
|
||||
if (fs->lfs_ronly)
|
||||
return EROFS;
|
||||
|
@ -614,15 +624,19 @@ lfs_segwrite(struct mount *mp, int flags)
|
|||
}
|
||||
}
|
||||
|
||||
LOCK_ASSERT(LFS_SEGLOCK_HELD(fs));
|
||||
|
||||
did_ckp = 0;
|
||||
if (do_ckp || fs->lfs_doifile) {
|
||||
vp = fs->lfs_ivnode;
|
||||
vn_lock(vp, LK_EXCLUSIVE);
|
||||
do {
|
||||
vp = fs->lfs_ivnode;
|
||||
|
||||
#ifdef DEBUG
|
||||
LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0);
|
||||
LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0, curproc->p_pid);
|
||||
#endif
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_flags &= ~LFS_IFDIRTY;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
|
||||
ip = VTOI(vp);
|
||||
|
||||
|
@ -633,7 +647,9 @@ lfs_segwrite(struct mount *mp, int flags)
|
|||
++did_ckp;
|
||||
redo = lfs_writeinode(fs, sp, ip);
|
||||
redo += lfs_writeseg(fs, sp);
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
redo += (fs->lfs_flags & LFS_IFDIRTY);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
} while (redo && do_ckp);
|
||||
|
||||
/*
|
||||
|
@ -649,16 +665,23 @@ lfs_segwrite(struct mount *mp, int flags)
|
|||
}
|
||||
#ifdef DIAGNOSTIC
|
||||
else if (do_ckp) {
|
||||
int do_panic = 0;
|
||||
LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
|
||||
if (bp->b_lblkno < fs->lfs_cleansz +
|
||||
fs->lfs_segtabsz &&
|
||||
!(bp->b_flags & B_GATHERED)) {
|
||||
panic("dirty blocks");
|
||||
printf("ifile lbn %ld still dirty (flags %lx)\n",
|
||||
(long)bp->b_lblkno,
|
||||
(long)bp->b_flags);
|
||||
++do_panic;
|
||||
}
|
||||
}
|
||||
if (do_panic)
|
||||
panic("dirty blocks");
|
||||
}
|
||||
#endif
|
||||
splx(s);
|
||||
VOP_UNLOCK(vp, 0);
|
||||
} else {
|
||||
(void) lfs_writeseg(fs, sp);
|
||||
}
|
||||
|
@ -703,6 +726,7 @@ lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
|
|||
IFILE *ifp;
|
||||
int i, frag;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
ip = VTOI(vp);
|
||||
|
||||
if (sp->seg_bytes_left < fs->lfs_bsize ||
|
||||
|
@ -812,6 +836,7 @@ lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
|
|||
struct timespec ts;
|
||||
int gotblk = 0;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
if (!(ip->i_flag & IN_ALLMOD))
|
||||
return (0);
|
||||
|
||||
|
@ -989,8 +1014,11 @@ lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
|
|||
sup->su_nbytes -= sizeof (struct ufs1_dinode);
|
||||
redo_ifile =
|
||||
(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
|
||||
if (redo_ifile)
|
||||
if (redo_ifile) {
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_flags |= LFS_IFDIRTY;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
}
|
||||
LFS_WRITESEGENTRY(sup, fs, oldsn, bp); /* Ifile */
|
||||
}
|
||||
return (redo_ifile);
|
||||
|
@ -1003,6 +1031,7 @@ lfs_gatherblock(struct segment *sp, struct buf *bp, int *sptr)
|
|||
int version;
|
||||
int j, blksinblk;
|
||||
|
||||
ASSERT_SEGLOCK(sp->fs);
|
||||
/*
|
||||
* If full, finish this segment. We may be doing I/O, so
|
||||
* release and reacquire the splbio().
|
||||
|
@ -1062,6 +1091,7 @@ lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp,
|
|||
struct buf *bp, *nbp;
|
||||
int s, count = 0;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
KASSERT(sp->vp == NULL);
|
||||
sp->vp = vp;
|
||||
s = splbio();
|
||||
|
@ -1174,6 +1204,7 @@ lfs_update_single(struct lfs *fs, struct segment *sp, struct vnode *vp,
|
|||
int num, error;
|
||||
int bb, osize, obb;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
KASSERT(sp == NULL || sp->vp == vp);
|
||||
ip = VTOI(vp);
|
||||
|
||||
|
@ -1275,8 +1306,11 @@ lfs_update_single(struct lfs *fs, struct segment *sp, struct vnode *vp,
|
|||
dtosn(fs, daddr), osize,
|
||||
ip->i_number, lbn, daddr));
|
||||
sup->su_nbytes -= osize;
|
||||
if (!(bp->b_flags & B_GATHERED))
|
||||
if (!(bp->b_flags & B_GATHERED)) {
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_flags |= LFS_IFDIRTY;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
}
|
||||
LFS_WRITESEGENTRY(sup, fs, oldsn, bp);
|
||||
}
|
||||
/*
|
||||
|
@ -1303,6 +1337,7 @@ lfs_updatemeta(struct segment *sp)
|
|||
int bb;
|
||||
int bytesleft, size;
|
||||
|
||||
ASSERT_SEGLOCK(sp->fs);
|
||||
vp = sp->vp;
|
||||
nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
|
||||
KASSERT(nblocks >= 0);
|
||||
|
@ -1408,6 +1443,7 @@ lfs_initseg(struct lfs *fs)
|
|||
struct buf *sbp; /* buffer for SEGSUM */
|
||||
int repeat = 0; /* return value */
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
/* Advance to the next segment. */
|
||||
if (!LFS_PARTIAL_FITS(fs)) {
|
||||
SEGUSE *sup;
|
||||
|
@ -1456,10 +1492,12 @@ lfs_initseg(struct lfs *fs)
|
|||
fs->lfs_cleanint[fs->lfs_cleanind] = fs->lfs_offset;
|
||||
if (++fs->lfs_cleanind >= LFS_MAX_CLEANIND) {
|
||||
/* "1" is the artificial inc in lfs_seglock */
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
while (fs->lfs_iocount > 1) {
|
||||
tsleep(&fs->lfs_iocount, PRIBIO + 1,
|
||||
"lfs_initseg", 0);
|
||||
ltsleep(&fs->lfs_iocount, PRIBIO + 1,
|
||||
"lfs_initseg", 0, &fs->lfs_interlock);
|
||||
}
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
fs->lfs_cleanind = 0;
|
||||
}
|
||||
}
|
||||
|
@ -1513,6 +1551,7 @@ lfs_newseg(struct lfs *fs)
|
|||
struct buf *bp;
|
||||
int curseg, isdirty, sn;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
LFS_SEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
|
||||
DLOG((DLOG_SU, "lfs_newseg: seg %d := 0 in newseg\n",
|
||||
dtosn(fs, fs->lfs_nextseg)));
|
||||
|
@ -1561,6 +1600,7 @@ lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr, int n)
|
|||
struct buf **bpp, *bp;
|
||||
int s;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
cl = (struct lfs_cluster *)pool_get(&fs->lfs_clpool, PR_WAITOK);
|
||||
bpp = (struct buf **)pool_get(&fs->lfs_bpppool, PR_WAITOK);
|
||||
memset(cl, 0, sizeof(*cl));
|
||||
|
@ -1611,6 +1651,7 @@ lfs_writeseg(struct lfs *fs, struct segment *sp)
|
|||
int changed;
|
||||
u_int32_t sum;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
/*
|
||||
* If there are no buffers other than the segment summary to write
|
||||
* and it is not a checkpoint, don't do anything. On a checkpoint,
|
||||
|
@ -1672,20 +1713,24 @@ lfs_writeseg(struct lfs *fs, struct segment *sp)
|
|||
bp->b_flags |= B_BUSY;
|
||||
continue;
|
||||
}
|
||||
again:
|
||||
|
||||
simple_lock(&bp->b_interlock);
|
||||
s = splbio();
|
||||
if (bp->b_flags & B_BUSY) {
|
||||
while (bp->b_flags & B_BUSY) {
|
||||
DLOG((DLOG_SEG, "lfs_writeseg: avoiding potential"
|
||||
" data summary corruption for ino %d, lbn %"
|
||||
PRId64 "\n",
|
||||
VTOI(bp->b_vp)->i_number, bp->b_lblkno));
|
||||
bp->b_flags |= B_WANTED;
|
||||
tsleep(bp, (PRIBIO + 1), "lfs_writeseg", 0);
|
||||
ltsleep(bp, (PRIBIO + 1), "lfs_writeseg", 0,
|
||||
&bp->b_interlock);
|
||||
splx(s);
|
||||
goto again;
|
||||
s = splbio();
|
||||
}
|
||||
bp->b_flags |= B_BUSY;
|
||||
splx(s);
|
||||
simple_unlock(&bp->b_interlock);
|
||||
|
||||
/*
|
||||
* Check and replace indirect block UNWRITTEN bogosity.
|
||||
* XXX See comment in lfs_writefile.
|
||||
|
@ -1796,8 +1841,10 @@ lfs_writeseg(struct lfs *fs, struct segment *sp)
|
|||
ssp->ss_sumsum = cksum(&ssp->ss_datasum,
|
||||
fs->lfs_sumsize - sizeof(ssp->ss_sumsum));
|
||||
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_bfree -= (btofsb(fs, ninos * fs->lfs_ibsize) +
|
||||
btofsb(fs, fs->lfs_sumsize));
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
|
||||
/*
|
||||
* When we simply write the blocks we lose a rotation for every block
|
||||
|
@ -1833,7 +1880,9 @@ lfs_writeseg(struct lfs *fs, struct segment *sp)
|
|||
/*
|
||||
* Construct the cluster.
|
||||
*/
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
++fs->lfs_iocount;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
while (i && cbp->b_bcount < CHUNKSIZE) {
|
||||
bp = *bpp;
|
||||
|
||||
|
@ -1932,6 +1981,7 @@ lfs_writesuper(struct lfs *fs, daddr_t daddr)
|
|||
int s;
|
||||
struct vnode *devvp = VTOI(fs->lfs_ivnode)->i_devvp;
|
||||
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
#ifdef DIAGNOSTIC
|
||||
KASSERT(fs->lfs_magic == LFS_MAGIC);
|
||||
#endif
|
||||
|
@ -1940,12 +1990,15 @@ lfs_writesuper(struct lfs *fs, daddr_t daddr)
|
|||
* progress, we risk not having a complete checkpoint if we crash.
|
||||
* So, block here if a superblock write is in progress.
|
||||
*/
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
s = splbio();
|
||||
while (fs->lfs_sbactive) {
|
||||
tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0);
|
||||
ltsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0,
|
||||
&fs->lfs_interlock);
|
||||
}
|
||||
fs->lfs_sbactive = daddr;
|
||||
splx(s);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
|
||||
/* Set timestamp of this version of the superblock */
|
||||
if (fs->lfs_version == 1)
|
||||
|
@ -1972,7 +2025,9 @@ lfs_writesuper(struct lfs *fs, daddr_t daddr)
|
|||
s = splbio();
|
||||
V_INCR_NUMOUTPUT(bp->b_vp);
|
||||
splx(s);
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
++fs->lfs_iocount;
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
VOP_STRATEGY(devvp, bp);
|
||||
}
|
||||
|
||||
|
@ -1984,6 +2039,7 @@ int
|
|||
lfs_match_fake(struct lfs *fs, struct buf *bp)
|
||||
{
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
return LFS_IS_MALLOC_BUF(bp);
|
||||
}
|
||||
|
||||
|
@ -1992,6 +2048,7 @@ int
|
|||
lfs_match_real(struct lfs *fs, struct buf *bp)
|
||||
{
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
return (lfs_match_data(fs, bp) && !lfs_match_fake(fs, bp));
|
||||
}
|
||||
#endif
|
||||
|
@ -2000,6 +2057,7 @@ int
|
|||
lfs_match_data(struct lfs *fs, struct buf *bp)
|
||||
{
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
return (bp->b_lblkno >= 0);
|
||||
}
|
||||
|
||||
|
@ -2008,6 +2066,7 @@ lfs_match_indir(struct lfs *fs, struct buf *bp)
|
|||
{
|
||||
daddr_t lbn;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
lbn = bp->b_lblkno;
|
||||
return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
|
||||
}
|
||||
|
@ -2017,6 +2076,7 @@ lfs_match_dindir(struct lfs *fs, struct buf *bp)
|
|||
{
|
||||
daddr_t lbn;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
lbn = bp->b_lblkno;
|
||||
return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
|
||||
}
|
||||
|
@ -2026,6 +2086,7 @@ lfs_match_tindir(struct lfs *fs, struct buf *bp)
|
|||
{
|
||||
daddr_t lbn;
|
||||
|
||||
ASSERT_SEGLOCK(fs);
|
||||
lbn = bp->b_lblkno;
|
||||
return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
|
||||
}
|
||||
|
@ -2043,6 +2104,7 @@ lfs_callback(struct buf *bp)
|
|||
struct lfs *fs;
|
||||
|
||||
fs = bp->b_private;
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
lfs_freebuf(fs, bp);
|
||||
}
|
||||
|
||||
|
@ -2052,10 +2114,13 @@ lfs_super_aiodone(struct buf *bp)
|
|||
struct lfs *fs;
|
||||
|
||||
fs = bp->b_private;
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
fs->lfs_sbactive = 0;
|
||||
wakeup(&fs->lfs_sbactive);
|
||||
if (--fs->lfs_iocount <= 1)
|
||||
wakeup(&fs->lfs_iocount);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
wakeup(&fs->lfs_sbactive);
|
||||
lfs_freebuf(fs, bp);
|
||||
}
|
||||
|
||||
|
@ -2075,6 +2140,7 @@ lfs_cluster_aiodone(struct buf *bp)
|
|||
cl = bp->b_private;
|
||||
fs = cl->fs;
|
||||
devvp = VTOI(fs->lfs_ivnode)->i_devvp;
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
|
||||
/* Put the pages back, and release the buffer */
|
||||
while (cl->bufcount--) {
|
||||
|
@ -2177,12 +2243,14 @@ lfs_cluster_aiodone(struct buf *bp)
|
|||
if (--cl->seg->seg_iocount == 0)
|
||||
wakeup(&cl->seg->seg_iocount);
|
||||
}
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
#ifdef DIAGNOSTIC
|
||||
if (fs->lfs_iocount == 0)
|
||||
panic("lfs_cluster_aiodone: zero iocount");
|
||||
#endif
|
||||
if (--fs->lfs_iocount <= 1)
|
||||
wakeup(&fs->lfs_iocount);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
|
||||
pool_put(&fs->lfs_bpppool, cl->bpp);
|
||||
cl->bpp = NULL;
|
||||
|
@ -2293,6 +2361,7 @@ lfs_shellsort(struct buf **bp_array, int32_t *lb_array, int nmemb, int size)
|
|||
int
|
||||
lfs_vref(struct vnode *vp)
|
||||
{
|
||||
ASSERT_MAYBE_SEGLOCK(VTOI(vp)->i_lfs);
|
||||
/*
|
||||
* If we return 1 here during a flush, we risk vinvalbuf() not
|
||||
* being able to flush all of the pages from this vnode, which
|
||||
|
@ -2314,6 +2383,7 @@ lfs_vref(struct vnode *vp)
|
|||
void
|
||||
lfs_vunref(struct vnode *vp)
|
||||
{
|
||||
ASSERT_MAYBE_SEGLOCK(VTOI(vp)->i_lfs);
|
||||
/*
|
||||
* Analogous to lfs_vref, if the node is flushing, fake it.
|
||||
*/
|
||||
|
@ -2359,6 +2429,7 @@ void
|
|||
lfs_vunref_head(struct vnode *vp)
|
||||
{
|
||||
|
||||
ASSERT_SEGLOCK(VTOI(vp)->i_lfs);
|
||||
simple_lock(&vp->v_interlock);
|
||||
#ifdef DIAGNOSTIC
|
||||
if (vp->v_usecount == 0) {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: lfs_subr.c,v 1.50 2005/03/08 00:18:20 perseant Exp $ */
|
||||
/* $NetBSD: lfs_subr.c,v 1.51 2005/04/01 21:59:46 perseant Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
|
||||
|
@ -67,7 +67,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.50 2005/03/08 00:18:20 perseant Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.51 2005/04/01 21:59:46 perseant Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
|
@ -144,6 +144,7 @@ lfs_setup_resblks(struct lfs *fs)
|
|||
int i, j;
|
||||
int maxbpp;
|
||||
|
||||
ASSERT_NO_SEGLOCK(fs);
|
||||
fs->lfs_resblk = (res_t *)malloc(LFS_N_TOTAL * sizeof(res_t), M_SEGMENT,
|
||||
M_WAITOK);
|
||||
for (i = 0; i < LFS_N_TOTAL; i++) {
|
||||
|
@ -195,13 +196,16 @@ lfs_free_resblks(struct lfs *fs)
|
|||
pool_destroy(&fs->lfs_segpool);
|
||||
pool_destroy(&fs->lfs_clpool);
|
||||
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
for (i = 0; i < LFS_N_TOTAL; i++) {
|
||||
while (fs->lfs_resblk[i].inuse)
|
||||
tsleep(&fs->lfs_resblk, PRIBIO + 1, "lfs_free", 0);
|
||||
ltsleep(&fs->lfs_resblk, PRIBIO + 1, "lfs_free", 0,
|
||||
&fs->lfs_interlock);
|
||||
if (fs->lfs_resblk[i].p != NULL)
|
||||
free(fs->lfs_resblk[i].p, M_SEGMENT);
|
||||
}
|
||||
free(fs->lfs_resblk, M_SEGMENT);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
|
@ -222,6 +226,7 @@ lfs_malloc(struct lfs *fs, size_t size, int type)
|
|||
int i, s, start;
|
||||
unsigned int h;
|
||||
|
||||
ASSERT_MAYBE_SEGLOCK(fs);
|
||||
r = NULL;
|
||||
|
||||
/* If no mem allocated for this type, it just waits */
|
||||
|
@ -241,6 +246,8 @@ lfs_malloc(struct lfs *fs, size_t size, int type)
|
|||
* at least one cluster block, at least one superblock,
|
||||
* and several indirect blocks.
|
||||
*/
|
||||
|
||||
simple_lock(&fs->lfs_interlock);
|
||||
/* skip over blocks of other types */
|
||||
for (i = 0, start = 0; i < type; i++)
|
||||
start += lfs_res_qty[i];
|
||||
|
@ -255,14 +262,19 @@ lfs_malloc(struct lfs *fs, size_t size, int type)
|
|||
s = splbio();
|
||||
LIST_INSERT_HEAD(&fs->lfs_reshash[h], re, res);
|
||||
splx(s);
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
DLOG((DLOG_MALLOC, "sleeping on %s (%d)\n", lfs_res_names[type], lfs_res_qty[type]));
|
||||
tsleep(&fs->lfs_resblk, PVM, "lfs_malloc", 0);
|
||||
DLOG((DLOG_MALLOC, "done sleeping on %s\n", lfs_res_names[type]));
|
||||
DLOG((DLOG_MALLOC, "sleeping on %s (%d)\n",
|
||||
lfs_res_names[type], lfs_res_qty[type]));
|
||||
ltsleep(&fs->lfs_resblk, PVM, "lfs_malloc", 0,
|
||||
&fs->lfs_interlock);
|
||||
DLOG((DLOG_MALLOC, "done sleeping on %s\n",
|
||||
lfs_res_names[type]));
|
||||
}
|
||||
/* NOTREACHED */
|
||||
simple_unlock(&fs->lfs_interlock);
|
||||
return r;
|
||||
}

@@ -276,7 +288,9 @@ lfs_free(struct lfs *fs, void *p, int type)
int i;
#endif

ASSERT_MAYBE_SEGLOCK(fs);
h = lfs_mhash(p);
simple_lock(&fs->lfs_interlock);
s = splbio();
LIST_FOREACH(re, &fs->lfs_reshash[h], res) {
if (re->p == p) {
@@ -285,6 +299,7 @@ lfs_free(struct lfs *fs, void *p, int type)
re->inuse = 0;
wakeup(&fs->lfs_resblk);
splx(s);
simple_unlock(&fs->lfs_interlock);
return;
}
}
@@ -295,7 +310,8 @@ lfs_free(struct lfs *fs, void *p, int type)
}
#endif
splx(s);

simple_unlock(&fs->lfs_interlock);

/*
* If we didn't find it, free it.
*/
@@ -321,9 +337,12 @@ lfs_seglock(struct lfs *fs, unsigned long flags)
} else if (flags & SEGM_PAGEDAEMON) {
simple_unlock(&fs->lfs_interlock);
return EWOULDBLOCK;
} else while (fs->lfs_seglock)
(void)ltsleep(&fs->lfs_seglock, PRIBIO + 1,
"lfs seglock", 0, &fs->lfs_interlock);
} else {
while (fs->lfs_seglock) {
(void)ltsleep(&fs->lfs_seglock, PRIBIO + 1,
"lfs seglock", 0, &fs->lfs_interlock);
}
}
}

fs->lfs_seglock = 1;
@@ -331,6 +350,9 @@ lfs_seglock(struct lfs *fs, unsigned long flags)
simple_unlock(&fs->lfs_interlock);
fs->lfs_cleanind = 0;

#ifdef DEBUG
LFS_ENTER_LOG("seglock", __FILE__, __LINE__, 0, flags, curproc->p_pid);
#endif
/* Drain fragment size changes out */
lockmgr(&fs->lfs_fraglock, LK_EXCLUSIVE, 0);

@@ -347,7 +369,9 @@ lfs_seglock(struct lfs *fs, unsigned long flags)
* so we artificially increment it by one until we've scheduled all of
* the writes we intend to do.
*/
simple_lock(&fs->lfs_interlock);
++fs->lfs_iocount;
simple_unlock(&fs->lfs_interlock);
return 0;
}

@@ -360,36 +384,53 @@ lfs_unmark_dirop(struct lfs *fs)
struct vnode *vp;
int doit;

ASSERT_NO_SEGLOCK(fs);
simple_lock(&fs->lfs_interlock);
doit = !(fs->lfs_flags & LFS_UNDIROP);
if (doit)
fs->lfs_flags |= LFS_UNDIROP;
simple_unlock(&fs->lfs_interlock);
if (!doit)
if (!doit) {
simple_unlock(&fs->lfs_interlock);
return;
}

for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
nip = TAILQ_NEXT(ip, i_lfs_dchain);
simple_unlock(&fs->lfs_interlock);
vp = ITOV(ip);

simple_lock(&vp->v_interlock);
if (VOP_ISLOCKED(vp) &&
vp->v_lock.lk_lockholder != curproc->p_pid) {
simple_lock(&fs->lfs_interlock);
simple_unlock(&vp->v_interlock);
continue;
}
if ((VTOI(vp)->i_flag & IN_ADIROP) == 0) {
simple_lock(&fs->lfs_interlock);
simple_lock(&lfs_subsys_lock);
--lfs_dirvcount;
simple_unlock(&lfs_subsys_lock);
vp->v_flag &= ~VDIROP;
TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
simple_unlock(&fs->lfs_interlock);
wakeup(&lfs_dirvcount);
simple_unlock(&vp->v_interlock);
simple_lock(&fs->lfs_interlock);
fs->lfs_unlockvp = vp;
simple_unlock(&fs->lfs_interlock);
vrele(vp);
simple_lock(&fs->lfs_interlock);
fs->lfs_unlockvp = NULL;
}
simple_unlock(&fs->lfs_interlock);
} else
simple_unlock(&vp->v_interlock);
simple_lock(&fs->lfs_interlock);
}

simple_lock(&fs->lfs_interlock);
fs->lfs_flags &= ~LFS_UNDIROP;
simple_unlock(&fs->lfs_interlock);
wakeup(&fs->lfs_flags);
}

static void
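lfs_unmark_dirop() above shows the traversal idiom used for the dirop chain once it is covered by lfs_interlock: the next pointer is sampled while the lock is held, the lock is dropped for the per-vnode work (which may sleep), and it is retaken before advancing. A simplified, hypothetical sketch of that idiom follows; the real function additionally records the vnode it is releasing in fs->lfs_unlockvp around the vrele() call, a detail the sketch omits.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/queue.h>

struct item {
	TAILQ_ENTRY(item) i_chain;
};
TAILQ_HEAD(itemhead, item);

/*
 * Sketch: walk a TAILQ whose membership is protected by *lk, calling
 * a work function that may sleep.  The next element is captured
 * before the lock is dropped so the walk can continue even if the
 * current element is unlinked in the meantime.
 */
static void
walk_items(struct itemhead *head, struct simplelock *lk,
    void (*work)(struct item *))
{
	struct item *ip, *nip;

	simple_lock(lk);
	for (ip = TAILQ_FIRST(head); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_chain);
		simple_unlock(lk);
		(*work)(ip);		/* may sleep or unlink ip */
		simple_lock(lk);
	}
	simple_unlock(lk);
}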

@@ -397,6 +438,7 @@ lfs_auto_segclean(struct lfs *fs)
{
int i, error, s, waited;

ASSERT_SEGLOCK(fs);
/*
* Now that we've swapped lfs_activesb, but while we still
* hold the segment lock, run through the segment list marking
@@ -413,11 +455,13 @@ lfs_auto_segclean(struct lfs *fs)
(SEGUSE_DIRTY | SEGUSE_EMPTY)) {

/* Make sure the sb is written before we clean */
simple_lock(&fs->lfs_interlock);
s = splbio();
while (waited == 0 && fs->lfs_sbactive)
tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs asb",
0);
ltsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs asb",
0, &fs->lfs_interlock);
splx(s);
simple_unlock(&fs->lfs_interlock);
waited = 1;

if ((error = lfs_do_segclean(fs, i)) != 0) {
@@ -444,6 +488,7 @@ lfs_segunlock(struct lfs *fs)
sp = fs->lfs_sp;

simple_lock(&fs->lfs_interlock);
LOCK_ASSERT(LFS_SEGLOCK_HELD(fs));
if (fs->lfs_seglock == 1) {
if ((sp->seg_flags & SEGM_PROT) == 0)
do_unmark_dirop = 1;
@@ -476,16 +521,21 @@ lfs_segunlock(struct lfs *fs)
* At the moment, the user's process hangs around so we can
* sleep.
*/
simple_lock(&fs->lfs_interlock);
if (--fs->lfs_iocount == 0)
LFS_DEBUG_COUNTLOCKED("lfs_segunlock");
if (fs->lfs_iocount <= 1)
wakeup(&fs->lfs_iocount);
simple_unlock(&fs->lfs_interlock);
/*
* If we're not checkpointing, we don't have to block
* other processes to wait for a synchronous write
* to complete.
*/
if (!ckp) {
#ifdef DEBUG
LFS_ENTER_LOG("segunlock_std", __FILE__, __LINE__, 0, 0, curproc->p_pid);
#endif
simple_lock(&fs->lfs_interlock);
--fs->lfs_seglock;
fs->lfs_lockpid = 0;
@@ -499,14 +549,16 @@ lfs_segunlock(struct lfs *fs)
* superblocks to make sure that the checkpoint described
* by a superblock completed.
*/
simple_lock(&fs->lfs_interlock);
while (ckp && sync && fs->lfs_iocount)
(void)tsleep(&fs->lfs_iocount, PRIBIO + 1,
"lfs_iocount", 0);
(void)ltsleep(&fs->lfs_iocount, PRIBIO + 1,
"lfs_iocount", 0, &fs->lfs_interlock);
while (sync && sp->seg_iocount) {
(void)tsleep(&sp->seg_iocount, PRIBIO + 1,
"seg_iocount", 0);
(void)ltsleep(&sp->seg_iocount, PRIBIO + 1,
"seg_iocount", 0, &fs->lfs_interlock);
DLOG((DLOG_SEG, "sleeping on iocount %x == %d\n", sp, sp->seg_iocount));
}
simple_unlock(&fs->lfs_interlock);
if (sync)
pool_put(&fs->lfs_segpool, sp);

@@ -524,6 +576,9 @@ lfs_segunlock(struct lfs *fs)
lfs_auto_segclean(fs);
}
fs->lfs_activesb = 1 - fs->lfs_activesb;
#ifdef DEBUG
LFS_ENTER_LOG("segunlock_ckp", __FILE__, __LINE__, 0, 0, curproc->p_pid);
#endif
simple_lock(&fs->lfs_interlock);
--fs->lfs_seglock;
fs->lfs_lockpid = 0;
@@ -551,6 +606,7 @@ lfs_writer_enter(struct lfs *fs, const char *wmesg)
{
int error = 0;

ASSERT_MAYBE_SEGLOCK(fs);
simple_lock(&fs->lfs_interlock);

/* disallow dirops during flush */
@@ -559,7 +615,7 @@ lfs_writer_enter(struct lfs *fs, const char *wmesg)
while (fs->lfs_dirops > 0) {
++fs->lfs_diropwait;
error = ltsleep(&fs->lfs_writer, PRIBIO+1, wmesg, 0,
&fs->lfs_interlock);
&fs->lfs_interlock);
--fs->lfs_diropwait;
}

@@ -576,6 +632,7 @@ lfs_writer_leave(struct lfs *fs)
{
boolean_t dowakeup;

ASSERT_MAYBE_SEGLOCK(fs);
simple_lock(&fs->lfs_interlock);
dowakeup = !(--fs->lfs_writer);
simple_unlock(&fs->lfs_interlock);
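lfs_writer_enter() and lfs_writer_leave() above follow a common gate shape: the counters are only examined or modified with lfs_interlock held, the sleep in the enter path is an ltsleep() on that same interlock, and the leave path decides whether a wakeup is due while holding the lock but issues the wakeup() only after dropping it. A hypothetical, self-contained sketch of that shape (the names are invented, not the LFS fields):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/proc.h>

struct gate {
	struct simplelock g_interlock;
	int g_holders;		/* threads currently holding the gate */
	int g_active;		/* operations the gate must wait out */
};

static void
gate_enter(struct gate *g, const char *wmesg)
{
	simple_lock(&g->g_interlock);
	g->g_holders++;
	/* Finishing operations decrement g_active and wakeup(&g->g_active). */
	while (g->g_active > 0)
		(void)ltsleep(&g->g_active, PRIBIO + 1, wmesg, 0,
		    &g->g_interlock);
	simple_unlock(&g->g_interlock);
}

static void
gate_leave(struct gate *g)
{
	int dowakeup;

	simple_lock(&g->g_interlock);
	dowakeup = (--g->g_holders == 0);
	simple_unlock(&g->g_interlock);
	/* Wake anyone blocked on the gate, after the lock is dropped. */
	if (dowakeup)
		wakeup(&g->g_holders);
}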

@@ -1,4 +1,4 @@
/* $NetBSD: lfs_syscalls.c,v 1.103 2005/03/08 00:18:20 perseant Exp $ */
/* $NetBSD: lfs_syscalls.c,v 1.104 2005/04/01 21:59:46 perseant Exp $ */

/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.103 2005/03/08 00:18:20 perseant Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.104 2005/04/01 21:59:46 perseant Exp $");

#ifndef LFS
# define LFS /* for prototypes in syscallargs.h */
@@ -884,8 +884,10 @@ lfs_do_segclean(struct lfs *fs, unsigned long segnum)
if (fs->lfs_version > 1 && segnum == 0 &&
fs->lfs_start < btofsb(fs, LFS_LABELPAD))
fs->lfs_avail -= btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
simple_lock(&fs->lfs_interlock);
fs->lfs_bfree += sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
simple_unlock(&fs->lfs_interlock);
fs->lfs_dmeta -= sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
if (fs->lfs_dmeta < 0)
@@ -898,7 +900,9 @@ lfs_do_segclean(struct lfs *fs, unsigned long segnum)
--cip->dirty;
fs->lfs_nclean = cip->clean;
cip->bfree = fs->lfs_bfree;
simple_lock(&fs->lfs_interlock);
cip->avail = fs->lfs_avail - fs->lfs_ravail - fs->lfs_favail;
simple_unlock(&fs->lfs_interlock);
(void) LFS_BWRITE_LOG(bp);
wakeup(&fs->lfs_avail);

@@ -1026,9 +1030,13 @@ lfs_fastvget(struct mount *mp, ino_t ino, daddr_t daddr, struct vnode **vpp, str
* Wait until the filesystem is fully mounted before allowing vget
* to complete. This prevents possible problems with roll-forward.
*/
simple_lock(&fs->lfs_interlock);
while (fs->lfs_flags & LFS_NOTYET) {
tsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0);
ltsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0,
&fs->lfs_interlock);
}
simple_unlock(&fs->lfs_interlock);

/*
* This is playing fast and loose. Someone may have the inode
* locked, in which case they are going to be distinctly unhappy
@@ -1166,7 +1174,9 @@ lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, caddr_t uadd
KDASSERT(bp->b_iodone == lfs_callback);

#if 0
simple_lock(&fs->lfs_interlock);
++fs->lfs_iocount;
simple_unlock(&fs->lfs_interlock);
#endif
bp->b_bufsize = size;
bp->b_bcount = size;

@@ -1,4 +1,4 @@
/* $NetBSD: lfs_vfsops.c,v 1.168 2005/03/29 02:41:06 thorpej Exp $ */
/* $NetBSD: lfs_vfsops.c,v 1.169 2005/04/01 21:59:46 perseant Exp $ */

/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.168 2005/03/29 02:41:06 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.169 2005/04/01 21:59:46 perseant Exp $");

#if defined(_KERNEL_OPT)
#include "opt_quota.h"
@@ -212,12 +212,14 @@ lfs_writerd(void *arg)
if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
MFSNAMELEN) == 0) {
fs = VFSTOUFS(mp)->um_lfs;
simple_lock(&fs->lfs_interlock);
if (fs->lfs_pdflush ||
!TAILQ_EMPTY(&fs->lfs_pchainhd)) {
DLOG((DLOG_FLUSH, "lfs_writerd: pdflush set\n"));
fs->lfs_pdflush = 0;
lfs_flush_fs(fs, 0);
}
simple_unlock(&fs->lfs_interlock);
}

simple_lock(&mountlist_slock);
@@ -1388,8 +1390,11 @@ lfs_unmount(struct mount *mp, int mntflags, struct proc *p)
fs->lfs_pflags |= LFS_PF_CLEAN;
lfs_writesuper(fs, fs->lfs_sboffs[0]);
lfs_writesuper(fs, fs->lfs_sboffs[1]);
simple_lock(&fs->lfs_interlock);
while (fs->lfs_iocount)
tsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs_umount", 0);
ltsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs_umount", 0,
&fs->lfs_interlock);
simple_unlock(&fs->lfs_interlock);

/* Finish with the Ifile, now that we're done with it */
vrele(fs->lfs_ivnode);
@@ -1418,6 +1423,9 @@ lfs_unmount(struct mount *mp, int mntflags, struct proc *p)

/*
* Get file system statistics.
*
* NB: We don't lock to access the superblock here, because it's not
* really that important if we get it wrong.
*/
int
lfs_statvfs(struct mount *mp, struct statvfs *sbp, struct proc *p)
@@ -1511,8 +1519,11 @@ lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
* If the filesystem is not completely mounted yet, suspend
* any access requests (wait for roll-forward to complete).
*/
simple_lock(&fs->lfs_interlock);
while ((fs->lfs_flags & LFS_NOTYET) && curproc->p_pid != fs->lfs_rfpid)
tsleep(&fs->lfs_flags, PRIBIO+1, "lfs_notyet", 0);
ltsleep(&fs->lfs_flags, PRIBIO+1, "lfs_notyet", 0,
&fs->lfs_interlock);
simple_unlock(&fs->lfs_interlock);

if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
return (0);
@@ -1600,6 +1611,7 @@ lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
#ifdef DEBUG
/* If the seglock is held look at the bpp to see
what is there anyway */
simple_lock(&fs->lfs_interlock);
if (fs->lfs_seglock > 0) {
struct buf **bpp;
struct ufs1_dinode *dp;
@@ -1620,11 +1632,18 @@ lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
}
}
}
simple_unlock(&fs->lfs_interlock);
#endif /* DEBUG */
panic("lfs_vget: dinode not found");
}
DLOG((DLOG_VNODE, "lfs_vget: dinode %d not found, retrying...\n", ino));
(void)tsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs ifind", 1);
simple_lock(&fs->lfs_interlock);
if (fs->lfs_iocount) {
DLOG((DLOG_VNODE, "lfs_vget: dinode %d not found, retrying...\n", ino));
(void)ltsleep(&fs->lfs_iocount, PRIBIO + 1,
"lfs ifind", 1, &fs->lfs_interlock);
} else
retries = LFS_IFIND_RETRIES;
simple_unlock(&fs->lfs_interlock);
goto again;
}
*ip->i_din.ffs1_din = *dip;
@@ -1911,6 +1930,8 @@ lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
struct segment *sp = fs->lfs_sp;
UVMHIST_FUNC("lfs_gop_write"); UVMHIST_CALLED(ubchist);

ASSERT_SEGLOCK(fs);

/* The Ifile lives in the buffer cache */
KASSERT(vp != fs->lfs_ivnode);

@@ -1930,7 +1951,7 @@ lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
*
* XXXUBC that last statement is an oversimplification of course.
*/
if (!(fs->lfs_seglock) || fs->lfs_lockpid != curproc->p_pid ||
if (!LFS_SEGLOCK_HELD(fs) ||
(ip->i_lfs_iflags & LFSI_NO_GOP_WRITE) ||
(pgs[0]->offset & fs->lfs_bmask) != 0) {
goto tryagain;
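The last hunk replaces the open-coded ownership test (a nonzero lfs_seglock owned by curproc) with the LFS_SEGLOCK_HELD() macro, matching the new LOCK_ASSERT(LFS_SEGLOCK_HELD(fs)) in lfs_segunlock(). A definition consistent with the test being removed would look roughly like the sketch below; the real macro lives in lfs.h and may differ in detail.

/* Sketch only; see lfs.h for the actual definition. */
#define LFS_SEGLOCK_HELD(fs) \
	((fs)->lfs_seglock != 0 && \
	 (fs)->lfs_lockpid == curproc->p_pid)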

@@ -2242,7 +2263,9 @@ warn_ifile_size(struct lfs *fs)
KASSERT(LFS_MAX_BYTES > 0);
if (((fs->lfs_ivnode->v_size >> fs->lfs_bshift) - fs->lfs_segtabsz) >
LFS_MAX_BUFS) {
simple_lock(&fs->lfs_interlock);
fs->lfs_flags |= LFS_WARNED;
simple_unlock(&fs->lfs_interlock);
log(LOG_WARNING, "lfs_mountfs: inode part of ifile of length %"
PRId64 " cannot fit in %d buffers\n",
fs->lfs_ivnode->v_size -
@@ -2255,7 +2278,9 @@ warn_ifile_size(struct lfs *fs)
fs->lfs_segtabsz));
} else if ((fs->lfs_ivnode->v_size >> fs->lfs_bshift) > LFS_MAX_BUFS) {
/* Same thing but LOG_NOTICE */
simple_lock(&fs->lfs_interlock);
fs->lfs_flags |= LFS_WARNED;
simple_unlock(&fs->lfs_interlock);
log(LOG_NOTICE, "lfs_mountfs: entire ifile of length %"
PRId64 " cannot fit in %d buffers\n",
fs->lfs_ivnode->v_size, LFS_MAX_BUFS);
@@ -2267,7 +2292,9 @@ warn_ifile_size(struct lfs *fs)

if (fs->lfs_ivnode->v_size - (fs->lfs_segtabsz << fs->lfs_bshift) >
LFS_MAX_BYTES) {
simple_lock(&fs->lfs_interlock);
fs->lfs_flags |= LFS_WARNED;
simple_unlock(&fs->lfs_interlock);
log(LOG_WARNING, "lfs_mountfs: inode part of ifile of length %"
PRId64 " cannot fit in %lu bytes\n",
fs->lfs_ivnode->v_size - (fs->lfs_segtabsz <<
@@ -2280,7 +2307,9 @@ warn_ifile_size(struct lfs *fs)
fs->lfs_bshift)) >>
PAGE_SHIFT);
} else if(fs->lfs_ivnode->v_size > LFS_MAX_BYTES) {
simple_lock(&fs->lfs_interlock);
fs->lfs_flags |= LFS_WARNED;
simple_unlock(&fs->lfs_interlock);
log(LOG_NOTICE, "lfs_mountfs: entire ifile of length %" PRId64
" cannot fit in %lu buffer bytes\n",
fs->lfs_ivnode->v_size, LFS_MAX_BYTES);

@@ -1,4 +1,4 @@
/* $NetBSD: lfs_vnops.c,v 1.140 2005/03/25 01:45:05 perseant Exp $ */
/* $NetBSD: lfs_vnops.c,v 1.141 2005/04/01 21:59:47 perseant Exp $ */

/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.140 2005/03/25 01:45:05 perseant Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.141 2005/04/01 21:59:47 perseant Exp $");

#include <sys/param.h>
#include <sys/systm.h>
@@ -386,6 +386,8 @@ lfs_set_dirop(struct vnode *dvp, struct vnode *vp)
KASSERT(vp == NULL || VOP_ISLOCKED(vp));

fs = VTOI(dvp)->i_lfs;

ASSERT_NO_SEGLOCK(fs);
/*
* LFS_NRESERVE calculates direct and indirect blocks as well
* as an inode block; an overestimate in most cases.
@@ -393,15 +395,16 @@ lfs_set_dirop(struct vnode *dvp, struct vnode *vp)
if ((error = lfs_reserve(fs, dvp, vp, LFS_NRESERVE(fs))) != 0)
return (error);

if (fs->lfs_dirops == 0)
lfs_check(dvp, LFS_UNUSED_LBN, 0);
restart:
restart:
simple_lock(&fs->lfs_interlock);
if (fs->lfs_writer) {
ltsleep(&fs->lfs_dirops, (PRIBIO + 1) | PNORELOCK,
"lfs_sdirop", 0, &fs->lfs_interlock);
goto restart;
if (fs->lfs_dirops == 0) {
simple_unlock(&fs->lfs_interlock);
lfs_check(dvp, LFS_UNUSED_LBN, 0);
simple_lock(&fs->lfs_interlock);
}
while (fs->lfs_writer)
ltsleep(&fs->lfs_dirops, (PRIBIO + 1), "lfs_sdirop", 0,
&fs->lfs_interlock);
simple_lock(&lfs_subsys_lock);
if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
wakeup(&lfs_writer_daemon);
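The rewritten entry path above no longer sleeps with PNORELOCK and jumps back to restart; instead it drops lfs_interlock only around lfs_check(), which may sleep, retakes the lock, and then waits out fs->lfs_writer in a plain while/ltsleep loop. A hypothetical fragment showing that drop/call/relock shape (check_something(), struct dirop_example and its fields are invented stand-ins, not LFS code):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/proc.h>

struct dirop_example {
	struct simplelock d_interlock;
	int d_active;		/* operations currently in progress */
	int d_gate_held;	/* nonzero while a flusher holds the gate */
};

/* Hypothetical helper that may sleep; stands in for lfs_check(). */
void check_something(struct dirop_example *);

static void
enter_op(struct dirop_example *d)
{
	simple_lock(&d->d_interlock);
	if (d->d_active == 0) {
		/* Never call a function that can sleep with a simple lock held. */
		simple_unlock(&d->d_interlock);
		check_something(d);
		simple_lock(&d->d_interlock);
	}
	while (d->d_gate_held)
		(void)ltsleep(&d->d_active, PRIBIO + 1, "exgate", 0,
		    &d->d_interlock);
	d->d_active++;
	simple_unlock(&d->d_interlock);
}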

@@ -458,6 +461,7 @@ lfs_set_dirop_create(struct vnode *dvp, struct vnode **vpp)
struct lfs *fs;

fs = VFSTOUFS(dvp->v_mount)->um_lfs;
ASSERT_NO_SEGLOCK(fs);
if (fs->lfs_ronly)
return EROFS;
if (vpp && (error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, vpp))) {
@@ -524,10 +528,13 @@ lfs_mark_vnode(struct vnode *vp)
struct inode *ip = VTOI(vp);
struct lfs *fs = ip->i_lfs;

simple_lock(&fs->lfs_interlock);
if (!(ip->i_flag & IN_ADIROP)) {
if (!(vp->v_flag & VDIROP)) {
(void)lfs_vref(vp);
simple_lock(&lfs_subsys_lock);
++lfs_dirvcount;
simple_unlock(&lfs_subsys_lock);
TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
vp->v_flag |= VDIROP;
}
@@ -535,6 +542,7 @@ lfs_mark_vnode(struct vnode *vp)
ip->i_flag |= IN_ADIROP;
} else
KASSERT(vp->v_flag & VDIROP);
simple_unlock(&fs->lfs_interlock);
}

void
@@ -544,7 +552,9 @@ lfs_unmark_vnode(struct vnode *vp)

if (ip->i_flag & IN_ADIROP) {
KASSERT(vp->v_flag & VDIROP);
simple_lock(&ip->i_lfs->lfs_interlock);
--ip->i_lfs->lfs_nadirop;
simple_unlock(&ip->i_lfs->lfs_interlock);
ip->i_flag &= ~IN_ADIROP;
}
}
@@ -1121,8 +1131,12 @@ lfs_strategy(void *v)
DLOG((DLOG_CLEAN,
"lfs_strategy: sleeping on ino %d lbn %"
PRId64 "\n", ip->i_number, bp->b_lblkno));
tsleep(&fs->lfs_seglock, PRIBIO+1,
"lfs_strategy", 0);
simple_lock(&fs->lfs_interlock);
if (fs->lfs_seglock)
ltsleep(&fs->lfs_seglock,
(PRIBIO + 1) | PNORELOCK,
"lfs_strategy", 0,
&fs->lfs_interlock);
/* Things may be different now; start over. */
slept = 1;
break;
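The lfs_strategy() change above also shows the PNORELOCK variant: because PNORELOCK is or'ed into the priority argument, ltsleep() does not retake lfs_interlock before returning, so the code continues without a matching simple_unlock(). A small hypothetical sketch of that usage, assuming the same headers as the earlier sketches (the names are stand-ins, not LFS fields):

struct busy_example {
	struct simplelock sc_interlock;
	int sc_busy;
};

static void
wait_not_busy(struct busy_example *sc)
{
	simple_lock(&sc->sc_interlock);
	if (sc->sc_busy)
		/* PNORELOCK: the interlock is not retaken on return. */
		(void)ltsleep(&sc->sc_busy, (PRIBIO + 1) | PNORELOCK,
		    "exbusy", 0, &sc->sc_interlock);
	else
		simple_unlock(&sc->sc_interlock);
	/* Either way the interlock is no longer held at this point. */
}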

@@ -1146,11 +1160,17 @@ lfs_flush_dirops(struct lfs *fs)
struct segment *sp;
int needunlock;

ASSERT_NO_SEGLOCK(fs);

if (fs->lfs_ronly)
return;

if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL)
simple_lock(&fs->lfs_interlock);
if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL) {
simple_unlock(&fs->lfs_interlock);
return;
} else
simple_unlock(&fs->lfs_interlock);

if (lfs_dostats)
++lfs_stats.flush_invoked;
@@ -1177,8 +1197,10 @@ lfs_flush_dirops(struct lfs *fs)
* no dirops are active.
*
*/
simple_lock(&fs->lfs_interlock);
for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
nip = TAILQ_NEXT(ip, i_lfs_dchain);
simple_unlock(&fs->lfs_interlock);
vp = ITOV(ip);

/*
@@ -1213,7 +1235,9 @@ lfs_flush_dirops(struct lfs *fs)
VOP_UNLOCK(vp, 0);
else
LFS_SET_UINO(ip, IN_MODIFIED);
simple_lock(&fs->lfs_interlock);
}
simple_unlock(&fs->lfs_interlock);
/* We've written all the dirops there are */
((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
(void) lfs_writeseg(fs, sp);
@@ -1414,6 +1438,7 @@ check_dirty(struct lfs *fs, struct vnode *vp,
int tdirty;
int pages_per_block = fs->lfs_bsize >> PAGE_SHIFT;

ASSERT_MAYBE_SEGLOCK(fs);
top:
by_list = (vp->v_uobj.uo_npages <=
((endoffset - startoffset) >> PAGE_SHIFT) *
@@ -1545,7 +1570,9 @@ check_dirty(struct lfs *fs, struct vnode *vp,
if (any_dirty) {
if (!(ip->i_flags & IN_PAGING)) {
ip->i_flags |= IN_PAGING;
simple_lock(&fs->lfs_interlock);
TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip, i_lfs_pchain);
simple_unlock(&fs->lfs_interlock);
}
}
#endif
@@ -1737,7 +1764,9 @@ lfs_putpages(void *v)
* notice the pager inode queue and act on that.
*/
if (pagedaemon) {
simple_lock(&fs->lfs_interlock);
++fs->lfs_pdflush;
simple_unlock(&fs->lfs_interlock);
wakeup(&lfs_writer_daemon);
simple_unlock(&vp->v_interlock);
return EWOULDBLOCK;
@@ -1761,7 +1790,9 @@ lfs_putpages(void *v)
if (locked)
VOP_UNLOCK(vp, 0);

simple_lock(&fs->lfs_interlock);
lfs_flush_fs(fs, sync ? SEGM_SYNC : 0);
simple_unlock(&fs->lfs_interlock);

simple_lock(&vp->v_interlock);
if (locked)
@@ -1803,7 +1834,9 @@ lfs_putpages(void *v)
* VOP_PUTPAGES should not be called while holding the seglock.
* XXXUBC fix lfs_markv, or do this properly.
*/
/* KASSERT(fs->lfs_seglock == 1); */
#ifdef notyet
KASSERT(fs->lfs_seglock == 1);
#endif /* notyet */

/*
* We assume we're being called with sp->fip pointing at blank space.

@@ -1,4 +1,4 @@
/* $NetBSD: ufs_readwrite.c,v 1.61 2005/02/26 05:40:42 perseant Exp $ */
/* $NetBSD: ufs_readwrite.c,v 1.62 2005/04/01 21:59:47 perseant Exp $ */

/*-
* Copyright (c) 1993
@@ -32,7 +32,7 @@
*/

#include <sys/cdefs.h>
__KERNEL_RCSID(1, "$NetBSD: ufs_readwrite.c,v 1.61 2005/02/26 05:40:42 perseant Exp $");
__KERNEL_RCSID(1, "$NetBSD: ufs_readwrite.c,v 1.62 2005/04/01 21:59:47 perseant Exp $");

#ifdef LFS_READWRITE
#define BLKSIZE(a, b, c) blksize(a, b, c)
@@ -283,10 +283,12 @@ WRITE(void *v)

/* Account writes. This overcounts if pages are already dirty. */
if (usepc) {
simple_lock(&fs->lfs_interlock);
simple_lock(&lfs_subsys_lock);
lfs_subsys_pages += round_page(uio->uio_resid) >> PAGE_SHIFT;
fs->lfs_pages += round_page(uio->uio_resid) >> PAGE_SHIFT;
simple_unlock(&lfs_subsys_lock);
simple_unlock(&fs->lfs_interlock);
}
lfs_check(vp, LFS_UNUSED_LBN, 0);
#endif /* !LFS_READWRITE */
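The accounting hunk in WRITE() nests the two locks: the per-filesystem lfs_interlock is taken first, the global lfs_subsys_lock second, and they are released in reverse order. Nested simple locks only stay deadlock-free if every path that takes both uses one consistent order, so the fragment below just restates the discipline shown in the hunk (npages is a hypothetical value):

	simple_lock(&fs->lfs_interlock);	/* per-filesystem lock first */
	simple_lock(&lfs_subsys_lock);		/* subsystem-wide lock second */
	lfs_subsys_pages += npages;
	fs->lfs_pages += npages;
	simple_unlock(&lfs_subsys_lock);	/* release in reverse order */
	simple_unlock(&fs->lfs_interlock);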