Change lfs from hash table to vcache.

- Change lfs_valloc() to return an inode number and version instead of
  a vnode, and move the work of lfs_ialloc() and lfs_vcreate() into the
  new lfs_init_vnode().

- Add lfs_valloc_fixed() to allocate a known inode, used by kernel
  roll forward.

- Remove lfs_vref(), lfs_vunref() and lfs_vunref_head(): these functions
  cannot coexist with vcache, and the behaviour described in their
  comments is far removed from what they actually implement.

- Add the cleaner lwp and blockinfo to struct ulfsmount so lfs_loadvnode()
  may use hints from the cleaner.

- Remove vnode locks from ulfs_lookup() like we did with ufs_lookup().
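In outline, the allocation path after this change looks like the sketch below. The vcache_new()/vn_lock() pattern is the one the diff itself uses in lfs_mkdir() and lfs_rf_valloc(); the wrapper function and its name are illustrative only, not code from this commit.

```c
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/kauth.h>

/*
 * Illustrative caller (hypothetical helper, not part of the commit):
 * creation now goes through vcache_new(), which invokes the new
 * lfs_newvnode() op; that in turn calls lfs_valloc() to pick an inode
 * number and generation, then lfs_init_vnode() to build the in-core inode.
 */
static int
example_create(struct vnode *dvp, struct vattr *vap, kauth_cred_t cred,
    struct vnode **vpp)
{
    int error;

    error = vcache_new(dvp->v_mount, dvp, vap, cred, vpp);
    if (error)
        return error;

    /* vcache hands the vnode back referenced but unlocked. */
    error = vn_lock(*vpp, LK_EXCLUSIVE);
    if (error) {
        vrele(*vpp);
        *vpp = NULL;
    }
    return error;
}
```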
hannken 2015-05-31 15:48:02 +00:00
parent cf850531be
commit d8868b1ee7
17 changed files with 425 additions and 1022 deletions
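For the cleaner-hint change above, the flow in the diff is roughly: lfs_bmapv() records the cleaner's lwp in the new um_cleaner_thread field, and lfs_fastvget() publishes the cleaner's BLOCK_INFO in um_cleaner_hint around its vcache_get() call, so lfs_loadvnode() can copy the on-disk inode from the cleaner's buffer instead of re-reading it. A stripped-down sketch, assuming the usual LFS headers (the wrapper name is made up; locking and error handling are omitted):

```c
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

#include <ufs/lfs/lfs.h>            /* BLOCK_INFO */
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>      /* struct ulfsmount, VFSTOULFS() */

static int
example_fastvget(struct mount *mp, ino_t ino, BLOCK_INFO *blkp,
    struct vnode **vpp)
{
    struct ulfsmount *ump = VFSTOULFS(mp);
    int error;

    /* Make the cleaner's block info visible to lfs_loadvnode(). */
    ump->um_cleaner_hint = blkp;
    error = vcache_get(mp, &ino, sizeof(ino), vpp);
    ump->um_cleaner_hint = NULL;
    return error;
}
```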

Makefile

@@ -1,4 +1,4 @@
-# $NetBSD: Makefile,v 1.7 2014/05/16 09:34:03 dholland Exp $
+# $NetBSD: Makefile,v 1.8 2015/05/31 15:48:02 hannken Exp $
 .include "../Makefile.inc"
@@ -13,7 +13,7 @@ SRCS= lfs_vfsops.c lfs_vnops.c lfs_subr.c lfs_alloc.c lfs_balloc.c \
 	lfs_bio.c lfs_cksum.c lfs_debug.c lfs_inode.c lfs_pages.c \
 	lfs_segment.c lfs_rename.c lfs_syscalls.c lfs_itimes.c
-SRCS+=	ulfs_bmap.c ulfs_dirhash.c ulfs_ihash.c ulfs_inode.c ulfs_lookup.c \
+SRCS+=	ulfs_bmap.c ulfs_dirhash.c ulfs_inode.c ulfs_lookup.c \
 	ulfs_snapshot.c ulfs_vfsops.c ulfs_vnops.c
 .include <bsd.kmodule.mk>

Makefile

@@ -1,4 +1,4 @@
-# $NetBSD: Makefile,v 1.11 2014/05/16 09:34:03 dholland Exp $
+# $NetBSD: Makefile,v 1.12 2015/05/31 15:48:02 hannken Exp $
 #
 .PATH: ${.CURDIR}/../../../../ufs/lfs
@@ -9,7 +9,7 @@ SRCS= lfs_alloc.c lfs_balloc.c lfs_bio.c lfs_cksum.c lfs_debug.c \
 	lfs_inode.c lfs_itimes.c lfs_pages.c lfs_rename.c lfs_rfw.c \
 	lfs_segment.c lfs_subr.c lfs_syscalls.c lfs_vfsops.c lfs_vnops.c
-SRCS+=	ulfs_bmap.c ulfs_dirhash.c ulfs_extattr.c ulfs_ihash.c \
+SRCS+=	ulfs_bmap.c ulfs_dirhash.c ulfs_extattr.c \
 	ulfs_inode.c ulfs_lookup.c ulfs_quota.c ulfs_quota1.c \
 	ulfs_quota2.c ulfs_quota1_subr.c ulfs_quota2_subr.c \
 	ulfs_snapshot.c ulfs_vfsops.c ulfs_vnops.c

files.ufs

@@ -1,4 +1,4 @@
-#	$NetBSD: files.ufs,v 1.39 2015/01/11 17:29:57 hannken Exp $
+#	$NetBSD: files.ufs,v 1.40 2015/05/31 15:48:03 hannken Exp $
 deffs	FFS
 deffs	EXT2FS
@@ -77,7 +77,6 @@ file ufs/lfs/lfs_vnops.c lfs
 file	ufs/lfs/ulfs_bmap.c		lfs
 file	ufs/lfs/ulfs_dirhash.c		lfs & lfs_dirhash
 file	ufs/lfs/ulfs_extattr.c		lfs & lfs_extattr
-file	ufs/lfs/ulfs_ihash.c		lfs
 file	ufs/lfs/ulfs_inode.c		lfs
 file	ufs/lfs/ulfs_lookup.c		lfs
 file	ufs/lfs/ulfs_quota.c		lfs & (lfs_quota | lfs_quota2)

lfs_alloc.c

@@ -1,4 +1,4 @@
-/* $NetBSD: lfs_alloc.c,v 1.119 2013/07/28 01:25:05 dholland Exp $ */
+/* $NetBSD: lfs_alloc.c,v 1.120 2015/05/31 15:48:03 hannken Exp $ */
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007 The NetBSD Foundation, Inc.
@@ -60,7 +60,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_alloc.c,v 1.119 2013/07/28 01:25:05 dholland Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_alloc.c,v 1.120 2015/05/31 15:48:03 hannken Exp $");
 #if defined(_KERNEL_OPT)
 #include "opt_quota.h"
@@ -191,14 +191,12 @@ lfs_extend_ifile(struct lfs *fs, kauth_cred_t cred)
 /* VOP_BWRITE 2i times */
 int
 lfs_valloc(struct vnode *pvp, int mode, kauth_cred_t cred,
-    struct vnode **vpp)
+    ino_t *ino, int *gen)
 {
 	struct lfs *fs;
 	struct buf *bp, *cbp;
 	struct ifile *ifp;
-	ino_t new_ino;
 	int error;
-	int new_gen;
 	CLEANERINFO *cip;

 	fs = VTOI(pvp)->i_lfs;
@@ -210,32 +208,32 @@ lfs_valloc(struct vnode *pvp, int mode, kauth_cred_t cred,
 	lfs_seglock(fs, SEGM_PROT);

 	/* Get the head of the freelist. */
-	LFS_GET_HEADFREE(fs, cip, cbp, &new_ino);
-	KASSERT(new_ino != LFS_UNUSED_INUM && new_ino != LFS_IFILE_INUM);
-	DLOG((DLOG_ALLOC, "lfs_valloc: allocate inode %lld\n",
-	     (long long)new_ino));
+	LFS_GET_HEADFREE(fs, cip, cbp, ino);
+	KASSERT(*ino != LFS_UNUSED_INUM && *ino != LFS_IFILE_INUM);
+	DLOG((DLOG_ALLOC, "lfs_valloc: allocate inode %" PRId64 "\n",
+	     *ino));

 	/*
 	 * Remove the inode from the free list and write the new start
 	 * of the free list into the superblock.
 	 */
-	CLR_BITMAP_FREE(fs, new_ino);
-	LFS_IENTRY(ifp, fs, new_ino, bp);
+	CLR_BITMAP_FREE(fs, *ino);
+	LFS_IENTRY(ifp, fs, *ino, bp);
 	if (ifp->if_daddr != LFS_UNUSED_DADDR)
-		panic("lfs_valloc: inuse inode %llu on the free list",
-		    (unsigned long long)new_ino);
+		panic("lfs_valloc: inuse inode %" PRId64 " on the free list",
+		    *ino);
 	LFS_PUT_HEADFREE(fs, cip, cbp, ifp->if_nextfree);
-	DLOG((DLOG_ALLOC, "lfs_valloc: headfree %lld -> %lld\n",
-	     (long long)new_ino, (long long)ifp->if_nextfree));
+	DLOG((DLOG_ALLOC, "lfs_valloc: headfree %" PRId64 " -> %u\n",
+	     *ino, ifp->if_nextfree));

-	new_gen = ifp->if_version; /* version was updated by vfree */
+	*gen = ifp->if_version; /* version was updated by vfree */
 	brelse(bp, 0);

 	/* Extend IFILE so that the next lfs_valloc will succeed. */
 	if (fs->lfs_freehd == LFS_UNUSED_INUM) {
 		if ((error = lfs_extend_ifile(fs, cred)) != 0) {
-			LFS_PUT_HEADFREE(fs, cip, cbp, new_ino);
+			LFS_PUT_HEADFREE(fs, cip, cbp, *ino);
 			lfs_segunlock(fs);
 			return error;
 		}
@@ -253,94 +251,54 @@ lfs_valloc(struct vnode *pvp, int mode, kauth_cred_t cred,
 	lfs_segunlock(fs);

-	return lfs_ialloc(fs, pvp, new_ino, new_gen, vpp);
+	return 0;
 }
/* /*
* Finish allocating a new inode, given an inode and generation number. * Allocate a new inode with given inode number and version.
*/ */
int int
lfs_ialloc(struct lfs *fs, struct vnode *pvp, ino_t new_ino, int new_gen, lfs_valloc_fixed(struct lfs *fs, ino_t ino, int vers)
struct vnode **vpp)
{ {
struct inode *ip; IFILE *ifp;
struct vnode *vp; struct buf *bp, *cbp;
ino_t tino, oldnext;
CLEANERINFO *cip;
ASSERT_NO_SEGLOCK(fs); /* If the Ifile is too short to contain this inum, extend it */
while (VTOI(fs->lfs_ivnode)->i_size <= (ino /
vp = *vpp; fs->lfs_ifpb + fs->lfs_cleansz + fs->lfs_segtabsz)
mutex_enter(&ulfs_hashlock); << fs->lfs_bshift) {
/* Create an inode to associate with the vnode. */ lfs_extend_ifile(fs, NOCRED);
lfs_vcreate(pvp->v_mount, new_ino, vp);
ip = VTOI(vp);
mutex_enter(&lfs_lock);
LFS_SET_UINO(ip, IN_CHANGE);
mutex_exit(&lfs_lock);
/* on-disk structure has been zeroed out by lfs_vcreate */
ip->i_din.ffs1_din->di_inumber = new_ino;
/* Note no blocks yet */
ip->i_lfs_hiblk = -1;
/* Set a new generation number for this inode. */
if (new_gen) {
ip->i_gen = new_gen;
ip->i_ffs1_gen = new_gen;
} }
/* Insert into the inode hash table. */ LFS_IENTRY(ifp, fs, ino, bp);
ulfs_ihashins(ip); oldnext = ifp->if_nextfree;
mutex_exit(&ulfs_hashlock); ifp->if_version = vers;
brelse(bp, 0);
ulfs_vinit(vp->v_mount, lfs_specop_p, lfs_fifoop_p, vpp); LFS_GET_HEADFREE(fs, cip, cbp, &ino);
vp = *vpp; if (ino) {
ip = VTOI(vp); LFS_PUT_HEADFREE(fs, cip, cbp, oldnext);
} else {
memset(ip->i_lfs_fragsize, 0, ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize)); tino = ino;
while (1) {
uvm_vnp_setsize(vp, 0); LFS_IENTRY(ifp, fs, tino, bp);
lfs_mark_vnode(vp); if (ifp->if_nextfree == ino ||
genfs_node_init(vp, &lfs_genfsops); ifp->if_nextfree == LFS_UNUSED_INUM)
vref(ip->i_devvp); break;
return (0); tino = ifp->if_nextfree;
brelse(bp, 0);
}
if (ifp->if_nextfree == LFS_UNUSED_INUM) {
brelse(bp, 0);
return ENOENT;
}
ifp->if_nextfree = oldnext;
LFS_BWRITE_LOG(bp);
} }
/* Create a new vnode/inode pair and initialize what fields we can. */ return 0;
void
lfs_vcreate(struct mount *mp, ino_t ino, struct vnode *vp)
{
struct inode *ip;
struct ulfs1_dinode *dp;
struct ulfsmount *ump;
/* Get a pointer to the private mount structure. */
ump = VFSTOULFS(mp);
ASSERT_NO_SEGLOCK(ump->um_lfs);
/* Initialize the inode. */
ip = pool_get(&lfs_inode_pool, PR_WAITOK);
memset(ip, 0, sizeof(*ip));
dp = pool_get(&lfs_dinode_pool, PR_WAITOK);
memset(dp, 0, sizeof(*dp));
ip->inode_ext.lfs = pool_get(&lfs_inoext_pool, PR_WAITOK);
memset(ip->inode_ext.lfs, 0, sizeof(*ip->inode_ext.lfs));
vp->v_data = ip;
ip->i_din.ffs1_din = dp;
ip->i_ump = ump;
ip->i_vnode = vp;
ip->i_devvp = ump->um_devvp;
ip->i_dev = ump->um_dev;
ip->i_number = dp->di_inumber = ino;
ip->i_lfs = ump->um_lfs;
ip->i_lfs_effnblks = 0;
SPLAY_INIT(&ip->i_lfs_lbtree);
ip->i_lfs_nbtree = 0;
LIST_INIT(&ip->i_lfs_segdhd);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
ulfsquota_init(ip);
#endif
} }
 #if 0
@@ -449,7 +407,7 @@ lfs_vfree(struct vnode *vp, ino_t ino, int mode)
 		wakeup(&fs->lfs_dirvcount);
 		wakeup(&lfs_dirvcount);
 		mutex_exit(&lfs_lock);
-		lfs_vunref(vp);
+		vrele(vp);

 	/*
 	 * If this inode is not going to be written any more, any

lfs_extern.h

@@ -1,4 +1,4 @@
-/* $NetBSD: lfs_extern.h,v 1.104 2015/05/31 15:45:18 hannken Exp $ */
+/* $NetBSD: lfs_extern.h,v 1.105 2015/05/31 15:48:03 hannken Exp $ */
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -122,12 +122,11 @@ extern kcondvar_t lfs_writing_cv;
 extern kcondvar_t locked_queue_cv;

 /* lfs_alloc.c */
-void lfs_vcreate(struct mount *, ino_t, struct vnode *);
-int lfs_valloc(struct vnode *, int, kauth_cred_t, struct vnode **);
+int lfs_valloc(struct vnode *, int, kauth_cred_t, ino_t *, int *);
+int lfs_valloc_fixed(struct lfs *, ino_t, int);
 int lfs_vfree(struct vnode *, ino_t, int);
 void lfs_order_freelist(struct lfs *);
 int lfs_extend_ifile(struct lfs *, kauth_cred_t);
-int lfs_ialloc(struct lfs *, struct vnode *, ino_t, int, struct vnode **);
 void lfs_orphan(struct lfs *, ino_t);

 /* lfs_balloc.c */
@@ -195,9 +194,6 @@ int lfs_match_indir(struct lfs *, struct buf *);
 int lfs_match_dindir(struct lfs *, struct buf *);
 int lfs_match_tindir(struct lfs *, struct buf *);
 void lfs_callback(struct buf *);
-int lfs_vref(struct vnode *);
-void lfs_vunref(struct vnode *);
-void lfs_vunref_head(struct vnode *);
 void lfs_acquire_finfo(struct lfs *fs, ino_t, int);
 void lfs_release_finfo(struct lfs *fs);

lfs_rfw.c

@@ -1,4 +1,4 @@
-/* $NetBSD: lfs_rfw.c,v 1.19 2015/03/28 19:24:05 maxv Exp $ */
+/* $NetBSD: lfs_rfw.c,v 1.20 2015/05/31 15:48:03 hannken Exp $ */
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_rfw.c,v 1.19 2015/03/28 19:24:05 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_rfw.c,v 1.20 2015/05/31 15:48:03 hannken Exp $");
 #if defined(_KERNEL_OPT)
 #include "opt_quota.h"
@@ -101,13 +101,10 @@ int
 lfs_rf_valloc(struct lfs *fs, ino_t ino, int vers, struct lwp *l,
 	      struct vnode **vpp)
 {
-	IFILE *ifp;
-	struct buf *bp, *cbp;
+	struct vattr va;
 	struct vnode *vp;
 	struct inode *ip;
-	ino_t tino, oldnext;
 	int error;
-	CLEANERINFO *cip;

 	ASSERT_SEGLOCK(fs); /* XXX it doesn't, really */
@ -138,70 +135,25 @@ lfs_rf_valloc(struct lfs *fs, ino_t ino, int vers, struct lwp *l,
} }
} }
/* /* Not found, create as regular file. */
* The inode is not in use. Find it on the free list. vattr_null(&va);
*/ va.va_type = VREG;
/* If the Ifile is too short to contain this inum, extend it */ va.va_mode = 0;
while (VTOI(fs->lfs_ivnode)->i_size <= (ino / va.va_fileid = ino;
fs->lfs_ifpb + fs->lfs_cleansz + fs->lfs_segtabsz) va.va_gen = vers;
<< fs->lfs_bshift) { error = vcache_new(fs->lfs_ivnode->v_mount, NULL, &va, NOCRED, &vp);
lfs_extend_ifile(fs, NOCRED); if (error)
}
LFS_IENTRY(ifp, fs, ino, bp);
oldnext = ifp->if_nextfree;
ifp->if_version = vers;
brelse(bp, 0);
LFS_GET_HEADFREE(fs, cip, cbp, &ino);
if (ino) {
LFS_PUT_HEADFREE(fs, cip, cbp, oldnext);
} else {
tino = ino;
while (1) {
LFS_IENTRY(ifp, fs, tino, bp);
if (ifp->if_nextfree == ino ||
ifp->if_nextfree == LFS_UNUSED_INUM)
break;
tino = ifp->if_nextfree;
brelse(bp, 0);
}
if (ifp->if_nextfree == LFS_UNUSED_INUM) {
brelse(bp, 0);
return ENOENT;
}
ifp->if_nextfree = oldnext;
LFS_BWRITE_LOG(bp);
}
error = lfs_ialloc(fs, fs->lfs_ivnode, ino, vers, &vp);
if (error == 0) {
/*
* Make it VREG so we can put blocks on it. We will change
* this later if it turns out to be some other kind of file.
*/
ip = VTOI(vp);
ip->i_mode = ip->i_ffs1_mode = LFS_IFREG;
ip->i_nlink = ip->i_ffs1_nlink = 1;
ulfs_vinit(vp->v_mount, lfs_specop_p, lfs_fifoop_p, &vp);
ip = VTOI(vp);
DLOG((DLOG_RF, "lfs_rf_valloc: ino %d vp %p\n", ino, vp));
/* The dirop-nature of this vnode is past */
lfs_unmark_vnode(vp);
(void)lfs_vunref(vp);
vp->v_uflag &= ~VU_DIROP;
mutex_enter(&lfs_lock);
--lfs_dirvcount;
--fs->lfs_dirvcount;
TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
wakeup(&lfs_dirvcount);
wakeup(&fs->lfs_dirvcount);
mutex_exit(&lfs_lock);
}
*vpp = vp;
return error; return error;
error = vn_lock(vp, LK_EXCLUSIVE);
if (error) {
vrele(vp);
*vpp = NULLVP;
return error;
}
ip = VTOI(vp);
ip->i_nlink = ip->i_ffs1_nlink = 1;
*vpp = vp;
return 0;
} }
/* /*

lfs_segment.c

@@ -1,4 +1,4 @@
-/* $NetBSD: lfs_segment.c,v 1.239 2015/05/31 15:44:31 hannken Exp $ */
+/* $NetBSD: lfs_segment.c,v 1.240 2015/05/31 15:48:03 hannken Exp $ */
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -60,7 +60,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.239 2015/05/31 15:44:31 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.240 2015/05/31 15:48:03 hannken Exp $");

 #define _VFS_VNODE_PRIVATE /* XXX: check for VI_MARKER, this has to go */
@@ -541,8 +541,8 @@ lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
 		}
 		mutex_exit(&mntvnode_lock);
-		if (lfs_vref(vp)) {
-			vndebug(vp,"vref");
+		if (vget(vp, LK_NOWAIT, false /* !wait */)) {
+			vndebug(vp,"vget");
 			mutex_enter(&mntvnode_lock);
 			continue;
 		}
@@ -558,7 +558,7 @@
 		if (ip->i_number != LFS_IFILE_INUM) {
 			error = lfs_writefile(fs, sp, vp);
 			if (error) {
-				lfs_vunref(vp);
+				vrele(vp);
 				if (error == EAGAIN) {
 					/*
 					 * This error from lfs_putpages
@@ -599,9 +599,9 @@
 		}
 		if (lfs_clean_vnhead && only_cleaning)
-			lfs_vunref_head(vp);
+			vrele(vp);
 		else
-			lfs_vunref(vp);
+			vrele(vp);
 		mutex_enter(&mntvnode_lock);
 	}
@@ -2755,79 +2755,6 @@ lfs_shellsort(struct buf **bp_array, int32_t *lb_array, int nmemb, int size)
 	}
 }
/*
* Call vget with LK_NOWAIT. If we are the one who is dead,
* however, we must press on. Just fake success in that case.
*/
int
lfs_vref(struct vnode *vp)
{
struct lfs *fs;
KASSERT(mutex_owned(vp->v_interlock));
fs = VTOI(vp)->i_lfs;
ASSERT_MAYBE_SEGLOCK(fs);
/*
* If we return 1 here during a flush, we risk vinvalbuf() not
* being able to flush all of the pages from this vnode, which
* will cause it to panic. So, return 0 if a flush is in progress.
*/
if (IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
++fs->lfs_flushvp_fakevref;
mutex_exit(vp->v_interlock);
return 0;
}
return vget(vp, LK_NOWAIT, false /* !wait */);
}
/*
* This is vrele except that we do not want to VOP_INACTIVE this vnode. We
* inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
*/
void
lfs_vunref(struct vnode *vp)
{
struct lfs *fs;
fs = VTOI(vp)->i_lfs;
ASSERT_MAYBE_SEGLOCK(fs);
/*
* Analogous to lfs_vref, if the node is flushing, fake it.
*/
if (IS_FLUSHING(fs, vp) && fs->lfs_flushvp_fakevref) {
--fs->lfs_flushvp_fakevref;
return;
}
/* does not call inactive XXX sure it does XXX */
vrele(vp);
}
/*
* We use this when we have vnodes that were loaded in solely for cleaning.
* There is no reason to believe that these vnodes will be referenced again
* soon, since the cleaning process is unrelated to normal filesystem
* activity. Putting cleaned vnodes at the tail of the list has the effect
* of flushing the vnode LRU. So, put vnodes that were loaded only for
* cleaning at the head of the list, instead.
*/
void
lfs_vunref_head(struct vnode *vp)
{
ASSERT_SEGLOCK(VTOI(vp)->i_lfs);
/* does not call inactive XXX sure it does XXX,
inserts non-held vnode at head of freelist */
vrele(vp);
}
 /*
  * Set up an FINFO entry for a new file.  The fip pointer is assumed to
  * point at uninitialized space.

lfs_syscalls.c

@@ -1,4 +1,4 @@
-/* $NetBSD: lfs_syscalls.c,v 1.159 2015/05/31 15:45:18 hannken Exp $ */
+/* $NetBSD: lfs_syscalls.c,v 1.160 2015/05/31 15:48:03 hannken Exp $ */
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007, 2007, 2008
@@ -61,7 +61,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.159 2015/05/31 15:45:18 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.160 2015/05/31 15:48:03 hannken Exp $");
 #ifndef LFS
 # define LFS		/* for prototypes in syscallargs.h */
@@ -85,11 +85,9 @@ __KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.159 2015/05/31 15:45:18 hannken E
 #include <ufs/lfs/lfs_kernel.h>
 #include <ufs/lfs/lfs_extern.h>

+static int lfs_fastvget(struct mount *, ino_t, BLOCK_INFO *, int,
+    struct vnode **);
 struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, void *);
-int lfs_fasthashget(dev_t, ino_t, struct vnode **);
-int lfs_fastvget(struct mount *, ino_t, BLOCK_INFO *, int, struct vnode **);
-
-pid_t lfs_cleaner_pid = 0;
 /*
  * sys_lfs_markv:
@@ -295,8 +293,7 @@ lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov,
 			 * Finish the old file, if there was one.
 			 */
 			if (vp != NULL) {
-				VOP_UNLOCK(vp);
-				lfs_vunref(vp);
+				vput(vp);
 				vp = NULL;
 				numrefed--;
 			}
@@ -466,8 +463,7 @@ lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov,
 		 * Finish the old file, if there was one
 		 */
 		if (vp != NULL) {
-			VOP_UNLOCK(vp);
-			lfs_vunref(vp);
+			vput(vp);
 			vp = NULL;
 			numrefed--;
 		}
@@ -512,8 +508,7 @@ err3:
 	 */
 	if (vp != NULL) {
-		VOP_UNLOCK(vp);
-		lfs_vunref(vp);
+		vput(vp);
 		vp = NULL;
 		--numrefed;
 	}
@@ -658,6 +653,7 @@ lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
 	struct inode *ip = NULL;
 	struct lfs *fs;
 	struct mount *mntp;
+	struct ulfsmount *ump;
 	struct vnode *vp;
 	ino_t lastino;
 	daddr_t v_daddr;
@@ -667,9 +663,14 @@ lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
 	if ((mntp = vfs_getvfs(fsidp)) == NULL)
 		return (ENOENT);

+	ump = VFSTOULFS(mntp);
 	if ((error = vfs_busy(mntp, NULL)) != 0)
 		return (error);

+	if (ump->um_cleaner_thread == NULL)
+		ump->um_cleaner_thread = curlwp;
+	KASSERT(ump->um_cleaner_thread == curlwp);
+
 	cnt = blkcnt;

 	fs = VFSTOULFS(mntp)->um_lfs;
@@ -691,8 +692,7 @@ lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
 			 * Finish the old file, if there was one.
 			 */
 			if (vp != NULL) {
-				VOP_UNLOCK(vp);
-				lfs_vunref(vp);
+				vput(vp);
 				vp = NULL;
 				numrefed--;
 			}
@@ -771,8 +771,7 @@ lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
 			 * Finish the old file, if there was one.
 			 */
 			if (vp != NULL) {
-				VOP_UNLOCK(vp);
-				lfs_vunref(vp);
+				vput(vp);
 				vp = NULL;
 				numrefed--;
 			}
@ -972,218 +971,36 @@ sys___lfs_segwait50(struct lwp *l, const struct sys___lfs_segwait50_args *uap,
} }
/* /*
* VFS_VGET call specialized for the cleaner. The cleaner already knows the * VFS_VGET call specialized for the cleaner. If the cleaner is
* daddr from the ifile, so don't look it up again. If the cleaner is
* processing IINFO structures, it may have the ondisk inode already, so * processing IINFO structures, it may have the ondisk inode already, so
* don't go retrieving it again. * don't go retrieving it again.
* *
* we lfs_vref, and it is the caller's responsibility to lfs_vunref * Return the vnode referenced and locked.
* when finished.
*/ */
int static int
lfs_fasthashget(dev_t dev, ino_t ino, struct vnode **vpp)
{
struct vnode *vp;
mutex_enter(&ulfs_ihash_lock);
if ((vp = ulfs_ihashlookup(dev, ino)) != NULL) {
mutex_enter(vp->v_interlock);
mutex_exit(&ulfs_ihash_lock);
if (vdead_check(vp, VDEAD_NOWAIT) != 0) {
DLOG((DLOG_CLEAN, "lfs_fastvget: ino %d dead\n",
ino));
lfs_stats.clean_vnlocked++;
mutex_exit(vp->v_interlock);
return EAGAIN;
}
if (lfs_vref(vp)) {
DLOG((DLOG_CLEAN, "lfs_fastvget: lfs_vref failed"
" for ino %d\n", ino));
lfs_stats.clean_inlocked++;
return EAGAIN;
}
} else {
mutex_exit(&ulfs_ihash_lock);
}
*vpp = vp;
return (0);
}
int
lfs_fastvget(struct mount *mp, ino_t ino, BLOCK_INFO *blkp, int lk_flags, lfs_fastvget(struct mount *mp, ino_t ino, BLOCK_INFO *blkp, int lk_flags,
struct vnode **vpp) struct vnode **vpp)
{ {
IFILE *ifp;
struct inode *ip;
struct ulfs1_dinode *dip, *dinp;
struct vnode *vp;
struct ulfsmount *ump; struct ulfsmount *ump;
daddr_t daddr; int error;
dev_t dev;
int error, retries;
struct buf *bp;
struct lfs *fs;
ump = VFSTOULFS(mp); ump = VFSTOULFS(mp);
dev = ump->um_dev; ump->um_cleaner_hint = blkp;
fs = ump->um_lfs; error = vcache_get(mp, &ino, sizeof(ino), vpp);
ump->um_cleaner_hint = NULL;
/* if (error)
* Wait until the filesystem is fully mounted before allowing vget
* to complete. This prevents possible problems with roll-forward.
*/
mutex_enter(&lfs_lock);
while (fs->lfs_flags & LFS_NOTYET) {
mtsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0,
&lfs_lock);
}
mutex_exit(&lfs_lock);
/*
* This is playing fast and loose. Someone may have the inode
* locked, in which case they are going to be distinctly unhappy
* if we trash something.
*/
error = lfs_fasthashget(dev, ino, vpp);
if (error != 0)
return error; return error;
else if (*vpp != NULL) {
error = vn_lock(*vpp, lk_flags); error = vn_lock(*vpp, lk_flags);
if (error) {
if (error == EBUSY) if (error == EBUSY)
error = EAGAIN; error = EAGAIN;
if (error) { vrele(*vpp);
lfs_vunref(*vpp);
*vpp = NULL; *vpp = NULL;
return error; return error;
} }
}
if (blkp != NULL && blkp->bi_lbn == LFS_UNUSED_LBN) return 0;
dinp = blkp->bi_bp;
else
dinp = NULL;
if (ino == LFS_IFILE_INUM)
daddr = fs->lfs_idaddr;
else {
LFS_IENTRY(ifp, fs, ino, bp);
daddr = ifp->if_daddr;
brelse(bp, 0);
}
if (daddr == LFS_UNUSED_DADDR)
return ENOENT;
/*
* getnewvnode(9) will call vfs_busy, which will block if the
* filesystem is being unmounted; but umount(9) is waiting for
* us because we're already holding the fs busy.
* XXXMP
*/
if (mp->mnt_iflag & IMNT_UNMOUNT) {
*vpp = NULL;
return EDEADLK;
}
error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, NULL, &vp);
if (error) {
*vpp = NULL;
return (error);
}
mutex_enter(&ulfs_hashlock);
error = lfs_fasthashget(dev, ino, vpp);
if (error != 0 || *vpp != NULL) {
mutex_exit(&ulfs_hashlock);
ungetnewvnode(vp);
return (error);
}
/* Allocate new vnode/inode. */
lfs_vcreate(mp, ino, vp);
/*
* Put it onto its hash chain and lock it so that other requests for
* this inode will block if they arrive while we are sleeping waiting
* for old data structures to be purged or for the contents of the
* disk portion of this inode to be read.
*/
ip = VTOI(vp);
ulfs_ihashins(ip);
mutex_exit(&ulfs_hashlock);
#ifdef notyet
/* Not found in the cache => this vnode was loaded only for cleaning. */
ip->i_lfs_iflags |= LFSI_BMAP;
#endif
/*
* XXX
* This may not need to be here, logically it should go down with
* the i_devvp initialization.
* Ask Kirk.
*/
ip->i_lfs = fs;
/* Read in the disk contents for the inode, copy into the inode. */
if (dinp) {
error = copyin(dinp, ip->i_din.ffs1_din, sizeof (struct ulfs1_dinode));
if (error) {
DLOG((DLOG_CLEAN, "lfs_fastvget: dinode copyin failed"
" for ino %d\n", ino));
ulfs_ihashrem(ip);
/* Unlock and discard unneeded inode. */
VOP_UNLOCK(vp);
lfs_vunref(vp);
*vpp = NULL;
return (error);
}
if (ip->i_number != ino)
panic("lfs_fastvget: I was fed the wrong inode!");
} else {
retries = 0;
again:
error = bread(ump->um_devvp, LFS_FSBTODB(fs, daddr), fs->lfs_ibsize,
0, &bp);
if (error) {
DLOG((DLOG_CLEAN, "lfs_fastvget: bread failed (%d)\n",
error));
/*
* The inode does not contain anything useful, so it
* would be misleading to leave it on its hash chain.
* Iput() will return it to the free list.
*/
ulfs_ihashrem(ip);
/* Unlock and discard unneeded inode. */
VOP_UNLOCK(vp);
lfs_vunref(vp);
*vpp = NULL;
return (error);
}
dip = lfs_ifind(ump->um_lfs, ino, bp);
if (dip == NULL) {
/* Assume write has not completed yet; try again */
brelse(bp, BC_INVAL);
++retries;
if (retries > LFS_IFIND_RETRIES)
panic("lfs_fastvget: dinode not found");
DLOG((DLOG_CLEAN, "lfs_fastvget: dinode not found,"
" retrying...\n"));
goto again;
}
*ip->i_din.ffs1_din = *dip;
brelse(bp, 0);
}
lfs_vinit(mp, &vp);
*vpp = vp;
KASSERT(VOP_ISLOCKED(vp));
return (0);
} }
/* /*

lfs_vfsops.c

@@ -1,4 +1,4 @@
-/* $NetBSD: lfs_vfsops.c,v 1.323 2015/05/31 15:44:31 hannken Exp $ */
+/* $NetBSD: lfs_vfsops.c,v 1.324 2015/05/31 15:48:03 hannken Exp $ */
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007, 2007
@@ -61,7 +61,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.323 2015/05/31 15:44:31 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.324 2015/05/31 15:48:03 hannken Exp $");
 #if defined(_KERNEL_OPT)
 #include "opt_lfs.h"
@@ -101,6 +101,7 @@ __KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.323 2015/05/31 15:44:31 hannken Exp
 #include <ufs/lfs/ulfs_quotacommon.h>
 #include <ufs/lfs/ulfs_inode.h>
 #include <ufs/lfs/ulfsmount.h>
+#include <ufs/lfs/ulfs_bswap.h>
 #include <ufs/lfs/ulfs_extern.h>

 #include <uvm/uvm.h>
@@ -151,6 +152,8 @@ struct vfsops lfs_vfsops = {
 	.vfs_statvfs = lfs_statvfs,
 	.vfs_sync = lfs_sync,
 	.vfs_vget = lfs_vget,
+	.vfs_loadvnode = lfs_loadvnode,
+	.vfs_newvnode = lfs_newvnode,
 	.vfs_fhtovp = lfs_fhtovp,
 	.vfs_vptofh = lfs_vptofh,
 	.vfs_init = lfs_init,
@@ -983,6 +986,7 @@ lfs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
 	ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
 	ump->um_lfs = fs;
 	ump->um_fstype = ULFS1;
+	/* ump->um_cleaner_thread = NULL; */
 	if (sizeof(struct lfs) < LFS_SBPAD) { /* XXX why? */
 		brelse(bp, BC_INVAL);
 		brelse(abp, BC_INVAL);
@ -1436,23 +1440,95 @@ lfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
*/ */
int int
lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp) lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
int error;
error = vcache_get(mp, &ino, sizeof(ino), vpp);
if (error)
return error;
error = vn_lock(*vpp, LK_EXCLUSIVE);
if (error) {
vrele(*vpp);
*vpp = NULL;
return error;
}
return 0;
}
/*
* Create a new vnode/inode pair and initialize what fields we can.
*/
static void
lfs_init_vnode(struct ulfsmount *ump, ino_t ino, struct vnode *vp)
{
struct inode *ip;
struct ulfs1_dinode *dp;
ASSERT_NO_SEGLOCK(ump->um_lfs);
/* Initialize the inode. */
ip = pool_get(&lfs_inode_pool, PR_WAITOK);
memset(ip, 0, sizeof(*ip));
dp = pool_get(&lfs_dinode_pool, PR_WAITOK);
memset(dp, 0, sizeof(*dp));
ip->inode_ext.lfs = pool_get(&lfs_inoext_pool, PR_WAITOK);
memset(ip->inode_ext.lfs, 0, sizeof(*ip->inode_ext.lfs));
ip->i_din.ffs1_din = dp;
ip->i_ump = ump;
ip->i_vnode = vp;
ip->i_dev = ump->um_dev;
ip->i_number = dp->di_inumber = ino;
ip->i_lfs = ump->um_lfs;
ip->i_lfs_effnblks = 0;
SPLAY_INIT(&ip->i_lfs_lbtree);
ip->i_lfs_nbtree = 0;
LIST_INIT(&ip->i_lfs_segdhd);
vp->v_tag = VT_LFS;
vp->v_op = lfs_vnodeop_p;
vp->v_data = ip;
}
/*
* Undo lfs_init_vnode().
*/
static void
lfs_deinit_vnode(struct ulfsmount *ump, struct vnode *vp)
{
struct inode *ip = VTOI(vp);
pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
pool_put(&lfs_inode_pool, ip);
vp->v_data = NULL;
}
/*
* Read an inode from disk and initialize this vnode / inode pair.
* Caller assures no other thread will try to load this inode.
*/
int
lfs_loadvnode(struct mount *mp, struct vnode *vp,
const void *key, size_t key_len, const void **new_key)
{ {
struct lfs *fs; struct lfs *fs;
struct ulfs1_dinode *dip; struct ulfs1_dinode *dip;
struct inode *ip; struct inode *ip;
struct buf *bp; struct buf *bp;
struct ifile *ifp; struct ifile *ifp;
struct vnode *vp;
struct ulfsmount *ump; struct ulfsmount *ump;
ino_t ino;
daddr_t daddr; daddr_t daddr;
dev_t dev;
int error, retries; int error, retries;
struct timespec ts; struct timespec ts;
KASSERT(key_len == sizeof(ino));
memcpy(&ino, key, key_len);
memset(&ts, 0, sizeof ts); /* XXX gcc */ memset(&ts, 0, sizeof ts); /* XXX gcc */
ump = VFSTOULFS(mp); ump = VFSTOULFS(mp);
dev = ump->um_dev;
fs = ump->um_lfs; fs = ump->um_lfs;
/* /*
@ -1465,23 +1541,6 @@ lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
&lfs_lock); &lfs_lock);
mutex_exit(&lfs_lock); mutex_exit(&lfs_lock);
retry:
if ((*vpp = ulfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
return (0);
error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, NULL, &vp);
if (error) {
*vpp = NULL;
return (error);
}
mutex_enter(&ulfs_hashlock);
if (ulfs_ihashget(dev, ino, 0) != NULL) {
mutex_exit(&ulfs_hashlock);
ungetnewvnode(vp);
goto retry;
}
/* Translate the inode number to a disk address. */ /* Translate the inode number to a disk address. */
if (ino == LFS_IFILE_INUM) if (ino == LFS_IFILE_INUM)
daddr = fs->lfs_idaddr; daddr = fs->lfs_idaddr;
@ -1495,34 +1554,27 @@ retry:
} }
brelse(bp, 0); brelse(bp, 0);
if (daddr == LFS_UNUSED_DADDR) { if (daddr == LFS_UNUSED_DADDR)
*vpp = NULLVP;
mutex_exit(&ulfs_hashlock);
ungetnewvnode(vp);
return (ENOENT); return (ENOENT);
} }
}
/* Allocate/init new vnode/inode. */ /* Allocate/init new vnode/inode. */
lfs_vcreate(mp, ino, vp); lfs_init_vnode(ump, ino, vp);
/*
* Put it onto its hash chain and lock it so that other requests for
* this inode will block if they arrive while we are sleeping waiting
* for old data structures to be purged or for the contents of the
* disk portion of this inode to be read.
*/
ip = VTOI(vp); ip = VTOI(vp);
ulfs_ihashins(ip);
mutex_exit(&ulfs_hashlock);
/* /* If the cleaner supplied the inode, use it. */
* XXX if (curlwp == ump->um_cleaner_thread && ump->um_cleaner_hint != NULL &&
* This may not need to be here, logically it should go down with ump->um_cleaner_hint->bi_lbn == LFS_UNUSED_LBN) {
* the i_devvp initialization. dip = ump->um_cleaner_hint->bi_bp;
* Ask Kirk. error = copyin(dip, ip->i_din.ffs1_din,
*/ sizeof(struct ulfs1_dinode));
ip->i_lfs = ump->um_lfs; if (error) {
lfs_deinit_vnode(ump, vp);
return error;
}
KASSERT(ip->i_number == ino);
goto out;
}
/* Read in the disk contents for the inode, copy into the inode. */ /* Read in the disk contents for the inode, copy into the inode. */
retries = 0; retries = 0;
@ -1531,15 +1583,8 @@ retry:
(fs->lfs_version == 1 ? fs->lfs_bsize : fs->lfs_ibsize), (fs->lfs_version == 1 ? fs->lfs_bsize : fs->lfs_ibsize),
0, &bp); 0, &bp);
if (error) { if (error) {
/* lfs_deinit_vnode(ump, vp);
* The inode does not contain anything useful, so it would return error;
* be misleading to leave it on its hash chain. With mode
* still zero, it will be unlinked and returned to the free
* list by vput().
*/
vput(vp);
*vpp = NULL;
return (error);
} }
dip = lfs_ifind(fs, ino, bp); dip = lfs_ifind(fs, ino, bp);
@ -1547,7 +1592,19 @@ retry:
/* Assume write has not completed yet; try again */ /* Assume write has not completed yet; try again */
brelse(bp, BC_INVAL); brelse(bp, BC_INVAL);
++retries; ++retries;
if (retries > LFS_IFIND_RETRIES) { if (retries <= LFS_IFIND_RETRIES) {
mutex_enter(&lfs_lock);
if (fs->lfs_iocount) {
DLOG((DLOG_VNODE,
"%s: dinode %d not found, retrying...\n",
__func__, ino));
(void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
"lfs ifind", 1, &lfs_lock);
} else
retries = LFS_IFIND_RETRIES;
mutex_exit(&lfs_lock);
goto again;
}
#ifdef DEBUG #ifdef DEBUG
/* If the seglock is held look at the bpp to see /* If the seglock is held look at the bpp to see
what is there anyway */ what is there anyway */
@ -1562,33 +1619,26 @@ retry:
if ((*bpp)->b_vp == fs->lfs_ivnode && if ((*bpp)->b_vp == fs->lfs_ivnode &&
bpp != fs->lfs_sp->bpp) { bpp != fs->lfs_sp->bpp) {
/* Inode block */ /* Inode block */
printf("lfs_vget: block 0x%" PRIx64 ": ", printf("%s: block 0x%" PRIx64 ": ",
(*bpp)->b_blkno); __func__, (*bpp)->b_blkno);
dp = (struct ulfs1_dinode *)(*bpp)->b_data; dp = (struct ulfs1_dinode *)
(*bpp)->b_data;
for (i = 0; i < LFS_INOPB(fs); i++) for (i = 0; i < LFS_INOPB(fs); i++)
if (dp[i].di_inumber) if (dp[i].di_inumber)
printf("%d ", dp[i].di_inumber); printf("%d ",
dp[i].di_inumber);
printf("\n"); printf("\n");
} }
} }
} }
mutex_exit(&lfs_lock); mutex_exit(&lfs_lock);
#endif /* DEBUG */ #endif /* DEBUG */
panic("lfs_vget: dinode not found"); panic("lfs_loadvnode: dinode not found");
}
mutex_enter(&lfs_lock);
if (fs->lfs_iocount) {
DLOG((DLOG_VNODE, "lfs_vget: dinode %d not found, retrying...\n", ino));
(void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
"lfs ifind", 1, &lfs_lock);
} else
retries = LFS_IFIND_RETRIES;
mutex_exit(&lfs_lock);
goto again;
} }
*ip->i_din.ffs1_din = *dip; *ip->i_din.ffs1_din = *dip;
brelse(bp, 0); brelse(bp, 0);
out:
if (fs->lfs_version > 1) { if (fs->lfs_version > 1) {
ip->i_ffs1_atime = ts.tv_sec; ip->i_ffs1_atime = ts.tv_sec;
ip->i_ffs1_atimensec = ts.tv_nsec; ip->i_ffs1_atimensec = ts.tv_nsec;
@ -1596,11 +1646,107 @@ retry:
lfs_vinit(mp, &vp); lfs_vinit(mp, &vp);
*vpp = vp; *new_key = &ip->i_number;
return 0;
}
KASSERT(VOP_ISLOCKED(vp)); /*
* Create a new inode and initialize this vnode / inode pair.
*/
int
lfs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
struct vattr *vap, kauth_cred_t cred,
size_t *key_len, const void **new_key)
{
ino_t ino;
struct inode *ip;
struct ulfsmount *ump;
struct lfs *fs;
int error, mode, gen;
return (0); KASSERT(dvp != NULL || vap->va_fileid > 0);
KASSERT(dvp != NULL && dvp->v_mount == mp);
KASSERT(vap->va_type != VNON);
*key_len = sizeof(ino);
ump = VFSTOULFS(mp);
fs = ump->um_lfs;
mode = MAKEIMODE(vap->va_type, vap->va_mode);
/*
* Allocate fresh inode. With "dvp == NULL" take the inode number
* and version from "vap".
*/
if (dvp == NULL) {
ino = vap->va_fileid;
gen = vap->va_gen;
error = lfs_valloc_fixed(fs, ino, gen);
} else {
error = lfs_valloc(dvp, mode, cred, &ino, &gen);
}
if (error)
return error;
/* Attach inode to vnode. */
lfs_init_vnode(ump, ino, vp);
ip = VTOI(vp);
mutex_enter(&lfs_lock);
LFS_SET_UINO(ip, IN_CHANGE);
mutex_exit(&lfs_lock);
/* Note no blocks yet */
ip->i_lfs_hiblk = -1;
/* Set a new generation number for this inode. */
ip->i_gen = gen;
ip->i_ffs1_gen = gen;
memset(ip->i_lfs_fragsize, 0,
ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));
/* Set uid / gid. */
if (cred == NOCRED || cred == FSCRED) {
ip->i_gid = 0;
ip->i_uid = 0;
} else {
ip->i_gid = VTOI(dvp)->i_gid;
ip->i_uid = kauth_cred_geteuid(cred);
}
DIP_ASSIGN(ip, gid, ip->i_gid);
DIP_ASSIGN(ip, uid, ip->i_uid);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
error = lfs_chkiq(ip, 1, cred, 0);
if (error) {
lfs_vfree(dvp, ino, mode);
ffs_deinit_vnode(ump, vp);
return error;
}
#endif
/* Set type and finalize. */
ip->i_flags = 0;
DIP_ASSIGN(ip, flags, 0);
ip->i_mode = mode;
DIP_ASSIGN(ip, mode, mode);
if (vap->va_rdev != VNOVAL) {
/*
* Want to be able to use this to make badblock
* inodes, so don't truncate the dev number.
*/
if (ump->um_fstype == ULFS1)
ip->i_ffs1_rdev = ulfs_rw32(vap->va_rdev,
ULFS_MPNEEDSWAP(ump));
else
ip->i_ffs2_rdev = ulfs_rw64(vap->va_rdev,
ULFS_MPNEEDSWAP(ump));
}
lfs_vinit(mp, &vp);
*new_key = &ip->i_number;
return 0;
} }
/* /*
@ -1610,11 +1756,7 @@ int
lfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp) lfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{ {
struct lfid lfh; struct lfid lfh;
struct buf *bp;
IFILE *ifp;
int32_t daddr;
struct lfs *fs; struct lfs *fs;
vnode_t *vp;
if (fhp->fid_len != sizeof(struct lfid)) if (fhp->fid_len != sizeof(struct lfid))
return EINVAL; return EINVAL;
@ -1632,17 +1774,6 @@ lfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb) fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb)
return ESTALE; return ESTALE;
mutex_enter(&ulfs_ihash_lock);
vp = ulfs_ihashlookup(VFSTOULFS(mp)->um_dev, lfh.lfid_ino);
mutex_exit(&ulfs_ihash_lock);
if (vp == NULL) {
LFS_IENTRY(ifp, fs, lfh.lfid_ino, bp);
daddr = ifp->if_daddr;
brelse(bp, 0);
if (daddr == LFS_UNUSED_DADDR)
return ESTALE;
}
return (ulfs_fhtovp(mp, &lfh.lfid_ufid, vpp)); return (ulfs_fhtovp(mp, &lfh.lfid_ufid, vpp));
} }
@ -2038,7 +2169,7 @@ lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
/* /*
* finish vnode/inode initialization. * finish vnode/inode initialization.
* used by lfs_vget and lfs_fastvget. * used by lfs_vget.
*/ */
void void
lfs_vinit(struct mount *mp, struct vnode **vpp) lfs_vinit(struct mount *mp, struct vnode **vpp)

lfs_vnops.c

@@ -1,4 +1,4 @@
-/* $NetBSD: lfs_vnops.c,v 1.271 2015/04/20 23:03:09 riastradh Exp $ */
+/* $NetBSD: lfs_vnops.c,v 1.272 2015/05/31 15:48:03 hannken Exp $ */
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -125,7 +125,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.271 2015/04/20 23:03:09 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.272 2015/05/31 15:48:03 hannken Exp $");
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -585,9 +585,7 @@ lfs_mark_vnode(struct vnode *vp)
 	if (!(ip->i_flag & IN_ADIROP)) {
 		if (!(vp->v_uflag & VU_DIROP)) {
 			mutex_exit(&lfs_lock);
-			mutex_enter(vp->v_interlock);
-			if (lfs_vref(vp) != 0)
-				panic("lfs_mark_vnode: could not vref");
+			vref(vp);
 			mutex_enter(&lfs_lock);
 			++lfs_dirvcount;
 			++fs->lfs_dirvcount;
@ -638,6 +636,7 @@ lfs_symlink(void *v)
KASSERT(vpp != NULL); KASSERT(vpp != NULL);
KASSERT(*vpp == NULL); KASSERT(*vpp == NULL);
KASSERT(ap->a_vap->va_type == VLNK);
/* XXX should handle this material another way */ /* XXX should handle this material another way */
ulr = &VTOI(ap->a_dvp)->i_crap; ulr = &VTOI(ap->a_dvp)->i_crap;
@ -649,31 +648,12 @@ lfs_symlink(void *v)
return EROFS; return EROFS;
} }
/*
* Get a new vnode *before* adjusting the dirop count, to
* avoid a deadlock in getnewvnode(), if we have a stacked
* filesystem mounted on top of us.
*
* NB: this means we have to destroy the new vnode on error.
*/
error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
if (error) {
DLOG((DLOG_ALLOC, "lfs_mkdir: dvp %p error %d\n", dvp, error));
return error;
}
KASSERT(*vpp != NULL);
error = lfs_set_dirop(dvp, NULL); error = lfs_set_dirop(dvp, NULL);
if (error) { if (error)
ungetnewvnode(*vpp);
*vpp = NULL;
return error; return error;
}
fstrans_start(dvp->v_mount, FSTRANS_SHARED); fstrans_start(dvp->v_mount, FSTRANS_SHARED);
error = ulfs_makeinode(LFS_IFLNK | ap->a_vap->va_mode, dvp, ulr, error = ulfs_makeinode(ap->a_vap, dvp, ulr, vpp, ap->a_cnp);
vpp, ap->a_cnp);
if (error) { if (error) {
goto out; goto out;
} }
@ -708,7 +688,6 @@ out:
UNMARK_VNODE(*vpp); UNMARK_VNODE(*vpp);
if (!((*vpp)->v_uflag & VU_DIROP)) { if (!((*vpp)->v_uflag & VU_DIROP)) {
KASSERT(error != 0); KASSERT(error != 0);
ungetnewvnode(*vpp);
*vpp = NULL; *vpp = NULL;
} }
else { else {
@ -734,7 +713,6 @@ lfs_mknod(void *v)
struct vattr *vap; struct vattr *vap;
struct inode *ip; struct inode *ip;
int error; int error;
struct mount *mp;
ino_t ino; ino_t ino;
struct ulfs_lookup_results *ulr; struct ulfs_lookup_results *ulr;
@ -755,38 +733,18 @@ lfs_mknod(void *v)
return EROFS; return EROFS;
} }
/*
* Get a new vnode *before* adjusting the dirop count, to
* avoid a deadlock in getnewvnode(), if we have a stacked
* filesystem mounted on top of us.
*
* NB: this means we have to destroy the new vnode on error.
*/
error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
if (error) {
DLOG((DLOG_ALLOC, "lfs_mknod: dvp %p error %d\n", dvp, error));
return error;
}
KASSERT(*vpp != NULL);
error = lfs_set_dirop(dvp, NULL); error = lfs_set_dirop(dvp, NULL);
if (error) { if (error)
ungetnewvnode(*vpp);
*vpp = NULL;
return error; return error;
}
fstrans_start(ap->a_dvp->v_mount, FSTRANS_SHARED); fstrans_start(ap->a_dvp->v_mount, FSTRANS_SHARED);
error = ulfs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode), error = ulfs_makeinode(vap, dvp, ulr, vpp, ap->a_cnp);
dvp, ulr, vpp, ap->a_cnp);
/* Either way we're done with the dirop at this point */ /* Either way we're done with the dirop at this point */
UNMARK_VNODE(dvp); UNMARK_VNODE(dvp);
UNMARK_VNODE(*vpp); UNMARK_VNODE(*vpp);
if (!((*vpp)->v_uflag & VU_DIROP)) { if (!((*vpp)->v_uflag & VU_DIROP)) {
KASSERT(error != 0); KASSERT(error != 0);
ungetnewvnode(*vpp);
*vpp = NULL; *vpp = NULL;
} }
else { else {
@ -808,23 +766,8 @@ lfs_mknod(void *v)
VN_KNOTE(dvp, NOTE_WRITE); VN_KNOTE(dvp, NOTE_WRITE);
ip = VTOI(*vpp); ip = VTOI(*vpp);
mp = (*vpp)->v_mount;
ino = ip->i_number; ino = ip->i_number;
ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
if (vap->va_rdev != VNOVAL) {
struct ulfsmount *ump = ip->i_ump;
KASSERT(fs == ip->i_lfs);
/*
* Want to be able to use this to make badblock
* inodes, so don't truncate the dev number.
*/
if (ump->um_fstype == ULFS1)
ip->i_ffs1_rdev = ulfs_rw32(vap->va_rdev,
ULFS_MPNEEDSWAP(fs));
else
ip->i_ffs2_rdev = ulfs_rw64(vap->va_rdev,
ULFS_MPNEEDSWAP(fs));
}
/* /*
* Call fsync to write the vnode so that we don't have to deal with * Call fsync to write the vnode so that we don't have to deal with
@ -839,17 +782,6 @@ lfs_mknod(void *v)
(unsigned long long)ino); (unsigned long long)ino);
/* return (error); */ /* return (error); */
} }
/*
* Remove vnode so that it will be reloaded by VFS_VGET and
* checked to see if it is an alias of an existing entry in
* the inode cache.
*/
/* Used to be vput, but that causes us to call VOP_INACTIVE twice. */
(*vpp)->v_type = VNON;
VOP_UNLOCK(*vpp);
vgone(*vpp);
error = VFS_VGET(mp, ino, vpp);
fstrans_done(ap->a_dvp->v_mount); fstrans_done(ap->a_dvp->v_mount);
if (error != 0) { if (error != 0) {
@ -895,29 +827,12 @@ lfs_create(void *v)
return EROFS; return EROFS;
} }
/*
* Get a new vnode *before* adjusting the dirop count, to
* avoid a deadlock in getnewvnode(), if we have a stacked
* filesystem mounted on top of us.
*
* NB: this means we have to destroy the new vnode on error.
*/
error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
if (error) {
DLOG((DLOG_ALLOC, "lfs_create: dvp %p error %d\n", dvp,error));
return error;
}
error = lfs_set_dirop(dvp, NULL); error = lfs_set_dirop(dvp, NULL);
if (error) { if (error)
ungetnewvnode(*vpp);
*vpp = NULL;
return error; return error;
}
fstrans_start(dvp->v_mount, FSTRANS_SHARED); fstrans_start(dvp->v_mount, FSTRANS_SHARED);
error = ulfs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode), error = ulfs_makeinode(vap, dvp, ulr, vpp, ap->a_cnp);
dvp, ulr, vpp, ap->a_cnp);
if (error) { if (error) {
fstrans_done(dvp->v_mount); fstrans_done(dvp->v_mount);
goto out; goto out;
@ -932,7 +847,6 @@ out:
UNMARK_VNODE(*vpp); UNMARK_VNODE(*vpp);
if (!((*vpp)->v_uflag & VU_DIROP)) { if (!((*vpp)->v_uflag & VU_DIROP)) {
KASSERT(error != 0); KASSERT(error != 0);
ungetnewvnode(*vpp);
*vpp = NULL; *vpp = NULL;
} }
else { else {
@ -963,7 +877,6 @@ lfs_mkdir(void *v)
struct lfs_dirtemplate dirtemplate; struct lfs_dirtemplate dirtemplate;
struct lfs_direct *newdir; struct lfs_direct *newdir;
int dirblksiz; int dirblksiz;
int dmode;
int error; int error;
dvp = ap->a_dvp; dvp = ap->a_dvp;
@ -975,6 +888,7 @@ lfs_mkdir(void *v)
dp = VTOI(dvp); dp = VTOI(dvp);
ip = NULL; ip = NULL;
KASSERT(vap->va_type == VDIR);
KASSERT(vpp != NULL); KASSERT(vpp != NULL);
KASSERT(*vpp == NULL); KASSERT(*vpp == NULL);
@ -989,25 +903,9 @@ lfs_mkdir(void *v)
} }
dirblksiz = fs->um_dirblksiz; dirblksiz = fs->um_dirblksiz;
/*
* Get a new vnode *before* adjusting the dirop count, to
* avoid a deadlock in getnewvnode(), if we have a stacked
* filesystem mounted on top of us.
*
* NB: this means we have to destroy the new vnode on error.
*/
error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
if (error) {
DLOG((DLOG_ALLOC, "lfs_mkdir: dvp %p error %d\n", dvp, error));
return error;
}
error = lfs_set_dirop(dvp, NULL); error = lfs_set_dirop(dvp, NULL);
if (error) { if (error)
ungetnewvnode(*vpp);
*vpp = NULL;
return error; return error;
}
fstrans_start(dvp->v_mount, FSTRANS_SHARED); fstrans_start(dvp->v_mount, FSTRANS_SHARED);
@ -1016,35 +914,26 @@ lfs_mkdir(void *v)
goto out; goto out;
} }
dmode = vap->va_mode & ACCESSPERMS;
dmode |= LFS_IFDIR;
/* /*
* Must simulate part of ulfs_makeinode here to acquire the inode, * Must simulate part of ulfs_makeinode here to acquire the inode,
* but not have it entered in the parent directory. The entry is * but not have it entered in the parent directory. The entry is
* made later after writing "." and ".." entries. * made later after writing "." and ".." entries.
*/ */
if ((error = lfs_valloc(dvp, dmode, cnp->cn_cred, vpp)) != 0) error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, ap->a_vpp);
if (error)
goto out; goto out;
tvp = *vpp; error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE);
ip = VTOI(tvp); if (error) {
vrele(*ap->a_vpp);
ip->i_uid = kauth_cred_geteuid(cnp->cn_cred); *ap->a_vpp = NULL;
DIP_ASSIGN(ip, uid, ip->i_uid); goto out;
ip->i_gid = dp->i_gid;
DIP_ASSIGN(ip, gid, ip->i_gid);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
if ((error = lfs_chkiq(ip, 1, cnp->cn_cred, 0))) {
lfs_vfree(tvp, ip->i_number, dmode);
fstrans_done(dvp->v_mount);
vput(tvp);
goto out2;
} }
#endif
tvp = *ap->a_vpp;
lfs_mark_vnode(tvp);
ip = VTOI(tvp);
ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
ip->i_mode = dmode;
DIP_ASSIGN(ip, mode, dmode);
tvp->v_type = VDIR; /* Rest init'd in getnewvnode(). */
ip->i_nlink = 2; ip->i_nlink = 2;
DIP_ASSIGN(ip, nlink, 2); DIP_ASSIGN(ip, nlink, 2);
if (cnp->cn_flags & ISWHITEOUT) { if (cnp->cn_flags & ISWHITEOUT) {
@ -1136,7 +1025,6 @@ out2:
UNMARK_VNODE(*vpp); UNMARK_VNODE(*vpp);
if (!((*vpp)->v_uflag & VU_DIROP)) { if (!((*vpp)->v_uflag & VU_DIROP)) {
KASSERT(error != 0); KASSERT(error != 0);
ungetnewvnode(*vpp);
*vpp = NULL; *vpp = NULL;
} }
else { else {
@@ -1815,12 +1703,12 @@ lfs_flush_pchain(struct lfs *fs)
 			mutex_exit(vp->v_interlock);
 			continue;
 		}
-		if (lfs_vref(vp))
+		if (vget(vp, LK_NOWAIT, false /* !wait */))
 			continue;
 		mutex_exit(&lfs_lock);

 		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_RETRY) != 0) {
-			lfs_vunref(vp);
+			vrele(vp);
 			mutex_enter(&lfs_lock);
 			continue;
 		}
@@ -1836,7 +1724,7 @@
 		error2 = lfs_writeinode(fs, sp, ip);

 		VOP_UNLOCK(vp);
-		lfs_vunref(vp);
+		vrele(vp);

 		if (error == EAGAIN || error2 == EAGAIN) {
 			lfs_writeseg(fs, sp);
@@ -2098,16 +1986,14 @@ segwait_common:
 		wakeup(&fs->lfs_wrappass);
 		/* Wait for the log to wrap, if asked */
 		if (*(int *)ap->a_data) {
-			mutex_enter(ap->a_vp->v_interlock);
-			if (lfs_vref(ap->a_vp) != 0)
-				panic("LFCNWRAPPASS: lfs_vref failed");
+			vref(ap->a_vp);
 			VTOI(ap->a_vp)->i_lfs_iflags |= LFSI_WRAPWAIT;
 			log(LOG_NOTICE, "LFCNPASS waiting for log wrap\n");
 			error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
 			    "segwrap", 0, &lfs_lock);
 			log(LOG_NOTICE, "LFCNPASS done waiting\n");
 			VTOI(ap->a_vp)->i_lfs_iflags &= ~LFSI_WRAPWAIT;
-			lfs_vunref(ap->a_vp);
+			vrele(ap->a_vp);
 		}
 		mutex_exit(&lfs_lock);
 		return error;

ulfs_extern.h

@@ -1,4 +1,4 @@
-/* $NetBSD: ulfs_extern.h,v 1.14 2015/03/27 17:27:56 riastradh Exp $ */
+/* $NetBSD: ulfs_extern.h,v 1.15 2015/05/31 15:48:03 hannken Exp $ */
 /* from NetBSD: ufs_extern.h,v 1.72 2012/05/09 00:21:18 riastradh Exp */
 /*-
@@ -105,15 +105,6 @@ int ulfs_bmaparray(struct vnode *, daddr_t, daddr_t *, struct indir *,
 	int *, int *, ulfs_issequential_callback_t);
 int ulfs_getlbns(struct vnode *, daddr_t, struct indir *, int *);

-/* ulfs_ihash.c */
-void ulfs_ihashinit(void);
-void ulfs_ihashreinit(void);
-void ulfs_ihashdone(void);
-struct vnode *ulfs_ihashlookup(dev_t, ino_t);
-struct vnode *ulfs_ihashget(dev_t, ino_t, int);
-void ulfs_ihashins(struct inode *);
-void ulfs_ihashrem(struct inode *);
 /* ulfs_inode.c */
 int ulfs_reclaim(struct vnode *);
 int ulfs_balloc_range(struct vnode *, off_t, off_t, kauth_cred_t, int);
@@ -166,7 +157,8 @@ int ulfs_fhtovp(struct mount *, struct ulfs_ufid *, struct vnode **);
 /* ulfs_vnops.c */
 void ulfs_vinit(struct mount *, int (**)(void *),
 	int (**)(void *), struct vnode **);
-int ulfs_makeinode(int, struct vnode *, const struct ulfs_lookup_results *,
+int ulfs_makeinode(struct vattr *vap, struct vnode *,
+	const struct ulfs_lookup_results *,
 	struct vnode **, struct componentname *);
 int ulfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
 void ulfs_gop_markupdate(struct vnode *, int);
@@ -181,7 +173,4 @@ void ulfs_snapgone(struct inode *);
 __END_DECLS

-extern kmutex_t ulfs_ihash_lock;
-extern kmutex_t ulfs_hashlock;
 #endif /* !_UFS_LFS_ULFS_EXTERN_H_ */

ulfs_ihash.c (deleted)

@@ -1,202 +0,0 @@
/* $NetBSD: ulfs_ihash.c,v 1.5 2015/04/20 13:44:16 riastradh Exp $ */
/* from NetBSD: ufs_ihash.c,v 1.31 2011/06/12 03:36:02 rmind Exp */
/*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_ihash.c 8.7 (Berkeley) 5/17/95
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_ihash.c,v 1.5 2015/04/20 13:44:16 riastradh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfs_extern.h>
/*
* Structures associated with inode cacheing.
*/
static LIST_HEAD(ihashhead, inode) *ihashtbl;
static u_long ihash; /* size of hash table - 1 */
#define INOHASH(device, inum) (((device) + (inum)) & ihash)
kmutex_t ulfs_ihash_lock;
kmutex_t ulfs_hashlock;
/*
* Initialize inode hash table.
*/
void
ulfs_ihashinit(void)
{
mutex_init(&ulfs_hashlock, MUTEX_DEFAULT, IPL_NONE);
mutex_init(&ulfs_ihash_lock, MUTEX_DEFAULT, IPL_NONE);
ihashtbl = hashinit(desiredvnodes, HASH_LIST, true, &ihash);
}
/*
* Reinitialize inode hash table.
*/
void
ulfs_ihashreinit(void)
{
struct inode *ip;
struct ihashhead *oldhash, *hash;
u_long oldmask, mask, val;
int i;
hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
mutex_enter(&ulfs_ihash_lock);
oldhash = ihashtbl;
oldmask = ihash;
ihashtbl = hash;
ihash = mask;
for (i = 0; i <= oldmask; i++) {
while ((ip = LIST_FIRST(&oldhash[i])) != NULL) {
LIST_REMOVE(ip, i_hash);
val = INOHASH(ip->i_dev, ip->i_number);
LIST_INSERT_HEAD(&hash[val], ip, i_hash);
}
}
mutex_exit(&ulfs_ihash_lock);
hashdone(oldhash, HASH_LIST, oldmask);
}
/*
* Free inode hash table.
*/
void
ulfs_ihashdone(void)
{
hashdone(ihashtbl, HASH_LIST, ihash);
mutex_destroy(&ulfs_hashlock);
mutex_destroy(&ulfs_ihash_lock);
}
/*
* Use the device/inum pair to find the incore inode, and return a pointer
* to it. If it is in core, return it, even if it is locked.
*/
struct vnode *
ulfs_ihashlookup(dev_t dev, ino_t inum)
{
struct inode *ip;
struct ihashhead *ipp;
KASSERT(mutex_owned(&ulfs_ihash_lock));
ipp = &ihashtbl[INOHASH(dev, inum)];
LIST_FOREACH(ip, ipp, i_hash) {
if (inum == ip->i_number && dev == ip->i_dev)
break;
}
if (ip)
return (ITOV(ip));
return (NULLVP);
}
/*
* Use the device/inum pair to find the incore inode, and return a pointer
* to it. If it is in core, but locked, wait for it.
*/
struct vnode *
ulfs_ihashget(dev_t dev, ino_t inum, int flags)
{
struct ihashhead *ipp;
struct inode *ip;
struct vnode *vp;
KASSERT(flags == (flags & LK_EXCLUSIVE));
loop:
mutex_enter(&ulfs_ihash_lock);
ipp = &ihashtbl[INOHASH(dev, inum)];
LIST_FOREACH(ip, ipp, i_hash) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
if (flags == 0) {
mutex_exit(&ulfs_ihash_lock);
} else {
mutex_enter(vp->v_interlock);
mutex_exit(&ulfs_ihash_lock);
if (vget(vp, 0, true /* wait */) != 0)
goto loop;
if (flags & LK_EXCLUSIVE) {
if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
vrele(vp);
goto loop;
}
}
}
return (vp);
}
}
mutex_exit(&ulfs_ihash_lock);
return (NULL);
}
/*
* Insert the inode into the hash table, and return it locked.
*/
void
ulfs_ihashins(struct inode *ip)
{
struct ihashhead *ipp;
int error __diagused;
KASSERT(mutex_owned(&ulfs_hashlock));
/* lock the inode, then put it on the appropriate hash list */
error = VOP_LOCK(ITOV(ip), LK_EXCLUSIVE);
KASSERT(error == 0);
mutex_enter(&ulfs_ihash_lock);
ipp = &ihashtbl[INOHASH(ip->i_dev, ip->i_number)];
LIST_INSERT_HEAD(ipp, ip, i_hash);
mutex_exit(&ulfs_ihash_lock);
}
/*
* Remove the inode from the hash table.
*/
void
ulfs_ihashrem(struct inode *ip)
{
mutex_enter(&ulfs_ihash_lock);
LIST_REMOVE(ip, i_hash);
mutex_exit(&ulfs_ihash_lock);
}
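
(Orientation note, not part of the change: the per-filesystem inode hash deleted above is superseded by the generic vnode cache, keyed on the bare inode number. A minimal sketch of the replacement calling convention, using the vcache calls that appear later in this diff; mp, ino and vp are placeholder names, not identifiers from this commit.)

/*
 * Old: vp = ulfs_ihashget(dev, ino, LK_EXCLUSIVE);  returned the vnode locked.
 * New: vcache_get() returns the vnode referenced but unlocked, so callers
 *      that need the lock take it themselves.
 */
struct vnode *vp;
int error;

error = vcache_get(mp, &ino, sizeof(ino), &vp);
if (error == 0)
	error = vn_lock(vp, LK_EXCLUSIVE);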

View File

@ -1,4 +1,4 @@
/* $NetBSD: ulfs_inode.c,v 1.9 2013/07/28 00:37:07 dholland Exp $ */ /* $NetBSD: ulfs_inode.c,v 1.10 2015/05/31 15:48:03 hannken Exp $ */
/* from NetBSD: ufs_inode.c,v 1.89 2013/01/22 09:39:18 dholland Exp */ /* from NetBSD: ufs_inode.c,v 1.89 2013/01/22 09:39:18 dholland Exp */
/* /*
@ -38,7 +38,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_inode.c,v 1.9 2013/07/28 00:37:07 dholland Exp $"); __KERNEL_RCSID(0, "$NetBSD: ulfs_inode.c,v 1.10 2015/05/31 15:48:03 hannken Exp $");
#if defined(_KERNEL_OPT) #if defined(_KERNEL_OPT)
#include "opt_lfs.h" #include "opt_lfs.h"
@ -150,9 +150,9 @@ ulfs_reclaim(struct vnode *vp)
lfs_update(vp, NULL, NULL, UPDATE_CLOSE); lfs_update(vp, NULL, NULL, UPDATE_CLOSE);
/* /*
* Remove the inode from its hash chain. * Remove the inode from the vnode cache.
*/ */
ulfs_ihashrem(ip); vcache_remove(vp->v_mount, &ip->i_number, sizeof(ip->i_number));
if (ip->i_devvp) { if (ip->i_devvp) {
vrele(ip->i_devvp); vrele(ip->i_devvp);
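
(Side note on the reclaim hunk above: the key handed to vcache_remove() must be byte-identical to the one the vnode was cached under, so both the load side and the reclaim side use the plain ino_t. Sketch of the pairing, identifiers as in the surrounding code:)

/* load side, e.g. lookup:   vcache_get(mp, &ino, sizeof(ino), &vp)          */
/* reclaim side, as above:   vcache_remove(vp->v_mount, &ip->i_number,
 *                                         sizeof(ip->i_number))             */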

View File

@ -1,4 +1,4 @@
/* $NetBSD: ulfs_lookup.c,v 1.23 2015/03/28 17:23:42 maxv Exp $ */ /* $NetBSD: ulfs_lookup.c,v 1.24 2015/05/31 15:48:03 hannken Exp $ */
/* from NetBSD: ufs_lookup.c,v 1.122 2013/01/22 09:39:18 dholland Exp */ /* from NetBSD: ufs_lookup.c,v 1.122 2013/01/22 09:39:18 dholland Exp */
/* /*
@ -38,7 +38,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_lookup.c,v 1.23 2015/03/28 17:23:42 maxv Exp $"); __KERNEL_RCSID(0, "$NetBSD: ulfs_lookup.c,v 1.24 2015/05/31 15:48:03 hannken Exp $");
#ifdef _KERNEL_OPT #ifdef _KERNEL_OPT
#include "opt_lfs.h" #include "opt_lfs.h"
@ -138,8 +138,7 @@ ulfs_lookup(void *v)
int numdirpasses; /* strategy for directory search */ int numdirpasses; /* strategy for directory search */
doff_t endsearch; /* offset to end directory search */ doff_t endsearch; /* offset to end directory search */
doff_t prevoff; /* previous value of ulr_offset */ doff_t prevoff; /* previous value of ulr_offset */
struct vnode *pdp; /* saved dp during symlink work */ struct vnode *tdp; /* returned by vcache_get */
struct vnode *tdp; /* returned by VFS_VGET */
doff_t enduseful; /* pointer past last used dir slot. doff_t enduseful; /* pointer past last used dir slot.
used for directory truncation. */ used for directory truncation. */
u_long bmask; /* block offset mask */ u_long bmask; /* block offset mask */
@ -566,11 +565,8 @@ found:
vref(vdp); vref(vdp);
tdp = vdp; tdp = vdp;
} else { } else {
if (flags & ISDOTDOT) error = vcache_get(vdp->v_mount,
VOP_UNLOCK(vdp); /* race to get the inode */ &foundino, sizeof(foundino), &tdp);
error = VFS_VGET(vdp->v_mount, foundino, &tdp);
if (flags & ISDOTDOT)
vn_lock(vdp, LK_EXCLUSIVE | LK_RETRY);
if (error) if (error)
goto out; goto out;
} }
@ -579,10 +575,7 @@ found:
*/ */
error = VOP_ACCESS(vdp, VWRITE, cred); error = VOP_ACCESS(vdp, VWRITE, cred);
if (error) { if (error) {
if (dp->i_number == foundino)
vrele(tdp); vrele(tdp);
else
vput(tdp);
goto out; goto out;
} }
/* /*
@ -596,10 +589,7 @@ found:
tdp, vdp, genfs_can_sticky(cred, dp->i_uid, tdp, vdp, genfs_can_sticky(cred, dp->i_uid,
VTOI(tdp)->i_uid)); VTOI(tdp)->i_uid));
if (error) { if (error) {
if (dp->i_number == foundino)
vrele(tdp); vrele(tdp);
else
vput(tdp);
error = EPERM; error = EPERM;
goto out; goto out;
} }
@ -627,11 +617,8 @@ found:
error = EISDIR; error = EISDIR;
goto out; goto out;
} }
if (flags & ISDOTDOT) error = vcache_get(vdp->v_mount,
VOP_UNLOCK(vdp); /* race to get the inode */ &foundino, sizeof(foundino), &tdp);
error = VFS_VGET(vdp->v_mount, foundino, &tdp);
if (flags & ISDOTDOT)
vn_lock(vdp, LK_EXCLUSIVE | LK_RETRY);
if (error) if (error)
goto out; goto out;
*vpp = tdp; *vpp = tdp;
@ -639,39 +626,12 @@ found:
goto out; goto out;
} }
/* if (dp->i_number == foundino) {
* Step through the translation in the name. We do not `vput' the
* directory because we may need it again if a symbolic link
* is relative to the current directory. Instead we save it
* unlocked as "pdp". We must get the target inode before unlocking
* the directory to insure that the inode will not be removed
* before we get it. We prevent deadlock by always fetching
* inodes from the root, moving down the directory tree. Thus
* when following backward pointers ".." we must unlock the
* parent directory before getting the requested directory.
* There is a potential race condition here if both the current
* and parent directories are removed before the VFS_VGET for the
* inode associated with ".." returns. We hope that this occurs
* infrequently since we cannot avoid this race condition without
* implementing a sophisticated deadlock detection algorithm.
* Note also that this simple deadlock detection scheme will not
* work if the file system has any hard links other than ".."
* that point backwards in the directory structure.
*/
pdp = vdp;
if (flags & ISDOTDOT) {
VOP_UNLOCK(pdp); /* race to get the inode */
error = VFS_VGET(vdp->v_mount, foundino, &tdp);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
goto out;
}
*vpp = tdp;
} else if (dp->i_number == foundino) {
vref(vdp); /* we want ourself, ie "." */ vref(vdp); /* we want ourself, ie "." */
*vpp = vdp; *vpp = vdp;
} else { } else {
error = VFS_VGET(vdp->v_mount, foundino, &tdp); error = vcache_get(vdp->v_mount,
&foundino, sizeof(foundino), &tdp);
if (error) if (error)
goto out; goto out;
*vpp = tdp; *vpp = tdp;
@ -684,8 +644,6 @@ found:
error = 0; error = 0;
out: out:
if (error == 0 && *vpp != vdp)
VOP_UNLOCK(*vpp);
fstrans_done(vdp->v_mount); fstrans_done(vdp->v_mount);
return error; return error;
} }
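
(The net effect on ulfs_lookup(), sketched for reference; the fragments below are reassembled from the hunks above, not new API. Because vcache_get() hands back an unlocked, referenced vnode, the unlock/relock dance around ".." disappears and error paths drop the child with vrele() instead of vput().)

/* Old pattern, per the removed lines: */
if (flags & ISDOTDOT)
	VOP_UNLOCK(vdp);		/* race to get the inode */
error = VFS_VGET(vdp->v_mount, foundino, &tdp);	/* tdp came back locked */
if (flags & ISDOTDOT)
	vn_lock(vdp, LK_EXCLUSIVE | LK_RETRY);

/* New pattern: no parent/child lock ordering issue can arise here, and the
 * caller, not the lookup, locks the result when it needs to. */
error = vcache_get(vdp->v_mount, &foundino, sizeof(foundino), &tdp);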

View File

@ -1,4 +1,4 @@
/* $NetBSD: ulfs_vfsops.c,v 1.8 2013/06/08 02:12:56 dholland Exp $ */ /* $NetBSD: ulfs_vfsops.c,v 1.9 2015/05/31 15:48:03 hannken Exp $ */
/* from NetBSD: ufs_vfsops.c,v 1.52 2013/01/22 09:39:18 dholland Exp */ /* from NetBSD: ufs_vfsops.c,v 1.52 2013/01/22 09:39:18 dholland Exp */
/* /*
@ -38,7 +38,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_vfsops.c,v 1.8 2013/06/08 02:12:56 dholland Exp $"); __KERNEL_RCSID(0, "$NetBSD: ulfs_vfsops.c,v 1.9 2015/05/31 15:48:03 hannken Exp $");
#if defined(_KERNEL_OPT) #if defined(_KERNEL_OPT)
#include "opt_lfs.h" #include "opt_lfs.h"
@ -221,6 +221,8 @@ ulfs_fhtovp(struct mount *mp, struct ulfs_ufid *ufhp, struct vnode **vpp)
int error; int error;
if ((error = VFS_VGET(mp, ufhp->ufid_ino, &nvp)) != 0) { if ((error = VFS_VGET(mp, ufhp->ufid_ino, &nvp)) != 0) {
if (error == ENOENT)
error = ESTALE;
*vpp = NULLVP; *vpp = NULLVP;
return (error); return (error);
} }
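
(Reassembled from the flattened hunk above, the resulting ulfs_fhtovp() check reads as follows; the translation is needed because the vcache-backed VFS_VGET() reports a nonexistent or since-freed inode as ENOENT, while an NFS client presenting such a file handle must see ESTALE.)

if ((error = VFS_VGET(mp, ufhp->ufid_ino, &nvp)) != 0) {
	if (error == ENOENT)
		error = ESTALE;		/* stale file handle, not a plain miss */
	*vpp = NULLVP;
	return (error);
}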
@ -247,7 +249,6 @@ ulfs_init(void)
ulfs_direct_cache = pool_cache_init(sizeof(struct lfs_direct), 0, 0, 0, ulfs_direct_cache = pool_cache_init(sizeof(struct lfs_direct), 0, 0, 0,
"ulfsdir", NULL, IPL_NONE, NULL, NULL, NULL); "ulfsdir", NULL, IPL_NONE, NULL, NULL, NULL);
ulfs_ihashinit();
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2) #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
lfs_dqinit(); lfs_dqinit();
#endif #endif
@ -262,7 +263,7 @@ ulfs_init(void)
void void
ulfs_reinit(void) ulfs_reinit(void)
{ {
ulfs_ihashreinit();
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2) #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
lfs_dqreinit(); lfs_dqreinit();
#endif #endif
@ -277,7 +278,6 @@ ulfs_done(void)
if (--ulfs_initcount > 0) if (--ulfs_initcount > 0)
return; return;
ulfs_ihashdone();
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2) #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
lfs_dqdone(); lfs_dqdone();
#endif #endif

View File

@ -1,4 +1,4 @@
/* $NetBSD: ulfs_vnops.c,v 1.25 2015/04/20 23:03:09 riastradh Exp $ */ /* $NetBSD: ulfs_vnops.c,v 1.26 2015/05/31 15:48:03 hannken Exp $ */
/* from NetBSD: ufs_vnops.c,v 1.213 2013/06/08 05:47:02 kardel Exp */ /* from NetBSD: ufs_vnops.c,v 1.213 2013/06/08 05:47:02 kardel Exp */
/*- /*-
@ -67,7 +67,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_vnops.c,v 1.25 2015/04/20 23:03:09 riastradh Exp $"); __KERNEL_RCSID(0, "$NetBSD: ulfs_vnops.c,v 1.26 2015/05/31 15:48:03 hannken Exp $");
#if defined(_KERNEL_OPT) #if defined(_KERNEL_OPT)
#include "opt_lfs.h" #include "opt_lfs.h"
@ -1174,39 +1174,27 @@ ulfs_vinit(struct mount *mntp, int (**specops)(void *), int (**fifoops)(void *),
* Allocate a new inode. * Allocate a new inode.
*/ */
int int
ulfs_makeinode(int mode, struct vnode *dvp, const struct ulfs_lookup_results *ulr, ulfs_makeinode(struct vattr *vap, struct vnode *dvp,
const struct ulfs_lookup_results *ulr,
struct vnode **vpp, struct componentname *cnp) struct vnode **vpp, struct componentname *cnp)
{ {
struct inode *ip, *pdir; struct inode *ip;
struct lfs_direct *newdir; struct lfs_direct *newdir;
struct vnode *tvp; struct vnode *tvp;
int error; int error;
pdir = VTOI(dvp); error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, &tvp);
if (error)
if ((mode & LFS_IFMT) == 0) return error;
mode |= LFS_IFREG; error = vn_lock(tvp, LK_EXCLUSIVE);
if (error) {
if ((error = lfs_valloc(dvp, mode, cnp->cn_cred, vpp)) != 0) { vrele(tvp);
return (error); return error;
} }
tvp = *vpp; lfs_mark_vnode(tvp);
*vpp = tvp;
ip = VTOI(tvp); ip = VTOI(tvp);
ip->i_gid = pdir->i_gid;
DIP_ASSIGN(ip, gid, ip->i_gid);
ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
DIP_ASSIGN(ip, uid, ip->i_uid);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
if ((error = lfs_chkiq(ip, 1, cnp->cn_cred, 0))) {
lfs_vfree(tvp, ip->i_number, mode);
vput(tvp);
return (error);
}
#endif
ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
ip->i_mode = mode;
DIP_ASSIGN(ip, mode, mode);
tvp->v_type = IFTOVT(mode); /* Rest init'd in getnewvnode(). */
ip->i_nlink = 1; ip->i_nlink = 1;
DIP_ASSIGN(ip, nlink, 1); DIP_ASSIGN(ip, nlink, 1);
@ -1214,7 +1202,7 @@ ulfs_makeinode(int mode, struct vnode *dvp, const struct ulfs_lookup_results *ul
if (ip->i_mode & ISGID) { if (ip->i_mode & ISGID) {
error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_WRITE_SECURITY, error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_WRITE_SECURITY,
tvp, NULL, genfs_can_chmod(tvp->v_type, cnp->cn_cred, ip->i_uid, tvp, NULL, genfs_can_chmod(tvp->v_type, cnp->cn_cred, ip->i_uid,
ip->i_gid, mode)); ip->i_gid, MAKEIMODE(vap->va_type, vap->va_mode)));
if (error) { if (error) {
ip->i_mode &= ~ISGID; ip->i_mode &= ~ISGID;
DIP_ASSIGN(ip, mode, ip->i_mode); DIP_ASSIGN(ip, mode, ip->i_mode);
@ -1250,7 +1238,6 @@ ulfs_makeinode(int mode, struct vnode *dvp, const struct ulfs_lookup_results *ul
ip->i_flag |= IN_CHANGE; ip->i_flag |= IN_CHANGE;
/* If IN_ADIROP, account for it */ /* If IN_ADIROP, account for it */
lfs_unmark_vnode(tvp); lfs_unmark_vnode(tvp);
tvp->v_type = VNON; /* explodes later if VBLK */
vput(tvp); vput(tvp);
return (error); return (error);
} }
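
(For illustration, a hypothetical caller of the new ulfs_makeinode() signature; only the prototype and the behaviour shown in the hunks above come from this commit, the function below is made up. The attribute template now travels down intact instead of a pre-cooked integer mode, and inode allocation plus uid/gid/mode setup moved into vcache_new().)

static int
example_create(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct vattr *vap,
    const struct ulfs_lookup_results *ulr)
{

	/*
	 * ulfs_makeinode() passes vap to vcache_new(), which allocates the
	 * on-disk inode and does the uid/gid/mode initialisation that used
	 * to be open-coded here; the vnode comes back unlocked and is locked
	 * and lfs_mark_vnode()'d inside ulfs_makeinode() itself.  Where the
	 * old code compared against "mode", the new code rebuilds it as
	 * MAKEIMODE(vap->va_type, vap->va_mode), assumed to be the usual
	 * (VTTOIF(type) | mode) macro.
	 */
	return ulfs_makeinode(vap, dvp, ulr, vpp, cnp);
}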

View File

@ -1,4 +1,4 @@
/* $NetBSD: ulfsmount.h,v 1.12 2013/07/28 01:10:49 dholland Exp $ */ /* $NetBSD: ulfsmount.h,v 1.13 2015/05/31 15:48:03 hannken Exp $ */
/* from NetBSD: ufsmount.h,v 1.39 2012/10/19 17:09:08 drochner Exp */ /* from NetBSD: ufsmount.h,v 1.39 2012/10/19 17:09:08 drochner Exp */
/* /*
@ -73,6 +73,11 @@ struct ulfsmount {
/* Extended attribute information. */ /* Extended attribute information. */
struct ulfs_extattr_per_mount um_extattr; struct ulfs_extattr_per_mount um_extattr;
/* Cleaner lwp, set on first bmapv syscall. */
struct lwp *um_cleaner_thread;
/* Hint from cleaner, only valid if curlwp == um_cleaner_thread. */
BLOCK_INFO *um_cleaner_hint;
/* Quota-related material. */ /* Quota-related material. */
struct vnode *um_quotas[ULFS_MAXQUOTAS]; /* quota files */ struct vnode *um_quotas[ULFS_MAXQUOTAS]; /* quota files */
kauth_cred_t um_cred[ULFS_MAXQUOTAS]; /* quota file access cred */ kauth_cred_t um_cred[ULFS_MAXQUOTAS]; /* quota file access cred */
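
(Sketch of the intended consumer of the two new ulfsmount fields; lfs_loadvnode() itself is outside this excerpt, so everything below except the field names is assumed. The hint is only trusted when the current lwp is the one recorded at bmapv time.)

struct ulfsmount *ump = VFSTOULFS(mp);	/* assumed accessor for mp->mnt_data */
BLOCK_INFO *hint = NULL;

if (curlwp == ump->um_cleaner_thread)
	hint = ump->um_cleaner_hint;	/* block data staged by the cleaner */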