/*	$NetBSD: lfs_inode.c,v 1.50 2000/12/03 05:56:27 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_inode.c	8.9 (Berkeley) 5/8/95
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

extern int locked_queue_count;
extern long locked_queue_bytes;

static int lfs_update_seguse(struct lfs *, long, size_t);
static int lfs_indirtrunc(struct inode *, ufs_daddr_t, ufs_daddr_t,
			  ufs_daddr_t, int, long *, long *, long *, size_t *,
			  struct proc *);
static int lfs_blkfree(struct lfs *, daddr_t, size_t, long *, size_t *);
static int lfs_vtruncbuf(struct vnode *, daddr_t, int, int);

/* Search a block for a specific dinode. */
struct dinode *
lfs_ifind(fs, ino, bp)
	struct lfs *fs;
	ino_t ino;
	struct buf *bp;
{
	int cnt;
	struct dinode *dip = (struct dinode *)bp->b_data;
	struct dinode *ldip;

	for (cnt = INOPB(fs), ldip = dip + (cnt - 1); cnt--; --ldip)
		if (ldip->di_inumber == ino)
			return (ldip);

	printf("offset is 0x%x (seg %d)\n", fs->lfs_offset,
	       datosn(fs, fs->lfs_offset));
	printf("block is 0x%x (seg %d)\n", bp->b_blkno,
	       datosn(fs, bp->b_blkno));
	panic("lfs_ifind: dinode %u not found", ino);
	/* NOTREACHED */
}

int
lfs_update(v)
	void *v;
{
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timespec *a_access;
		struct timespec *a_modify;
		int a_flags;
	} */ *ap = v;
	struct inode *ip;
	struct vnode *vp = ap->a_vp;
	int oflag;
	struct timespec ts;
	struct lfs *fs = VFSTOUFS(vp->v_mount)->um_lfs;

	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	ip = VTOI(vp);

	/*
	 * If we are called from vinvalbuf, and the file's blocks have
	 * already been scheduled for writing, but the writes have not
	 * yet completed, lfs_vflush will not be called, and vinvalbuf
	 * will cause a panic.  So, if we are called with UPDATE_WAIT
	 * set, we must wait until any pending write for our inode
	 * completes.
	 */
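	/*
	 * The mask test below is true only when UPDATE_WAIT is set and
	 * UPDATE_DIROP is clear: masking a_flags with
	 * (UPDATE_WAIT|UPDATE_DIROP) and comparing the result against
	 * UPDATE_WAIT means an update that is part of a directory
	 * operation does not block here.
	 */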
	while ((ap->a_flags & (UPDATE_WAIT|UPDATE_DIROP)) == UPDATE_WAIT &&
	    WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
		printf("lfs_update: sleeping on inode %d (in-progress)\n",
		       ip->i_number);
#endif
		tsleep(vp, (PRIBIO+1), "lfs_update", 0);
	}
	oflag = ip->i_flag;
	TIMEVAL_TO_TIMESPEC(&time, &ts);
	LFS_ITIMES(ip,
		   ap->a_access ? ap->a_access : &ts,
		   ap->a_modify ? ap->a_modify : &ts, &ts);
	if ((ip->i_flag & (IN_MODIFIED | IN_ACCESSED | IN_CLEANING)) == 0) {
		return (0);
	}

	/* If sync, push back the vnode and any dirty blocks it may have. */
	if ((ap->a_flags & (UPDATE_WAIT|UPDATE_DIROP)) == UPDATE_WAIT) {
		/* Avoid flushing VDIROP. */
		++fs->lfs_diropwait;
		while (vp->v_flag & VDIROP) {
#ifdef DEBUG_LFS
			printf("lfs_update: sleeping on inode %d (dirops)\n",
			       ip->i_number);
			printf("lfs_update: vflags 0x%lx, iflags 0x%x\n",
			       vp->v_flag, ip->i_flag);
#endif
			if (fs->lfs_dirops == 0)
				lfs_flush_fs(fs, SEGM_SYNC);
			else
				tsleep(&fs->lfs_writer, PRIBIO+1, "lfs_fsync",
				       0);
			/* XXX KS - by falling out here, are we writing the vn
			   twice? */
		}
		--fs->lfs_diropwait;
		return lfs_vflush(vp);
	}
	return 0;
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
/* VOP_BWRITE 1 + NIADDR + VOP_BALLOC == 2 + 2*NIADDR times */
int
lfs_truncate(v)
	void *v;
{
	struct vop_truncate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		int a_flags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *ovp = ap->a_vp;
	ufs_daddr_t lastblock;
	struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t newblks[NDADDR + NIADDR];
	off_t length = ap->a_length;
	struct lfs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, rcount, nblocks, blocksreleased = 0, real_released = 0;
	int i;
	int aflags, error, allerror = 0;
	off_t osize;
	long lastseg;
	size_t bc;
	int obufsize, odb;

	if (length < 0)
		return (EINVAL);
	oip = VTOI(ovp);

	/*
	 * Just return without updating modification times.
	 */
	if (oip->i_ffs_size == length)
		return (0);
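
	/*
	 * A symlink short enough to have been stored in the inode itself
	 * (or, on a filesystem without fast symlinks, one with no blocks
	 * allocated) owns no disk blocks, so there is nothing to free:
	 * just clear the in-inode copy of the target below.
	 */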
	if (ovp->v_type == VLNK &&
	    (oip->i_ffs_size < ovp->v_mount->mnt_maxsymlinklen ||
	     (ovp->v_mount->mnt_maxsymlinklen == 0 &&
	      oip->i_din.ffs_din.di_blocks == 0))) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("lfs_truncate: partial truncate of symlink");
#endif
		memset((char *)&oip->i_ffs_shortlink, 0, (u_int)oip->i_ffs_size);
		oip->i_ffs_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (VOP_UPDATE(ovp, NULL, NULL, 0));
	}
	if (oip->i_ffs_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (VOP_UPDATE(ovp, NULL, NULL, 0));
	}
#ifdef QUOTA
	if ((error = getinoquota(oip)) != 0)
		return (error);
#endif
	fs = oip->i_lfs;
	lfs_imtime(fs);
	osize = oip->i_ffs_size;
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;

	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > fs->lfs_maxfilesize)
			return (EFBIG);
		aflags = B_CLRBUF;
		if (ap->a_flags & IO_SYNC)
			aflags |= B_SYNC;
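		/*
		 * Reserve the worst-case number of disk blocks the
		 * allocation below may dirty (NIADDR + 2 filesystem
		 * blocks, converted to disk blocks) before calling
		 * VOP_BALLOC.  If that much space is not immediately
		 * available, lfs_reserve() sleeps until the cleaner has
		 * made room; the matching call with a negative count
		 * afterwards releases the reservation.
		 */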
		error = lfs_reserve(fs, ovp, fsbtodb(fs, NIADDR + 2));
		if (error)
			return (error);
		error = VOP_BALLOC(ovp, length - 1, 1, ap->a_cred, aflags, &bp);
		lfs_reserve(fs, ovp, -fsbtodb(fs, NIADDR + 2));
		if (error)
			return (error);
		oip->i_ffs_size = length;
		uvm_vnp_setsize(ovp, length);
		(void) VOP_BWRITE(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (VOP_UPDATE(ovp, NULL, NULL, 0));
	}

	if ((error = lfs_reserve(fs, ovp, fsbtodb(fs, 2 * NIADDR + 3))) != 0)
		return (error);
	/*
	 * Make sure no writes to this inode can happen while we're
	 * truncating.  Otherwise, blocks which are accounted for on the
	 * inode *and* which have been created for cleaning can coexist,
	 * and cause an overcounting.
	 *
	 * (We don't need to *hold* the seglock, though, because we already
	 * hold the inode lock; draining the seglock is sufficient.)
	 */
	if (ovp != fs->lfs_unlockvp) {
		while (fs->lfs_seglock) {
			tsleep(&fs->lfs_seglock, PRIBIO+1, "lfs_truncate", 0);
		}
	}

	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zeroed in case it ever becomes accessible again because
	 * of subsequent file growth.  Directories, however, are not
	 * zeroed, as they should grow back initialized to empty.
	 */
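	/*
	 * For example, with an 8 KB block size, truncating to length
	 * 10000 gives offset = blkoff(fs, 10000) = 10000 % 8192 = 1808,
	 * so bytes 1808 through the end of that block are cleared below,
	 * while truncating to 16384 gives offset 0 and no partial block
	 * to zero.
	 */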
	offset = blkoff(fs, length);
	lastseg = -1;
	bc = 0;
	if (offset == 0) {
		oip->i_ffs_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (ap->a_flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, ap->a_cred, aflags, &bp);
		if (error) {
			lfs_reserve(fs, ovp, -fsbtodb(fs, 2 * NIADDR + 3));
			return (error);
		}
		obufsize = bp->b_bufsize;
		odb = btodb(bp->b_bcount);
		oip->i_ffs_size = length;
		size = blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			memset((char *)bp->b_data + offset, 0,
			       (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_flags & B_DELWRI) {
			if ((bp->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED)
				locked_queue_bytes -= obufsize - bp->b_bufsize;
			fs->lfs_avail += odb - btodb(size);
		}
		(void) VOP_BWRITE(bp);
	}
	uvm_vnp_setsize(ovp, length);

	/*
	 * Calculate the indices into the inode's block list of the
	 * last direct and indirect blocks (if any) that we want to
	 * keep.  lastblock is -1 when the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->lfs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->lfs_bsize);
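	/*
	 * Worked example, assuming an 8 KB block size, NDADDR == 12 and
	 * NINDIR(fs) == 2048: truncating to length 100000 gives
	 * lastblock = (100000 + 8191) / 8192 - 1 = 12, so all twelve
	 * direct blocks are kept; lastiblock[SINGLE] = 12 - 12 = 0, so
	 * only the first entry of the single indirect block is kept;
	 * lastiblock[DOUBLE] and lastiblock[TRIPLE] are negative, so the
	 * double and triple indirect trees are released entirely.
	 */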
	/*
	 * Record changed file and block pointers before we start
	 * freeing blocks.  lastiblock values are also normalized to -1
	 * for calls to lfs_indirtrunc below.
	 */
	memcpy((caddr_t)newblks, (caddr_t)&oip->i_ffs_db[0], sizeof newblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			newblks[NDADDR + level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		newblks[i] = 0;

	oip->i_ffs_size = osize;
	error = lfs_vtruncbuf(ovp, lastblock + 1, 0, 0);
	if (error && !allerror)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
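	/*
	 * These negative logical block numbers name the indirect blocks
	 * themselves; with NDADDR == 12 and NINDIR(fs) == 2048 they work
	 * out to -12 for the single indirect block, -2061 for the double
	 * and -4196366 for the triple.
	 */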
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ffs_ib[level];
		if (bn != 0) {
			error = lfs_indirtrunc(oip, indir_lbn[level],
					       bn, lastiblock[level],
					       level, &count, &rcount,
					       &lastseg, &bc, ap->a_p);
			if (error)
				allerror = error;
			real_released += rcount;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				if (oip->i_ffs_ib[level] > 0)
					real_released += nblocks;
				blocksreleased += nblocks;
				oip->i_ffs_ib[level] = 0;
				lfs_blkfree(fs, bn, fs->lfs_bsize, &lastseg, &bc);
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_ffs_db[i];
		if (bn == 0)
			continue;
		bsize = blksize(fs, oip, i);
		if (oip->i_ffs_db[i] > 0)
			real_released += btodb(bsize);
		blocksreleased += btodb(bsize);
		oip->i_ffs_db[i] = 0;
		lfs_blkfree(fs, bn, bsize, &lastseg, &bc);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_ffs_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_ffs_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			lfs_blkfree(fs, bn, oldspace - newspace, &lastseg, &bc);
			if (bn > 0)
				real_released += btodb(oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
2000-06-28 00:57:11 +04:00
|
|
|
|
|
|
|
done:
|
|
|
|
/* Finish segment accounting corrections */
|
|
|
|
lfs_update_seguse(fs, lastseg, bc);
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
for (level = SINGLE; level <= TRIPLE; level++)
|
|
|
|
if (newblks[NDADDR + level] != oip->i_ffs_ib[level])
|
|
|
|
panic("lfs itrunc1");
|
|
|
|
for (i = 0; i < NDADDR; i++)
|
|
|
|
if (newblks[i] != oip->i_ffs_db[i])
|
|
|
|
panic("lfs itrunc2");
|
|
|
|
if (length == 0 &&
|
|
|
|
(!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
|
|
|
|
panic("lfs itrunc3");
|
|
|
|
#endif /* DIAGNOSTIC */
|
|
|
|
/*
|
|
|
|
* Put back the real size.
|
|
|
|
*/
|
|
|
|
oip->i_ffs_size = length;
|
2000-07-03 05:45:46 +04:00
|
|
|
oip->i_lfs_effnblks -= blocksreleased;
|
|
|
|
oip->i_ffs_blocks -= real_released;
|
2000-06-28 00:57:11 +04:00
|
|
|
fs->lfs_bfree += blocksreleased;
|
|
|
|
#ifdef DIAGNOSTIC
|
2000-10-15 03:22:14 +04:00
|
|
|
if (oip->i_ffs_size == 0 && oip->i_ffs_blocks != 0) {
|
|
|
|
printf("lfs_truncate: truncate to 0 but %d blocks on inode\n",
|
2000-06-28 00:57:11 +04:00
|
|
|
oip->i_ffs_blocks);
|
|
|
|
panic("lfs_truncate: persistent blocks\n");
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
2000-06-28 00:57:11 +04:00
|
|
|
#endif
|
|
|
|
oip->i_flag |= IN_CHANGE;
|
|
|
|
#ifdef QUOTA
|
|
|
|
(void) chkdq(oip, -blocksreleased, NOCRED, 0);
|
1994-06-08 15:41:58 +04:00
|
|
|
#endif
|
2000-09-09 08:49:54 +04:00
|
|
|
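/*
 * Give back the segment-block reservation taken at the start of the
 * truncate; a negative count releases previously reserved blocks.
 */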
lfs_reserve(fs, ovp, -fsbtodb(fs, 2 * NIADDR + 3));
|
2000-06-28 00:57:11 +04:00
|
|
|
return (allerror);
|
|
|
|
}
|
1999-11-24 02:52:40 +03:00
|
|
|
|
2000-06-28 00:57:11 +04:00
|
|
|
/* Update segment usage information when removing a block. */
|
|
|
|
static int
|
|
|
|
lfs_blkfree(struct lfs *fs, daddr_t daddr, size_t bsize, long *lastseg,
|
|
|
|
size_t *num)
|
|
|
|
{
|
|
|
|
long seg;
|
|
|
|
int error = 0;
|
1999-04-12 04:30:08 +04:00
|
|
|
|
2000-06-28 00:57:11 +04:00
|
|
|
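/*
 * Round the freed size up to a whole number of frags and add it to
 * the running byte count for the segment holding daddr; when the
 * segment changes, flush the previous segment's total through
 * lfs_update_seguse() and start a new count.
 */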
bsize = fragroundup(fs, bsize);
|
|
|
|
if (daddr > 0) {
|
|
|
|
if (*lastseg != (seg = datosn(fs, daddr))) {
|
|
|
|
error = lfs_update_seguse(fs, *lastseg, *num);
|
|
|
|
*num = bsize;
|
|
|
|
*lastseg = seg;
|
|
|
|
} else
|
|
|
|
*num += bsize;
|
1999-03-10 03:20:00 +03:00
|
|
|
}
|
2000-06-28 00:57:11 +04:00
|
|
|
return error;
|
|
|
|
}
|
2000-05-06 00:59:20 +04:00
|
|
|
|
2000-06-28 00:57:11 +04:00
|
|
|
/* Finish the accounting updates for a segment. */
|
|
|
|
static int
|
|
|
|
lfs_update_seguse(struct lfs *fs, long lastseg, size_t num)
|
|
|
|
{
|
|
|
|
SEGUSE *sup;
|
|
|
|
struct buf *bp;
|
|
|
|
|
|
|
|
if (lastseg < 0 || num == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
|
|
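/* Fetch the segment usage entry for this segment from the Ifile. */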
LFS_SEGENTRY(sup, fs, lastseg, bp);
|
|
|
|
if (num > sup->su_nbytes) {
|
|
|
|
printf("lfs_truncate: segment %ld short by %ld\n",
|
|
|
|
lastseg, (long)num - sup->su_nbytes);
|
|
|
|
panic("lfs_truncate: negative bytes");
|
|
|
|
sup->su_nbytes = num;
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
2000-06-28 00:57:11 +04:00
|
|
|
sup->su_nbytes -= num;
|
2000-09-09 08:49:54 +04:00
|
|
|
return (VOP_BWRITE(bp)); /* Ifile */
|
2000-06-28 00:57:11 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release blocks associated with the inode ip and stored in the indirect
|
|
|
|
* block bn. Blocks are free'd in LIFO order up to (but not including)
|
|
|
|
* lastbn. If level is greater than SINGLE, the block is an indirect block
|
|
|
|
* and recursive calls to indirtrunc must be used to cleanse other indirect
|
|
|
|
* blocks.
|
|
|
|
*
|
|
|
|
* NB: triple indirect blocks are untested.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
lfs_indirtrunc(struct inode *ip, ufs_daddr_t lbn, daddr_t dbn,
|
|
|
|
ufs_daddr_t lastbn, int level, long *countp,
|
2000-11-27 06:33:57 +03:00
|
|
|
long *rcountp, long *lastsegp, size_t *bcp, struct proc *p)
|
2000-06-28 00:57:11 +04:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct buf *bp;
|
|
|
|
struct lfs *fs = ip->i_lfs;
|
|
|
|
ufs_daddr_t *bap;
|
|
|
|
struct vnode *vp;
|
|
|
|
ufs_daddr_t *copy = NULL, nb, nlbn, last;
|
2000-07-03 05:45:46 +04:00
|
|
|
long blkcount, rblkcount, factor;
|
|
|
|
int nblocks, blocksreleased = 0, real_released = 0;
|
2000-06-28 00:57:11 +04:00
|
|
|
int error = 0, allerror = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Calculate index in current block of last
|
|
|
|
* block to be kept. -1 indicates the entire
|
|
|
|
* block so we need not calculate the index.
|
|
|
|
*/
|
|
|
|
factor = 1;
|
|
|
|
for (i = SINGLE; i < level; i++)
|
|
|
|
factor *= NINDIR(fs);
|
|
|
|
last = lastbn;
|
|
|
|
if (lastbn > 0)
|
|
|
|
last /= factor;
|
|
|
|
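/* Disk blocks per file-system block. */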
nblocks = btodb(fs->lfs_bsize);
|
|
|
|
/*
|
|
|
|
* Get buffer of block pointers, zero those entries corresponding
|
|
|
|
* to blocks to be free'd, and update the on-disk copy first. Since
|
|
|
|
* double (triple) indirect blocks are freed before single (double) indirect blocks, calls
|
|
|
|
* to bmap on these blocks will fail. However, we already have
|
|
|
|
* the on disk address, so we have to set the b_blkno field
|
|
|
|
* explicitly instead of letting bread do everything for us.
|
|
|
|
*/
|
|
|
|
vp = ITOV(ip);
|
|
|
|
bp = getblk(vp, lbn, (int)fs->lfs_bsize, 0, 0);
|
|
|
|
if (bp->b_flags & (B_DONE | B_DELWRI)) {
|
|
|
|
/* Braces must be here in case trace evaluates to nothing. */
|
|
|
|
trace(TR_BREADHIT, pack(vp, fs->lfs_bsize), lbn);
|
|
|
|
} else {
|
|
|
|
trace(TR_BREADMISS, pack(vp, fs->lfs_bsize), lbn);
|
2000-11-27 06:33:57 +03:00
|
|
|
p->p_stats->p_ru.ru_inblock++; /* pay for read */
|
2000-06-28 00:57:11 +04:00
|
|
|
bp->b_flags |= B_READ;
|
|
|
|
if (bp->b_bcount > bp->b_bufsize)
|
|
|
|
panic("lfs_indirtrunc: bad buffer size");
|
|
|
|
bp->b_blkno = dbn;
|
|
|
|
VOP_STRATEGY(bp);
|
|
|
|
error = biowait(bp);
|
|
|
|
}
|
|
|
|
if (error) {
|
|
|
|
brelse(bp);
|
2000-07-03 05:45:46 +04:00
|
|
|
*countp = *rcountp = 0;
|
2000-06-28 00:57:11 +04:00
|
|
|
return (error);
|
2000-05-06 00:59:20 +04:00
|
|
|
}
|
|
|
|
|
2000-06-28 00:57:11 +04:00
|
|
|
bap = (ufs_daddr_t *)bp->b_data;
|
|
|
|
if (lastbn >= 0) {
|
|
|
|
MALLOC(copy, ufs_daddr_t *, fs->lfs_bsize, M_TEMP, M_WAITOK);
|
|
|
|
memcpy((caddr_t)copy, (caddr_t)bap, (u_int)fs->lfs_bsize);
|
|
|
|
memset((caddr_t)&bap[last + 1], 0,
|
|
|
|
(u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
|
|
|
|
error = VOP_BWRITE(bp);
|
2000-05-06 00:59:20 +04:00
|
|
|
if (error)
|
2000-06-28 00:57:11 +04:00
|
|
|
allerror = error;
|
|
|
|
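/* Work from the saved copy now that the zeroed original has been written back. */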
bap = copy;
|
2000-05-06 00:59:20 +04:00
|
|
|
}
|
|
|
|
|
2000-06-28 00:57:11 +04:00
|
|
|
/*
|
|
|
|
* Recursively free totally unused blocks.
|
|
|
|
*/
|
|
|
|
for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
|
|
|
|
i--, nlbn += factor) {
|
|
|
|
nb = bap[i];
|
|
|
|
if (nb == 0)
|
|
|
|
continue;
|
|
|
|
if (level > SINGLE) {
|
|
|
|
error = lfs_indirtrunc(ip, nlbn, nb,
|
|
|
|
(ufs_daddr_t)-1, level - 1,
|
2000-07-03 05:45:46 +04:00
|
|
|
&blkcount, &rblkcount,
|
2000-11-27 06:33:57 +03:00
|
|
|
lastsegp, bcp, p);
|
2000-06-28 00:57:11 +04:00
|
|
|
if (error)
|
|
|
|
allerror = error;
|
|
|
|
blocksreleased += blkcount;
|
2000-07-03 05:45:46 +04:00
|
|
|
real_released += rblkcount;
|
2000-06-28 00:57:11 +04:00
|
|
|
}
|
|
|
|
lfs_blkfree(fs, nb, fs->lfs_bsize, lastsegp, bcp);
|
2000-07-03 05:45:46 +04:00
|
|
|
if (bap[i] > 0)
|
|
|
|
real_released += nblocks;
|
2000-06-28 00:57:11 +04:00
|
|
|
blocksreleased += nblocks;
|
|
|
|
}
|
2000-05-06 00:59:20 +04:00
|
|
|
|
2000-06-28 00:57:11 +04:00
|
|
|
/*
|
|
|
|
* Recursively free last partial block.
|
|
|
|
*/
|
|
|
|
if (level > SINGLE && lastbn >= 0) {
|
|
|
|
last = lastbn % factor;
|
|
|
|
nb = bap[i];
|
|
|
|
if (nb != 0) {
|
|
|
|
error = lfs_indirtrunc(ip, nlbn, nb,
|
|
|
|
last, level - 1, &blkcount,
|
2000-11-27 06:33:57 +03:00
|
|
|
&rblkcount, lastsegp, bcp, p);
|
2000-06-28 00:57:11 +04:00
|
|
|
if (error)
|
|
|
|
allerror = error;
|
2000-07-03 05:45:46 +04:00
|
|
|
real_released += rblkcount;
|
2000-06-28 00:57:11 +04:00
|
|
|
blocksreleased += blkcount;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
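/*
 * Dispose of the block-pointer buffer.  If we made a private copy,
 * the zeroed original has already been written back, so just free
 * the copy.  Otherwise the whole indirect block is going away: if
 * it was a delayed write, credit its space back to lfs_avail before
 * invalidating and releasing it.
 */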
if (copy != NULL) {
|
|
|
|
FREE(copy, M_TEMP);
|
|
|
|
} else {
|
2000-11-21 03:00:31 +03:00
|
|
|
if (bp->b_flags & B_DELWRI) {
|
|
|
|
LFS_UNLOCK_BUF(bp);
|
|
|
|
fs->lfs_avail += btodb(bp->b_bcount);
|
|
|
|
wakeup(&fs->lfs_avail);
|
|
|
|
}
|
2000-06-28 00:57:11 +04:00
|
|
|
bp->b_flags |= B_INVAL;
|
|
|
|
brelse(bp);
|
|
|
|
}
|
1999-03-10 03:20:00 +03:00
|
|
|
|
2000-06-28 00:57:11 +04:00
|
|
|
*countp = blocksreleased;
|
2000-07-03 05:45:46 +04:00
|
|
|
*rcountp = real_released;
|
2000-06-28 00:57:11 +04:00
|
|
|
return (allerror);
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
1999-03-30 01:51:38 +04:00
|
|
|
|
|
|
|
/*
|
2000-06-28 00:57:11 +04:00
|
|
|
* Destroy any in-core blocks past the truncation length.
|
|
|
|
* Inlined from vtruncbuf(), so that lfs_avail can be updated.
|
1999-03-30 01:51:38 +04:00
|
|
|
*/
|
2000-06-28 00:57:11 +04:00
|
|
|
static int
|
|
|
|
lfs_vtruncbuf(vp, lbn, slpflag, slptimeo)
|
2000-03-30 16:41:09 +04:00
|
|
|
struct vnode *vp;
|
2000-06-28 00:57:11 +04:00
|
|
|
daddr_t lbn;
|
|
|
|
int slpflag, slptimeo;
|
1999-03-30 01:51:38 +04:00
|
|
|
{
|
2000-06-28 00:57:11 +04:00
|
|
|
struct buf *bp, *nbp;
|
|
|
|
int s, error;
|
|
|
|
struct lfs *fs;
|
|
|
|
|
|
|
|
fs = VTOI(vp)->i_lfs;
|
|
|
|
s = splbio();
|
|
|
|
|
|
|
|
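/*
 * Scan the clean list, then the dirty list, invalidating every
 * buffer at or beyond lbn.  If a buffer is busy, sleep until it is
 * released and rescan from the top, since either list may have
 * changed while we slept.
 */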
restart:
|
|
|
|
for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
|
|
|
|
nbp = LIST_NEXT(bp, b_vnbufs);
|
|
|
|
if (bp->b_lblkno < lbn)
|
|
|
|
continue;
|
|
|
|
if (bp->b_flags & B_BUSY) {
|
|
|
|
bp->b_flags |= B_WANTED;
|
|
|
|
error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
|
|
|
|
"lfs_vtruncbuf", slptimeo);
|
|
|
|
if (error) {
|
2000-05-06 00:59:20 +04:00
|
|
|
splx(s);
|
2000-06-28 00:57:11 +04:00
|
|
|
return (error);
|
1999-03-30 01:51:38 +04:00
|
|
|
}
|
2000-06-28 00:57:11 +04:00
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
|
2000-09-09 07:47:05 +04:00
|
|
|
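/*
 * The buffer will never be written, so credit its space back to
 * lfs_avail and wake anyone sleeping on it.
 */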
if (bp->b_flags & B_DELWRI) {
|
|
|
|
bp->b_flags &= ~B_DELWRI;
|
2000-07-05 02:30:37 +04:00
|
|
|
fs->lfs_avail += btodb(bp->b_bcount);
|
2000-11-21 03:00:31 +03:00
|
|
|
wakeup(&fs->lfs_avail);
|
2000-09-09 07:47:05 +04:00
|
|
|
}
|
2000-11-17 22:14:41 +03:00
|
|
|
LFS_UNLOCK_BUF(bp);
|
2000-06-28 00:57:11 +04:00
|
|
|
brelse(bp);
|
|
|
|
}
|
1999-04-02 03:28:09 +04:00
|
|
|
|
2000-06-28 00:57:11 +04:00
|
|
|
for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
|
|
|
|
nbp = LIST_NEXT(bp, b_vnbufs);
|
|
|
|
if (bp->b_lblkno < lbn)
|
|
|
|
continue;
|
|
|
|
if (bp->b_flags & B_BUSY) {
|
|
|
|
bp->b_flags |= B_WANTED;
|
|
|
|
error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
|
|
|
|
"lfs_vtruncbuf", slptimeo);
|
|
|
|
if (error) {
|
|
|
|
splx(s);
|
|
|
|
return (error);
|
1999-04-02 03:28:09 +04:00
|
|
|
}
|
2000-06-28 00:57:11 +04:00
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
|
2000-09-09 07:47:05 +04:00
|
|
|
if (bp->b_flags & B_DELWRI) {
|
|
|
|
bp->b_flags &= ~B_DELWRI;
|
2000-07-05 02:30:37 +04:00
|
|
|
fs->lfs_avail += btodb(bp->b_bcount);
|
2000-11-21 03:00:31 +03:00
|
|
|
wakeup(&fs->lfs_avail);
|
2000-09-09 07:47:05 +04:00
|
|
|
}
|
2000-11-17 22:14:41 +03:00
|
|
|
LFS_UNLOCK_BUF(bp);
|
2000-06-28 00:57:11 +04:00
|
|
|
brelse(bp);
|
1999-03-30 01:51:38 +04:00
|
|
|
}
|
2000-06-28 00:57:11 +04:00
|
|
|
|
|
|
|
splx(s);
|
|
|
|
|
1999-03-30 01:51:38 +04:00
|
|
|
return (0);
|
|
|
|
}
|
2000-06-28 00:57:11 +04:00
|
|
|
|