/*	$NetBSD: ffs_inode.c,v 1.60 2003/08/07 16:34:30 agc Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ffs_inode.c,v 1.60 2003/08/07 16:34:30 agc Exp $");

#if defined(_KERNEL_OPT)
#include "opt_ffs.h"
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc __P((struct inode *, daddr_t, daddr_t,
				daddr_t, int, int64_t *));

/*
 * Update the access, modified, and inode change times as specified
 * by the IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.
 * The IN_MODIFIED flag is used to specify that the inode needs to be
 * updated but that the times have already been set.  The access
 * and modified times are taken from the second and third parameters;
 * the inode change time is always taken from the current time.  If
 * the UPDATE_WAIT flag is set, or UPDATE_DIROP is set and we are not doing
 * softupdates, then wait for the disk write of the inode to complete.
 */
int
ffs_update(v)
	void *v;
{
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timespec *a_access;
		struct timespec *a_modify;
		int a_flags;
	} */ *ap = v;
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;
	struct timespec ts;
	caddr_t cp;
	int waitfor, flags;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	ip = VTOI(ap->a_vp);
	TIMEVAL_TO_TIMESPEC(&time, &ts);
	FFS_ITIMES(ip,
	    ap->a_access ? ap->a_access : &ts,
	    ap->a_modify ? ap->a_modify : &ts, &ts);
	flags = ip->i_flag & (IN_MODIFIED | IN_ACCESSED);
	if (flags == 0)
		return (0);
	fs = ip->i_fs;

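	/*
	 * Decide whether the inode write should be synchronous.  A wait is
	 * only considered when IN_MODIFIED is set and the mount is not
	 * MNT_ASYNC: honour an explicit UPDATE_WAIT request, and force one
	 * for directory operations (UPDATE_DIROP) unless soft dependencies
	 * will order the write for us.
	 */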
	if ((flags & IN_MODIFIED) != 0 &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_ASYNC) == 0) {
		waitfor = ap->a_flags & UPDATE_WAIT;
		if ((ap->a_flags & UPDATE_DIROP) && !DOINGSOFTDEP(ap->a_vp))
			waitfor |= UPDATE_WAIT;
	} else
		waitfor = 0;

	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC &&			/* XXX */
	    fs->fs_old_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_ffs1_ouid = ip->i_uid;			/* XXX */
		ip->i_ffs1_ogid = ip->i_gid;			/* XXX */
	}							/* XXX */
	error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	ip->i_flag &= ~(IN_MODIFIED | IN_ACCESSED);
	if (DOINGSOFTDEP(ap->a_vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_ffs_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
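	/*
	 * Copy the in-core dinode back into its slot in the inode block,
	 * using the UFS1 or UFS2 layout as appropriate and byte-swapping
	 * the copy when the file system is opposite-endian (FFS_EI).
	 */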
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE1_SIZE);
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_dinode1_swap(ip->i_din.ffs1_din,
			    (struct ufs1_dinode *)cp);
		else
#endif
			memcpy(cp, ip->i_din.ffs1_din, DINODE1_SIZE);
	} else {
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE2_SIZE);
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_dinode2_swap(ip->i_din.ffs2_din,
			    (struct ufs2_dinode *)cp);
		else
#endif
			memcpy(cp, ip->i_din.ffs2_din, DINODE2_SIZE);
	}
	if (waitfor) {
		return (bwrite(bp));
	} else {
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */

/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(v)
	void *v;
{
	struct vop_truncate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		int a_flags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *ovp = ap->a_vp;
	struct genfs_node *gp = VTOG(ovp);
	daddr_t lastblock;
	struct inode *oip;
	daddr_t bn, lastiblock[NIADDR], indir_lbn[NIADDR];
	daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	off_t length = ap->a_length;
	struct fs *fs;
	int offset, size, level;
	int64_t count, blocksreleased = 0;
	int i, ioflag, aflag, nblocks;
	int error, allerror = 0;
	off_t osize;
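
	/*
	 * Note: DIP() reads, and DIP_ASSIGN()/DIP_ADD() modify, a field of
	 * the on-disk inode in whichever dinode format (UFS1 or UFS2) this
	 * file system uses.
	 */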
	if (length < 0)
		return (EINVAL);
	oip = VTOI(ovp);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen ||
	     (ovp->v_mount->mnt_maxsymlinklen == 0 &&
	      DIP(oip, blocks) == 0))) {
		KDASSERT(length == 0);
		memset(SHORTLINK(oip), 0, (size_t)oip->i_size);
		oip->i_size = 0;
		DIP_ASSIGN(oip, size, 0);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (VOP_UPDATE(ovp, NULL, NULL, UPDATE_WAIT));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (VOP_UPDATE(ovp, NULL, NULL, 0));
	}
#ifdef QUOTA
	if ((error = getinoquota(oip)) != 0)
		return (error);
#endif
	fs = oip->i_fs;
	if (length > fs->fs_maxfilesize)
		return (EFBIG);

	osize = oip->i_size;
	ioflag = ap->a_flags;
	aflag = ioflag & IO_SYNC ? B_SYNC : 0;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */

	if (osize < length) {
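		/*
		 * If the old last block was a partially-allocated direct
		 * block and the file now grows into a later block, allocate
		 * the rest of that old block first (flushing it for IO_SYNC)
		 * so the fragment is expanded to a full block on disk.
		 */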
		if (lblkno(fs, osize) < NDADDR &&
		    lblkno(fs, osize) != lblkno(fs, length) &&
		    blkroundup(fs, osize) != osize) {
			error = ufs_balloc_range(ovp, osize,
			    blkroundup(fs, osize) - osize, ap->a_cred, aflag);
			if (error) {
				return error;
			}
			if (ioflag & IO_SYNC) {
				ovp->v_size = blkroundup(fs, osize);
				simple_lock(&ovp->v_interlock);
				VOP_PUTPAGES(ovp,
				    trunc_page(osize & ~(fs->fs_bsize - 1)),
				    round_page(ovp->v_size),
				    PGO_CLEANIT | PGO_SYNCIO);
			}
		}
		error = ufs_balloc_range(ovp, length - 1, 1, ap->a_cred,
		    aflag);
		if (error) {
			(void) VOP_TRUNCATE(ovp, osize, ioflag & IO_SYNC,
			    ap->a_cred, ap->a_p);
			return error;
		}
		uvm_vnp_setsize(ovp, length);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		KASSERT(ovp->v_size == oip->i_size);
		return (VOP_UPDATE(ovp, NULL, NULL, 1));
	}

	/*
	 * When truncating a regular file down to a non-block-aligned size,
	 * we must zero the part of last block which is past the new EOF.
	 * We must synchronously flush the zeroed pages to disk
	 * since the new pages will be invalidated as soon as we
	 * inform the VM system of the new, smaller size.
	 * We must do this before acquiring the GLOCK, since fetching
	 * the pages will acquire the GLOCK internally.
	 * So there is a window where another thread could see a whole
	 * zeroed page past EOF, but that's life.
	 */

	offset = blkoff(fs, length);
	if (ovp->v_type == VREG && length < osize && offset != 0) {
		voff_t eoz;

		error = ufs_balloc_range(ovp, length - 1, 1, ap->a_cred,
		    aflag);
		if (error) {
			return error;
		}
		size = blksize(fs, oip, lblkno(fs, length));
		eoz = MIN(lblktosize(fs, lblkno(fs, length)) + size, osize);
		uvm_vnp_zerorange(ovp, length, eoz - length);
		simple_lock(&ovp->v_interlock);
		error = VOP_PUTPAGES(ovp, trunc_page(length), round_page(eoz),
		    PGO_CLEANIT | PGO_DEACTIVATE | PGO_SYNCIO);
		if (error) {
			return error;
		}
	}

|
|
|
lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-15 21:49:07 +03:00
|
|
|
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, ap->a_cred, FSYNC_WAIT,
			    0, 0, ap->a_p)) != 0) {
				lockmgr(&gp->g_glock, LK_RELEASE, NULL);
				return (error);
			}
			if (oip->i_flag & IN_SPACECOUNTED)
				fs->fs_pendingblocks -= DIP(oip, blocks);
		} else {
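			/*
			 * Truncation to zero with soft dependencies: let the
			 * softdep code take over freeing the blocks, then
			 * discard any remaining buffers for this vnode.
			 */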
			uvm_vnp_setsize(ovp, length);
#ifdef QUOTA
			(void) chkdq(oip, -DIP(oip, blocks), NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length, 0);
			(void) vinvalbuf(ovp, 0, ap->a_cred, ap->a_p, 0, 0);
			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (VOP_UPDATE(ovp, NULL, NULL, 0));
		}
	}

	oip->i_size = length;
	DIP_ASSIGN(oip, size, length);
	uvm_vnp_setsize(ovp, length);
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = DIP(oip, ib[level]);
		if (lastiblock[level] < 0) {
			DIP_ASSIGN(oip, ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = DIP(oip, db[i]);
		if (i > lastblock)
			DIP_ASSIGN(oip, db[i], 0);
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = VOP_UPDATE(ovp, NULL, NULL, UPDATE_WAIT);
	if (error && !allerror)
		allerror = error;

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = DIP(oip, db[i]);
		DIP_ASSIGN(oip, db[i], oldblks[i]);
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = DIP(oip, ib[i]);
		DIP_ASSIGN(oip, ib[i], oldblks[NDADDR + i]);
	}

	oip->i_size = osize;
	DIP_ASSIGN(oip, size, osize);
	error = vtruncbuf(ovp, lastblock + 1, 0, 0);
	if (error && !allerror)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		if (oip->i_ump->um_fstype == UFS1)
			bn = ufs_rw32(oip->i_ffs1_ib[level],UFS_FSNEEDSWAP(fs));
		else
			bn = ufs_rw64(oip->i_ffs2_ib[level],UFS_FSNEEDSWAP(fs));
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_ASSIGN(oip, ib[level], 0);
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		if (oip->i_ump->um_fstype == UFS1)
			bn = ufs_rw32(oip->i_ffs1_db[i], UFS_FSNEEDSWAP(fs));
		else
			bn = ufs_rw64(oip->i_ffs2_db[i], UFS_FSNEEDSWAP(fs));
		if (bn == 0)
			continue;
		DIP_ASSIGN(oip, db[i], 0);
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	if (oip->i_ump->um_fstype == UFS1)
		bn = ufs_rw32(oip->i_ffs1_db[lastblock], UFS_FSNEEDSWAP(fs));
	else
		bn = ufs_rw64(oip->i_ffs2_db[lastblock], UFS_FSNEEDSWAP(fs));
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		DIP_ASSIGN(oip, size, length);
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}

done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != DIP(oip, ib[level]))
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != DIP(oip, db[i]))
			panic("itrunc2");
	if (length == 0 &&
	    (!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
		panic("itrunc3");
#endif /* DIAGNOSTIC */

	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	DIP_ASSIGN(oip, size, length);
	DIP_ADD(oip, blocks, -blocksreleased);
	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	KASSERT(ovp->v_type != VREG || ovp->v_size == oip->i_size);
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	struct inode *ip;
	daddr_t lbn, lastbn;
	daddr_t dbn;
	int level;
	int64_t *countp;
{
	int i;
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	int32_t *bap1 = NULL;
	int64_t *bap2 = NULL;
	struct vnode *vp;
	daddr_t nb, nlbn, last;
	char *copy = NULL;
	int64_t blkcount, factor, blocksreleased = 0;
	int nblocks;
	int error = 0, allerror = 0;
#ifdef FFS_EI
	const int needswap = UFS_FSNEEDSWAP(fs);
#endif
#define RBAP(ip, i)	(((ip)->i_ump->um_fstype == UFS1) ? \
	ufs_rw32(bap1[i], needswap) : ufs_rw64(bap2[i], needswap))
#define BAP_ASSIGN(ip, i, value)				\
	do {							\
		if ((ip)->i_ump->um_fstype == UFS1)		\
			bap1[i] = (value);			\
		else						\
			bap2[i] = (value);			\
	} while(0)
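	/*
	 * RBAP reads entry i of the indirect block's pointer array in host
	 * byte order for either the 32-bit UFS1 or 64-bit UFS2 layout;
	 * BAP_ASSIGN stores a raw value into the same slot (it is only used
	 * to zero entries, so no byte swap is needed).
	 */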

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		/* Braces must be here in case trace evaluates to nothing. */
		trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn);
	} else {
		trace(TR_BREADMISS, pack(vp, fs->fs_bsize), lbn);
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		VOP_STRATEGY(bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	if (ip->i_ump->um_fstype == UFS1)
		bap1 = (int32_t *)bp->b_data;
	else
		bap2 = (int64_t *)bp->b_data;
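	/*
	 * If part of this indirect block is being kept (lastbn >= 0), zero
	 * the entries being freed in the buffer and write it out first,
	 * then continue working from a private copy of the old pointers.
	 */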
	if (lastbn >= 0) {
		copy = malloc(fs->fs_bsize, M_TEMP, M_WAITOK);
		memcpy((caddr_t)copy, bp->b_data, (u_int)fs->fs_bsize);
		for (i = last + 1; i < NINDIR(fs); i++)
			BAP_ASSIGN(ip, i, 0);
		error = bwrite(bp);
		if (error)
			allerror = error;
		if (ip->i_ump->um_fstype == UFS1)
			bap1 = (int32_t *)copy;
		else
			bap2 = (int64_t *)copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = RBAP(ip, i);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (daddr_t)-1, level - 1,
			    &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = RBAP(ip, i);
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}

	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}