/* $NetBSD: ffs_inode.c,v 1.44 2001/09/20 08:25:59 chs Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ffs_inode.c 8.13 (Berkeley) 4/21/95
*/
#if defined(_KERNEL_OPT)
#include "opt_ffs.h"
#include "opt_quota.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
static int ffs_indirtrunc __P((struct inode *, ufs_daddr_t, ufs_daddr_t,
ufs_daddr_t, int, long *));
/*
* Update the access, modified, and inode change times as specified
* by the IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.
* The IN_MODIFIED flag is used to specify that the inode needs to be
* updated but that the times have already been set. The access
* and modified times are taken from the second and third parameters;
* the inode change time is always taken from the current time. If
* the UPDATE_WAIT flag is set, or UPDATE_DIROP is set and we are not doing
* softupdates, then wait for the disk write of the inode to complete.
*/
int
ffs_update(v)
void *v;
{
struct vop_update_args /* {
struct vnode *a_vp;
struct timespec *a_access;
struct timespec *a_modify;
int a_flags;
} */ *ap = v;
struct fs *fs;
struct buf *bp;
struct inode *ip;
int error;
struct timespec ts;
caddr_t cp;
int waitfor, flags;
if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
return (0);
ip = VTOI(ap->a_vp);
TIMEVAL_TO_TIMESPEC(&time, &ts);
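/*
 * FFS_ITIMES folds any pending IN_ACCESS, IN_CHANGE, and IN_UPDATE
 * flags into the inode's timestamps, leaving IN_MODIFIED and/or
 * IN_ACCESSED set if anything actually changed; those are the only
 * flags examined below.
 */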
FFS_ITIMES(ip,
ap->a_access ? ap->a_access : &ts,
ap->a_modify ? ap->a_modify : &ts, &ts);
flags = ip->i_flag & (IN_MODIFIED | IN_ACCESSED);
if (flags == 0)
return (0);
fs = ip->i_fs;
if ((flags & IN_MODIFIED) != 0 &&
(ap->a_vp->v_mount->mnt_flag & MNT_ASYNC) == 0) {
waitfor = ap->a_flags & UPDATE_WAIT;
if ((ap->a_flags & UPDATE_DIROP) && !DOINGSOFTDEP(ap->a_vp))
waitfor |= UPDATE_WAIT;
} else
waitfor = 0;
/*
* Ensure that uid and gid are correct. This is a temporary
* fix until fsck has been changed to do the update.
*/
if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
ip->i_din.ffs_din.di_ouid = ip->i_ffs_uid; /* XXX */
ip->i_din.ffs_din.di_ogid = ip->i_ffs_gid; /* XXX */
} /* XXX */
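/*
 * Read the filesystem block holding this inode's on-disk dinode
 * (ino_to_fsba gives the block, ino_to_fsbo the slot within it),
 * copy the in-core dinode into it, and write it back, synchronously
 * if the caller asked us to wait.
 */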
error = bread(ip->i_devvp,
fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
(int)fs->fs_bsize, NOCRED, &bp);
if (error) {
brelse(bp);
return (error);
}
ip->i_flag &= ~(IN_MODIFIED | IN_ACCESSED);
if (DOINGSOFTDEP(ap->a_vp))
softdep_update_inodeblock(ip, bp, waitfor);
else if (ip->i_ffs_effnlink != ip->i_ffs_nlink)
panic("ffs_update: bad link cnt");
cp = (caddr_t)bp->b_data +
(ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
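/*
 * With the endian-independent FFS option (FFS_EI), the filesystem may
 * have the opposite byte order from the host; in that case the dinode
 * is byte-swapped as it is copied into the buffer.
 */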
#ifdef FFS_EI
if (UFS_FSNEEDSWAP(fs))
ffs_dinode_swap(&ip->i_din.ffs_din, (struct dinode *)cp);
else
#endif
memcpy(cp, &ip->i_din.ffs_din, DINODE_SIZE);
if (waitfor) {
return (bwrite(bp));
} else {
bdwrite(bp);
return (0);
}
}
#define SINGLE 0 /* index of single indirect block */
#define DOUBLE 1 /* index of double indirect block */
#define TRIPLE 2 /* index of triple indirect block */
/*
* Truncate the inode oip to at most length size, freeing the
* disk blocks.
*/
int
ffs_truncate(v)
void *v;
{
struct vop_truncate_args /* {
struct vnode *a_vp;
off_t a_length;
int a_flags;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap = v;
struct vnode *ovp = ap->a_vp;
struct genfs_node *gp = VTOG(ovp);
ufs_daddr_t lastblock;
struct inode *oip;
ufs_daddr_t bn, lastiblock[NIADDR], indir_lbn[NIADDR];
ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
off_t length = ap->a_length;
struct fs *fs;
int offset, size, level;
long count, nblocks, blocksreleased = 0;
int i;
int error, allerror = 0;
off_t osize;
if (length < 0)
return (EINVAL);
oip = VTOI(ovp);
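/*
 * A short ("fast") symlink keeps its target string in the inode's
 * block pointer area rather than in a data block, so truncating it
 * only requires clearing that area; no disk blocks are involved.
 */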
if (ovp->v_type == VLNK &&
(oip->i_ffs_size < ovp->v_mount->mnt_maxsymlinklen ||
(ovp->v_mount->mnt_maxsymlinklen == 0 &&
oip->i_din.ffs_din.di_blocks == 0))) {
KDASSERT(length == 0);
memset(&oip->i_ffs_shortlink, 0, (size_t)oip->i_ffs_size);
oip->i_ffs_size = 0;
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (VOP_UPDATE(ovp, NULL, NULL, UPDATE_WAIT));
}
if (oip->i_ffs_size == length) {
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (VOP_UPDATE(ovp, NULL, NULL, 0));
}
#ifdef QUOTA
if ((error = getinoquota(oip)) != 0)
return (error);
#endif
fs = oip->i_fs;
if (length > fs->fs_maxfilesize)
return (EFBIG);
osize = oip->i_ffs_size;
/*
* Lengthen the size of the file. We must ensure that the
* last byte of the file is allocated. Since the smallest
* value of osize is 0, length will be at least 1.
*/
if (osize < length) {
ufs_balloc_range(ovp, length - 1, 1, ap->a_cred,
ap->a_flags & IO_SYNC ? B_SYNC : 0);
uvm_vnp_setsize(ovp, length);
oip->i_flag |= IN_CHANGE | IN_UPDATE;
KASSERT(ovp->v_size == oip->i_ffs_size);
return (VOP_UPDATE(ovp, NULL, NULL, 1));
}
/*
* When truncating a regular file down to a non-block-aligned size,
* we must zero the part of last block which is past the new EOF.
* We must synchronously flush the zeroed pages to disk
* since the new pages will be invalidated as soon as we
* inform the VM system of the new, smaller size.
* We must do this before acquiring the GLOCK, since fetching
* the pages will acquire the GLOCK internally.
* So there is a window where another thread could see a whole
* zeroed page past EOF, but that's life.
*/
offset = blkoff(fs, length);
if (ovp->v_type == VREG && length < osize && offset != 0) {
struct uvm_object *uobj;
voff_t eoz;
size = blksize(fs, oip, lblkno(fs, length));
eoz = MIN(lblktosize(fs, lblkno(fs, length)) + size, osize);
uvm_vnp_zerorange(ovp, length, eoz - length);
uobj = &ovp->v_uobj;
simple_lock(&uobj->vmobjlock);
error = (uobj->pgops->pgo_put)(uobj, trunc_page(length),
round_page(eoz), PGO_CLEANIT|PGO_DEACTIVATE|PGO_SYNCIO);
if (error) {
return error;
}
}
lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
if (DOINGSOFTDEP(ovp)) {
if (length > 0) {
/*
* If a file is only partially truncated, then
* we have to clean up the data structures
* describing the allocation past the truncation
* point. Finding and deallocating those structures
* is a lot of work. Since partial truncation occurs
* rarely, we solve the problem by syncing the file
* so that it will have no data structures left.
*/
if ((error = VOP_FSYNC(ovp, ap->a_cred, FSYNC_WAIT,
0, 0, ap->a_p)) != 0) {
lockmgr(&gp->g_glock, LK_RELEASE, NULL);
return (error);
}
} else {
uvm_vnp_setsize(ovp, length);
#ifdef QUOTA
(void) chkdq(oip, -oip->i_ffs_blocks, NOCRED, 0);
#endif
softdep_setup_freeblocks(oip, length);
(void) vinvalbuf(ovp, 0, ap->a_cred, ap->a_p, 0, 0);
lockmgr(&gp->g_glock, LK_RELEASE, NULL);
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (VOP_UPDATE(ovp, NULL, NULL, 0));
}
}
/*
* Reduce the size of the file.
*/
oip->i_ffs_size = length;
uvm_vnp_setsize(ovp, length);
/*
* Calculate index into inode's block list of
* last direct and indirect blocks (if any)
* which we want to keep. Lastblock is -1 when
* the file is truncated to 0.
*/
lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
lastiblock[SINGLE] = lastblock - NDADDR;
lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
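/*
 * Example (8KB blocks, NDADDR == 12): truncating to length 8193
 * leaves lastblock == 1, i.e. two direct blocks are kept;
 * lastiblock[SINGLE] is then negative, so all indirect blocks are
 * released below.
 */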
nblocks = btodb(fs->fs_bsize);
/*
* Update file and block pointers on disk before we start freeing
* blocks. If we crash before free'ing blocks below, the blocks
* will be returned to the free list. lastiblock values are also
* normalized to -1 for calls to ffs_indirtrunc below.
*/
memcpy((caddr_t)oldblks, (caddr_t)&oip->i_ffs_db[0], sizeof oldblks);
for (level = TRIPLE; level >= SINGLE; level--)
if (lastiblock[level] < 0) {
oip->i_ffs_ib[level] = 0;
lastiblock[level] = -1;
}
for (i = NDADDR - 1; i > lastblock; i--)
oip->i_ffs_db[i] = 0;
oip->i_flag |= IN_CHANGE | IN_UPDATE;
error = VOP_UPDATE(ovp, NULL, NULL, UPDATE_WAIT);
if (error && !allerror)
allerror = error;
/*
* Having written the new inode to disk, save its new configuration
* and put back the old block pointers long enough to process them.
* Note that we save the new block configuration so we can check it
* when we are done.
*/
memcpy((caddr_t)newblks, (caddr_t)&oip->i_ffs_db[0], sizeof newblks);
memcpy((caddr_t)&oip->i_ffs_db[0], (caddr_t)oldblks, sizeof oldblks);
oip->i_ffs_size = osize;
error = vtruncbuf(ovp, lastblock + 1, 0, 0);
if (error && !allerror)
allerror = error;
/*
* Indirect blocks first.
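* In the buffer cache, indirect blocks live at negative logical block
* numbers: -NDADDR names the single indirect block, and the double and
* triple indirect blocks follow at the values computed below.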
*/
indir_lbn[SINGLE] = -NDADDR;
indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
for (level = TRIPLE; level >= SINGLE; level--) {
bn = ufs_rw32(oip->i_ffs_ib[level], UFS_FSNEEDSWAP(fs));
if (bn != 0) {
error = ffs_indirtrunc(oip, indir_lbn[level],
fsbtodb(fs, bn), lastiblock[level], level, &count);
if (error)
allerror = error;
blocksreleased += count;
if (lastiblock[level] < 0) {
oip->i_ffs_ib[level] = 0;
ffs_blkfree(oip, bn, fs->fs_bsize);
blocksreleased += nblocks;
}
}
if (lastiblock[level] >= 0)
goto done;
}
/*
* All whole direct blocks or frags.
*/
for (i = NDADDR - 1; i > lastblock; i--) {
long bsize;
bn = ufs_rw32(oip->i_ffs_db[i], UFS_FSNEEDSWAP(fs));
if (bn == 0)
continue;
oip->i_ffs_db[i] = 0;
bsize = blksize(fs, oip, i);
ffs_blkfree(oip, bn, bsize);
blocksreleased += btodb(bsize);
}
if (lastblock < 0)
goto done;
/*
* Finally, look for a change in size of the
* last direct block; release any frags.
*/
bn = ufs_rw32(oip->i_ffs_db[lastblock], UFS_FSNEEDSWAP(fs));
if (bn != 0) {
long oldspace, newspace;
/*
* Calculate amount of space we're giving
* back as old block size minus new block size.
*/
oldspace = blksize(fs, oip, lastblock);
oip->i_ffs_size = length;
newspace = blksize(fs, oip, lastblock);
if (newspace == 0)
panic("itrunc: newspace");
if (oldspace - newspace > 0) {
/*
* Block number of space to be free'd is
* the old block # plus the number of frags
* required for the storage we're keeping.
*/
bn += numfrags(fs, newspace);
ffs_blkfree(oip, bn, oldspace - newspace);
blocksreleased += btodb(oldspace - newspace);
}
}
done:
#ifdef DIAGNOSTIC
for (level = SINGLE; level <= TRIPLE; level++)
if (newblks[NDADDR + level] != oip->i_ffs_ib[level])
panic("itrunc1");
for (i = 0; i < NDADDR; i++)
if (newblks[i] != oip->i_ffs_db[i])
panic("itrunc2");
if (length == 0 &&
(!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
panic("itrunc3");
#endif /* DIAGNOSTIC */
/*
* Put back the real size.
*/
oip->i_ffs_size = length;
oip->i_ffs_blocks -= blocksreleased;
if (oip->i_ffs_blocks < 0) /* sanity */
oip->i_ffs_blocks = 0;
lockmgr(&gp->g_glock, LK_RELEASE, NULL);
oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
KASSERT(ovp->v_type != VREG || ovp->v_size == oip->i_ffs_size);
return (allerror);
}
/*
* Release blocks associated with the inode ip and stored in the indirect
* block bn. Blocks are free'd in LIFO order up to (but not including)
* lastbn. If level is greater than SINGLE, the block is an indirect block
* and recursive calls to indirtrunc must be used to cleanse other indirect
* blocks.
*
* NB: triple indirect blocks are untested.
*/
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
struct inode *ip;
ufs_daddr_t lbn, lastbn;
ufs_daddr_t dbn;
int level;
long *countp;
{
int i;
struct buf *bp;
struct fs *fs = ip->i_fs;
ufs_daddr_t *bap;
struct vnode *vp;
ufs_daddr_t *copy = NULL, nb, nlbn, last;
long blkcount, factor;
int nblocks, blocksreleased = 0;
int error = 0, allerror = 0;
/*
* Calculate index in current block of last
* block to be kept. -1 indicates the entire
* block so we need not calculate the index.
*/
factor = 1;
for (i = SINGLE; i < level; i++)
factor *= NINDIR(fs);
last = lastbn;
if (lastbn > 0)
last /= factor;
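/*
 * For example, when truncating within a double indirect block
 * (level == DOUBLE), factor == NINDIR(fs), so "last" selects the
 * entry for the single indirect block that still maps blocks we
 * are keeping.
 */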
nblocks = btodb(fs->fs_bsize);
/*
* Get buffer of block pointers, zero those entries corresponding
* to blocks to be free'd, and update on disk copy first. Since
* double (triple) indirect blocks are processed before single (double)
* indirect blocks, calls to bmap on them will fail. However, we already have
* the on disk address, so we have to set the b_blkno field
* explicitly instead of letting bread do everything for us.
*/
vp = ITOV(ip);
bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
if (bp->b_flags & (B_DONE | B_DELWRI)) {
/* Braces must be here in case trace evaluates to nothing. */
trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn);
} else {
trace(TR_BREADMISS, pack(vp, fs->fs_bsize), lbn);
curproc->p_stats->p_ru.ru_inblock++; /* pay for read */
bp->b_flags |= B_READ;
if (bp->b_bcount > bp->b_bufsize)
panic("ffs_indirtrunc: bad buffer size");
bp->b_blkno = dbn;
VOP_STRATEGY(bp);
error = biowait(bp);
}
if (error) {
brelse(bp);
*countp = 0;
return (error);
}
bap = (ufs_daddr_t *)bp->b_data;
if (lastbn >= 0) {
copy = (ufs_daddr_t *) malloc(fs->fs_bsize, M_TEMP, M_WAITOK);
memcpy((caddr_t)copy, (caddr_t)bap, (u_int)fs->fs_bsize);
memset((caddr_t)&bap[last + 1], 0,
(u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
error = bwrite(bp);
if (error)
allerror = error;
bap = copy;
}
/*
* Recursively free totally unused blocks.
*/
for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
i--, nlbn += factor) {
nb = ufs_rw32(bap[i], UFS_FSNEEDSWAP(fs));
if (nb == 0)
continue;
if (level > SINGLE) {
error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
(ufs_daddr_t)-1, level - 1,
&blkcount);
if (error)
allerror = error;
blocksreleased += blkcount;
}
ffs_blkfree(ip, nb, fs->fs_bsize);
blocksreleased += nblocks;
}
/*
* Recursively free last partial block.
*/
if (level > SINGLE && lastbn >= 0) {
last = lastbn % factor;
nb = ufs_rw32(bap[i], UFS_FSNEEDSWAP(fs));
if (nb != 0) {
error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
last, level - 1, &blkcount);
if (error)
allerror = error;
blocksreleased += blkcount;
}
}
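/*
 * If we worked from a private copy, the partially zeroed buffer has
 * already been written back above, so just free the copy. Otherwise
 * the whole indirect block is being freed; invalidate the buffer so
 * its stale contents are never written out.
 */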
if (copy != NULL) {
FREE(copy, M_TEMP);
} else {
bp->b_flags |= B_INVAL;
brelse(bp);
}
*countp = blocksreleased;
return (allerror);
}