2005-03-29 06:41:05 +04:00
|
|
|
/* $NetBSD: ffs_vfsops.c,v 1.163 2005/03/29 02:41:06 thorpej Exp $ */
|
1994-06-29 10:39:25 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Copyright (c) 1989, 1991, 1993, 1994
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2003-08-07 20:26:28 +04:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
1994-06-08 15:41:58 +04:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1998-03-01 05:20:01 +03:00
|
|
|
* @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
|
1994-06-08 15:41:58 +04:00
|
|
|
*/
|
|
|
|
|
2001-10-30 04:11:53 +03:00
|
|
|
#include <sys/cdefs.h>
|
2005-03-29 06:41:05 +04:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.163 2005/03/29 02:41:06 thorpej Exp $");
|
2001-10-30 04:11:53 +03:00
|
|
|
|
2001-05-30 15:57:16 +04:00
|
|
|
#if defined(_KERNEL_OPT)
|
1998-11-12 22:51:10 +03:00
|
|
|
#include "opt_ffs.h"
|
1998-06-08 08:27:50 +04:00
|
|
|
#include "opt_quota.h"
|
2000-06-16 04:30:15 +04:00
|
|
|
#include "opt_softdep.h"
|
1998-06-09 11:46:31 +04:00
|
|
|
#endif
|
1998-06-08 08:27:50 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/namei.h>
|
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/mount.h>
|
|
|
|
#include <sys/buf.h>
|
1997-01-31 06:05:31 +03:00
|
|
|
#include <sys/device.h>
|
1994-06-08 15:41:58 +04:00
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/disklabel.h>
|
|
|
|
#include <sys/ioctl.h>
|
|
|
|
#include <sys/errno.h>
|
|
|
|
#include <sys/malloc.h>
|
1998-09-01 07:11:08 +04:00
|
|
|
#include <sys/pool.h>
|
1997-07-08 03:37:36 +04:00
|
|
|
#include <sys/lock.h>
|
1998-03-01 05:20:01 +03:00
|
|
|
#include <sys/sysctl.h>
|
2002-09-06 17:18:43 +04:00
|
|
|
#include <sys/conf.h>
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
#include <miscfs/specfs/specdev.h>
|
|
|
|
|
|
|
|
#include <ufs/ufs/quota.h>
|
|
|
|
#include <ufs/ufs/ufsmount.h>
|
|
|
|
#include <ufs/ufs/inode.h>
|
1997-06-11 14:09:37 +04:00
|
|
|
#include <ufs/ufs/dir.h>
|
1994-06-08 15:41:58 +04:00
|
|
|
#include <ufs/ufs/ufs_extern.h>
|
1998-03-18 18:57:26 +03:00
|
|
|
#include <ufs/ufs/ufs_bswap.h>
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
#include <ufs/ffs/fs.h>
|
|
|
|
#include <ufs/ffs/ffs_extern.h>
|
|
|
|
|
2000-03-16 21:20:06 +03:00
|
|
|
/* how many times ffs_init() was called */
int ffs_initcount = 0;

/*
 * Lock declared by the shared UFS code; taken around inode hash
 * operations.  NOTE(review): definition lives in ufs/ufs — confirm
 * exact protection scope there.
 */
extern struct lock ufs_hashlock;

/*
 * Vnode operation vectors for the three vnode flavors FFS serves:
 * regular files/directories, special devices, and fifos.  Defined in
 * ffs_vnops.c; collected into ffs_vnodeopv_descs below.
 */
extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern const struct vnodeopv_desc ffs_specop_opv_desc;
extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
|
1998-02-18 10:05:47 +03:00
|
|
|
|
2001-01-22 15:17:35 +03:00
|
|
|
/*
 * NULL-terminated table of all FFS vnode operation vectors, handed to
 * the VFS layer via the vfs_opv_descs slot of ffs_vfsops below.
 */
const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};
|
|
|
|
|
1995-11-12 01:00:15 +03:00
|
|
|
/*
 * VFS operations vector for FFS.  Positional initialization: the order
 * must match the member order of struct vfsops.
 */
struct vfsops ffs_vfsops = {
	MOUNT_FFS,		/* vfs_name */
	ffs_mount,
	ufs_start,		/* generic UFS start routine */
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statvfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,		/* NFS file handle -> vnode */
	ffs_vptofh,		/* vnode -> NFS file handle */
	ffs_init,
	ffs_reinit,
	ffs_done,
	NULL,			/* vfs_sysctl: unused, sysctl nodes are
				 * registered dynamically with the tree */
	ffs_mountroot,
	ufs_check_export,
	ffs_snapshot,
	vfs_stdextattrctl,	/* no FFS-specific extattr control */
	ffs_vnodeopv_descs,
};

/* Register this filesystem with the kernel's VFS list at link/attach time. */
VFS_ATTACH(ffs_vfsops);
|
1994-06-08 15:41:58 +04:00
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
/*
 * genfs hooks used by the UBC page I/O path for FFS vnodes.
 * Positional initialization must match struct genfs_ops member order.
 */
struct genfs_ops ffs_genfsops = {
	ffs_gop_size,		/* GOP_SIZE: in-core vs. on-disk file size */
	ufs_gop_alloc,		/* GOP_ALLOC: shared between FFS and LFS */
	genfs_gop_write,	/* GOP_WRITE: generic page writeback */
};
|
|
|
|
|
2004-04-25 20:42:40 +04:00
|
|
|
/*
 * Allocation pools: one for in-core inodes and one each for the UFS1
 * and UFS2 on-disk dinode formats.  All use the non-interrupt-safe
 * allocator since they are only touched from thread context.
 */
POOL_INIT(ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
    &pool_allocator_nointr);
POOL_INIT(ffs_dinode1_pool, sizeof(struct ufs1_dinode), 0, 0, 0, "dino1pl",
    &pool_allocator_nointr);
POOL_INIT(ffs_dinode2_pool, sizeof(struct ufs2_dinode), 0, 0, 0, "dino2pl",
    &pool_allocator_nointr);

/* Compatibility fixups applied to old-format superblocks on read/write. */
static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
|
1998-09-01 07:11:08 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
1998-03-01 05:20:01 +03:00
|
|
|
* Called by main() when ffs is going to be mounted as root.
|
1994-06-08 15:41:58 +04:00
|
|
|
*/
|
|
|
|
|
1996-02-10 01:22:18 +03:00
|
|
|
int
|
1994-06-08 15:41:58 +04:00
|
|
|
ffs_mountroot()
|
|
|
|
{
|
1998-03-01 05:20:01 +03:00
|
|
|
struct fs *fs;
|
|
|
|
struct mount *mp;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p = curproc; /* XXX */
|
1994-06-08 15:41:58 +04:00
|
|
|
struct ufsmount *ump;
|
|
|
|
int error;
|
1997-01-31 06:05:31 +03:00
|
|
|
|
|
|
|
if (root_device->dv_class != DV_DISK)
|
|
|
|
return (ENODEV);
|
|
|
|
|
1999-07-17 05:08:28 +04:00
|
|
|
if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
|
|
|
|
vrele(rootvp);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (error);
|
1999-07-17 05:08:28 +04:00
|
|
|
}
|
2003-06-30 02:28:00 +04:00
|
|
|
if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
|
1998-03-01 05:20:01 +03:00
|
|
|
mp->mnt_op->vfs_refcount--;
|
|
|
|
vfs_unbusy(mp);
|
1994-06-08 15:41:58 +04:00
|
|
|
free(mp, M_MOUNT);
|
|
|
|
return (error);
|
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mountlist_slock);
|
1995-01-18 09:19:49 +03:00
|
|
|
CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mountlist_slock);
|
1994-06-08 15:41:58 +04:00
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
fs = ump->um_fs;
|
1998-08-10 00:15:38 +04:00
|
|
|
memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
|
1998-03-01 05:20:01 +03:00
|
|
|
(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
|
2004-04-21 05:05:31 +04:00
|
|
|
(void)ffs_statvfs(mp, &mp->mnt_stat, p);
|
1998-03-01 05:20:01 +03:00
|
|
|
vfs_unbusy(mp);
|
2004-07-05 11:28:45 +04:00
|
|
|
setrootfstime((time_t)fs->fs_time);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* VFS Operations.
|
|
|
|
*
|
|
|
|
* mount system call
|
|
|
|
*/
|
|
|
|
int
|
2003-06-30 02:28:00 +04:00
|
|
|
ffs_mount(mp, path, data, ndp, p)
|
2000-03-30 16:41:09 +04:00
|
|
|
struct mount *mp;
|
1996-12-22 13:10:12 +03:00
|
|
|
const char *path;
|
|
|
|
void *data;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct nameidata *ndp;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:41:58 +04:00
|
|
|
{
|
2002-11-24 14:09:13 +03:00
|
|
|
struct vnode *devvp = NULL;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct ufs_args args;
|
1996-02-10 01:22:18 +03:00
|
|
|
struct ufsmount *ump = NULL;
|
2000-03-30 16:41:09 +04:00
|
|
|
struct fs *fs;
|
2002-04-01 00:53:25 +04:00
|
|
|
int error, flags, update;
|
1994-12-14 16:03:35 +03:00
|
|
|
mode_t accessmode;
|
1994-06-08 15:41:58 +04:00
|
|
|
|
2002-09-21 22:10:34 +04:00
|
|
|
if (mp->mnt_flag & MNT_GETARGS) {
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
if (ump == NULL)
|
|
|
|
return EIO;
|
|
|
|
args.fspec = NULL;
|
|
|
|
vfs_showexport(mp, &args.export, &ump->um_export);
|
|
|
|
return copyout(&args, data, sizeof(args));
|
|
|
|
}
|
2003-03-22 02:11:19 +03:00
|
|
|
error = copyin(data, &args, sizeof (struct ufs_args));
|
1996-02-10 01:22:18 +03:00
|
|
|
if (error)
|
1994-06-08 15:41:58 +04:00
|
|
|
return (error);
|
2000-06-16 04:30:15 +04:00
|
|
|
|
|
|
|
#if !defined(SOFTDEP)
|
2000-06-16 09:45:14 +04:00
|
|
|
mp->mnt_flag &= ~MNT_SOFTDEP;
|
2000-06-16 04:30:15 +04:00
|
|
|
#endif
|
|
|
|
|
2002-04-01 00:53:25 +04:00
|
|
|
update = mp->mnt_flag & MNT_UPDATE;
|
|
|
|
|
|
|
|
/* Check arguments */
|
2002-04-01 11:51:58 +04:00
|
|
|
if (args.fspec != NULL) {
|
2002-04-01 00:53:25 +04:00
|
|
|
/*
|
|
|
|
* Look up the name and verify that it's sane.
|
|
|
|
*/
|
2003-06-30 02:28:00 +04:00
|
|
|
NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
|
2002-04-01 00:53:25 +04:00
|
|
|
if ((error = namei(ndp)) != 0)
|
|
|
|
return (error);
|
|
|
|
devvp = ndp->ni_vp;
|
|
|
|
|
|
|
|
if (!update) {
|
|
|
|
/*
|
|
|
|
* Be sure this is a valid block device
|
|
|
|
*/
|
|
|
|
if (devvp->v_type != VBLK)
|
|
|
|
error = ENOTBLK;
|
2002-09-06 17:18:43 +04:00
|
|
|
else if (bdevsw_lookup(devvp->v_rdev) == NULL)
|
2002-04-01 00:53:25 +04:00
|
|
|
error = ENXIO;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Be sure we're still naming the same device
|
|
|
|
* used for our initial mount
|
|
|
|
*/
|
2005-01-11 03:19:36 +03:00
|
|
|
ump = VFSTOUFS(mp);
|
2002-04-01 00:53:25 +04:00
|
|
|
if (devvp != ump->um_devvp)
|
|
|
|
error = EINVAL;
|
|
|
|
}
|
2005-01-11 03:19:36 +03:00
|
|
|
} else {
|
|
|
|
if (!update) {
|
|
|
|
/* New mounts must have a filename for the device */
|
|
|
|
return (EINVAL);
|
|
|
|
} else {
|
|
|
|
/* Use the extant mount */
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
devvp = ump->um_devvp;
|
|
|
|
vref(devvp);
|
|
|
|
}
|
2002-04-01 00:53:25 +04:00
|
|
|
}
|
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
2002-04-01 00:53:25 +04:00
|
|
|
* If mount by non-root, then verify that user has necessary
|
|
|
|
* permissions on the device.
|
1994-06-08 15:41:58 +04:00
|
|
|
*/
|
2002-04-01 00:53:25 +04:00
|
|
|
if (error == 0 && p->p_ucred->cr_uid != 0) {
|
|
|
|
accessmode = VREAD;
|
2002-04-01 05:52:44 +04:00
|
|
|
if (update ?
|
2003-10-14 18:02:56 +04:00
|
|
|
(mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
|
2002-04-01 05:52:44 +04:00
|
|
|
(mp->mnt_flag & MNT_RDONLY) == 0)
|
2002-04-01 00:53:25 +04:00
|
|
|
accessmode |= VWRITE;
|
|
|
|
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
|
2003-06-30 02:28:00 +04:00
|
|
|
error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
|
2002-04-01 00:53:25 +04:00
|
|
|
VOP_UNLOCK(devvp, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (error) {
|
|
|
|
vrele(devvp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!update) {
|
2005-01-09 06:11:48 +03:00
|
|
|
int flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Disallow multiple mounts of the same device.
|
|
|
|
* Disallow mounting of a device that is currently in use
|
|
|
|
* (except for root, which might share swap device for
|
|
|
|
* miniroot).
|
|
|
|
*/
|
|
|
|
error = vfs_mountedon(devvp);
|
|
|
|
if (error)
|
|
|
|
goto fail;
|
|
|
|
if (vcount(devvp) > 1 && devvp != rootvp) {
|
|
|
|
error = EBUSY;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
if (mp->mnt_flag & MNT_RDONLY)
|
|
|
|
flags = FREAD;
|
|
|
|
else
|
|
|
|
flags = FREAD|FWRITE;
|
|
|
|
error = VOP_OPEN(devvp, flags, FSCRED, p);
|
|
|
|
if (error)
|
|
|
|
goto fail;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = ffs_mountfs(devvp, mp, p);
|
2002-04-01 00:53:25 +04:00
|
|
|
if (error) {
|
2005-01-09 06:11:48 +03:00
|
|
|
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
|
|
|
|
(void)VOP_CLOSE(devvp, flags, NOCRED, p);
|
|
|
|
VOP_UNLOCK(devvp, 0);
|
|
|
|
goto fail;
|
2002-04-01 00:53:25 +04:00
|
|
|
}
|
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
fs = ump->um_fs;
|
2002-04-01 00:53:25 +04:00
|
|
|
if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
|
|
|
|
(MNT_SOFTDEP | MNT_ASYNC)) {
|
|
|
|
printf("%s fs uses soft updates, "
|
2002-04-01 05:52:44 +04:00
|
|
|
"ignoring async mode\n",
|
|
|
|
fs->fs_fsmnt);
|
2002-04-01 00:53:25 +04:00
|
|
|
mp->mnt_flag &= ~MNT_ASYNC;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
2002-04-01 05:52:44 +04:00
|
|
|
* Update the mount.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The initial mount got a reference on this
|
|
|
|
* device, so drop the one obtained via
|
|
|
|
* namei(), above.
|
2002-04-01 00:53:25 +04:00
|
|
|
*/
|
2002-04-01 05:52:44 +04:00
|
|
|
vrele(devvp);
|
|
|
|
|
2005-01-11 03:19:36 +03:00
|
|
|
ump = VFSTOUFS(mp);
|
2002-04-01 00:53:25 +04:00
|
|
|
fs = ump->um_fs;
|
1994-06-08 15:41:58 +04:00
|
|
|
if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
|
2002-04-01 00:53:25 +04:00
|
|
|
/*
|
|
|
|
* Changing from r/w to r/o
|
|
|
|
*/
|
2003-11-05 13:18:38 +03:00
|
|
|
vn_start_write(NULL, &mp, V_WAIT);
|
1994-06-08 15:41:58 +04:00
|
|
|
flags = WRITECLOSE;
|
|
|
|
if (mp->mnt_flag & MNT_FORCE)
|
|
|
|
flags |= FORCECLOSE;
|
1999-11-15 21:49:07 +03:00
|
|
|
if (mp->mnt_flag & MNT_SOFTDEP)
|
2003-06-30 02:28:00 +04:00
|
|
|
error = softdep_flushfiles(mp, flags, p);
|
1999-11-15 21:49:07 +03:00
|
|
|
else
|
2003-06-30 02:28:00 +04:00
|
|
|
error = ffs_flushfiles(mp, flags, p);
|
2001-12-18 13:57:21 +03:00
|
|
|
if (fs->fs_pendingblocks != 0 ||
|
|
|
|
fs->fs_pendinginodes != 0) {
|
2003-04-02 14:39:19 +04:00
|
|
|
printf("%s: update error: blocks %" PRId64
|
|
|
|
" files %d\n",
|
2001-12-18 13:57:21 +03:00
|
|
|
fs->fs_fsmnt, fs->fs_pendingblocks,
|
|
|
|
fs->fs_pendinginodes);
|
|
|
|
fs->fs_pendingblocks = 0;
|
|
|
|
fs->fs_pendinginodes = 0;
|
|
|
|
}
|
1995-04-13 01:21:00 +04:00
|
|
|
if (error == 0 &&
|
|
|
|
ffs_cgupdate(ump, MNT_WAIT) == 0 &&
|
|
|
|
fs->fs_clean & FS_WASCLEAN) {
|
2000-06-16 02:35:37 +04:00
|
|
|
if (mp->mnt_flag & MNT_SOFTDEP)
|
|
|
|
fs->fs_flags &= ~FS_DOSOFTDEP;
|
1995-04-13 01:21:00 +04:00
|
|
|
fs->fs_clean = FS_ISCLEAN;
|
|
|
|
(void) ffs_sbupdate(ump, MNT_WAIT);
|
|
|
|
}
|
2003-10-15 15:28:59 +04:00
|
|
|
vn_finished_write(mp, 0);
|
1995-04-13 01:21:00 +04:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
fs->fs_ronly = 1;
|
2001-01-10 20:49:18 +03:00
|
|
|
fs->fs_fmod = 0;
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
2000-06-16 02:35:37 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush soft dependencies if disabling it via an update
|
|
|
|
* mount. This may leave some items to be processed,
|
|
|
|
* so don't do this yet XXX.
|
|
|
|
*/
|
|
|
|
if ((fs->fs_flags & FS_DOSOFTDEP) &&
|
|
|
|
!(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
|
|
|
|
#ifdef notyet
|
2003-11-05 13:18:38 +03:00
|
|
|
vn_start_write(NULL, &mp, V_WAIT);
|
2000-06-16 02:35:37 +04:00
|
|
|
flags = WRITECLOSE;
|
|
|
|
if (mp->mnt_flag & MNT_FORCE)
|
|
|
|
flags |= FORCECLOSE;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = softdep_flushfiles(mp, flags, p);
|
2000-06-16 02:35:37 +04:00
|
|
|
if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
|
|
|
|
fs->fs_flags &= ~FS_DOSOFTDEP;
|
|
|
|
(void) ffs_sbupdate(ump, MNT_WAIT);
|
2003-10-15 15:28:59 +04:00
|
|
|
vn_finished_write(mp);
|
2000-06-16 04:30:15 +04:00
|
|
|
#elif defined(SOFTDEP)
|
2000-06-16 02:35:37 +04:00
|
|
|
mp->mnt_flag |= MNT_SOFTDEP;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When upgrading to a softdep mount, we must first flush
|
|
|
|
* all vnodes. (not done yet -- see above)
|
|
|
|
*/
|
|
|
|
if (!(fs->fs_flags & FS_DOSOFTDEP) &&
|
|
|
|
(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
|
|
|
|
#ifdef notyet
|
2003-11-05 13:18:38 +03:00
|
|
|
vn_start_write(NULL, &mp, V_WAIT);
|
2000-06-16 02:35:37 +04:00
|
|
|
flags = WRITECLOSE;
|
|
|
|
if (mp->mnt_flag & MNT_FORCE)
|
|
|
|
flags |= FORCECLOSE;
|
2003-06-30 02:28:00 +04:00
|
|
|
error = ffs_flushfiles(mp, flags, p);
|
2003-10-15 15:28:59 +04:00
|
|
|
vn_finished_write(mp);
|
2000-06-16 02:35:37 +04:00
|
|
|
#else
|
|
|
|
mp->mnt_flag &= ~MNT_SOFTDEP;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
1995-04-13 01:21:00 +04:00
|
|
|
if (mp->mnt_flag & MNT_RELOAD) {
|
2003-06-30 02:28:00 +04:00
|
|
|
error = ffs_reload(mp, p->p_ucred, p);
|
1995-04-13 01:21:00 +04:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
2002-04-01 00:53:25 +04:00
|
|
|
|
2003-10-14 18:02:56 +04:00
|
|
|
if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
|
1994-12-14 16:03:35 +03:00
|
|
|
/*
|
2002-04-01 00:53:25 +04:00
|
|
|
* Changing from read-only to read/write
|
1994-12-14 16:03:35 +03:00
|
|
|
*/
|
1994-06-08 15:41:58 +04:00
|
|
|
fs->fs_ronly = 0;
|
1995-04-13 01:21:00 +04:00
|
|
|
fs->fs_clean <<= 1;
|
|
|
|
fs->fs_fmod = 1;
|
1999-11-15 21:49:07 +03:00
|
|
|
if ((fs->fs_flags & FS_DOSOFTDEP)) {
|
|
|
|
error = softdep_mount(devvp, mp, fs,
|
|
|
|
p->p_ucred);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
2000-06-16 02:35:37 +04:00
|
|
|
}
|
2004-05-25 18:54:55 +04:00
|
|
|
if (fs->fs_snapinum[0] != 0)
|
|
|
|
ffs_snapshot_mount(mp);
|
1994-12-14 16:03:35 +03:00
|
|
|
}
|
1994-06-08 15:41:58 +04:00
|
|
|
if (args.fspec == 0) {
|
|
|
|
/*
|
|
|
|
* Process export requests.
|
|
|
|
*/
|
|
|
|
return (vfs_export(mp, &ump->um_export, &args.export));
|
|
|
|
}
|
1999-11-15 21:49:07 +03:00
|
|
|
if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
|
|
|
|
(MNT_SOFTDEP | MNT_ASYNC)) {
|
|
|
|
printf("%s fs uses soft updates, ignoring async mode\n",
|
|
|
|
fs->fs_fsmnt);
|
|
|
|
mp->mnt_flag &= ~MNT_ASYNC;
|
|
|
|
}
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
|
|
|
|
2004-04-21 05:05:31 +04:00
|
|
|
error = set_statvfs_info(path, UIO_USERSPACE, args.fspec,
|
2003-06-30 02:28:00 +04:00
|
|
|
UIO_USERSPACE, mp, p);
|
2003-05-03 20:24:35 +04:00
|
|
|
if (error == 0)
|
|
|
|
(void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
|
|
|
|
sizeof(fs->fs_fsmnt));
|
2000-06-16 02:35:37 +04:00
|
|
|
if (mp->mnt_flag & MNT_SOFTDEP)
|
|
|
|
fs->fs_flags |= FS_DOSOFTDEP;
|
2000-12-03 22:52:06 +03:00
|
|
|
else
|
|
|
|
fs->fs_flags &= ~FS_DOSOFTDEP;
|
1995-04-13 01:21:00 +04:00
|
|
|
if (fs->fs_fmod != 0) { /* XXX */
|
|
|
|
fs->fs_fmod = 0;
|
|
|
|
if (fs->fs_clean & FS_WASCLEAN)
|
|
|
|
fs->fs_time = time.tv_sec;
|
2001-12-18 13:57:21 +03:00
|
|
|
else {
|
2001-07-26 11:58:55 +04:00
|
|
|
printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
|
1998-03-18 18:57:26 +03:00
|
|
|
mp->mnt_stat.f_mntfromname, fs->fs_clean);
|
2003-04-02 14:39:19 +04:00
|
|
|
printf("%s: lost blocks %" PRId64 " files %d\n",
|
2001-12-18 13:57:21 +03:00
|
|
|
mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
|
|
|
|
fs->fs_pendinginodes);
|
|
|
|
}
|
1995-04-13 01:21:00 +04:00
|
|
|
(void) ffs_cgupdate(ump, MNT_WAIT);
|
|
|
|
}
|
2005-01-09 06:11:48 +03:00
|
|
|
return (error);
|
|
|
|
|
|
|
|
fail:
|
|
|
|
vrele(devvp);
|
|
|
|
return (error);
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reload all incore data for a filesystem (used after running fsck on
|
|
|
|
* the root filesystem and finding things to fix). The filesystem must
|
|
|
|
* be mounted read-only.
|
|
|
|
*
|
|
|
|
* Things to do to update the mount:
|
|
|
|
* 1) invalidate all cached meta-data.
|
|
|
|
* 2) re-read superblock from disk.
|
|
|
|
* 3) re-read summary information from disk.
|
|
|
|
* 4) invalidate all inactive vnodes.
|
|
|
|
* 5) invalidate all cached file data.
|
|
|
|
* 6) re-read inode data for all active vnodes.
|
|
|
|
*/
|
1996-02-10 01:22:18 +03:00
|
|
|
int
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ffs_reload(mp, cred, p)
|
|
|
|
struct mount *mp;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct ucred *cred;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:41:58 +04:00
|
|
|
{
|
2000-03-30 16:41:09 +04:00
|
|
|
struct vnode *vp, *nvp, *devvp;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct inode *ip;
|
2001-09-02 05:58:30 +04:00
|
|
|
void *space;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct buf *bp;
|
1995-12-20 02:27:53 +03:00
|
|
|
struct fs *fs, *newfs;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct partinfo dpart;
|
|
|
|
int i, blks, size, error;
|
1995-12-20 02:27:53 +03:00
|
|
|
int32_t *lp;
|
2003-04-05 17:37:36 +04:00
|
|
|
struct ufsmount *ump;
|
2004-04-18 07:30:23 +04:00
|
|
|
daddr_t sblockloc;
|
1994-06-08 15:41:58 +04:00
|
|
|
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
if ((mp->mnt_flag & MNT_RDONLY) == 0)
|
1994-06-08 15:41:58 +04:00
|
|
|
return (EINVAL);
|
2003-04-05 17:37:36 +04:00
|
|
|
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ump = VFSTOUFS(mp);
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Step 1: invalidate all cached meta-data.
|
|
|
|
*/
|
2003-04-05 17:37:36 +04:00
|
|
|
devvp = ump->um_devvp;
|
1999-11-15 21:49:07 +03:00
|
|
|
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
|
2003-06-30 02:28:00 +04:00
|
|
|
error = vinvalbuf(devvp, 0, cred, p, 0, 0);
|
1999-11-15 21:49:07 +03:00
|
|
|
VOP_UNLOCK(devvp, 0);
|
|
|
|
if (error)
|
1994-06-08 15:41:58 +04:00
|
|
|
panic("ffs_reload: dirty1");
|
|
|
|
/*
|
|
|
|
* Step 2: re-read superblock from disk.
|
|
|
|
*/
|
2003-04-05 17:37:36 +04:00
|
|
|
fs = ump->um_fs;
|
2003-06-30 02:28:00 +04:00
|
|
|
if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED, p) != 0)
|
1994-06-08 15:41:58 +04:00
|
|
|
size = DEV_BSIZE;
|
|
|
|
else
|
|
|
|
size = dpart.disklab->d_secsize;
|
2004-04-18 07:30:23 +04:00
|
|
|
/* XXX we don't handle possibility that superblock moved. */
|
2003-04-02 14:39:19 +04:00
|
|
|
error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
|
|
|
|
NOCRED, &bp);
|
1999-02-10 16:14:08 +03:00
|
|
|
if (error) {
|
|
|
|
brelse(bp);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (error);
|
1999-02-10 16:14:08 +03:00
|
|
|
}
|
1998-03-18 18:57:26 +03:00
|
|
|
newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
|
1998-08-10 00:15:38 +04:00
|
|
|
memcpy(newfs, bp->b_data, fs->fs_sbsize);
|
1998-03-18 18:57:26 +03:00
|
|
|
#ifdef FFS_EI
|
2003-04-05 17:37:36 +04:00
|
|
|
if (ump->um_flags & UFS_NEEDSWAP) {
|
2001-08-17 06:18:46 +04:00
|
|
|
ffs_sb_swap((struct fs*)bp->b_data, newfs);
|
1999-11-15 21:49:07 +03:00
|
|
|
fs->fs_flags |= FS_SWAPPED;
|
2003-09-13 18:09:15 +04:00
|
|
|
} else
|
1998-03-18 18:57:26 +03:00
|
|
|
#endif
|
2003-09-13 18:09:15 +04:00
|
|
|
fs->fs_flags &= ~FS_SWAPPED;
|
2005-02-27 01:31:44 +03:00
|
|
|
if ((newfs->fs_magic != FS_UFS1_MAGIC &&
|
2003-04-02 14:39:19 +04:00
|
|
|
newfs->fs_magic != FS_UFS2_MAGIC)||
|
|
|
|
newfs->fs_bsize > MAXBSIZE ||
|
|
|
|
newfs->fs_bsize < sizeof(struct fs)) {
|
1994-06-08 15:41:58 +04:00
|
|
|
brelse(bp);
|
1998-03-18 18:57:26 +03:00
|
|
|
free(newfs, M_UFSMNT);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (EIO); /* XXX needs translation */
|
|
|
|
}
|
2004-04-18 07:30:23 +04:00
|
|
|
/* Store off old fs_sblockloc for fs_oldfscompat_read. */
|
|
|
|
sblockloc = fs->fs_sblockloc;
|
2005-02-27 01:31:44 +03:00
|
|
|
/*
|
1995-12-20 02:27:53 +03:00
|
|
|
* Copy pointer fields back into superblock before copying in XXX
|
|
|
|
* new superblock. These should really be in the ufsmount. XXX
|
|
|
|
* Note that important parameters (eg fs_ncg) are unchanged.
|
|
|
|
*/
|
2001-09-02 05:58:30 +04:00
|
|
|
newfs->fs_csp = fs->fs_csp;
|
1995-12-20 02:27:53 +03:00
|
|
|
newfs->fs_maxcluster = fs->fs_maxcluster;
|
Incorporate the enhanced ffs_dirpref() by Grigoriy Orlov, as found in
FreeBSD (three commits; the initial work, man page updates, and a fix
to ffs_reload()), with the following differences:
- Be consistent between newfs(8) and tunefs(8) as to the options which
set and control the tuning parameters for this work (avgfilesize & avgfpdir)
- Use u_int16_t instead of u_int8_t to keep track of the number of
contiguous directories (suggested by Chuck Silvers)
- Work within our FFS_EI framework
- Ensure that fs->fs_maxclusters and fs->fs_contigdirs don't point to
the same area of memory
The new algorithm has a marked performance increase, especially when
performing tasks such as untarring pkgsrc.tar.gz, etc.
The original FreeBSD commit messages are attached:
=====
mckusick 2001/04/10 01:39:00 PDT
Directory layout preference improvements from Grigoriy Orlov <gluk@ptci.ru>.
His description of the problem and solution follow. My own tests show
speedups on typical filesystem intensive workloads of 5% to 12% which
is very impressive considering the small amount of code change involved.
------
One day I noticed that some file operations run much faster on
small file systems than on big ones. I've looked at the ffs
algorithms, thought about them, and redesigned the dirpref algorithm.
First I want to describe the results of my tests. These results are old
and I have improved the algorithm after these tests were done. Nevertheless
they show how big the performance speedup may be. I have done two file/directory
intensive tests on two OpenBSD systems with old and new dirpref algorithm.
The first test is "tar -xzf ports.tar.gz", the second is "rm -rf ports".
The ports.tar.gz file is the ports collection from the OpenBSD 2.8 release.
It contains 6596 directories and 13868 files. The test systems are:
1. Celeron-450, 128Mb, two IDE drives, the system at wd0, file system for
test is at wd1. Size of test file system is 8 Gb, number of cg=991,
size of cg is 8m, block size = 8k, fragment size = 1k OpenBSD-current
from Dec 2000 with BUFCACHEPERCENT=35
2. PIII-600, 128Mb, two IBM DTLA-307045 IDE drives at i815e, the system
at wd0, file system for test is at wd1. Size of test file system is 40 Gb,
number of cg=5324, size of cg is 8m, block size = 8k, fragment size = 1k
OpenBSD-current from Dec 2000 with BUFCACHEPERCENT=50
You can get more info about the test systems and methods at:
http://www.ptci.ru/gluk/dirpref/old/dirpref.html
Test Results
tar -xzf ports.tar.gz rm -rf ports
mode old dirpref new dirpref speedup old dirprefnew dirpref speedup
First system
normal 667 472 1.41 477 331 1.44
async 285 144 1.98 130 14 9.29
sync 768 616 1.25 477 334 1.43
softdep 413 252 1.64 241 38 6.34
Second system
normal 329 81 4.06 263.5 93.5 2.81
async 302 25.7 11.75 112 2.26 49.56
sync 281 57.0 4.93 263 90.5 2.9
softdep 341 40.6 8.4 284 4.76 59.66
"old dirpref" and "new dirpref" columns give a test time in seconds.
speedup - speed increase as a ratio, i.e. old dirpref / new dirpref.
------
Algorithm description
The old dirpref algorithm is described in comments:
/*
* Find a cylinder to place a directory.
*
* The policy implemented by this algorithm is to select from
* among those cylinder groups with above the average number of
* free inodes, the one with the smallest number of directories.
*/
A new directory is allocated in a different cylinder groups than its
parent directory resulting in a directory tree that is spread across
all the cylinder groups. This spreading out results in a non-optimal
access to the directories and files. When we have a small filesystem
it is not a problem but when the filesystem is big then performance
degradation becomes very apparent.
What I mean by a big file system ?
1. A big filesystem is a filesystem which occupy 20-30 or more percent
of total drive space, i.e. first and last cylinder are physically
located relatively far from each other.
2. It has a relatively large number of cylinder groups, for example
more cylinder groups than 50% of the buffers in the buffer cache.
The first results in long access times, while the second results in
many buffers being used by metadata operations. Such operations use
cylinder group blocks and on-disk inode blocks. The cylinder group
block (fs->fs_cblkno) contains struct cg, inode and block bit maps.
It is 2k in size for the default filesystem parameters. If new and
parent directories are located in different cylinder groups then the
system performs more input/output operations and uses more buffers.
On filesystems with many cylinder groups, lots of cache buffers are
used for metadata operations.
My solution for this problem is very simple. I allocate many directories
in one cylinder group. I also do some things, so that the new allocation
method does not cause excessive fragmentation and all directory inodes
will not be located at a location far from its file's inodes and data.
The algorithm is:
/*
* Find a cylinder group to place a directory.
*
* The policy implemented by this algorithm is to allocate a
* directory inode in the same cylinder group as its parent
* directory, but also to reserve space for its files inodes
* and data. Restrict the number of directories which may be
* allocated one after another in the same cylinder group
* without intervening allocation of files.
*
* If we allocate a first level directory then force allocation
* in another cylinder group.
*/
My early versions of dirpref gave me good results for a wide range of
file operations and different filesystem capacities except one case:
those applications that create their entire directory structure first
and only later fill this structure with files.
My solution for such and similar cases is to limit a number of
directories which may be created one after another in the same cylinder
group without intervening file creations. For this purpose, I allocate
an array of counters at mount time. This array is linked to the superblock
fs->fs_contigdirs[cg]. Each time a directory is created the counter
increases and each time a file is created the counter decreases. A 60Gb
filesystem with 8mb/cg requires 10kb of memory for the counters array.
The maxcontigdirs is a maximum number of directories which may be created
without an intervening file creation. I found in my tests that the best
performance occurs when I restrict the number of directories in one cylinder
group such that all its files may be located in the same cylinder group.
There may be some deterioration in performance if all the file inodes
are in the same cylinder group as its containing directory, but their
data partially resides in a different cylinder group. The maxcontigdirs
value is calculated to try to prevent this condition. Since there is
no way to know how many files and directories will be allocated later
I added two optimization parameters in superblock/tunefs. They are:
int32_t fs_avgfilesize; /* expected average file size */
int32_t fs_avgfpdir; /* expected # of files per directory */
These parameters have reasonable defaults but may be tweaked for special
uses of a filesystem. They are only necessary in rare cases like better
tuning a filesystem being used to store a squid cache.
I have been using this algorithm for about 3 months. I have done
a lot of testing on filesystems with different capacities, average
filesize, average number of files per directory, and so on. I think
this algorithm has no negative impact on filesystem performance. It
works better than the default one in all cases. The new dirpref
will greatly improve untarring/removing/copying of big directories,
decrease load on cvs servers and much more. The new dirpref doesn't
speedup a compilation process, but also doesn't slow it down.
Obtained from: Grigoriy Orlov <gluk@ptci.ru>
=====
=====
iedowse 2001/04/23 17:37:17 PDT
Pre-dirpref versions of fsck may zero out the new superblock fields
fs_contigdirs, fs_avgfilesize and fs_avgfpdir. This could cause
panics if these fields were zeroed while a filesystem was mounted
read-only, and then remounted read-write.
Add code to ffs_reload() which copies the fs_contigdirs pointer
from the previous superblock, and reinitialises fs_avgf* if necessary.
Reviewed by: mckusick
=====
=====
nik 2001/04/10 03:36:44 PDT
Add information about the new options to newfs and tunefs which set the
expected average file size and number of files per directory. Could do
with some fleshing out.
=====
2001-09-06 06:16:00 +04:00
|
|
|
newfs->fs_contigdirs = fs->fs_contigdirs;
|
2001-01-09 13:44:19 +03:00
|
|
|
newfs->fs_ronly = fs->fs_ronly;
|
2003-04-02 14:39:19 +04:00
|
|
|
newfs->fs_active = fs->fs_active;
|
1998-08-10 00:15:38 +04:00
|
|
|
memcpy(fs, newfs, (u_int)fs->fs_sbsize);
|
1994-06-08 15:41:58 +04:00
|
|
|
brelse(bp);
|
1998-03-18 18:57:26 +03:00
|
|
|
free(newfs, M_UFSMNT);
|
2002-09-29 00:11:05 +04:00
|
|
|
|
|
|
|
/* Recheck for apple UFS filesystem */
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ump->um_flags &= ~UFS_ISAPPLEUFS;
|
2002-09-29 00:11:05 +04:00
|
|
|
/* First check to see if this is tagged as an Apple UFS filesystem
|
|
|
|
* in the disklabel
|
|
|
|
*/
|
2003-06-30 02:28:00 +04:00
|
|
|
if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, p) == 0) &&
|
2002-09-29 00:11:05 +04:00
|
|
|
(dpart.part->p_fstype == FS_APPLEUFS)) {
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ump->um_flags |= UFS_ISAPPLEUFS;
|
2002-09-29 00:11:05 +04:00
|
|
|
}
|
|
|
|
#ifdef APPLE_UFS
|
|
|
|
else {
|
|
|
|
/* Manually look for an apple ufs label, and if a valid one
|
|
|
|
* is found, then treat it like an Apple UFS filesystem anyway
|
|
|
|
*/
|
2003-01-25 00:55:02 +03:00
|
|
|
error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
|
2002-09-29 00:11:05 +04:00
|
|
|
APPLEUFS_LABEL_SIZE, cred, &bp);
|
|
|
|
if (error) {
|
|
|
|
brelse(bp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
error = ffs_appleufs_validate(fs->fs_fsmnt,
|
|
|
|
(struct appleufslabel *)bp->b_data,NULL);
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
if (error == 0)
|
|
|
|
ump->um_flags |= UFS_ISAPPLEUFS;
|
2002-09-29 00:11:05 +04:00
|
|
|
brelse(bp);
|
|
|
|
bp = NULL;
|
|
|
|
}
|
|
|
|
#else
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
if (ump->um_flags & UFS_ISAPPLEUFS)
|
2002-09-29 00:11:05 +04:00
|
|
|
return (EIO);
|
|
|
|
#endif
|
|
|
|
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
if (UFS_MPISAPPLEUFS(ump)) {
|
2002-09-29 00:11:05 +04:00
|
|
|
/* see comment about NeXT below */
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
|
|
|
|
ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
|
|
|
|
mp->mnt_iflag |= IMNT_DTYPE;
|
|
|
|
} else {
|
|
|
|
ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
|
|
|
|
ump->um_dirblksiz = DIRBLKSIZ;
|
|
|
|
if (ump->um_maxsymlinklen > 0)
|
|
|
|
mp->mnt_iflag |= IMNT_DTYPE;
|
|
|
|
else
|
|
|
|
mp->mnt_iflag &= ~IMNT_DTYPE;
|
2002-09-29 00:11:05 +04:00
|
|
|
}
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ffs_oldfscompat_read(fs, ump, sblockloc);
|
2004-09-19 15:58:29 +04:00
|
|
|
ump->um_maxfilesize = fs->fs_maxfilesize;
|
2001-12-18 13:57:21 +03:00
|
|
|
if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
|
|
|
|
fs->fs_pendingblocks = 0;
|
|
|
|
fs->fs_pendinginodes = 0;
|
|
|
|
}
|
Incorporate the enhanced ffs_dirpref() by Grigoriy Orlov, as found in
FreeBSD (three commits; the initial work, man page updates, and a fix
to ffs_reload()), with the following differences:
- Be consistent between newfs(8) and tunefs(8) as to the options which
set and control the tuning parameters for this work (avgfilesize & avgfpdir)
- Use u_int16_t instead of u_int8_t to keep track of the number of
contiguous directories (suggested by Chuck Silvers)
- Work within our FFS_EI framework
- Ensure that fs->fs_maxclusters and fs->fs_contigdirs don't point to
the same area of memory
The new algorithm has a marked performance increase, especially when
performing tasks such as untarring pkgsrc.tar.gz, etc.
The original FreeBSD commit messages are attached:
=====
mckusick 2001/04/10 01:39:00 PDT
Directory layout preference improvements from Grigoriy Orlov <gluk@ptci.ru>.
His description of the problem and solution follow. My own tests show
speedups on typical filesystem intensive workloads of 5% to 12% which
is very impressive considering the small amount of code change involved.
------
One day I noticed that some file operations run much faster on
small file systems than on big ones. I've looked at the ffs
algorithms, thought about them, and redesigned the dirpref algorithm.
First I want to describe the results of my tests. These results are old
and I have improved the algorithm after these tests were done. Nevertheless
they show how big the performance speedup may be. I have done two file/directory
intensive tests on two OpenBSD systems with old and new dirpref algorithm.
The first test is "tar -xzf ports.tar.gz", the second is "rm -rf ports".
The ports.tar.gz file is the ports collection from the OpenBSD 2.8 release.
It contains 6596 directories and 13868 files. The test systems are:
1. Celeron-450, 128Mb, two IDE drives, the system at wd0, file system for
test is at wd1. Size of test file system is 8 Gb, number of cg=991,
size of cg is 8m, block size = 8k, fragment size = 1k OpenBSD-current
from Dec 2000 with BUFCACHEPERCENT=35
2. PIII-600, 128Mb, two IBM DTLA-307045 IDE drives at i815e, the system
at wd0, file system for test is at wd1. Size of test file system is 40 Gb,
number of cg=5324, size of cg is 8m, block size = 8k, fragment size = 1k
OpenBSD-current from Dec 2000 with BUFCACHEPERCENT=50
You can get more info about the test systems and methods at:
http://www.ptci.ru/gluk/dirpref/old/dirpref.html
Test Results
tar -xzf ports.tar.gz rm -rf ports
mode old dirpref new dirpref speedup old dirprefnew dirpref speedup
First system
normal 667 472 1.41 477 331 1.44
async 285 144 1.98 130 14 9.29
sync 768 616 1.25 477 334 1.43
softdep 413 252 1.64 241 38 6.34
Second system
normal 329 81 4.06 263.5 93.5 2.81
async 302 25.7 11.75 112 2.26 49.56
sync 281 57.0 4.93 263 90.5 2.9
softdep 341 40.6 8.4 284 4.76 59.66
"old dirpref" and "new dirpref" columns give a test time in seconds.
speedup - speed increase as a ratio, i.e. old dirpref / new dirpref.
------
Algorithm description
The old dirpref algorithm is described in comments:
/*
* Find a cylinder to place a directory.
*
* The policy implemented by this algorithm is to select from
* among those cylinder groups with above the average number of
* free inodes, the one with the smallest number of directories.
*/
A new directory is allocated in a different cylinder groups than its
parent directory resulting in a directory tree that is spread across
all the cylinder groups. This spreading out results in a non-optimal
access to the directories and files. When we have a small filesystem
it is not a problem but when the filesystem is big then performance
degradation becomes very apparent.
What I mean by a big file system ?
1. A big filesystem is a filesystem which occupy 20-30 or more percent
of total drive space, i.e. first and last cylinder are physically
located relatively far from each other.
2. It has a relatively large number of cylinder groups, for example
more cylinder groups than 50% of the buffers in the buffer cache.
The first results in long access times, while the second results in
many buffers being used by metadata operations. Such operations use
cylinder group blocks and on-disk inode blocks. The cylinder group
block (fs->fs_cblkno) contains struct cg, inode and block bit maps.
It is 2k in size for the default filesystem parameters. If new and
parent directories are located in different cylinder groups then the
system performs more input/output operations and uses more buffers.
On filesystems with many cylinder groups, lots of cache buffers are
used for metadata operations.
My solution for this problem is very simple. I allocate many directories
in one cylinder group. I also do some things, so that the new allocation
method does not cause excessive fragmentation and all directory inodes
will not be located at a location far from its file's inodes and data.
The algorithm is:
/*
* Find a cylinder group to place a directory.
*
* The policy implemented by this algorithm is to allocate a
* directory inode in the same cylinder group as its parent
* directory, but also to reserve space for its files inodes
* and data. Restrict the number of directories which may be
* allocated one after another in the same cylinder group
* without intervening allocation of files.
*
* If we allocate a first level directory then force allocation
* in another cylinder group.
*/
My early versions of dirpref gave me good results for a wide range of
file operations and different filesystem capacities except one case:
those applications that create their entire directory structure first
and only later fill this structure with files.
My solution for such and similar cases is to limit a number of
directories which may be created one after another in the same cylinder
group without intervening file creations. For this purpose, I allocate
an array of counters at mount time. This array is linked to the superblock
fs->fs_contigdirs[cg]. Each time a directory is created the counter
increases and each time a file is created the counter decreases. A 60Gb
filesystem with 8mb/cg requires 10kb of memory for the counters array.
The maxcontigdirs is a maximum number of directories which may be created
without an intervening file creation. I found in my tests that the best
performance occurs when I restrict the number of directories in one cylinder
group such that all its files may be located in the same cylinder group.
There may be some deterioration in performance if all the file inodes
are in the same cylinder group as its containing directory, but their
data partially resides in a different cylinder group. The maxcontigdirs
value is calculated to try to prevent this condition. Since there is
no way to know how many files and directories will be allocated later
I added two optimization parameters in superblock/tunefs. They are:
int32_t fs_avgfilesize; /* expected average file size */
int32_t fs_avgfpdir; /* expected # of files per directory */
These parameters have reasonable defaults but may be tweaked for special
uses of a filesystem. They are only necessary in rare cases like better
tuning a filesystem being used to store a squid cache.
I have been using this algorithm for about 3 months. I have done
a lot of testing on filesystems with different capacities, average
filesize, average number of files per directory, and so on. I think
this algorithm has no negative impact on filesystem performance. It
works better than the default one in all cases. The new dirpref
will greatly improve untarring/removing/copying of big directories,
decrease load on cvs servers and much more. The new dirpref doesn't
speedup a compilation process, but also doesn't slow it down.
Obtained from: Grigoriy Orlov <gluk@ptci.ru>
=====
=====
iedowse 2001/04/23 17:37:17 PDT
Pre-dirpref versions of fsck may zero out the new superblock fields
fs_contigdirs, fs_avgfilesize and fs_avgfpdir. This could cause
panics if these fields were zeroed while a filesystem was mounted
read-only, and then remounted read-write.
Add code to ffs_reload() which copies the fs_contigdirs pointer
from the previous superblock, and reinitialises fs_avgf* if necessary.
Reviewed by: mckusick
=====
=====
nik 2001/04/10 03:36:44 PDT
Add information about the new options to newfs and tunefs which set the
expected average file size and number of files per directory. Could do
with some fleshing out.
=====
2001-09-06 06:16:00 +04:00
|
|
|
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ffs_statvfs(mp, &mp->mnt_stat, p);
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Step 3: re-read summary information from disk.
|
|
|
|
*/
|
|
|
|
blks = howmany(fs->fs_cssize, fs->fs_fsize);
|
2001-09-02 05:58:30 +04:00
|
|
|
space = fs->fs_csp;
|
1994-06-08 15:41:58 +04:00
|
|
|
for (i = 0; i < blks; i += fs->fs_frag) {
|
|
|
|
size = fs->fs_bsize;
|
|
|
|
if (i + fs->fs_frag > blks)
|
|
|
|
size = (blks - i) * fs->fs_fsize;
|
1996-02-10 01:22:18 +03:00
|
|
|
error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
|
|
|
|
NOCRED, &bp);
|
1999-02-10 16:14:08 +03:00
|
|
|
if (error) {
|
|
|
|
brelse(bp);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (error);
|
1999-02-10 16:14:08 +03:00
|
|
|
}
|
1998-03-18 18:57:26 +03:00
|
|
|
#ifdef FFS_EI
|
1999-11-15 21:49:07 +03:00
|
|
|
if (UFS_FSNEEDSWAP(fs))
|
2001-09-02 05:58:30 +04:00
|
|
|
ffs_csum_swap((struct csum *)bp->b_data,
|
|
|
|
(struct csum *)space, size);
|
1998-03-18 18:57:26 +03:00
|
|
|
else
|
|
|
|
#endif
|
2001-09-02 05:58:30 +04:00
|
|
|
memcpy(space, bp->b_data, (size_t)size);
|
|
|
|
space = (char *)space + size;
|
1994-06-08 15:41:58 +04:00
|
|
|
brelse(bp);
|
|
|
|
}
|
1999-11-15 21:49:07 +03:00
|
|
|
if ((fs->fs_flags & FS_DOSOFTDEP))
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
softdep_mount(devvp, mp, fs, cred);
|
2004-05-25 18:54:55 +04:00
|
|
|
if (fs->fs_snapinum[0] != 0)
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ffs_snapshot_mount(mp);
|
1995-12-20 02:27:53 +03:00
|
|
|
/*
|
|
|
|
* We no longer know anything about clusters per cylinder group.
|
|
|
|
*/
|
|
|
|
if (fs->fs_contigsumsize > 0) {
|
|
|
|
lp = fs->fs_maxcluster;
|
|
|
|
for (i = 0; i < fs->fs_ncg; i++)
|
|
|
|
*lp++ = fs->fs_contigsumsize;
|
|
|
|
}
|
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
loop:
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntvnode_slock);
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
|
|
|
|
if (vp->v_mount != mp) {
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mntvnode_slock);
|
|
|
|
goto loop;
|
|
|
|
}
|
1994-06-08 15:41:58 +04:00
|
|
|
nvp = vp->v_mntvnodes.le_next;
|
|
|
|
/*
|
|
|
|
* Step 4: invalidate all inactive vnodes.
|
|
|
|
*/
|
2003-06-30 02:28:00 +04:00
|
|
|
if (vrecycle(vp, &mntvnode_slock, p))
|
1998-03-01 05:20:01 +03:00
|
|
|
goto loop;
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Step 5: invalidate all cached file data.
|
|
|
|
*/
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
|
|
|
simple_unlock(&mntvnode_slock);
|
2003-06-29 22:43:21 +04:00
|
|
|
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
|
1994-06-08 15:41:58 +04:00
|
|
|
goto loop;
|
2003-06-30 02:28:00 +04:00
|
|
|
if (vinvalbuf(vp, 0, cred, p, 0, 0))
|
1994-06-08 15:41:58 +04:00
|
|
|
panic("ffs_reload: dirty2");
|
|
|
|
/*
|
|
|
|
* Step 6: re-read inode data for all active vnodes.
|
|
|
|
*/
|
|
|
|
ip = VTOI(vp);
|
1996-02-10 01:22:18 +03:00
|
|
|
error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
|
|
|
|
(int)fs->fs_bsize, NOCRED, &bp);
|
|
|
|
if (error) {
|
1999-02-10 16:14:08 +03:00
|
|
|
brelse(bp);
|
1994-06-08 15:41:58 +04:00
|
|
|
vput(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
2003-04-02 14:39:19 +04:00
|
|
|
ffs_load_inode(bp, ip, fs, ip->i_number);
|
|
|
|
ip->i_ffs_effnlink = ip->i_nlink;
|
1994-06-08 15:41:58 +04:00
|
|
|
brelse(bp);
|
|
|
|
vput(vp);
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntvnode_slock);
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mntvnode_slock);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2003-04-02 14:39:19 +04:00
|
|
|
/*
|
|
|
|
* Possible superblock locations ordered from most to least likely.
|
|
|
|
*/
|
2004-02-22 11:58:03 +03:00
|
|
|
static const int sblock_try[] = SBLOCKSEARCH;
|
2003-04-02 14:39:19 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Common code for mount and mountroot
|
|
|
|
*/
|
|
|
|
int
|
2003-06-30 02:28:00 +04:00
|
|
|
ffs_mountfs(devvp, mp, p)
|
2000-03-30 16:41:09 +04:00
|
|
|
struct vnode *devvp;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct mount *mp;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:41:58 +04:00
|
|
|
{
|
1998-03-18 18:57:26 +03:00
|
|
|
struct ufsmount *ump;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct buf *bp;
|
1998-03-18 18:57:26 +03:00
|
|
|
struct fs *fs;
|
1994-12-14 16:03:35 +03:00
|
|
|
dev_t dev;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct partinfo dpart;
|
2001-09-02 05:58:30 +04:00
|
|
|
void *space;
|
2003-04-02 14:39:19 +04:00
|
|
|
daddr_t sblockloc, fsblockloc;
|
|
|
|
int blks, fstype;
|
1999-08-03 23:22:43 +04:00
|
|
|
int error, i, size, ronly;
|
|
|
|
#ifdef FFS_EI
|
2003-04-02 14:39:19 +04:00
|
|
|
int needswap = 0; /* keep gcc happy */
|
1999-08-03 23:22:43 +04:00
|
|
|
#endif
|
1994-12-14 16:03:35 +03:00
|
|
|
int32_t *lp;
|
|
|
|
struct ucred *cred;
|
2003-04-02 14:39:19 +04:00
|
|
|
u_int32_t sbsize = 8192; /* keep gcc happy*/
|
1994-06-08 15:41:58 +04:00
|
|
|
|
1994-12-14 16:03:35 +03:00
|
|
|
dev = devvp->v_rdev;
|
|
|
|
cred = p ? p->p_ucred : NOCRED;
|
2005-01-09 06:11:48 +03:00
|
|
|
|
|
|
|
/* Flush out any old buffers remaining from a previous use. */
|
1999-11-15 21:49:07 +03:00
|
|
|
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
|
2003-06-30 02:28:00 +04:00
|
|
|
error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
|
1999-11-15 21:49:07 +03:00
|
|
|
VOP_UNLOCK(devvp, 0);
|
|
|
|
if (error)
|
1994-06-08 15:41:58 +04:00
|
|
|
return (error);
|
|
|
|
|
|
|
|
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
|
2003-06-30 02:28:00 +04:00
|
|
|
if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, p) != 0)
|
1994-06-08 15:41:58 +04:00
|
|
|
size = DEV_BSIZE;
|
|
|
|
else
|
|
|
|
size = dpart.disklab->d_secsize;
|
|
|
|
|
|
|
|
bp = NULL;
|
|
|
|
ump = NULL;
|
2003-04-02 14:39:19 +04:00
|
|
|
fs = NULL;
|
2004-03-21 21:48:24 +03:00
|
|
|
sblockloc = 0;
|
2003-04-02 14:39:19 +04:00
|
|
|
fstype = 0;
|
1998-03-18 18:57:26 +03:00
|
|
|
|
2003-04-02 14:39:19 +04:00
|
|
|
/*
|
|
|
|
* Try reading the superblock in each of its possible locations. */
|
2004-03-21 21:48:24 +03:00
|
|
|
for (i = 0; ; i++) {
|
|
|
|
if (bp != NULL) {
|
|
|
|
bp->b_flags |= B_NOCACHE;
|
|
|
|
brelse(bp);
|
|
|
|
bp = NULL;
|
|
|
|
}
|
|
|
|
if (sblock_try[i] == -1) {
|
|
|
|
error = EINVAL;
|
|
|
|
fs = NULL;
|
|
|
|
goto out;
|
|
|
|
}
|
2003-04-02 14:39:19 +04:00
|
|
|
error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
|
|
|
|
&bp);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
fs = (struct fs*)bp->b_data;
|
|
|
|
fsblockloc = sblockloc = sblock_try[i];
|
|
|
|
if (fs->fs_magic == FS_UFS1_MAGIC) {
|
|
|
|
sbsize = fs->fs_sbsize;
|
|
|
|
fstype = UFS1;
|
1998-03-18 18:57:26 +03:00
|
|
|
#ifdef FFS_EI
|
2003-04-02 14:39:19 +04:00
|
|
|
needswap = 0;
|
|
|
|
} else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
|
|
|
|
sbsize = bswap32(fs->fs_sbsize);
|
|
|
|
fstype = UFS1;
|
|
|
|
needswap = 1;
|
1998-03-18 18:57:26 +03:00
|
|
|
#endif
|
2003-04-02 14:39:19 +04:00
|
|
|
} else if (fs->fs_magic == FS_UFS2_MAGIC) {
|
|
|
|
sbsize = fs->fs_sbsize;
|
|
|
|
fstype = UFS2;
|
|
|
|
#ifdef FFS_EI
|
|
|
|
needswap = 0;
|
|
|
|
} else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
|
|
|
|
sbsize = bswap32(fs->fs_sbsize);
|
|
|
|
fstype = UFS2;
|
|
|
|
needswap = 1;
|
|
|
|
#endif
|
2003-04-12 14:35:58 +04:00
|
|
|
} else
|
2004-03-21 21:48:24 +03:00
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
|
|
/* fs->fs_sblockloc isn't defined for old filesystems */
|
2004-03-27 15:40:46 +03:00
|
|
|
if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
|
|
|
|
if (sblockloc == SBLOCK_UFS2)
|
2004-03-21 21:48:24 +03:00
|
|
|
/*
|
|
|
|
* This is likely to be the first alternate
|
|
|
|
* in a filesystem with 64k blocks.
|
|
|
|
* Don't use it.
|
|
|
|
*/
|
|
|
|
continue;
|
|
|
|
fsblockloc = sblockloc;
|
|
|
|
} else {
|
|
|
|
fsblockloc = fs->fs_sblockloc;
|
|
|
|
#ifdef FFS_EI
|
|
|
|
if (needswap)
|
|
|
|
fsblockloc = bswap64(fsblockloc);
|
|
|
|
#endif
|
|
|
|
}
|
2003-04-02 14:39:19 +04:00
|
|
|
|
2004-03-21 21:48:24 +03:00
|
|
|
/* Check we haven't found an alternate superblock */
|
|
|
|
if (fsblockloc != sblockloc)
|
|
|
|
continue;
|
2003-04-12 14:35:58 +04:00
|
|
|
|
2004-03-21 21:48:24 +03:00
|
|
|
/* Validate size of superblock */
|
|
|
|
if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
|
|
|
|
continue;
|
2003-04-02 14:39:19 +04:00
|
|
|
|
2004-03-21 21:48:24 +03:00
|
|
|
/* Ok seems to be a good superblock */
|
|
|
|
break;
|
1998-03-18 18:57:26 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
|
1998-08-10 00:15:38 +04:00
|
|
|
memcpy(fs, bp->b_data, sbsize);
|
2003-04-05 17:37:36 +04:00
|
|
|
|
|
|
|
ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
|
|
|
|
memset(ump, 0, sizeof *ump);
|
2004-05-25 18:54:55 +04:00
|
|
|
TAILQ_INIT(&ump->um_snapshots);
|
2003-04-05 17:37:36 +04:00
|
|
|
ump->um_fs = fs;
|
|
|
|
|
1998-03-18 18:57:26 +03:00
|
|
|
#ifdef FFS_EI
|
1999-11-15 21:49:07 +03:00
|
|
|
if (needswap) {
|
2001-08-17 06:18:46 +04:00
|
|
|
ffs_sb_swap((struct fs*)bp->b_data, fs);
|
1999-11-15 21:49:07 +03:00
|
|
|
fs->fs_flags |= FS_SWAPPED;
|
2003-09-13 18:09:15 +04:00
|
|
|
} else
|
1998-03-18 18:57:26 +03:00
|
|
|
#endif
|
2003-09-13 18:09:15 +04:00
|
|
|
fs->fs_flags &= ~FS_SWAPPED;
|
1999-12-10 17:36:04 +03:00
|
|
|
|
2004-01-09 22:10:22 +03:00
|
|
|
ffs_oldfscompat_read(fs, ump, sblockloc);
|
2004-09-19 15:58:29 +04:00
|
|
|
ump->um_maxfilesize = fs->fs_maxfilesize;
|
2004-01-09 22:10:22 +03:00
|
|
|
|
2001-12-18 13:57:21 +03:00
|
|
|
if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
|
|
|
|
fs->fs_pendingblocks = 0;
|
|
|
|
fs->fs_pendinginodes = 0;
|
|
|
|
}
|
1999-12-10 17:36:04 +03:00
|
|
|
|
2003-04-02 14:39:19 +04:00
|
|
|
ump->um_fstype = fstype;
|
|
|
|
if (fs->fs_sbsize < SBLOCKSIZE)
|
1994-06-08 15:41:58 +04:00
|
|
|
bp->b_flags |= B_INVAL;
|
|
|
|
brelse(bp);
|
|
|
|
bp = NULL;
|
2002-03-17 03:02:34 +03:00
|
|
|
|
2002-09-29 00:11:05 +04:00
|
|
|
/* First check to see if this is tagged as an Apple UFS filesystem
|
|
|
|
* in the disklabel
|
|
|
|
*/
|
2003-06-30 02:28:00 +04:00
|
|
|
if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, p) == 0) &&
|
2002-09-29 00:11:05 +04:00
|
|
|
(dpart.part->p_fstype == FS_APPLEUFS)) {
|
|
|
|
ump->um_flags |= UFS_ISAPPLEUFS;
|
|
|
|
}
|
|
|
|
#ifdef APPLE_UFS
|
|
|
|
else {
|
|
|
|
/* Manually look for an apple ufs label, and if a valid one
|
|
|
|
* is found, then treat it like an Apple UFS filesystem anyway
|
|
|
|
*/
|
2003-01-25 00:55:02 +03:00
|
|
|
error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
|
2002-09-29 00:11:05 +04:00
|
|
|
APPLEUFS_LABEL_SIZE, cred, &bp);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
error = ffs_appleufs_validate(fs->fs_fsmnt,
|
|
|
|
(struct appleufslabel *)bp->b_data,NULL);
|
|
|
|
if (error == 0) {
|
|
|
|
ump->um_flags |= UFS_ISAPPLEUFS;
|
|
|
|
}
|
|
|
|
brelse(bp);
|
|
|
|
bp = NULL;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
if (ump->um_flags & UFS_ISAPPLEUFS) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2002-03-17 03:02:34 +03:00
|
|
|
/*
|
2002-06-09 20:46:49 +04:00
|
|
|
* verify that we can access the last block in the fs
|
|
|
|
* if we're mounting read/write.
|
2002-03-17 03:02:34 +03:00
|
|
|
*/
|
|
|
|
|
2002-06-09 20:46:49 +04:00
|
|
|
if (!ronly) {
|
|
|
|
error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
|
|
|
|
cred, &bp);
|
|
|
|
if (bp->b_bcount != fs->fs_fsize)
|
|
|
|
error = EINVAL;
|
|
|
|
bp->b_flags |= B_INVAL;
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
brelse(bp);
|
|
|
|
bp = NULL;
|
|
|
|
}
|
2002-03-17 03:02:34 +03:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
fs->fs_ronly = ronly;
|
1995-04-13 01:21:00 +04:00
|
|
|
if (ronly == 0) {
|
|
|
|
fs->fs_clean <<= 1;
|
1994-06-08 15:41:58 +04:00
|
|
|
fs->fs_fmod = 1;
|
1995-04-13 01:21:00 +04:00
|
|
|
}
|
1994-12-14 16:03:35 +03:00
|
|
|
size = fs->fs_cssize;
|
|
|
|
blks = howmany(size, fs->fs_fsize);
|
|
|
|
if (fs->fs_contigsumsize > 0)
|
|
|
|
size += fs->fs_ncg * sizeof(int32_t);
|
Incorporate the enhanced ffs_dirpref() by Grigoriy Orlov, as found in
FreeBSD (three commits; the initial work, man page updates, and a fix
to ffs_reload()), with the following differences:
- Be consistent between newfs(8) and tunefs(8) as to the options which
set and control the tuning parameters for this work (avgfilesize & avgfpdir)
- Use u_int16_t instead of u_int8_t to keep track of the number of
contiguous directories (suggested by Chuck Silvers)
- Work within our FFS_EI framework
- Ensure that fs->fs_maxclusters and fs->fs_contigdirs don't point to
the same area of memory
The new algorithm has a marked performance increase, especially when
performing tasks such as untarring pkgsrc.tar.gz, etc.
The original FreeBSD commit messages are attached:
=====
mckusick 2001/04/10 01:39:00 PDT
Directory layout preference improvements from Grigoriy Orlov <gluk@ptci.ru>.
His description of the problem and solution follow. My own tests show
speedups on typical filesystem intensive workloads of 5% to 12% which
is very impressive considering the small amount of code change involved.
------
One day I noticed that some file operations run much faster on
small file systems then on big ones. I've looked at the ffs
algorithms, thought about them, and redesigned the dirpref algorithm.
First I want to describe the results of my tests. These results are old
and I have improved the algorithm after these tests were done. Nevertheless
they show how big the perfomance speedup may be. I have done two file/directory
intensive tests on a two OpenBSD systems with old and new dirpref algorithm.
The first test is "tar -xzf ports.tar.gz", the second is "rm -rf ports".
The ports.tar.gz file is the ports collection from the OpenBSD 2.8 release.
It contains 6596 directories and 13868 files. The test systems are:
1. Celeron-450, 128Mb, two IDE drives, the system at wd0, file system for
test is at wd1. Size of test file system is 8 Gb, number of cg=991,
size of cg is 8m, block size = 8k, fragment size = 1k OpenBSD-current
from Dec 2000 with BUFCACHEPERCENT=35
2. PIII-600, 128Mb, two IBM DTLA-307045 IDE drives at i815e, the system
at wd0, file system for test is at wd1. Size of test file system is 40 Gb,
number of cg=5324, size of cg is 8m, block size = 8k, fragment size = 1k
OpenBSD-current from Dec 2000 with BUFCACHEPERCENT=50
You can get more info about the test systems and methods at:
http://www.ptci.ru/gluk/dirpref/old/dirpref.html
Test Results
tar -xzf ports.tar.gz rm -rf ports
mode old dirpref new dirpref speedup old dirprefnew dirpref speedup
First system
normal 667 472 1.41 477 331 1.44
async 285 144 1.98 130 14 9.29
sync 768 616 1.25 477 334 1.43
softdep 413 252 1.64 241 38 6.34
Second system
normal 329 81 4.06 263.5 93.5 2.81
async 302 25.7 11.75 112 2.26 49.56
sync 281 57.0 4.93 263 90.5 2.9
softdep 341 40.6 8.4 284 4.76 59.66
"old dirpref" and "new dirpref" columns give a test time in seconds.
speedup - speed increasement in times, ie. old dirpref / new dirpref.
------
Algorithm description
The old dirpref algorithm is described in comments:
/*
* Find a cylinder to place a directory.
*
* The policy implemented by this algorithm is to select from
* among those cylinder groups with above the average number of
* free inodes, the one with the smallest number of directories.
*/
A new directory is allocated in a different cylinder groups than its
parent directory resulting in a directory tree that is spreaded across
all the cylinder groups. This spreading out results in a non-optimal
access to the directories and files. When we have a small filesystem
it is not a problem but when the filesystem is big then perfomance
degradation becomes very apparent.
What I mean by a big file system ?
1. A big filesystem is a filesystem which occupy 20-30 or more percent
of total drive space, i.e. first and last cylinder are physically
located relatively far from each other.
2. It has a relatively large number of cylinder groups, for example
more cylinder groups than 50% of the buffers in the buffer cache.
The first results in long access times, while the second results in
many buffers being used by metadata operations. Such operations use
cylinder group blocks and on-disk inode blocks. The cylinder group
block (fs->fs_cblkno) contains struct cg, inode and block bit maps.
It is 2k in size for the default filesystem parameters. If new and
parent directories are located in different cylinder groups then the
system performs more input/output operations and uses more buffers.
On filesystems with many cylinder groups, lots of cache buffers are
used for metadata operations.
My solution for this problem is very simple. I allocate many directories
in one cylinder group. I also do some things, so that the new allocation
method does not cause excessive fragmentation and all directory inodes
will not be located at a location far from its file's inodes and data.
The algorithm is:
/*
* Find a cylinder group to place a directory.
*
* The policy implemented by this algorithm is to allocate a
* directory inode in the same cylinder group as its parent
* directory, but also to reserve space for its files inodes
* and data. Restrict the number of directories which may be
* allocated one after another in the same cylinder group
* without intervening allocation of files.
*
* If we allocate a first level directory then force allocation
* in another cylinder group.
*/
My early versions of dirpref give me a good results for a wide range of
file operations and different filesystem capacities except one case:
those applications that create their entire directory structure first
and only later fill this structure with files.
My solution for such and similar cases is to limit a number of
directories which may be created one after another in the same cylinder
group without intervening file creations. For this purpose, I allocate
an array of counters at mount time. This array is linked to the superblock
fs->fs_contigdirs[cg]. Each time a directory is created the counter
increases and each time a file is created the counter decreases. A 60Gb
filesystem with 8mb/cg requires 10kb of memory for the counters array.
The maxcontigdirs is a maximum number of directories which may be created
without an intervening file creation. I found in my tests that the best
performance occurs when I restrict the number of directories in one cylinder
group such that all its files may be located in the same cylinder group.
There may be some deterioration in performance if all the file inodes
are in the same cylinder group as its containing directory, but their
data partially resides in a different cylinder group. The maxcontigdirs
value is calculated to try to prevent this condition. Since there is
no way to know how many files and directories will be allocated later
I added two optimization parameters in superblock/tunefs. They are:
int32_t fs_avgfilesize; /* expected average file size */
int32_t fs_avgfpdir; /* expected # of files per directory */
These parameters have reasonable defaults but may be tweeked for special
uses of a filesystem. They are only necessary in rare cases like better
tuning a filesystem being used to store a squid cache.
I have been using this algorithm for about 3 months. I have done
a lot of testing on filesystems with different capacities, average
filesize, average number of files per directory, and so on. I think
this algorithm has no negative impact on filesystem perfomance. It
works better than the default one in all cases. The new dirpref
will greatly improve untarring/removing/coping of big directories,
decrease load on cvs servers and much more. The new dirpref doesn't
speedup a compilation process, but also doesn't slow it down.
Obtained from: Grigoriy Orlov <gluk@ptci.ru>
=====
=====
iedowse 2001/04/23 17:37:17 PDT
Pre-dirpref versions of fsck may zero out the new superblock fields
fs_contigdirs, fs_avgfilesize and fs_avgfpdir. This could cause
panics if these fields were zeroed while a filesystem was mounted
read-only, and then remounted read-write.
Add code to ffs_reload() which copies the fs_contigdirs pointer
from the previous superblock, and reinitialises fs_avgf* if necessary.
Reviewed by: mckusick
=====
=====
nik 2001/04/10 03:36:44 PDT
Add information about the new options to newfs and tunefs which set the
expected average file size and number of files per directory. Could do
with some fleshing out.
=====
2001-09-06 06:16:00 +04:00
|
|
|
size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
|
2001-09-02 05:58:30 +04:00
|
|
|
space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
|
|
|
|
fs->fs_csp = space;
|
1994-06-08 15:41:58 +04:00
|
|
|
for (i = 0; i < blks; i += fs->fs_frag) {
|
|
|
|
size = fs->fs_bsize;
|
|
|
|
if (i + fs->fs_frag > blks)
|
|
|
|
size = (blks - i) * fs->fs_fsize;
|
1996-02-10 01:22:18 +03:00
|
|
|
error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
|
|
|
|
cred, &bp);
|
|
|
|
if (error) {
|
2001-09-02 05:58:30 +04:00
|
|
|
free(fs->fs_csp, M_UFSMNT);
|
2003-11-08 08:35:11 +03:00
|
|
|
goto out;
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
1998-03-18 18:57:26 +03:00
|
|
|
#ifdef FFS_EI
|
|
|
|
if (needswap)
|
2001-09-02 05:58:30 +04:00
|
|
|
ffs_csum_swap((struct csum *)bp->b_data,
|
|
|
|
(struct csum *)space, size);
|
1998-03-18 18:57:26 +03:00
|
|
|
else
|
|
|
|
#endif
|
1998-08-10 00:15:38 +04:00
|
|
|
memcpy(space, bp->b_data, (u_int)size);
|
2005-02-27 01:31:44 +03:00
|
|
|
|
2001-09-02 05:58:30 +04:00
|
|
|
space = (char *)space + size;
|
1994-06-08 15:41:58 +04:00
|
|
|
brelse(bp);
|
|
|
|
bp = NULL;
|
|
|
|
}
|
1994-12-14 16:03:35 +03:00
|
|
|
if (fs->fs_contigsumsize > 0) {
|
Incorporate the enhanced ffs_dirpref() by Grigoriy Orlov, as found in
FreeBSD (three commits; the initial work, man page updates, and a fix
to ffs_reload()), with the following differences:
- Be consistent between newfs(8) and tunefs(8) as to the options which
set and control the tuning parameters for this work (avgfilesize & avgfpdir)
- Use u_int16_t instead of u_int8_t to keep track of the number of
contiguous directories (suggested by Chuck Silvers)
- Work within our FFS_EI framework
- Ensure that fs->fs_maxclusters and fs->fs_contigdirs don't point to
the same area of memory
The new algorithm has a marked performance increase, especially when
performing tasks such as untarring pkgsrc.tar.gz, etc.
The original FreeBSD commit messages are attached:
=====
mckusick 2001/04/10 01:39:00 PDT
Directory layout preference improvements from Grigoriy Orlov <gluk@ptci.ru>.
His description of the problem and solution follow. My own tests show
speedups on typical filesystem intensive workloads of 5% to 12% which
is very impressive considering the small amount of code change involved.
------
One day I noticed that some file operations run much faster on
small file systems then on big ones. I've looked at the ffs
algorithms, thought about them, and redesigned the dirpref algorithm.
First I want to describe the results of my tests. These results are old
and I have improved the algorithm after these tests were done. Nevertheless
they show how big the perfomance speedup may be. I have done two file/directory
intensive tests on a two OpenBSD systems with old and new dirpref algorithm.
The first test is "tar -xzf ports.tar.gz", the second is "rm -rf ports".
The ports.tar.gz file is the ports collection from the OpenBSD 2.8 release.
It contains 6596 directories and 13868 files. The test systems are:
1. Celeron-450, 128Mb, two IDE drives, the system at wd0, file system for
test is at wd1. Size of test file system is 8 Gb, number of cg=991,
size of cg is 8m, block size = 8k, fragment size = 1k OpenBSD-current
from Dec 2000 with BUFCACHEPERCENT=35
2. PIII-600, 128Mb, two IBM DTLA-307045 IDE drives at i815e, the system
at wd0, file system for test is at wd1. Size of test file system is 40 Gb,
number of cg=5324, size of cg is 8m, block size = 8k, fragment size = 1k
OpenBSD-current from Dec 2000 with BUFCACHEPERCENT=50
You can get more info about the test systems and methods at:
http://www.ptci.ru/gluk/dirpref/old/dirpref.html
Test Results
tar -xzf ports.tar.gz rm -rf ports
mode old dirpref new dirpref speedup old dirprefnew dirpref speedup
First system
normal 667 472 1.41 477 331 1.44
async 285 144 1.98 130 14 9.29
sync 768 616 1.25 477 334 1.43
softdep 413 252 1.64 241 38 6.34
Second system
normal 329 81 4.06 263.5 93.5 2.81
async 302 25.7 11.75 112 2.26 49.56
sync 281 57.0 4.93 263 90.5 2.9
softdep 341 40.6 8.4 284 4.76 59.66
"old dirpref" and "new dirpref" columns give a test time in seconds.
speedup - speed increasement in times, ie. old dirpref / new dirpref.
------
Algorithm description
The old dirpref algorithm is described in comments:
/*
* Find a cylinder to place a directory.
*
* The policy implemented by this algorithm is to select from
* among those cylinder groups with above the average number of
* free inodes, the one with the smallest number of directories.
*/
A new directory is allocated in a different cylinder groups than its
parent directory resulting in a directory tree that is spreaded across
all the cylinder groups. This spreading out results in a non-optimal
access to the directories and files. When we have a small filesystem
it is not a problem but when the filesystem is big then perfomance
degradation becomes very apparent.
What I mean by a big file system ?
1. A big filesystem is a filesystem which occupy 20-30 or more percent
of total drive space, i.e. first and last cylinder are physically
located relatively far from each other.
2. It has a relatively large number of cylinder groups, for example
more cylinder groups than 50% of the buffers in the buffer cache.
The first results in long access times, while the second results in
many buffers being used by metadata operations. Such operations use
cylinder group blocks and on-disk inode blocks. The cylinder group
block (fs->fs_cblkno) contains struct cg, inode and block bit maps.
It is 2k in size for the default filesystem parameters. If new and
parent directories are located in different cylinder groups then the
system performs more input/output operations and uses more buffers.
On filesystems with many cylinder groups, lots of cache buffers are
used for metadata operations.
My solution for this problem is very simple. I allocate many directories
in one cylinder group. I also do some things, so that the new allocation
method does not cause excessive fragmentation and all directory inodes
will not be located at a location far from its file's inodes and data.
The algorithm is:
/*
* Find a cylinder group to place a directory.
*
* The policy implemented by this algorithm is to allocate a
* directory inode in the same cylinder group as its parent
* directory, but also to reserve space for its files inodes
* and data. Restrict the number of directories which may be
* allocated one after another in the same cylinder group
* without intervening allocation of files.
*
* If we allocate a first level directory then force allocation
* in another cylinder group.
*/
My early versions of dirpref give me a good results for a wide range of
file operations and different filesystem capacities except one case:
those applications that create their entire directory structure first
and only later fill this structure with files.
My solution for such and similar cases is to limit a number of
directories which may be created one after another in the same cylinder
group without intervening file creations. For this purpose, I allocate
an array of counters at mount time. This array is linked to the superblock
fs->fs_contigdirs[cg]. Each time a directory is created the counter
increases and each time a file is created the counter decreases. A 60Gb
filesystem with 8mb/cg requires 10kb of memory for the counters array.
The maxcontigdirs is a maximum number of directories which may be created
without an intervening file creation. I found in my tests that the best
performance occurs when I restrict the number of directories in one cylinder
group such that all its files may be located in the same cylinder group.
There may be some deterioration in performance if all the file inodes
are in the same cylinder group as its containing directory, but their
data partially resides in a different cylinder group. The maxcontigdirs
value is calculated to try to prevent this condition. Since there is
no way to know how many files and directories will be allocated later
I added two optimization parameters in superblock/tunefs. They are:
int32_t fs_avgfilesize; /* expected average file size */
int32_t fs_avgfpdir; /* expected # of files per directory */
These parameters have reasonable defaults but may be tweeked for special
uses of a filesystem. They are only necessary in rare cases like better
tuning a filesystem being used to store a squid cache.
I have been using this algorithm for about 3 months. I have done
a lot of testing on filesystems with different capacities, average
filesize, average number of files per directory, and so on. I think
this algorithm has no negative impact on filesystem perfomance. It
works better than the default one in all cases. The new dirpref
will greatly improve untarring/removing/coping of big directories,
decrease load on cvs servers and much more. The new dirpref doesn't
speedup a compilation process, but also doesn't slow it down.
Obtained from: Grigoriy Orlov <gluk@ptci.ru>
=====
=====
iedowse 2001/04/23 17:37:17 PDT
Pre-dirpref versions of fsck may zero out the new superblock fields
fs_contigdirs, fs_avgfilesize and fs_avgfpdir. This could cause
panics if these fields were zeroed while a filesystem was mounted
read-only, and then remounted read-write.
Add code to ffs_reload() which copies the fs_contigdirs pointer
from the previous superblock, and reinitialises fs_avgf* if necessary.
Reviewed by: mckusick
=====
=====
nik 2001/04/10 03:36:44 PDT
Add information about the new options to newfs and tunefs which set the
expected average file size and number of files per directory. Could do
with some fleshing out.
=====
2001-09-06 06:16:00 +04:00
|
|
|
fs->fs_maxcluster = lp = space;
|
1994-12-14 16:03:35 +03:00
|
|
|
for (i = 0; i < fs->fs_ncg; i++)
|
|
|
|
*lp++ = fs->fs_contigsumsize;
|
Incorporate the enhanced ffs_dirpref() by Grigoriy Orlov, as found in
FreeBSD (three commits; the initial work, man page updates, and a fix
to ffs_reload()), with the following differences:
- Be consistent between newfs(8) and tunefs(8) as to the options which
set and control the tuning parameters for this work (avgfilesize & avgfpdir)
- Use u_int16_t instead of u_int8_t to keep track of the number of
contiguous directories (suggested by Chuck Silvers)
- Work within our FFS_EI framework
- Ensure that fs->fs_maxclusters and fs->fs_contigdirs don't point to
the same area of memory
The new algorithm has a marked performance increase, especially when
performing tasks such as untarring pkgsrc.tar.gz, etc.
The original FreeBSD commit messages are attached:
=====
mckusick 2001/04/10 01:39:00 PDT
Directory layout preference improvements from Grigoriy Orlov <gluk@ptci.ru>.
His description of the problem and solution follow. My own tests show
speedups on typical filesystem intensive workloads of 5% to 12% which
is very impressive considering the small amount of code change involved.
------
One day I noticed that some file operations run much faster on
small file systems then on big ones. I've looked at the ffs
algorithms, thought about them, and redesigned the dirpref algorithm.
First I want to describe the results of my tests. These results are old
and I have improved the algorithm after these tests were done. Nevertheless
they show how big the perfomance speedup may be. I have done two file/directory
intensive tests on a two OpenBSD systems with old and new dirpref algorithm.
The first test is "tar -xzf ports.tar.gz", the second is "rm -rf ports".
The ports.tar.gz file is the ports collection from the OpenBSD 2.8 release.
It contains 6596 directories and 13868 files. The test systems are:
1. Celeron-450, 128Mb, two IDE drives, the system at wd0, file system for
test is at wd1. Size of test file system is 8 Gb, number of cg=991,
size of cg is 8m, block size = 8k, fragment size = 1k OpenBSD-current
from Dec 2000 with BUFCACHEPERCENT=35
2. PIII-600, 128Mb, two IBM DTLA-307045 IDE drives at i815e, the system
at wd0, file system for test is at wd1. Size of test file system is 40 Gb,
number of cg=5324, size of cg is 8m, block size = 8k, fragment size = 1k
OpenBSD-current from Dec 2000 with BUFCACHEPERCENT=50
You can get more info about the test systems and methods at:
http://www.ptci.ru/gluk/dirpref/old/dirpref.html
Test Results
tar -xzf ports.tar.gz rm -rf ports
mode old dirpref new dirpref speedup old dirprefnew dirpref speedup
First system
normal 667 472 1.41 477 331 1.44
async 285 144 1.98 130 14 9.29
sync 768 616 1.25 477 334 1.43
softdep 413 252 1.64 241 38 6.34
Second system
normal 329 81 4.06 263.5 93.5 2.81
async 302 25.7 11.75 112 2.26 49.56
sync 281 57.0 4.93 263 90.5 2.9
softdep 341 40.6 8.4 284 4.76 59.66
"old dirpref" and "new dirpref" columns give a test time in seconds.
speedup - speed increasement in times, ie. old dirpref / new dirpref.
------
Algorithm description
The old dirpref algorithm is described in comments:
/*
* Find a cylinder to place a directory.
*
* The policy implemented by this algorithm is to select from
* among those cylinder groups with above the average number of
* free inodes, the one with the smallest number of directories.
*/
A new directory is allocated in a different cylinder groups than its
parent directory resulting in a directory tree that is spreaded across
all the cylinder groups. This spreading out results in a non-optimal
access to the directories and files. When we have a small filesystem
it is not a problem but when the filesystem is big then perfomance
degradation becomes very apparent.
What I mean by a big file system ?
1. A big filesystem is a filesystem which occupy 20-30 or more percent
of total drive space, i.e. first and last cylinder are physically
located relatively far from each other.
2. It has a relatively large number of cylinder groups, for example
more cylinder groups than 50% of the buffers in the buffer cache.
The first results in long access times, while the second results in
many buffers being used by metadata operations. Such operations use
cylinder group blocks and on-disk inode blocks. The cylinder group
block (fs->fs_cblkno) contains struct cg, inode and block bit maps.
It is 2k in size for the default filesystem parameters. If new and
parent directories are located in different cylinder groups then the
system performs more input/output operations and uses more buffers.
On filesystems with many cylinder groups, lots of cache buffers are
used for metadata operations.
My solution for this problem is very simple. I allocate many directories
in one cylinder group. I also do some things, so that the new allocation
method does not cause excessive fragmentation and all directory inodes
will not be located at a location far from its file's inodes and data.
The algorithm is:
/*
* Find a cylinder group to place a directory.
*
* The policy implemented by this algorithm is to allocate a
* directory inode in the same cylinder group as its parent
* directory, but also to reserve space for its files inodes
* and data. Restrict the number of directories which may be
* allocated one after another in the same cylinder group
* without intervening allocation of files.
*
* If we allocate a first level directory then force allocation
* in another cylinder group.
*/
My early versions of dirpref give me a good results for a wide range of
file operations and different filesystem capacities except one case:
those applications that create their entire directory structure first
and only later fill this structure with files.
My solution for such and similar cases is to limit a number of
directories which may be created one after another in the same cylinder
group without intervening file creations. For this purpose, I allocate
an array of counters at mount time. This array is linked to the superblock
fs->fs_contigdirs[cg]. Each time a directory is created the counter
increases and each time a file is created the counter decreases. A 60Gb
filesystem with 8mb/cg requires 10kb of memory for the counters array.
The maxcontigdirs is a maximum number of directories which may be created
without an intervening file creation. I found in my tests that the best
performance occurs when I restrict the number of directories in one cylinder
group such that all its files may be located in the same cylinder group.
There may be some deterioration in performance if all the file inodes
are in the same cylinder group as its containing directory, but their
data partially resides in a different cylinder group. The maxcontigdirs
value is calculated to try to prevent this condition. Since there is
no way to know how many files and directories will be allocated later
I added two optimization parameters in superblock/tunefs. They are:
int32_t fs_avgfilesize; /* expected average file size */
int32_t fs_avgfpdir; /* expected # of files per directory */
These parameters have reasonable defaults but may be tweeked for special
uses of a filesystem. They are only necessary in rare cases like better
tuning a filesystem being used to store a squid cache.
I have been using this algorithm for about 3 months. I have done
a lot of testing on filesystems with different capacities, average
filesize, average number of files per directory, and so on. I think
this algorithm has no negative impact on filesystem perfomance. It
works better than the default one in all cases. The new dirpref
will greatly improve untarring/removing/coping of big directories,
decrease load on cvs servers and much more. The new dirpref doesn't
speedup a compilation process, but also doesn't slow it down.
Obtained from: Grigoriy Orlov <gluk@ptci.ru>
=====
=====
iedowse 2001/04/23 17:37:17 PDT
Pre-dirpref versions of fsck may zero out the new superblock fields
fs_contigdirs, fs_avgfilesize and fs_avgfpdir. This could cause
panics if these fields were zeroed while a filesystem was mounted
read-only, and then remounted read-write.
Add code to ffs_reload() which copies the fs_contigdirs pointer
from the previous superblock, and reinitialises fs_avgf* if necessary.
Reviewed by: mckusick
=====
=====
nik 2001/04/10 03:36:44 PDT
Add information about the new options to newfs and tunefs which set the
expected average file size and number of files per directory. Could do
with some fleshing out.
=====
2001-09-06 06:16:00 +04:00
|
|
|
space = lp;
|
1994-12-14 16:03:35 +03:00
|
|
|
}
|
Incorporate the enhanced ffs_dirpref() by Grigoriy Orlov, as found in
FreeBSD (three commits; the initial work, man page updates, and a fix
to ffs_reload()), with the following differences:
- Be consistent between newfs(8) and tunefs(8) as to the options which
set and control the tuning parameters for this work (avgfilesize & avgfpdir)
- Use u_int16_t instead of u_int8_t to keep track of the number of
contiguous directories (suggested by Chuck Silvers)
- Work within our FFS_EI framework
- Ensure that fs->fs_maxclusters and fs->fs_contigdirs don't point to
the same area of memory
The new algorithm has a marked performance increase, especially when
performing tasks such as untarring pkgsrc.tar.gz, etc.
The original FreeBSD commit messages are attached:
=====
mckusick 2001/04/10 01:39:00 PDT
Directory layout preference improvements from Grigoriy Orlov <gluk@ptci.ru>.
His description of the problem and solution follow. My own tests show
speedups on typical filesystem intensive workloads of 5% to 12% which
is very impressive considering the small amount of code change involved.
------
One day I noticed that some file operations run much faster on
small file systems then on big ones. I've looked at the ffs
algorithms, thought about them, and redesigned the dirpref algorithm.
First I want to describe the results of my tests. These results are old
and I have improved the algorithm after these tests were done. Nevertheless
they show how big the perfomance speedup may be. I have done two file/directory
intensive tests on a two OpenBSD systems with old and new dirpref algorithm.
The first test is "tar -xzf ports.tar.gz", the second is "rm -rf ports".
The ports.tar.gz file is the ports collection from the OpenBSD 2.8 release.
It contains 6596 directories and 13868 files. The test systems are:
1. Celeron-450, 128Mb, two IDE drives, the system at wd0, file system for
test is at wd1. Size of test file system is 8 Gb, number of cg=991,
size of cg is 8m, block size = 8k, fragment size = 1k OpenBSD-current
from Dec 2000 with BUFCACHEPERCENT=35
2. PIII-600, 128Mb, two IBM DTLA-307045 IDE drives at i815e, the system
at wd0, file system for test is at wd1. Size of test file system is 40 Gb,
number of cg=5324, size of cg is 8m, block size = 8k, fragment size = 1k
OpenBSD-current from Dec 2000 with BUFCACHEPERCENT=50
You can get more info about the test systems and methods at:
http://www.ptci.ru/gluk/dirpref/old/dirpref.html
Test Results
tar -xzf ports.tar.gz rm -rf ports
mode old dirpref new dirpref speedup old dirprefnew dirpref speedup
First system
normal 667 472 1.41 477 331 1.44
async 285 144 1.98 130 14 9.29
sync 768 616 1.25 477 334 1.43
softdep 413 252 1.64 241 38 6.34
Second system
normal 329 81 4.06 263.5 93.5 2.81
async 302 25.7 11.75 112 2.26 49.56
sync 281 57.0 4.93 263 90.5 2.9
softdep 341 40.6 8.4 284 4.76 59.66
"old dirpref" and "new dirpref" columns give a test time in seconds.
speedup - speed increasement in times, ie. old dirpref / new dirpref.
------
Algorithm description
The old dirpref algorithm is described in comments:
/*
* Find a cylinder to place a directory.
*
* The policy implemented by this algorithm is to select from
* among those cylinder groups with above the average number of
* free inodes, the one with the smallest number of directories.
*/
A new directory is allocated in a different cylinder groups than its
parent directory resulting in a directory tree that is spreaded across
all the cylinder groups. This spreading out results in a non-optimal
access to the directories and files. When we have a small filesystem
it is not a problem but when the filesystem is big then perfomance
degradation becomes very apparent.
What I mean by a big file system ?
1. A big filesystem is a filesystem which occupy 20-30 or more percent
of total drive space, i.e. first and last cylinder are physically
located relatively far from each other.
2. It has a relatively large number of cylinder groups, for example
more cylinder groups than 50% of the buffers in the buffer cache.
The first results in long access times, while the second results in
many buffers being used by metadata operations. Such operations use
cylinder group blocks and on-disk inode blocks. The cylinder group
block (fs->fs_cblkno) contains struct cg, inode and block bit maps.
It is 2k in size for the default filesystem parameters. If new and
parent directories are located in different cylinder groups then the
system performs more input/output operations and uses more buffers.
On filesystems with many cylinder groups, lots of cache buffers are
used for metadata operations.
My solution for this problem is very simple. I allocate many directories
in one cylinder group. I also do some things, so that the new allocation
method does not cause excessive fragmentation and all directory inodes
will not be located at a location far from its file's inodes and data.
The algorithm is:
/*
* Find a cylinder group to place a directory.
*
* The policy implemented by this algorithm is to allocate a
* directory inode in the same cylinder group as its parent
* directory, but also to reserve space for its files inodes
* and data. Restrict the number of directories which may be
* allocated one after another in the same cylinder group
* without intervening allocation of files.
*
* If we allocate a first level directory then force allocation
* in another cylinder group.
*/
My early versions of dirpref give me a good results for a wide range of
file operations and different filesystem capacities except one case:
those applications that create their entire directory structure first
and only later fill this structure with files.
My solution for such and similar cases is to limit a number of
directories which may be created one after another in the same cylinder
group without intervening file creations. For this purpose, I allocate
an array of counters at mount time. This array is linked to the superblock
fs->fs_contigdirs[cg]. Each time a directory is created the counter
increases and each time a file is created the counter decreases. A 60Gb
filesystem with 8mb/cg requires 10kb of memory for the counters array.
The maxcontigdirs is a maximum number of directories which may be created
without an intervening file creation. I found in my tests that the best
performance occurs when I restrict the number of directories in one cylinder
group such that all its files may be located in the same cylinder group.
There may be some deterioration in performance if all the file inodes
are in the same cylinder group as its containing directory, but their
data partially resides in a different cylinder group. The maxcontigdirs
value is calculated to try to prevent this condition. Since there is
no way to know how many files and directories will be allocated later
I added two optimization parameters in superblock/tunefs. They are:
int32_t fs_avgfilesize; /* expected average file size */
int32_t fs_avgfpdir; /* expected # of files per directory */
These parameters have reasonable defaults but may be tweeked for special
uses of a filesystem. They are only necessary in rare cases like better
tuning a filesystem being used to store a squid cache.
I have been using this algorithm for about 3 months. I have done
a lot of testing on filesystems with different capacities, average
filesize, average number of files per directory, and so on. I think
this algorithm has no negative impact on filesystem perfomance. It
works better than the default one in all cases. The new dirpref
will greatly improve untarring/removing/coping of big directories,
decrease load on cvs servers and much more. The new dirpref doesn't
speedup a compilation process, but also doesn't slow it down.
Obtained from: Grigoriy Orlov <gluk@ptci.ru>
=====
=====
iedowse 2001/04/23 17:37:17 PDT
Pre-dirpref versions of fsck may zero out the new superblock fields
fs_contigdirs, fs_avgfilesize and fs_avgfpdir. This could cause
panics if these fields were zeroed while a filesystem was mounted
read-only, and then remounted read-write.
Add code to ffs_reload() which copies the fs_contigdirs pointer
from the previous superblock, and reinitialises fs_avgf* if necessary.
Reviewed by: mckusick
=====
=====
nik 2001/04/10 03:36:44 PDT
Add information about the new options to newfs and tunefs which set the
expected average file size and number of files per directory. Could do
with some fleshing out.
=====
2001-09-06 06:16:00 +04:00
|
|
|
size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
|
|
|
|
fs->fs_contigdirs = space;
|
|
|
|
space = (char *)space + size;
|
|
|
|
memset(fs->fs_contigdirs, 0, size);
|
|
|
|
/* Compatibility for old filesystems - XXX */
|
|
|
|
if (fs->fs_avgfilesize <= 0)
|
|
|
|
fs->fs_avgfilesize = AVFILESIZ;
|
|
|
|
if (fs->fs_avgfpdir <= 0)
|
|
|
|
fs->fs_avgfpdir = AFPDIR;
|
2004-05-27 21:04:52 +04:00
|
|
|
fs->fs_active = NULL;
|
2002-07-30 11:40:07 +04:00
|
|
|
mp->mnt_data = ump;
|
2004-04-21 05:05:31 +04:00
|
|
|
mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
|
|
|
|
mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
|
|
|
|
mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
|
|
|
|
mp->mnt_stat.f_namemax = MAXNAMLEN;
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
if (UFS_MPISAPPLEUFS(ump)) {
|
2002-09-29 00:11:05 +04:00
|
|
|
/* NeXT used to keep short symlinks in the inode even
|
|
|
|
* when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
|
|
|
|
* is probably -1, but we still need to be able to identify
|
|
|
|
* short symlinks.
|
|
|
|
*/
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
|
|
|
|
ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
|
|
|
|
mp->mnt_iflag |= IMNT_DTYPE;
|
|
|
|
} else {
|
|
|
|
ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
|
|
|
|
ump->um_dirblksiz = DIRBLKSIZ;
|
|
|
|
if (ump->um_maxsymlinklen > 0)
|
|
|
|
mp->mnt_iflag |= IMNT_DTYPE;
|
|
|
|
else
|
|
|
|
mp->mnt_iflag &= ~IMNT_DTYPE;
|
2002-09-29 00:11:05 +04:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
mp->mnt_fs_bshift = fs->fs_bshift;
|
|
|
|
mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
|
1994-06-08 15:41:58 +04:00
|
|
|
mp->mnt_flag |= MNT_LOCAL;
|
1998-03-18 18:57:26 +03:00
|
|
|
#ifdef FFS_EI
|
|
|
|
if (needswap)
|
|
|
|
ump->um_flags |= UFS_NEEDSWAP;
|
|
|
|
#endif
|
1994-06-08 15:41:58 +04:00
|
|
|
ump->um_mountp = mp;
|
|
|
|
ump->um_dev = dev;
|
|
|
|
ump->um_devvp = devvp;
|
|
|
|
ump->um_nindir = fs->fs_nindir;
|
2000-11-27 11:39:39 +03:00
|
|
|
ump->um_lognindir = ffs(fs->fs_nindir) - 1;
|
1994-06-08 15:41:58 +04:00
|
|
|
ump->um_bptrtodb = fs->fs_fsbtodb;
|
|
|
|
ump->um_seqinc = fs->fs_frag;
|
|
|
|
for (i = 0; i < MAXQUOTAS; i++)
|
|
|
|
ump->um_quotas[i] = NULLVP;
|
1999-11-15 21:49:07 +03:00
|
|
|
devvp->v_specmountpoint = mp;
|
|
|
|
if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
|
|
|
|
error = softdep_mount(devvp, mp, fs, cred);
|
|
|
|
if (error) {
|
2001-09-02 05:58:30 +04:00
|
|
|
free(fs->fs_csp, M_UFSMNT);
|
1999-11-15 21:49:07 +03:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
2004-05-25 18:54:55 +04:00
|
|
|
if (ronly == 0 && fs->fs_snapinum[0] != 0)
|
|
|
|
ffs_snapshot_mount(mp);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (0);
|
|
|
|
out:
|
2003-11-08 08:35:11 +03:00
|
|
|
if (fs)
|
|
|
|
free(fs, M_UFSMNT);
|
1999-11-15 21:49:07 +03:00
|
|
|
devvp->v_specmountpoint = NULL;
|
1994-06-08 15:41:58 +04:00
|
|
|
if (bp)
|
|
|
|
brelse(bp);
|
|
|
|
if (ump) {
|
2004-01-09 22:10:22 +03:00
|
|
|
if (ump->um_oldfscompat)
|
|
|
|
free(ump->um_oldfscompat, M_UFSMNT);
|
1994-06-08 15:41:58 +04:00
|
|
|
free(ump, M_UFSMNT);
|
2002-07-30 11:40:07 +04:00
|
|
|
mp->mnt_data = NULL;
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-04-02 14:39:19 +04:00
|
|
|
* Sanity checks for loading old filesystem superblocks.
|
|
|
|
* See ffs_oldfscompat_write below for unwound actions.
|
1994-06-08 15:41:58 +04:00
|
|
|
*
|
2003-04-02 14:39:19 +04:00
|
|
|
* XXX - Parts get retired eventually.
|
|
|
|
* Unfortunately new bits get added.
|
1994-06-08 15:41:58 +04:00
|
|
|
*/
|
2003-04-02 14:39:19 +04:00
|
|
|
static void
|
|
|
|
ffs_oldfscompat_read(fs, ump, sblockloc)
|
1994-06-08 15:41:58 +04:00
|
|
|
struct fs *fs;
|
2003-04-02 14:39:19 +04:00
|
|
|
struct ufsmount *ump;
|
|
|
|
daddr_t sblockloc;
|
1994-06-08 15:41:58 +04:00
|
|
|
{
|
2003-04-02 14:39:19 +04:00
|
|
|
off_t maxfilesize;
|
2004-01-09 22:10:22 +03:00
|
|
|
int32_t *extrasave;
|
2003-04-02 14:39:19 +04:00
|
|
|
|
2004-01-09 22:10:22 +03:00
|
|
|
if ((fs->fs_magic != FS_UFS1_MAGIC) ||
|
|
|
|
(fs->fs_old_flags & FS_FLAGS_UPDATED))
|
2003-04-05 17:37:36 +04:00
|
|
|
return;
|
|
|
|
|
2004-01-09 22:10:22 +03:00
|
|
|
if (!ump->um_oldfscompat)
|
|
|
|
ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
|
|
|
|
M_UFSMNT, M_WAITOK);
|
2003-09-17 06:24:33 +04:00
|
|
|
|
2004-01-09 22:10:22 +03:00
|
|
|
memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
|
|
|
|
extrasave = ump->um_oldfscompat;
|
|
|
|
extrasave += 512/sizeof(int32_t);
|
|
|
|
extrasave[0] = fs->fs_old_npsect;
|
|
|
|
extrasave[1] = fs->fs_old_interleave;
|
|
|
|
extrasave[2] = fs->fs_old_trackskew;
|
|
|
|
|
|
|
|
/* These fields will be overwritten by their
|
|
|
|
* original values in fs_oldfscompat_write, so it is harmless
|
|
|
|
* to modify them here.
|
2003-04-05 17:37:36 +04:00
|
|
|
*/
|
2004-01-09 22:10:22 +03:00
|
|
|
fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
|
|
|
|
fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
|
|
|
|
fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
|
|
|
|
fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
|
|
|
|
|
|
|
|
fs->fs_maxbsize = fs->fs_bsize;
|
|
|
|
fs->fs_time = fs->fs_old_time;
|
|
|
|
fs->fs_size = fs->fs_old_size;
|
|
|
|
fs->fs_dsize = fs->fs_old_dsize;
|
|
|
|
fs->fs_csaddr = fs->fs_old_csaddr;
|
|
|
|
fs->fs_sblockloc = sblockloc;
|
|
|
|
|
2005-03-05 00:45:29 +03:00
|
|
|
fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
|
2004-01-09 22:10:22 +03:00
|
|
|
|
|
|
|
if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
|
|
|
|
fs->fs_old_nrpos = 8;
|
|
|
|
fs->fs_old_npsect = fs->fs_old_nsect;
|
|
|
|
fs->fs_old_interleave = 1;
|
|
|
|
fs->fs_old_trackskew = 0;
|
2003-04-05 17:37:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (fs->fs_old_inodefmt < FS_44INODEFMT) {
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
ump->um_maxfilesize = (u_quad_t) 1LL << 39;
|
2003-04-02 14:39:19 +04:00
|
|
|
fs->fs_qbmask = ~fs->fs_bmask;
|
|
|
|
fs->fs_qfmask = ~fs->fs_fmask;
|
|
|
|
}
|
2003-04-05 17:37:36 +04:00
|
|
|
|
|
|
|
maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
|
Fixing age old cruft:
* Rather than using mnt_maxsymlinklen to indicate that a file systems returns
d_type fields(!), add a new internal flag, IMNT_DTYPE.
Add 3 new elements to ufsmount:
* um_maxsymlinklen, replaces mnt_maxsymlinklen (which never should have existed
in the first place).
* um_dirblksiz, which tracks the current directory block size, eliminating the
FS-specific checks littered throughout the code. This may be used later to
make the block size variable.
* um_maxfilesize, which is the maximum file size, possibly adjusted lower due
to implementation issues.
Sync some bug fixes from FFS into ext2fs, particularly:
* ffs_lookup.c 1.21, 1.28, 1.33, 1.48
* ffs_inode.c 1.43, 1.44, 1.45, 1.66, 1.67
* ffs_vnops.c 1.84, 1.85, 1.86
Clean up some crappy pointer frobnication.
2004-08-15 11:19:54 +04:00
|
|
|
if (ump->um_maxfilesize > maxfilesize)
|
|
|
|
ump->um_maxfilesize = maxfilesize;
|
2003-04-05 17:37:36 +04:00
|
|
|
|
2003-04-02 14:39:19 +04:00
|
|
|
/* Compatibility for old filesystems */
|
|
|
|
if (fs->fs_avgfilesize <= 0)
|
|
|
|
fs->fs_avgfilesize = AVFILESIZ;
|
|
|
|
if (fs->fs_avgfpdir <= 0)
|
|
|
|
fs->fs_avgfpdir = AFPDIR;
|
2004-01-09 22:10:22 +03:00
|
|
|
|
2003-04-02 14:39:19 +04:00
|
|
|
#if 0
|
|
|
|
if (bigcgs) {
|
|
|
|
fs->fs_save_cgsize = fs->fs_cgsize;
|
|
|
|
fs->fs_cgsize = fs->fs_bsize;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unwinding superblock updates for old filesystems.
|
|
|
|
* See ffs_oldfscompat_read above for details.
|
|
|
|
*
|
|
|
|
* XXX - Parts get retired eventually.
|
|
|
|
* Unfortunately new bits get added.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
ffs_oldfscompat_write(fs, ump)
|
|
|
|
struct fs *fs;
|
|
|
|
struct ufsmount *ump;
|
|
|
|
{
|
2004-01-09 22:10:22 +03:00
|
|
|
int32_t *extrasave;
|
2003-06-12 22:50:43 +04:00
|
|
|
|
2004-01-09 22:10:22 +03:00
|
|
|
if ((fs->fs_magic != FS_UFS1_MAGIC) ||
|
|
|
|
(fs->fs_old_flags & FS_FLAGS_UPDATED))
|
|
|
|
return;
|
2003-04-05 17:37:36 +04:00
|
|
|
|
|
|
|
fs->fs_old_time = fs->fs_time;
|
|
|
|
fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
|
|
|
|
fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
|
|
|
|
fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
|
|
|
|
fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
|
2004-01-09 22:10:22 +03:00
|
|
|
fs->fs_old_flags = fs->fs_flags;
|
2003-04-05 17:37:36 +04:00
|
|
|
|
2003-04-02 14:39:19 +04:00
|
|
|
#if 0
|
|
|
|
if (bigcgs) {
|
|
|
|
fs->fs_cgsize = fs->fs_save_cgsize;
|
|
|
|
}
|
|
|
|
#endif
|
2004-01-09 22:10:22 +03:00
|
|
|
|
|
|
|
memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
|
|
|
|
extrasave = ump->um_oldfscompat;
|
|
|
|
extrasave += 512/sizeof(int32_t);
|
|
|
|
fs->fs_old_npsect = extrasave[0];
|
|
|
|
fs->fs_old_interleave = extrasave[1];
|
|
|
|
fs->fs_old_trackskew = extrasave[2];
|
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unmount system call
|
|
|
|
*/
|
|
|
|
int
|
2003-06-30 02:28:00 +04:00
|
|
|
ffs_unmount(mp, mntflags, p)
|
1994-06-08 15:41:58 +04:00
|
|
|
struct mount *mp;
|
|
|
|
int mntflags;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:41:58 +04:00
|
|
|
{
|
2000-03-30 16:41:09 +04:00
|
|
|
struct ufsmount *ump;
|
|
|
|
struct fs *fs;
|
2001-12-30 18:46:53 +03:00
|
|
|
int error, flags, penderr;
|
1994-06-08 15:41:58 +04:00
|
|
|
|
2001-12-30 18:46:53 +03:00
|
|
|
penderr = 0;
|
1994-06-08 15:41:58 +04:00
|
|
|
flags = 0;
|
1995-01-18 09:19:49 +03:00
|
|
|
if (mntflags & MNT_FORCE)
|
1994-06-08 15:41:58 +04:00
|
|
|
flags |= FORCECLOSE;
|
1999-11-15 21:49:07 +03:00
|
|
|
if (mp->mnt_flag & MNT_SOFTDEP) {
|
2003-06-30 02:28:00 +04:00
|
|
|
if ((error = softdep_flushfiles(mp, flags, p)) != 0)
|
1999-11-15 21:49:07 +03:00
|
|
|
return (error);
|
|
|
|
} else {
|
2003-06-30 02:28:00 +04:00
|
|
|
if ((error = ffs_flushfiles(mp, flags, p)) != 0)
|
1999-11-15 21:49:07 +03:00
|
|
|
return (error);
|
|
|
|
}
|
1994-06-08 15:41:58 +04:00
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
fs = ump->um_fs;
|
2001-12-18 13:57:21 +03:00
|
|
|
if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
|
2003-04-02 14:39:19 +04:00
|
|
|
printf("%s: unmount pending error: blocks %" PRId64
|
|
|
|
" files %d\n",
|
2001-12-18 13:57:21 +03:00
|
|
|
fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
|
|
|
|
fs->fs_pendingblocks = 0;
|
|
|
|
fs->fs_pendinginodes = 0;
|
2001-12-30 18:46:53 +03:00
|
|
|
penderr = 1;
|
2001-12-18 13:57:21 +03:00
|
|
|
}
|
1995-04-13 01:21:00 +04:00
|
|
|
if (fs->fs_ronly == 0 &&
|
|
|
|
ffs_cgupdate(ump, MNT_WAIT) == 0 &&
|
|
|
|
fs->fs_clean & FS_WASCLEAN) {
|
2001-12-30 18:46:53 +03:00
|
|
|
/*
|
|
|
|
* XXXX don't mark fs clean in the case of softdep
|
|
|
|
* pending block errors, until they are fixed.
|
|
|
|
*/
|
|
|
|
if (penderr == 0) {
|
|
|
|
if (mp->mnt_flag & MNT_SOFTDEP)
|
|
|
|
fs->fs_flags &= ~FS_DOSOFTDEP;
|
|
|
|
fs->fs_clean = FS_ISCLEAN;
|
|
|
|
}
|
2003-04-01 01:02:12 +04:00
|
|
|
fs->fs_fmod = 0;
|
1995-04-13 01:21:00 +04:00
|
|
|
(void) ffs_sbupdate(ump, MNT_WAIT);
|
|
|
|
}
|
1999-10-20 18:32:09 +04:00
|
|
|
if (ump->um_devvp->v_type != VBAD)
|
1999-11-15 21:49:07 +03:00
|
|
|
ump->um_devvp->v_specmountpoint = NULL;
|
1999-10-17 03:53:26 +04:00
|
|
|
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
|
2003-12-01 21:57:07 +03:00
|
|
|
(void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
|
2003-06-30 02:28:00 +04:00
|
|
|
NOCRED, p);
|
1999-10-17 03:53:26 +04:00
|
|
|
vput(ump->um_devvp);
|
2001-09-02 05:58:30 +04:00
|
|
|
free(fs->fs_csp, M_UFSMNT);
|
1994-06-08 15:41:58 +04:00
|
|
|
free(fs, M_UFSMNT);
|
2004-01-09 22:10:22 +03:00
|
|
|
if (ump->um_oldfscompat != NULL)
|
|
|
|
free(ump->um_oldfscompat, M_UFSMNT);
|
1994-06-08 15:41:58 +04:00
|
|
|
free(ump, M_UFSMNT);
|
2002-07-30 11:40:07 +04:00
|
|
|
mp->mnt_data = NULL;
|
1994-06-08 15:41:58 +04:00
|
|
|
mp->mnt_flag &= ~MNT_LOCAL;
|
2003-12-01 21:57:07 +03:00
|
|
|
return (0);
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush out all the files in a filesystem.
|
|
|
|
*/
|
1996-02-10 01:22:18 +03:00
|
|
|
int
|
2003-06-30 02:28:00 +04:00
|
|
|
ffs_flushfiles(mp, flags, p)
|
2000-03-30 16:41:09 +04:00
|
|
|
struct mount *mp;
|
1994-06-08 15:41:58 +04:00
|
|
|
int flags;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:41:58 +04:00
|
|
|
{
|
|
|
|
extern int doforce;
|
2000-03-30 16:41:09 +04:00
|
|
|
struct ufsmount *ump;
|
1996-02-10 01:22:18 +03:00
|
|
|
int error;
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
if (!doforce)
|
|
|
|
flags &= ~FORCECLOSE;
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
#ifdef QUOTA
|
|
|
|
if (mp->mnt_flag & MNT_QUOTA) {
|
1996-02-10 01:22:18 +03:00
|
|
|
int i;
|
|
|
|
if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
|
1994-06-08 15:41:58 +04:00
|
|
|
return (error);
|
|
|
|
for (i = 0; i < MAXQUOTAS; i++) {
|
|
|
|
if (ump->um_quotas[i] == NULLVP)
|
|
|
|
continue;
|
2003-06-30 02:28:00 +04:00
|
|
|
quotaoff(p, mp, i);
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Here we fall through to vflush again to ensure
|
|
|
|
* that we have gotten rid of all the system vnodes.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
#endif
|
2004-05-25 18:54:55 +04:00
|
|
|
if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
|
|
|
|
return (error);
|
|
|
|
ffs_snapshot_unmount(mp);
|
1999-11-15 21:49:07 +03:00
|
|
|
/*
|
|
|
|
* Flush all the files.
|
|
|
|
*/
|
1994-06-08 15:41:58 +04:00
|
|
|
error = vflush(mp, NULLVP, flags);
|
1999-11-15 21:49:07 +03:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
/*
|
|
|
|
* Flush filesystem metadata.
|
|
|
|
*/
|
|
|
|
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
|
2003-06-30 02:28:00 +04:00
|
|
|
error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
|
1999-11-15 21:49:07 +03:00
|
|
|
VOP_UNLOCK(ump->um_devvp, 0);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get file system statistics.
|
|
|
|
*/
|
|
|
|
int
|
2004-04-21 05:05:31 +04:00
|
|
|
ffs_statvfs(mp, sbp, p)
|
1994-06-08 15:41:58 +04:00
|
|
|
struct mount *mp;
|
2004-04-21 05:05:31 +04:00
|
|
|
struct statvfs *sbp;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:41:58 +04:00
|
|
|
{
|
2000-03-30 16:41:09 +04:00
|
|
|
struct ufsmount *ump;
|
|
|
|
struct fs *fs;
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
fs = ump->um_fs;
|
2004-04-21 05:05:31 +04:00
|
|
|
sbp->f_bsize = fs->fs_bsize;
|
|
|
|
sbp->f_frsize = fs->fs_fsize;
|
1994-06-08 15:41:58 +04:00
|
|
|
sbp->f_iosize = fs->fs_bsize;
|
|
|
|
sbp->f_blocks = fs->fs_dsize;
|
2002-04-10 12:05:11 +04:00
|
|
|
sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
|
2001-12-18 13:57:21 +03:00
|
|
|
fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
|
2004-04-21 05:05:31 +04:00
|
|
|
sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
|
|
|
|
fs->fs_minfree) / (u_int64_t) 100;
|
|
|
|
if (sbp->f_bfree > sbp->f_bresvd)
|
|
|
|
sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
|
|
|
|
else
|
|
|
|
sbp->f_bavail = 0;
|
1994-06-08 15:41:58 +04:00
|
|
|
sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
|
2001-12-18 13:57:21 +03:00
|
|
|
sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
|
2004-04-21 05:05:31 +04:00
|
|
|
sbp->f_favail = sbp->f_ffree;
|
|
|
|
sbp->f_fresvd = 0;
|
|
|
|
copy_statvfs_info(sbp, mp);
|
1994-06-08 15:41:58 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Go through the disk queues to initiate sandbagged IO;
|
|
|
|
* go through the inodes to write those that have been modified;
|
|
|
|
* initiate the writing of the super block if it has been modified.
|
|
|
|
*
|
|
|
|
* Note: we are always called with the filesystem marked `MPBUSY'.
|
|
|
|
*/
|
|
|
|
int
|
2003-06-30 02:28:00 +04:00
|
|
|
ffs_sync(mp, waitfor, cred, p)
|
1994-06-08 15:41:58 +04:00
|
|
|
struct mount *mp;
|
|
|
|
int waitfor;
|
|
|
|
struct ucred *cred;
|
2003-06-30 02:28:00 +04:00
|
|
|
struct proc *p;
|
1994-06-08 15:41:58 +04:00
|
|
|
{
|
1998-03-01 05:20:01 +03:00
|
|
|
struct vnode *vp, *nvp;
|
|
|
|
struct inode *ip;
|
|
|
|
struct ufsmount *ump = VFSTOUFS(mp);
|
|
|
|
struct fs *fs;
|
2004-01-10 19:23:36 +03:00
|
|
|
int error, count, allerror = 0;
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
fs = ump->um_fs;
|
1998-03-01 05:20:01 +03:00
|
|
|
if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
|
|
|
|
printf("fs = %s\n", fs->fs_fsmnt);
|
|
|
|
panic("update: rofs mod");
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Write back each (modified) inode.
|
|
|
|
*/
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntvnode_slock);
|
1994-06-08 15:41:58 +04:00
|
|
|
loop:
|
2000-05-29 22:28:48 +04:00
|
|
|
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* If the vnode that we are about to sync is no longer
|
|
|
|
* associated with this mount point, start over.
|
|
|
|
*/
|
|
|
|
if (vp->v_mount != mp)
|
|
|
|
goto loop;
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&vp->v_interlock);
|
2000-05-29 22:28:48 +04:00
|
|
|
nvp = LIST_NEXT(vp, v_mntvnodes);
|
1994-06-08 15:41:58 +04:00
|
|
|
ip = VTOI(vp);
|
2000-02-15 01:00:21 +03:00
|
|
|
if (vp->v_type == VNON ||
|
|
|
|
((ip->i_flag &
|
2004-08-14 05:08:02 +04:00
|
|
|
(IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
|
2000-12-04 12:37:06 +03:00
|
|
|
LIST_EMPTY(&vp->v_dirtyblkhd) &&
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
vp->v_uobj.uo_npages == 0))
|
2000-02-15 01:00:21 +03:00
|
|
|
{
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&vp->v_interlock);
|
1994-06-08 15:41:58 +04:00
|
|
|
continue;
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
|
|
|
simple_unlock(&mntvnode_slock);
|
2003-06-29 22:43:21 +04:00
|
|
|
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
|
1998-03-01 05:20:01 +03:00
|
|
|
if (error) {
|
|
|
|
simple_lock(&mntvnode_slock);
|
|
|
|
if (error == ENOENT)
|
|
|
|
goto loop;
|
|
|
|
continue;
|
|
|
|
}
|
2004-08-14 05:08:02 +04:00
|
|
|
if (vp->v_type == VREG && waitfor == MNT_LAZY)
|
|
|
|
error = VOP_UPDATE(vp, NULL, NULL, 0);
|
|
|
|
else
|
|
|
|
error = VOP_FSYNC(vp, cred,
|
|
|
|
waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p);
|
|
|
|
if (error)
|
1994-06-08 15:41:58 +04:00
|
|
|
allerror = error;
|
|
|
|
vput(vp);
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_lock(&mntvnode_slock);
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
1998-03-01 05:20:01 +03:00
|
|
|
simple_unlock(&mntvnode_slock);
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Force stale file system control information to be flushed.
|
|
|
|
*/
|
2004-01-10 19:23:36 +03:00
|
|
|
if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
|
|
|
|
if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
|
|
|
|
allerror = error;
|
|
|
|
/* Flushed work items may create new vnodes to clean */
|
|
|
|
if (allerror == 0 && count) {
|
|
|
|
simple_lock(&mntvnode_slock);
|
|
|
|
goto loop;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
|
|
|
|
!LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
|
1999-11-15 21:49:07 +03:00
|
|
|
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
|
|
|
|
if ((error = VOP_FSYNC(ump->um_devvp, cred,
|
2003-06-30 02:28:00 +04:00
|
|
|
waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
|
1999-11-15 21:49:07 +03:00
|
|
|
allerror = error;
|
|
|
|
VOP_UNLOCK(ump->um_devvp, 0);
|
2004-01-10 19:23:36 +03:00
|
|
|
if (allerror == 0 && waitfor == MNT_WAIT) {
|
|
|
|
simple_lock(&mntvnode_slock);
|
|
|
|
goto loop;
|
|
|
|
}
|
1999-11-15 21:49:07 +03:00
|
|
|
}
|
1994-06-08 15:41:58 +04:00
|
|
|
#ifdef QUOTA
|
2003-06-30 02:28:00 +04:00
|
|
|
qsync(mp);
|
1994-06-08 15:41:58 +04:00
|
|
|
#endif
|
1998-03-01 05:20:01 +03:00
|
|
|
/*
|
|
|
|
* Write back modified superblock.
|
|
|
|
*/
|
|
|
|
if (fs->fs_fmod != 0) {
|
|
|
|
fs->fs_fmod = 0;
|
|
|
|
fs->fs_time = time.tv_sec;
|
2000-05-29 22:28:48 +04:00
|
|
|
if ((error = ffs_cgupdate(ump, waitfor)))
|
|
|
|
allerror = error;
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
1994-06-08 15:41:58 +04:00
|
|
|
return (allerror);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look up a FFS dinode number to find its incore vnode, otherwise read it
|
|
|
|
* in from disk. If it is in core, wait for the lock bit to clear, then
|
|
|
|
* return the inode locked. Detection and handling of mount points must be
|
|
|
|
* done by the calling routine.
|
|
|
|
*/
|
|
|
|
int
|
2003-06-29 22:43:21 +04:00
|
|
|
ffs_vget(mp, ino, vpp)
|
1994-06-08 15:41:58 +04:00
|
|
|
struct mount *mp;
|
|
|
|
ino_t ino;
|
|
|
|
struct vnode **vpp;
|
|
|
|
{
|
1998-03-01 05:20:01 +03:00
|
|
|
struct fs *fs;
|
|
|
|
struct inode *ip;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct ufsmount *ump;
|
|
|
|
struct buf *bp;
|
|
|
|
struct vnode *vp;
|
|
|
|
dev_t dev;
|
1998-09-01 07:11:08 +04:00
|
|
|
int error;
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
dev = ump->um_dev;
|
2000-06-28 03:39:17 +04:00
|
|
|
|
2003-06-29 22:43:21 +04:00
|
|
|
if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
|
2000-06-28 03:39:17 +04:00
|
|
|
return (0);
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
/* Allocate a new vnode/inode. */
|
1996-02-10 01:22:18 +03:00
|
|
|
if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
|
1994-06-08 15:41:58 +04:00
|
|
|
*vpp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
2000-06-28 03:39:17 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If someone beat us to it while sleeping in getnewvnode(),
|
|
|
|
* push back the freshly allocated vnode we don't need, and return.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2000-06-28 03:39:17 +04:00
|
|
|
do {
|
2003-06-29 22:43:21 +04:00
|
|
|
if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
|
2000-06-28 03:51:22 +04:00
|
|
|
ungetnewvnode(vp);
|
2000-06-28 03:39:17 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
|
|
|
|
|
2004-09-21 07:10:35 +04:00
|
|
|
vp->v_flag |= VLOCKSWORK;
|
|
|
|
|
1998-09-01 07:11:08 +04:00
|
|
|
/*
|
|
|
|
* XXX MFS ends up here, too, to allocate an inode. Should we
|
|
|
|
* XXX create another pool for MFS inodes?
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-09-01 07:11:08 +04:00
|
|
|
ip = pool_get(&ffs_inode_pool, PR_WAITOK);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
memset(ip, 0, sizeof(struct inode));
|
1994-06-08 15:41:58 +04:00
|
|
|
vp->v_data = ip;
|
|
|
|
ip->i_vnode = vp;
|
2003-04-02 14:39:19 +04:00
|
|
|
ip->i_ump = ump;
|
1994-06-08 15:41:58 +04:00
|
|
|
ip->i_fs = fs = ump->um_fs;
|
|
|
|
ip->i_dev = dev;
|
|
|
|
ip->i_number = ino;
|
2001-01-10 07:47:10 +03:00
|
|
|
LIST_INIT(&ip->i_pcbufhd);
|
1994-06-08 15:41:58 +04:00
|
|
|
#ifdef QUOTA
|
1996-02-10 01:22:18 +03:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < MAXQUOTAS; i++)
|
|
|
|
ip->i_dquot[i] = NODQUOT;
|
|
|
|
}
|
1994-06-08 15:41:58 +04:00
|
|
|
#endif
|
2001-09-15 20:12:54 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Put it onto its hash chain and lock it so that other requests for
|
|
|
|
* this inode will block if they arrive while we are sleeping waiting
|
|
|
|
* for old data structures to be purged or for the contents of the
|
|
|
|
* disk portion of this inode to be read.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
ufs_ihashins(ip);
|
1998-03-01 05:20:01 +03:00
|
|
|
lockmgr(&ufs_hashlock, LK_RELEASE, 0);
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
/* Read in the disk contents for the inode, copy into the inode. */
|
1996-02-10 01:22:18 +03:00
|
|
|
error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
|
|
|
|
(int)fs->fs_bsize, NOCRED, &bp);
|
|
|
|
if (error) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* The inode does not contain anything useful, so it would
|
|
|
|
* be misleading to leave it on its hash chain. With mode
|
|
|
|
* still zero, it will be unlinked and returned to the free
|
|
|
|
* list by vput().
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
vput(vp);
|
|
|
|
brelse(bp);
|
|
|
|
*vpp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
2003-04-02 14:39:19 +04:00
|
|
|
if (ip->i_ump->um_fstype == UFS1)
|
|
|
|
ip->i_din.ffs1_din = pool_get(&ffs_dinode1_pool, PR_WAITOK);
|
|
|
|
else
|
|
|
|
ip->i_din.ffs2_din = pool_get(&ffs_dinode2_pool, PR_WAITOK);
|
|
|
|
ffs_load_inode(bp, ip, fs, ino);
|
1999-11-15 21:49:07 +03:00
|
|
|
if (DOINGSOFTDEP(vp))
|
|
|
|
softdep_load_inodeblock(ip);
|
|
|
|
else
|
2003-04-02 14:39:19 +04:00
|
|
|
ip->i_ffs_effnlink = ip->i_nlink;
|
1994-06-08 15:41:58 +04:00
|
|
|
brelse(bp);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the vnode from the inode, check for aliases.
|
|
|
|
* Note that the underlying vnode may have changed.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
|
|
|
ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
|
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Finish inode initialization now that aliasing has been resolved.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
|
|
|
genfs_node_init(vp, &ffs_genfsops);
|
1994-06-08 15:41:58 +04:00
|
|
|
ip->i_devvp = ump->um_devvp;
|
|
|
|
VREF(ip->i_devvp);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Ensure that uid and gid are correct. This is a temporary
|
|
|
|
* fix until fsck has been changed to do the update.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2003-04-02 14:39:19 +04:00
|
|
|
if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
|
|
|
|
ip->i_uid = ip->i_ffs1_ouid; /* XXX */
|
|
|
|
ip->i_gid = ip->i_ffs1_ogid; /* XXX */
|
1998-06-13 20:26:22 +04:00
|
|
|
} /* XXX */
|
2003-04-02 14:39:19 +04:00
|
|
|
uvm_vnp_setsize(vp, ip->i_size);
|
1994-06-08 15:41:58 +04:00
|
|
|
*vpp = vp;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* File handle to vnode
|
|
|
|
*
|
|
|
|
* Have to be really careful about stale file handles:
|
|
|
|
* - check that the inode number is valid
|
|
|
|
* - call ffs_vget() to get the locked inode
|
|
|
|
* - check for an unallocated inode (i_mode == 0)
|
|
|
|
* - check that the given client host has export rights and return
|
|
|
|
* those rights via. exflagsp and credanonp
|
|
|
|
*/
|
|
|
|
int
|
2003-06-29 22:43:21 +04:00
|
|
|
ffs_fhtovp(mp, fhp, vpp)
|
2000-03-30 16:41:09 +04:00
|
|
|
struct mount *mp;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct fid *fhp;
|
|
|
|
struct vnode **vpp;
|
|
|
|
{
|
2000-03-30 16:41:09 +04:00
|
|
|
struct ufid *ufhp;
|
1994-06-08 15:41:58 +04:00
|
|
|
struct fs *fs;
|
|
|
|
|
|
|
|
ufhp = (struct ufid *)fhp;
|
|
|
|
fs = VFSTOUFS(mp)->um_fs;
|
|
|
|
if (ufhp->ufid_ino < ROOTINO ||
|
|
|
|
ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
|
|
|
|
return (ESTALE);
|
2003-06-29 22:43:21 +04:00
|
|
|
return (ufs_fhtovp(mp, ufhp, vpp));
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode pointer to File handle
|
|
|
|
*/
|
|
|
|
/* ARGSUSED */
|
1996-02-10 01:22:18 +03:00
|
|
|
int
|
1994-06-08 15:41:58 +04:00
|
|
|
ffs_vptofh(vp, fhp)
|
|
|
|
struct vnode *vp;
|
|
|
|
struct fid *fhp;
|
|
|
|
{
|
2000-03-30 16:41:09 +04:00
|
|
|
struct inode *ip;
|
|
|
|
struct ufid *ufhp;
|
1994-06-08 15:41:58 +04:00
|
|
|
|
|
|
|
ip = VTOI(vp);
|
|
|
|
ufhp = (struct ufid *)fhp;
|
|
|
|
ufhp->ufid_len = sizeof(struct ufid);
|
|
|
|
ufhp->ufid_ino = ip->i_number;
|
2003-04-02 14:39:19 +04:00
|
|
|
ufhp->ufid_gen = ip->i_gen;
|
1994-06-08 15:41:58 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1998-03-01 05:20:01 +03:00
|
|
|
void
|
|
|
|
ffs_init()
|
|
|
|
{
|
2000-03-16 21:20:06 +03:00
|
|
|
if (ffs_initcount++ > 0)
|
|
|
|
return;
|
|
|
|
|
2004-05-20 09:39:34 +04:00
|
|
|
#ifdef _LKM
|
|
|
|
pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0,
|
|
|
|
"ffsinopl", &pool_allocator_nointr);
|
2005-02-27 01:31:44 +03:00
|
|
|
pool_init(&ffs_dinode1_pool, sizeof(struct ufs1_dinode), 0, 0, 0,
|
2004-05-20 09:39:34 +04:00
|
|
|
"dino1pl", &pool_allocator_nointr);
|
|
|
|
pool_init(&ffs_dinode2_pool, sizeof(struct ufs2_dinode), 0, 0, 0,
|
|
|
|
"dino2pl", &pool_allocator_nointr);
|
|
|
|
#endif
|
1999-11-15 21:49:07 +03:00
|
|
|
softdep_initialize();
|
1998-03-01 05:20:01 +03:00
|
|
|
ufs_init();
|
|
|
|
}
|
|
|
|
|
2001-09-15 20:12:54 +04:00
|
|
|
void
|
|
|
|
ffs_reinit()
|
|
|
|
{
|
|
|
|
softdep_reinitialize();
|
|
|
|
ufs_reinit();
|
|
|
|
}
|
|
|
|
|
2000-03-16 21:20:06 +03:00
|
|
|
void
|
|
|
|
ffs_done()
|
|
|
|
{
|
|
|
|
if (--ffs_initcount > 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* XXX softdep cleanup ? */
|
|
|
|
ufs_done();
|
2004-05-20 09:39:34 +04:00
|
|
|
#ifdef _LKM
|
|
|
|
pool_destroy(&ffs_dinode2_pool);
|
|
|
|
pool_destroy(&ffs_dinode1_pool);
|
2000-03-16 21:20:06 +03:00
|
|
|
pool_destroy(&ffs_inode_pool);
|
2004-05-20 09:39:34 +04:00
|
|
|
#endif
|
2000-03-16 21:20:06 +03:00
|
|
|
}
|
|
|
|
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
SYSCTL_SETUP(sysctl_vfs_ffs_setup, "sysctl vfs.ffs subtree setup")
|
1998-03-01 05:20:01 +03:00
|
|
|
{
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
extern int doasyncfree;
|
2000-04-04 13:23:20 +04:00
|
|
|
extern int ffs_log_changeopt;
|
1998-03-01 05:20:01 +03:00
|
|
|
|
2004-03-24 18:34:46 +03:00
|
|
|
sysctl_createv(clog, 0, NULL, NULL,
|
|
|
|
CTLFLAG_PERMANENT,
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
CTLTYPE_NODE, "vfs", NULL,
|
|
|
|
NULL, 0, NULL, 0,
|
|
|
|
CTL_VFS, CTL_EOL);
|
2004-03-24 18:34:46 +03:00
|
|
|
sysctl_createv(clog, 0, NULL, NULL,
|
|
|
|
CTLFLAG_PERMANENT,
|
2004-05-25 08:44:43 +04:00
|
|
|
CTLTYPE_NODE, "ffs",
|
|
|
|
SYSCTL_DESCR("Berkeley Fast File System"),
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
NULL, 0, NULL, 0,
|
|
|
|
CTL_VFS, 1, CTL_EOL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* @@@ should we even bother with these first three?
|
|
|
|
*/
|
2004-03-24 18:34:46 +03:00
|
|
|
sysctl_createv(clog, 0, NULL, NULL,
|
2004-11-21 22:21:51 +03:00
|
|
|
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
CTLTYPE_INT, "doclusterread", NULL,
|
|
|
|
sysctl_notavail, 0, NULL, 0,
|
|
|
|
CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
|
2004-03-24 18:34:46 +03:00
|
|
|
sysctl_createv(clog, 0, NULL, NULL,
|
2004-11-21 22:21:51 +03:00
|
|
|
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
CTLTYPE_INT, "doclusterwrite", NULL,
|
|
|
|
sysctl_notavail, 0, NULL, 0,
|
|
|
|
CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
|
2004-03-24 18:34:46 +03:00
|
|
|
sysctl_createv(clog, 0, NULL, NULL,
|
2004-11-21 22:21:51 +03:00
|
|
|
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
CTLTYPE_INT, "doreallocblks", NULL,
|
|
|
|
sysctl_notavail, 0, NULL, 0,
|
|
|
|
CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
|
2004-03-24 18:34:46 +03:00
|
|
|
sysctl_createv(clog, 0, NULL, NULL,
|
2004-11-21 22:21:51 +03:00
|
|
|
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
|
2004-05-25 08:44:43 +04:00
|
|
|
CTLTYPE_INT, "doasyncfree",
|
|
|
|
SYSCTL_DESCR("Release dirty blocks asynchronously"),
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
NULL, 0, &doasyncfree, 0,
|
|
|
|
CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
|
2004-03-24 18:34:46 +03:00
|
|
|
sysctl_createv(clog, 0, NULL, NULL,
|
2004-11-21 22:21:51 +03:00
|
|
|
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
|
2004-05-25 08:44:43 +04:00
|
|
|
CTLTYPE_INT, "log_changeopt",
|
|
|
|
SYSCTL_DESCR("Log changes in optimization strategy"),
|
Dynamic sysctl.
Gone are the old kern_sysctl(), cpu_sysctl(), hw_sysctl(),
vfs_sysctl(), etc, routines, along with sysctl_int() et al. Now all
nodes are registered with the tree, and nodes can be added (or
removed) easily, and I/O to and from the tree is handled generically.
Since the nodes are registered with the tree, the mapping from name to
number (and back again) can now be discovered, instead of having to be
hard coded. Adding new nodes to the tree is likewise much simpler --
the new infrastructure handles almost all the work for simple types,
and just about anything else can be done with a small helper function.
All existing nodes are where they were before (numerically speaking),
so all existing consumers of sysctl information should notice no
difference.
PS - I'm sorry, but there's a distinct lack of documentation at the
moment. I'm working on sysctl(3/8/9) right now, and I promise to
watch out for buses.
2003-12-04 22:38:21 +03:00
|
|
|
NULL, 0, &ffs_log_changeopt, 0,
|
|
|
|
CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
|
1998-03-01 05:20:01 +03:00
|
|
|
}
|
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
/*
|
|
|
|
* Write a superblock and associated information back to disk.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ffs_sbupdate(mp, waitfor)
|
|
|
|
struct ufsmount *mp;
|
|
|
|
int waitfor;
|
|
|
|
{
|
2000-03-30 16:41:09 +04:00
|
|
|
struct fs *fs = mp->um_fs;
|
|
|
|
struct buf *bp;
|
2003-04-02 14:39:19 +04:00
|
|
|
int error = 0;
|
|
|
|
u_int32_t saveflag;
|
1998-03-18 18:57:26 +03:00
|
|
|
|
2003-04-02 14:39:19 +04:00
|
|
|
bp = getblk(mp->um_devvp,
|
|
|
|
fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
|
1998-03-18 18:57:26 +03:00
|
|
|
(int)fs->fs_sbsize, 0, 0);
|
1999-11-15 21:49:07 +03:00
|
|
|
saveflag = fs->fs_flags & FS_INTERNAL;
|
|
|
|
fs->fs_flags &= ~FS_INTERNAL;
|
2005-02-27 01:31:44 +03:00
|
|
|
|
1998-08-10 00:15:38 +04:00
|
|
|
memcpy(bp->b_data, fs, fs->fs_sbsize);
|
2003-04-02 14:39:19 +04:00
|
|
|
|
|
|
|
ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
|
1998-03-18 18:57:26 +03:00
|
|
|
#ifdef FFS_EI
|
|
|
|
if (mp->um_flags & UFS_NEEDSWAP)
|
2003-09-26 03:39:17 +04:00
|
|
|
ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
|
1998-03-18 18:57:26 +03:00
|
|
|
#endif
|
1999-11-15 21:49:07 +03:00
|
|
|
fs->fs_flags |= saveflag;
|
1998-03-18 18:57:26 +03:00
|
|
|
|
1994-06-08 15:41:58 +04:00
|
|
|
if (waitfor == MNT_WAIT)
|
|
|
|
error = bwrite(bp);
|
|
|
|
else
|
|
|
|
bawrite(bp);
|
1995-04-13 01:21:00 +04:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ffs_cgupdate(mp, waitfor)
|
|
|
|
struct ufsmount *mp;
|
|
|
|
int waitfor;
|
|
|
|
{
|
2000-03-30 16:41:09 +04:00
|
|
|
struct fs *fs = mp->um_fs;
|
|
|
|
struct buf *bp;
|
1995-04-13 01:21:00 +04:00
|
|
|
int blks;
|
2001-09-02 05:58:30 +04:00
|
|
|
void *space;
|
1995-04-13 01:21:00 +04:00
|
|
|
int i, size, error = 0, allerror = 0;
|
|
|
|
|
|
|
|
allerror = ffs_sbupdate(mp, waitfor);
|
1994-06-08 15:41:58 +04:00
|
|
|
blks = howmany(fs->fs_cssize, fs->fs_fsize);
|
2001-09-02 05:58:30 +04:00
|
|
|
space = fs->fs_csp;
|
1994-06-08 15:41:58 +04:00
|
|
|
for (i = 0; i < blks; i += fs->fs_frag) {
|
|
|
|
size = fs->fs_bsize;
|
|
|
|
if (i + fs->fs_frag > blks)
|
|
|
|
size = (blks - i) * fs->fs_fsize;
|
|
|
|
bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
|
|
|
|
size, 0, 0);
|
1998-03-18 18:57:26 +03:00
|
|
|
#ifdef FFS_EI
|
|
|
|
if (mp->um_flags & UFS_NEEDSWAP)
|
|
|
|
ffs_csum_swap((struct csum*)space,
|
1998-06-13 20:26:22 +04:00
|
|
|
(struct csum*)bp->b_data, size);
|
1998-03-18 18:57:26 +03:00
|
|
|
else
|
|
|
|
#endif
|
1998-08-10 00:15:38 +04:00
|
|
|
memcpy(bp->b_data, space, (u_int)size);
|
2001-09-02 05:58:30 +04:00
|
|
|
space = (char *)space + size;
|
1994-06-08 15:41:58 +04:00
|
|
|
if (waitfor == MNT_WAIT)
|
|
|
|
error = bwrite(bp);
|
|
|
|
else
|
|
|
|
bawrite(bp);
|
|
|
|
}
|
1995-04-13 01:21:00 +04:00
|
|
|
if (!allerror && error)
|
|
|
|
allerror = error;
|
|
|
|
return (allerror);
|
1994-06-08 15:41:58 +04:00
|
|
|
}
|