Various bug-fixes to LFS, to wit:

Kernel:

* Add runtime quantity lfs_ravail, the number of disk-blocks reserved
  for writing.  Writes to the filesystem first reserve a maximum amount
  of blocks before their write is allowed to proceed; after the blocks
  are allocated the reserved total is reduced by a corresponding amount.

  If the lfs_reserve function cannot immediately reserve the requested
  number of blocks, the inode is unlocked, and the thread sleeps until
  the cleaner has made enough space available for the blocks to be
  reserved.  In this way large files can be written to the filesystem
  (or, smaller files can be written to a nearly-full but thoroughly
  clean filesystem) and the cleaner can still function properly.

* Remove explicit switching on dlfs_minfreeseg from the kernel code; it
  is now merely a fs-creation parameter used to compute dlfs_avail and
  dlfs_bfree (and used by fsck_lfs(8) to check their accuracy).  Its
  former role is better assumed by a properly computed dlfs_avail.

* Bounds-check inode numbers submitted through lfs_bmapv and lfs_markv.
  This prevents a panic, but, if the cleaner is feeding the filesystem
  the wrong data, you are still in a world of hurt.

* Cleanup: remove explicit references of DEV_BSIZE in favor of
  btodb()/dbtob().

lfs_cleanerd:

* Make -n mean "send N segments' blocks through a single call to
  lfs_markv".  Previously it had meant "clean N segments through N calls
  to lfs_markv, before looking again to see if more need to be cleaned".
  The new behavior gives better packing of direct data on disk with as
  little metadata as possible, largely alleviating the problem that the
  cleaner can consume more disk through inefficient use of metadata than
  it frees by moving dirty data away from clean "holes" to produce
  entirely clean segments.

* Make -b mean "read as many segments as necessary to write N segments
  of dirty data back to disk", rather than its former meaning of "read
  as many segments as necessary to free N segments worth of space".  The
  new meaning, combined with the new -n behavior described above,
  further aids in cleaning storage efficiency as entire segments can be
  written at once, using as few blocks as possible for segment summaries
  and inode blocks.

* Make the cleaner take note of segments which could not be cleaned due
  to error, and not attempt to clean them until they are entirely free
  of dirty blocks.  This prevents the case in which a cleanerd running
  with -n 1 and without -b (formerly the default) would spin trying
  repeatedly to clean a corrupt segment, while the remaining space
  filled and deadlocked the filesystem.

* Update the lfs_cleanerd manual page to describe all the options,
  including the changes mentioned here (in particular, the -b and -n
  flags were previously undocumented).

fsck_lfs:

* Check, and optionally fix, lfs_avail (to an exact figure) and
  lfs_bfree (within a margin of error) in pass 5.

newfs_lfs:

* Reduce the default dlfs_minfreeseg to 1/20 of the total segments.

* Add a warning if the sgs disklabel field is 16 (the default for FFS'
  cpg, but not usually desirable for LFS' sgs: 5--8 is a better range).

* Change the calculation of lfs_avail and lfs_bfree, corresponding to
  the kernel changes mentioned above.

mount_lfs:

* Add -N and -b options to pass corresponding -n and -b options to
  lfs_cleanerd.

* Default to calling lfs_cleanerd with "-b -n 4".


[All of these changes were largely tested in the 1.5 branch, with the
idea that they (along with previous un-pulled-up work) could be applied
to the branch while it was still in ALPHA2; however my test system has
experienced corruption on another filesystem (/dev/console has gone
missing :^), and, while I believe this is unrelated to the LFS changes, I
cannot with good conscience request that the changes be pulled up.]
This commit is contained in:
perseant 2000-09-09 04:49:54 +00:00
parent 988a012d50
commit 9c7f8050f4
22 changed files with 595 additions and 264 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: cleanerd.c,v 1.21 2000/07/04 22:36:17 perseant Exp $ */
/* $NetBSD: cleanerd.c,v 1.22 2000/09/09 04:49:56 perseant Exp $ */
/*-
* Copyright (c) 1992, 1993
@ -40,7 +40,7 @@ __COPYRIGHT("@(#) Copyright (c) 1992, 1993\n\
#if 0
static char sccsid[] = "@(#)cleanerd.c 8.5 (Berkeley) 6/10/95";
#else
__RCSID("$NetBSD: cleanerd.c,v 1.21 2000/07/04 22:36:17 perseant Exp $");
__RCSID("$NetBSD: cleanerd.c,v 1.22 2000/09/09 04:49:56 perseant Exp $");
#endif
#endif /* not lint */
@ -57,6 +57,7 @@ __RCSID("$NetBSD: cleanerd.c,v 1.21 2000/07/04 22:36:17 perseant Exp $");
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
@ -94,6 +95,14 @@ struct tossstruct {
int seg;
};
typedef struct {
int nsegs; /* number of segments */
struct seglist **segs; /* segment numbers, costs, etc */
int nb; /* total number of blocks */
BLOCK_INFO *ba; /* accumulated block_infos */
caddr_t *buf; /* segment buffers */
} SEGS_AND_BLOCKS;
#define CLEAN_BYTES 0x1
/* function prototypes for system calls; not sure where they should go */
@ -108,7 +117,8 @@ int choose_segments __P((FS_INFO *, struct seglist *,
unsigned long (*)(FS_INFO *, SEGUSE *)));
void clean_fs __P((FS_INFO *, unsigned long (*)(FS_INFO *, SEGUSE *), int, long));
int clean_loop __P((FS_INFO *, int, long));
int clean_segment __P((FS_INFO *, struct seglist *));
int add_segment __P((FS_INFO *, struct seglist *, SEGS_AND_BLOCKS *));
int clean_segments __P((FS_INFO *, SEGS_AND_BLOCKS *));
unsigned long cost_benefit __P((FS_INFO *, SEGUSE *));
int cost_compare __P((const void *, const void *));
void sig_report __P((int));
@ -144,6 +154,9 @@ cost_benefit(fsp, su)
lfsp = &fsp->fi_lfs;
if (live == 0) { /* No cost, only benefit. */
return lblkno(lfsp, seg_size(lfsp)) * t.tv_sec;
} else if (su->su_flags & SEGUSE_ERROR) {
/* No benefit: don't even try */
return 0;
} else {
/*
* from lfsSegUsage.c (Mendel's code).
@ -333,24 +346,26 @@ clean_loop(fsp, nsegs, options)
now = time((time_t *)NULL);
if(debug > 1) {
syslog(LOG_DEBUG, "db_per_seg = %lu bfree = %u avail = %d ",
db_per_seg, fsp->fi_cip->bfree,
fsp->fi_cip->avail);
syslog(LOG_DEBUG, "db_per_seg = %lu bfree = %u avail = %d,"
" bfree = %u, ", db_per_seg, fsp->fi_cip->bfree,
fsp->fi_cip->avail, fsp->fi_cip->bfree);
syslog(LOG_DEBUG, "clean segs = %d, max_free_segs = %ld",
fsp->fi_cip->clean, max_free_segs);
}
if ((fsp->fi_cip->bfree - fsp->fi_cip->avail > db_per_seg &&
fsp->fi_cip->avail < (long)db_per_seg) ||
fsp->fi_cip->avail < (long)db_per_seg &&
fsp->fi_cip->bfree > (long)db_per_seg) ||
(fsp->fi_cip->clean < max_free_segs &&
(fsp->fi_cip->clean <= lfsp->lfs_minfreeseg ||
fsp->fi_cip->clean < max_free_segs * BUSY_LIM)))
{
if(debug)
syslog(LOG_DEBUG, "Cleaner Running at %s "
"(%d of %lu segments available, avail = %d)",
syslog(LOG_DEBUG, "Cleaner Running at %s"
" (%d of %lu segments available, avail = %d,"
" bfree = %u)",
ctime(&now), fsp->fi_cip->clean, max_free_segs,
fsp->fi_cip->avail);
fsp->fi_cip->avail, fsp->fi_cip->bfree);
clean_fs(fsp, cost_benefit, nsegs, options);
if(do_quit) {
if(debug)
@ -408,8 +423,20 @@ clean_loop(fsp, nsegs, options)
return (1);
}
}
if(debug > 1)
syslog(LOG_DEBUG, "Cleaner Not Running at %s", ctime(&now));
if(debug > 1) {
if (fsp->fi_cip->bfree - fsp->fi_cip->avail <= db_per_seg)
syslog(LOG_DEBUG, "condition 1 false");
if (fsp->fi_cip->avail >= (long)db_per_seg)
syslog(LOG_DEBUG, "condition 2 false");
if (fsp->fi_cip->clean >= max_free_segs)
syslog(LOG_DEBUG, "condition 3 false");
if (fsp->fi_cip->clean > lfsp->lfs_minfreeseg)
syslog(LOG_DEBUG, "condition 4 false");
if (fsp->fi_cip->clean >= max_free_segs * BUSY_LIM)
syslog(LOG_DEBUG, "condition 5 false");
syslog(LOG_DEBUG, "Cleaner Not Running at %s", ctime(&now));
}
return (0);
}
@ -422,10 +449,15 @@ clean_fs(fsp, cost_func, nsegs, options)
long options;
{
struct seglist *segs, *sp;
long int to_clean, cleaned_bytes;
long int to_clean, cleaned_bytes, seg_size;
unsigned long i, j, total;
struct rusage ru;
fsid_t *fsidp;
int error;
SEGS_AND_BLOCKS *sbp;
fsidp = &fsp->fi_statfsp->f_fsid;
seg_size = (1 << fsp->fi_lfs.lfs_segshift);
if ((segs =
malloc(fsp->fi_lfs.lfs_nseg * sizeof(struct seglist))) == NULL) {
@ -435,11 +467,11 @@ clean_fs(fsp, cost_func, nsegs, options)
total = i = choose_segments(fsp, segs, cost_func);
/* If we can get lots of cleaning for free, do it now */
sp=segs;
sp = segs;
for(j=0; j < total && sp->sl_bytes == 0; j++) {
if(debug)
syslog(LOG_DEBUG,"Wiping empty segment %ld",sp->sl_id);
if(lfs_segclean(&fsp->fi_statfsp->f_fsid, sp->sl_id) < 0)
if(lfs_segclean(fsidp, sp->sl_id) < 0)
syslog(LOG_NOTICE,"lfs_segclean failed empty segment %ld: %m", sp->sl_id);
++cleaner_stats.segs_empty;
sp++;
@ -450,65 +482,79 @@ clean_fs(fsp, cost_func, nsegs, options)
return;
}
#if 0
/* If we really need to clean a lot, do it now */
if(fsp->fi_cip->clean < 2 * fsp->fi_lfs.lfs_minfreeseg)
nsegs = MAX(nsegs, fsp->fi_lfs.lfs_minfreeseg);
#endif
/* But back down if we haven't got that many free to clean into */
if(fsp->fi_cip->clean < nsegs)
nsegs = fsp->fi_cip->clean;
if(debug > 1)
syslog(LOG_DEBUG, "clean_fs: found %ld segments to clean in file system %s",
syslog(LOG_DEBUG, "clean_fs: found %ld segments to clean in %s",
i, fsp->fi_statfsp->f_mntonname);
if (i) {
sbp = (SEGS_AND_BLOCKS *)malloc(sizeof(SEGS_AND_BLOCKS));
memset(sbp, 0, sizeof(SEGS_AND_BLOCKS));
/* Check which cleaning algorithm to use. */
if (options & CLEAN_BYTES) {
/* Count bytes */
cleaned_bytes = 0;
to_clean = nsegs << fsp->fi_lfs.lfs_segshift;
for (; i && cleaned_bytes < to_clean;
i--, ++sp) {
if (clean_segment(fsp, sp) < 0)
syslog(LOG_NOTICE,"clean_segment failed segment %ld: %m", sp->sl_id);
else if (lfs_segclean(&fsp->fi_statfsp->f_fsid,
sp->sl_id) < 0)
syslog(LOG_NOTICE,"lfs_segclean failed segment %ld: %m", sp->sl_id);
else {
if(debug) {
syslog(LOG_DEBUG,
"Cleaned segment %ld (%ld->%ld/%ld)",
sp->sl_id,
(1<<fsp->fi_lfs.lfs_segshift) - sp->sl_bytes,
cleaned_bytes + (1<<fsp->fi_lfs.lfs_segshift) - sp->sl_bytes,
to_clean);
}
cleaned_bytes += (1<<fsp->fi_lfs.lfs_segshift) - sp->sl_bytes;
for (; i && cleaned_bytes < to_clean; i--, ++sp) {
if (add_segment(fsp, sp, sbp) < 0) {
syslog(LOG_NOTICE,"add_segment failed"
" segment %ld: %m", sp->sl_id);
if (sbp->nsegs == 0)
continue;
else
break;
}
cleaned_bytes += sp->sl_bytes;
}
} else
} else {
/* Count segments */
for (i = MIN(i, nsegs); i-- ; ++sp) {
total--;
syslog(LOG_DEBUG,"Cleaning segment %ld (of %ld choices)", sp->sl_id, i+1);
if (clean_segment(fsp, sp) < 0) {
syslog(LOG_NOTICE,"clean_segment failed segment %ld: %m", sp->sl_id);
if(total)
i++;
syslog(LOG_DEBUG, "Cleaning segment %ld"
" (of %ld choices)", sp->sl_id, i + 1);
if (add_segment(fsp, sp, sbp) != 0) {
syslog(LOG_NOTICE,"add_segment failed"
" segment %ld: %m", sp->sl_id);
if (sbp->nsegs == 0)
continue;
else
break;
}
else if (lfs_segclean(&fsp->fi_statfsp->f_fsid,
sp->sl_id) < 0)
syslog(LOG_NOTICE,"lfs_segclean failed segment %ld: %m", sp->sl_id);
else if(debug)
syslog(LOG_DEBUG,"Completed cleaning segment %ld (of %ld choices)", sp->sl_id, i+1);
}
}
if (clean_segments(fsp, sbp) >= 0) {
for (j = 0; j < sbp->nsegs; j++) {
sp = sbp->segs[j];
if (lfs_segclean(fsidp, sp->sl_id) < 0)
syslog(LOG_NOTICE,
"lfs_segclean: segment %ld: %m",
sp->sl_id);
else
syslog(LOG_DEBUG,
"finished segment %ld",
sp->sl_id);
}
}
free(sbp);
}
free(segs);
if(debug) {
error=getrusage(RUSAGE_SELF,&ru);
error = getrusage(RUSAGE_SELF, &ru);
if(error) {
syslog(LOG_INFO,"getrusage returned error: %m");
syslog(LOG_INFO, "getrusage returned error: %m");
} else {
syslog(LOG_DEBUG,"Current usage: maxrss=%ld, idrss=%ld, isrss=%ld",
ru.ru_maxrss,ru.ru_idrss,ru.ru_isrss);
syslog(LOG_DEBUG, "Current usage: maxrss=%ld,"
" idrss=%ld, isrss=%ld", ru.ru_maxrss,
ru.ru_idrss, ru.ru_isrss);
}
}
}
@ -550,13 +596,17 @@ choose_segments(fsp, seglist, cost_func)
dump_cleaner_info(fsp->fi_cip);
for (sp = seglist, i = 0; i < lfsp->lfs_nseg; ++i) {
if (debug > 1) {
printf("%d...", i);
fflush(stdout);
}
sup = SEGUSE_ENTRY(lfsp, fsp->fi_segusep, i);
if(debug > 1)
if(debug > 2)
PRINT_SEGUSE(sup, i);
if (!(sup->su_flags & SEGUSE_DIRTY) ||
sup->su_flags & SEGUSE_ACTIVE)
continue;
if(debug > 1)
if(debug > 2)
syslog(LOG_DEBUG, "\tchoosing segment %d", i);
sp->sl_cost = (*cost_func)(fsp, sup);
sp->sl_id = i;
@ -565,36 +615,44 @@ choose_segments(fsp, seglist, cost_func)
++sp;
}
nsegs = sp - seglist;
qsort(seglist, nsegs, sizeof(struct seglist), cost_compare);
#if 0
for(i=0; i<nsegs; i++) {
printf("%d: segment %lu age %lu contains %lu priority %lu\n", i,
seglist[i].sl_age, seglist[i].sl_id, seglist[i].sl_bytes,
seglist[i].sl_cost);
if (debug > 1) {
putchar('\n');
syslog(LOG_DEBUG, "Sorting...");
}
#endif
qsort(seglist, nsegs, sizeof(struct seglist), cost_compare);
if (debug > 2)
for(i = 0; i < nsegs; i++) {
syslog(LOG_DEBUG, "%d: segment %lu age %lu"
" contains %lu priority %lu\n", i,
seglist[i].sl_age, seglist[i].sl_id,
seglist[i].sl_bytes, seglist[i].sl_cost);
}
if(debug > 1)
syslog(LOG_DEBUG,"Returning %d segments", nsegs);
return (nsegs);
}
/*
* Add still-valid blocks from the given segment to the block array,
* in preparation for sending through lfs_markv.
*/
int
clean_segment(fsp, slp)
FS_INFO *fsp; /* file system information */
struct seglist *slp; /* segment info */
add_segment(fsp, slp, sbp)
FS_INFO *fsp;
struct seglist *slp;
SEGS_AND_BLOCKS *sbp;
{
int id=slp->sl_id;
BLOCK_INFO *block_array, *bp, *_bip;
int id = slp->sl_id;
BLOCK_INFO *tba, *_bip;
SEGUSE *sp;
struct lfs *lfsp;
struct tossstruct t;
struct dinode *dip;
double util;
caddr_t seg_buf;
daddr_t seg_addr;
int num_blocks, maxblocks, clean_blocks, i, j, error;
int num_blocks, i, j, error;
int seg_isempty=0;
unsigned long *lp;
@ -603,7 +661,7 @@ clean_segment(fsp, slp)
seg_addr = sntoda(lfsp,id);
error = 0;
syslog(LOG_DEBUG, "cleaning segment %d: contains %lu bytes", id,
syslog(LOG_DEBUG, "adding segment %d: contains %lu bytes", id,
(unsigned long)sp->su_nbytes);
/* XXX could add debugging to verify that segment is really empty */
@ -612,17 +670,24 @@ clean_segment(fsp, slp)
++seg_isempty;
}
/* Add a new segment to the accumulated list */
sbp->nsegs++;
sbp->segs = (struct seglist **)realloc(sbp->segs, sizeof(struct seglist *) * sbp->nsegs);
sbp->buf = (caddr_t *)realloc(sbp->buf, sizeof(caddr_t) * sbp->nsegs);
sbp->segs[sbp->nsegs - 1] = slp;
/* map the segment into a buffer */
if (mmap_segment(fsp, id, &seg_buf, do_mmap) < 0) {
syslog(LOG_WARNING,"clean_segment: mmap_segment failed: %m");
syslog(LOG_WARNING,"add_segment: mmap_segment failed: %m");
++cleaner_stats.segs_error;
--sbp->nsegs;
return (-1);
}
sbp->buf[sbp->nsegs - 1] = seg_buf;
/* get a list of blocks that are contained by the segment */
if ((error = lfs_segmapv(fsp, id, seg_buf, &block_array,
&num_blocks)) < 0) {
if ((error = lfs_segmapv(fsp, id, seg_buf, &tba, &num_blocks)) < 0) {
syslog(LOG_WARNING,
"clean_segment: lfs_segmapv failed for segment %d", id);
"add_segment: lfs_segmapv failed for segment %d", id);
goto out;
}
cleaner_stats.blocks_read += fsp->fi_lfs.lfs_ssize;
@ -631,18 +696,18 @@ clean_segment(fsp, slp)
syslog(LOG_DEBUG, "lfs_segmapv returned %d blocks", num_blocks);
/* get the current disk address of blocks contained by the segment */
if ((error = lfs_bmapv(&fsp->fi_statfsp->f_fsid, block_array,
if ((error = lfs_bmapv(&fsp->fi_statfsp->f_fsid, tba,
num_blocks)) < 0) {
perror("clean_segment: lfs_bmapv failed");
syslog(LOG_NOTICE, "add_segment: lfs_bmapv failed");
goto out;
}
/* Now toss any blocks not in the current segment */
t.lfs = lfsp;
t.seg = id;
toss(block_array, &num_blocks, sizeof(BLOCK_INFO), bi_tossold, &t);
toss(tba, &num_blocks, sizeof(BLOCK_INFO), bi_tossold, &t);
/* Check if last element should be tossed */
if (num_blocks && bi_tossold(&t, block_array + num_blocks - 1, NULL))
if (num_blocks && bi_tossold(&t, tba + num_blocks - 1, NULL))
--num_blocks;
if(seg_isempty) {
@ -653,17 +718,17 @@ clean_segment(fsp, slp)
}
/* XXX KS - check for misplaced blocks */
for(i=0; i<num_blocks; i++) {
if(block_array[i].bi_daddr
&& ((char *)(block_array[i].bi_bp) - seg_buf) != (block_array[i].bi_daddr - seg_addr) * DEV_BSIZE
&& datosn(&(fsp->fi_lfs),block_array[i].bi_daddr) == id)
if(tba[i].bi_daddr
&& ((char *)(tba[i].bi_bp) - seg_buf) != (tba[i].bi_daddr - seg_addr) * DEV_BSIZE
&& datosn(&(fsp->fi_lfs), tba[i].bi_daddr) == id)
{
if(debug > 1) {
syslog(LOG_DEBUG, "seg %d, ino %d lbn %d, 0x%x != 0x%lx (fixed)",
id,
block_array[i].bi_inode,
block_array[i].bi_lbn,
block_array[i].bi_daddr,
(long)seg_addr + ((char *)(block_array[i].bi_bp) - seg_buf)/DEV_BSIZE);
tba[i].bi_inode,
tba[i].bi_lbn,
tba[i].bi_daddr,
(long)seg_addr + ((char *)(tba[i].bi_bp) - seg_buf)/DEV_BSIZE);
}
/*
* XXX KS - have to be careful here about Inodes;
@ -671,17 +736,17 @@ clean_segment(fsp, slp)
* segment from where we thought, we need to reload
* the *right* inode, not the first one in the block.
*/
if(block_array[i].bi_lbn == LFS_UNUSED_LBN) {
dip = (struct dinode *)(seg_buf + (block_array[i].bi_daddr - seg_addr) * DEV_BSIZE);
if(tba[i].bi_lbn == LFS_UNUSED_LBN) {
dip = (struct dinode *)(seg_buf + (tba[i].bi_daddr - seg_addr) * DEV_BSIZE);
for(j=INOPB(lfsp)-1;j>=0;j--) {
if(dip[j].di_u.inumber == block_array[i].bi_inode) {
block_array[i].bi_bp = (char *)(dip+j);
if(dip[j].di_u.inumber == tba[i].bi_inode) {
tba[i].bi_bp = (char *)(dip+j);
break;
}
}
if(j<0) {
syslog(LOG_NOTICE, "lost inode %d in the shuffle! (blk %d)",
block_array[i].bi_inode, block_array[i].bi_daddr);
tba[i].bi_inode, tba[i].bi_daddr);
syslog(LOG_DEBUG, "inode numbers found were:");
for(j=INOPB(lfsp)-1;j>=0;j--) {
syslog(LOG_DEBUG, "%d", dip[j].di_u.inumber);
@ -689,12 +754,12 @@ clean_segment(fsp, slp)
err(1,"lost inode");
} else if(debug>1) {
syslog(LOG_DEBUG,"Ino %d corrected to 0x%x+%d",
block_array[i].bi_inode,
block_array[i].bi_daddr,
(int)((caddr_t)(block_array[i].bi_bp) - (caddr_t)(long)seg_addr) % DEV_BSIZE);
tba[i].bi_inode,
tba[i].bi_daddr,
(int)((caddr_t)(tba[i].bi_bp) - (caddr_t)(long)seg_addr) % DEV_BSIZE);
}
} else {
block_array[i].bi_bp = seg_buf + (block_array[i].bi_daddr - seg_addr) * DEV_BSIZE;
tba[i].bi_bp = seg_buf + (tba[i].bi_daddr - seg_addr) * DEV_BSIZE;
}
}
}
@ -702,47 +767,83 @@ clean_segment(fsp, slp)
/* Update live bytes calc - XXX KS */
slp->sl_bytes = 0;
for(i=0; i<num_blocks; i++)
if(block_array[i].bi_lbn == LFS_UNUSED_LBN)
if(tba[i].bi_lbn == LFS_UNUSED_LBN)
slp->sl_bytes += sizeof(struct dinode);
else
slp->sl_bytes += block_array[i].bi_size;
slp->sl_bytes += tba[i].bi_size;
if(debug > 1) {
syslog(LOG_DEBUG, "after bmapv still have %d blocks", num_blocks);
if (num_blocks)
syslog(LOG_DEBUG, "BLOCK INFOS");
for (_bip = block_array, i=0; i < num_blocks; ++_bip, ++i) {
for (_bip = tba, i=0; i < num_blocks; ++_bip, ++i) {
PRINT_BINFO(_bip);
lp = (u_long *)_bip->bi_bp;
}
}
++cleaner_stats.segs_cleaned;
cleaner_stats.blocks_written += num_blocks;
util = ((double)num_blocks / fsp->fi_lfs.lfs_ssize);
/* Add these blocks to the accumulated list */
sbp->ba = realloc(sbp->ba, (sbp->nb + num_blocks) * sizeof(BLOCK_INFO));
memcpy(sbp->ba + sbp->nb, tba, num_blocks * sizeof(BLOCK_INFO));
sbp->nb += num_blocks;
return (0);
out:
--sbp->nsegs;
if (sbp->ba)
free(sbp->ba);
if (error) {
sp->su_flags |= SEGUSE_ERROR;
++cleaner_stats.segs_error;
}
munmap_segment(fsp, sbp->buf[sbp->nsegs], do_mmap);
if (stat_report && cleaner_stats.segs_cleaned % stat_report == 0)
sig_report(SIGUSR1);
return (error);
}
/* Call markv and clean up */
int
clean_segments(fsp, sbp)
FS_INFO *fsp;
SEGS_AND_BLOCKS *sbp;
{
int maxblocks, clean_blocks;
BLOCK_INFO *bp;
int i, error;
double util;
error = 0;
cleaner_stats.segs_cleaned += sbp->nsegs;
cleaner_stats.blocks_written += sbp->nb;
util = ((double)sbp->nb / fsp->fi_lfs.lfs_ssize);
cleaner_stats.util_tot += util;
cleaner_stats.util_sos += util * util;
if (do_small)
maxblocks = MAXPHYS / fsp->fi_lfs.lfs_bsize - 1;
else
maxblocks = num_blocks;
maxblocks = sbp->nb;
for (bp = block_array; num_blocks > 0; bp += clean_blocks) {
clean_blocks = maxblocks < num_blocks ? maxblocks : num_blocks;
for (bp = sbp->ba; sbp->nb > 0; bp += clean_blocks) {
clean_blocks = maxblocks < sbp->nb ? maxblocks : sbp->nb;
if ((error = lfs_markv(&fsp->fi_statfsp->f_fsid,
bp, clean_blocks)) < 0) {
syslog(LOG_WARNING,"clean_segment: lfs_markv failed: %m");
goto out;
++cleaner_stats.segs_error;
}
num_blocks -= clean_blocks;
else
sbp->nb -= clean_blocks;
}
out:
if (block_array)
free(block_array);
/* Clean up */
if (sbp->ba)
free(sbp->ba);
if (error)
++cleaner_stats.segs_error;
munmap_segment(fsp, seg_buf, do_mmap);
for (i = 0; i < sbp->nsegs; i++)
munmap_segment(fsp, sbp->buf[i], do_mmap);
if (stat_report && cleaner_stats.segs_cleaned % stat_report == 0)
sig_report(SIGUSR1);
return (error);
@ -779,8 +880,8 @@ sig_report(sig)
"util_tot ", cleaner_stats.util_tot,
"util_sos ", cleaner_stats.util_sos);
syslog(LOG_DEBUG, "\t\tavg util: %4.2f std dev: %9.6f",
avg = cleaner_stats.util_tot / cleaner_stats.segs_cleaned,
cleaner_stats.util_sos / cleaner_stats.segs_cleaned - avg * avg);
avg = cleaner_stats.util_tot / MAX(cleaner_stats.segs_cleaned, 1.0),
cleaner_stats.util_sos / MAX(cleaner_stats.segs_cleaned - avg * avg, 1.0));
if (sig == SIGUSR2) {

View File

@ -1,4 +1,4 @@
.\" $NetBSD: lfs_cleanerd.8,v 1.6 1999/03/22 18:25:45 garbled Exp $
.\" $NetBSD: lfs_cleanerd.8,v 1.7 2000/09/09 04:49:56 perseant Exp $
.\"
.\" Copyright (c) 1993
.\" The Regents of the University of California. All rights reserved.
@ -41,7 +41,11 @@
.Nd garbage collect a log-structured file system
.Sh SYNOPSIS
.Nm
.Op Fl ds
.Op Fl bdfmqs
.Op Fl l Ar load-threshhold
.Op Fl n Ar number-of-segments
.Op Fl r Ar report-frequency
.Op Fl t Ar timeout
.Pa node
.Sh DESCRIPTION
The
@ -65,14 +69,54 @@ taken by the inactive data which was in it.
.Pp
The following options are available:
.Bl -tag -width indent
.It Fl b
Use bytes written, rather than segments read, when determining how many
segments to clean at once.
.It Fl d
Run in debug mode.
Do not become a daemon process, and print debugging information.
Do not become a daemon process, and print debugging information. More
.Fl d
s give more detailed debugging information.
.It Fl f
Use filesystem idle time as the criterion for aggressive cleaning,
instead of system load.
.It Fl l Ar load-threshhold
Clean more aggressively when the system load is below the given
threshhold. The default threshhold is 0.2.
.It Fl m
Use mmap, rather than rereading the Ifile to update the cleaner's
knowledge of the filesystem. Do not use this option.
.It Fl n Ar number-of-segments
Clean this number of segments at a time: that is, pass this many
segments' blocks through a single call to lfs_markv, or, if
.Fl b
was also given, pass this many segments' worth of blocks through a
single call to lfs_markv.
.It Fl q
Quit after cleaning once.
.It Fl r Ar report-frequency
Give an efficiency report after every
.Ar report-frequency
times through the main loop.
.It Fl s
When cleaning the file system, read data in small chunks.
When cleaning the file system, send only a few blocks through lfs_markv
at a time. Don't use this option.
.It Fl t Ar timeout
Poll the filesystem every
.Ar timeout
seconds, looking for opportunities to clean. The default is 300, that
is, five minutes. Note that
.Nm
will be automatically awakened when the filesystem is active, so it is
not usually necessary to set
.Ar timeout
to a low value.
.El
.Sh SEE ALSO
.Xr mount_lfs 8
.Xr lfs_bmapv 2
.Xr lfs_markv 2
.Xr lfs_segwait 2
.Sh HISTORY
The
.Nm

View File

@ -1,4 +1,4 @@
/* $NetBSD: library.c,v 1.15 2000/01/18 08:02:30 perseant Exp $ */
/* $NetBSD: library.c,v 1.16 2000/09/09 04:49:56 perseant Exp $ */
/*-
* Copyright (c) 1992, 1993
@ -38,7 +38,7 @@
#if 0
static char sccsid[] = "@(#)library.c 8.3 (Berkeley) 5/24/95";
#else
__RCSID("$NetBSD: library.c,v 1.15 2000/01/18 08:02:30 perseant Exp $");
__RCSID("$NetBSD: library.c,v 1.16 2000/09/09 04:49:56 perseant Exp $");
#endif
#endif /* not lint */
@ -290,7 +290,7 @@ redo_read:
fsp->fi_ifilep = (IFILE *)((caddr_t)fsp->fi_segusep + SEGTABSIZE(fsp));
/*
* The number of ifile entries is equal to the number of blocks
* The number of ifile entries is equal to the number of
* blocks in the ifile minus the ones allocated to cleaner info
* and segment usage table multiplied by the number of ifile
* entries per page.

View File

@ -1,4 +1,4 @@
/* $NetBSD: pass5.c,v 1.5 2000/05/30 04:33:15 perseant Exp $ */
/* $NetBSD: pass5.c,v 1.6 2000/09/09 04:49:56 perseant Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
@ -56,11 +56,16 @@ pass5()
SEGUSE *su;
struct bufarea *bp;
int i;
unsigned long bb; /* total number of used blocks (lower bound) */
unsigned long ubb; /* upper bound number of used blocks */
unsigned long avail; /* blocks available for writing */
/*
* Check segment holdings against actual holdings. Check for
* "clean" segments that contain live data.
*/
avail = 0;
bb = ubb = 0;
for (i = 0; i < sblock.lfs_nseg; i++) {
su = lfs_gseguse(i, &bp);
if (!(su->su_flags & SEGUSE_DIRTY) &&
@ -87,6 +92,39 @@ pass5()
dirty(bp);
}
}
if (su->su_flags & SEGUSE_DIRTY) {
bb += btodb(su->su_nbytes) + su->su_nsums;
ubb += btodb(su->su_nbytes) + su->su_nsums + fsbtodb(&sblock, su->su_ninos);
} else {
avail += fsbtodb(&sblock, sblock.lfs_ssize);
if (su->su_flags & SEGUSE_SUPERBLOCK)
avail -= btodb(LFS_SBPAD);
}
bp->b_flags &= ~B_INUSE;
}
/* Also may be available bytes in current seg */
i = datosn(&sblock, sblock.lfs_offset);
avail += sntoda(&sblock, i + 1) - sblock.lfs_offset;
/* But do not count minfreesegs */
avail -= fsbtodb(&sblock, sblock.lfs_ssize) *
(sblock.lfs_minfreeseg - (sblock.lfs_minfreeseg / 2));
if (avail != sblock.lfs_avail) {
pwarn("avail given as %d, should be %ld\n", sblock.lfs_avail,
avail);
if (preen || reply("fix")) {
sblock.lfs_avail = avail;
sbdirty();
}
}
if (sblock.lfs_bfree > sblock.lfs_dsize - bb ||
sblock.lfs_bfree < sblock.lfs_dsize - ubb) {
pwarn("bfree given as %d, should be between %ld and %ld\n",
sblock.lfs_bfree, sblock.lfs_dsize - ubb,
sblock.lfs_dsize - bb);
if (preen || reply("fix")) {
sblock.lfs_bfree = sblock.lfs_dsize - (ubb + bb) / 2;
sbdirty();
}
}
}

View File

@ -1,4 +1,4 @@
.\" $NetBSD: mount_lfs.8,v 1.6 1999/03/07 11:02:07 mycroft Exp $
.\" $NetBSD: mount_lfs.8,v 1.7 2000/09/09 04:49:56 perseant Exp $
.\"
.\" Copyright (c) 1993, 1994
.\" The Regents of the University of California. All rights reserved.
@ -41,7 +41,8 @@
.Nd mount a log-structured file system
.Sh SYNOPSIS
.Nm ""
.Op Fl dns
.Op Fl bdns
.Op Fl N Ar nsegs
.Op Fl o Ar options
.Ar special
.Ar node
@ -62,6 +63,9 @@ at boot time.
.Pp
The options are as follows:
.Bl -tag -width Ds
.It Fl b
Instruct the cleaner to count bytes written, rather than segments read,
to determine how many segments to clean at once.
.It Fl d
Run
.Xr lfs_cleanerd 8
@ -73,6 +77,12 @@ flag followed by a comma separated string of options.
See the
.Xr mount 8
man page for possible options and their meanings.
.It Fl N Ar nsegs
Clean
.Ar nsegs
segments (or bytes' worth of segments if
.Fl b
is also specified) at a time.
.It Fl n
Don't start
.Xr lfs_cleanerd 8

View File

@ -1,4 +1,4 @@
/* $NetBSD: mount_lfs.c,v 1.9 1999/12/08 22:39:25 perseant Exp $ */
/* $NetBSD: mount_lfs.c,v 1.10 2000/09/09 04:49:56 perseant Exp $ */
/*-
* Copyright (c) 1993, 1994
@ -43,7 +43,7 @@ __COPYRIGHT("@(#) Copyright (c) 1993, 1994\n\
#if 0
static char sccsid[] = "@(#)mount_lfs.c 8.4 (Berkeley) 4/26/95";
#else
__RCSID("$NetBSD: mount_lfs.c,v 1.9 1999/12/08 22:39:25 perseant Exp $");
__RCSID("$NetBSD: mount_lfs.c,v 1.10 2000/09/09 04:49:56 perseant Exp $");
#endif
#endif /* not lint */
@ -72,7 +72,8 @@ int main __P((int, char *[]));
void invoke_cleaner __P((char *));
void usage __P((void));
int short_rds, cleaner_debug;
int short_rds, cleaner_debug, cleaner_bytes;
char *nsegs;
int
main(argc, argv)
@ -85,15 +86,23 @@ main(argc, argv)
const char *errcause;
options = NULL;
nsegs = "4";
mntflags = noclean = 0;
while ((ch = getopt(argc, argv, "dno:s")) != -1)
cleaner_bytes = 1;
while ((ch = getopt(argc, argv, "dN:no:s")) != -1)
switch (ch) {
case 'b':
cleaner_bytes = !cleaner_bytes;
break;
case 'd':
cleaner_debug = 1;
break;
case 'n':
noclean = 1;
break;
case 'N':
nsegs = optarg;
break;
case 'o':
getmntopts(optarg, mopts, &mntflags, 0);
break;
@ -155,6 +164,12 @@ invoke_cleaner(name)
/* Build the argument list. */
*ap++ = _PATH_LFS_CLEANERD;
if (cleaner_bytes)
*ap++ = "-b";
if (nsegs) {
*ap++ = "-n";
*ap++ = nsegs;
}
if (short_rds)
*ap++ = "-s";
if (cleaner_debug)

View File

@ -1,4 +1,4 @@
/* $NetBSD: config.h,v 1.4 2000/07/04 22:35:04 perseant Exp $ */
/* $NetBSD: config.h,v 1.5 2000/09/09 04:49:56 perseant Exp $ */
/*-
* Copyright (c) 1991, 1993
@ -54,10 +54,9 @@
/*
* 1/DFL_MIN_FREE_SEGS gives the fraction of segments to be reserved for
* the cleaner. Experimental data show this number should be around
* 5-10.
* the cleaner.
*/
#define DFL_MIN_FREE_SEGS 10
#define DFL_MIN_FREE_SEGS 20
/*
* MINFREE gives the minimum acceptable percentage of file system

View File

@ -1,4 +1,4 @@
/* $NetBSD: lfs.c,v 1.16 2000/07/04 22:35:05 perseant Exp $ */
/* $NetBSD: lfs.c,v 1.17 2000/09/09 04:49:56 perseant Exp $ */
/*-
* Copyright (c) 1991, 1993
@ -38,7 +38,7 @@
#if 0
static char sccsid[] = "@(#)lfs.c 8.5 (Berkeley) 5/24/95";
#else
__RCSID("$NetBSD: lfs.c,v 1.16 2000/07/04 22:35:05 perseant Exp $");
__RCSID("$NetBSD: lfs.c,v 1.17 2000/09/09 04:49:56 perseant Exp $");
#endif
#endif /* not lint */
@ -313,6 +313,7 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
lfsp->lfs_size = partp->p_size >> lfsp->lfs_fsbtodb;
lfsp->lfs_dsize = lfsp->lfs_size - (LFS_LABELPAD >> lfsp->lfs_bshift);
lfsp->lfs_nseg = lfsp->lfs_dsize / lfsp->lfs_ssize;
lfsp->lfs_nclean = lfsp->lfs_nseg - 1;
lfsp->lfs_maxfilesize = maxtable[lfsp->lfs_bshift] << lfsp->lfs_bshift;
if (minfreeseg == 0)
@ -326,18 +327,21 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
|| lfsp->lfs_nseg < LFS_MIN_SBINTERVAL + 1)
{
if(seg_size == 0 && ssize > (bsize<<1)) {
if(!warned_segtoobig)
fprintf(stderr,"Segment size %d is too large; trying smaller sizes...\n", ssize);
if(!warned_segtoobig) {
fprintf(stderr,"Segment size %d is too large; trying smaller sizes.\n", ssize);
if (ssize == (bsize << 16)) {
fprintf(stderr, "(Did you perhaps accidentally leave \"16\" in the disklabel's sgs field?)\n");
}
}
++warned_segtoobig;
ssize >>= 1;
goto tryagain;
}
fatal("Could not allocate enough segments with segment size %d and block size %d; please decrease the segment size.\n",
fatal("Could not allocate enough segments with segment size %d and block size %d;\nplease decrease the segment size.\n",
ssize, lfsp->lfs_bsize);
}
/* Inform them of success */
if(warned_segtoobig)
fprintf(stderr,"Using segment size %d\n", ssize);
printf("Using %d segments of size %d\n", lfsp->lfs_nseg, ssize);
/*
* The number of free blocks is set from the number of segments
@ -347,12 +351,11 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
* and segment usage table, and half a block per segment that can't
* be written due to fragmentation.
*/
lfsp->lfs_dsize = fsbtodb(lfsp, (lfsp->lfs_nseg -
lfsp->lfs_minfreeseg) *
lfsp->lfs_ssize);
lfsp->lfs_dsize -= fsbtodb(lfsp, lfsp->lfs_nseg / 2);
lfsp->lfs_dsize = (lfsp->lfs_nseg - lfsp->lfs_minfreeseg) *
fsbtodb(lfsp, lfsp->lfs_ssize);
lfsp->lfs_bfree = lfsp->lfs_dsize;
lfsp->lfs_bfree -= fsbtodb(lfsp, lfsp->lfs_nseg / 2);
lfsp->lfs_segtabsz = SEGTABSIZE_SU(lfsp);
lfsp->lfs_cleansz = CLEANSIZE_SU(lfsp);
if ((lfsp->lfs_tstamp = time(NULL)) == -1)
@ -372,7 +375,8 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
*/
/* Figure out where the superblocks are going to live */
lfsp->lfs_sboffs[0] = LFS_LABELPAD/lp->d_secsize;
lfsp->lfs_sboffs[0] = btodb(LFS_LABELPAD);
lfsp->lfs_dsize -= btodb(LFS_SBPAD);
for (i = 1; i < LFS_MAXNUMSB; i++) {
sb_addr = ((i * sb_interval) <<
(lfsp->lfs_segshift - lfsp->lfs_bshift + lfsp->lfs_fsbtodb))
@ -380,6 +384,7 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
if (sb_addr > partp->p_size)
break;
lfsp->lfs_sboffs[i] = sb_addr;
lfsp->lfs_dsize -= btodb(LFS_SBPAD);
}
/* We need >= 2 superblocks */
@ -417,7 +422,8 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
segp->su_nsums = 1; /* 1 summary blocks */
segp->su_ninos = 1; /* 1 inode block */
segp->su_flags = SEGUSE_SUPERBLOCK | SEGUSE_DIRTY;
lfsp->lfs_bfree -= LFS_SUMMARY_SIZE / lp->d_secsize;
lfsp->lfs_bfree -= btodb(LFS_SUMMARY_SIZE);
lfsp->lfs_bfree -=
fsbtodb(lfsp, lfsp->lfs_cleansz + lfsp->lfs_segtabsz + 4);
@ -433,6 +439,7 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
if ((i % sb_interval) == 0 && j < LFS_MAXNUMSB) {
segp->su_flags = SEGUSE_SUPERBLOCK;
lfsp->lfs_bfree -= (LFS_SBPAD / lp->d_secsize);
++j;
} else
segp->su_flags = 0;
segp->su_lastmod = 0;
@ -444,7 +451,6 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
/*
* Initialize dynamic accounting.
*/
lfsp->lfs_avail = lfsp->lfs_bfree;
lfsp->lfs_uinodes = 0;
/*
@ -588,7 +594,17 @@ make_lfs(fd, lp, partp, minfree, block_size, frag_size, seg_size, minfreeseg)
/* Write Superblock */
lfsp->lfs_offset = off / lp->d_secsize;
lfsp->lfs_avail = lfsp->lfs_dsize -
(fsbtodb(lfsp, lfsp->lfs_ssize) - btodb(LFS_SBPAD) -
(sntoda(lfsp, 1) - lfsp->lfs_offset));
lfsp->lfs_bfree = lfsp->lfs_avail; /* XXX */
/* Slop for an imperfect cleaner */
lfsp->lfs_avail += (lfsp->lfs_minfreeseg / 2) *
fsbtodb(lfsp, lfsp->lfs_ssize);
lfsp->lfs_cksum = lfs_sb_cksum(&(lfsp->lfs_dlfs));
put(fd, (off_t)LFS_LABELPAD, &(lfsp->lfs_dlfs), sizeof(struct dlfs));
/*

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs.h,v 1.30 2000/09/09 04:18:28 perseant Exp $ */
/* $NetBSD: lfs.h,v 1.31 2000/09/09 04:49:54 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -181,6 +181,7 @@ struct segusage {
#define SEGUSE_ACTIVE 0x01 /* segment is currently being written */
#define SEGUSE_DIRTY 0x02 /* segment has data in it */
#define SEGUSE_SUPERBLOCK 0x04 /* segment contains a superblock */
#define SEGUSE_ERROR 0x08 /* cleaner: do not clean segment */
u_int32_t su_flags;
};
@ -262,7 +263,7 @@ struct dlfs {
u_char dlfs_fsmnt[MNAMELEN]; /* 232: name mounted on */
/* XXX this is 2 bytes only to pad to a quad boundary */
u_int16_t dlfs_clean; /* 322: file system is clean flag */
int32_t dlfs_dmeta; /* 324: total number of dirty summaries */
int32_t dlfs_dmeta; /* 324: total number of dirty summaries */
u_int32_t dlfs_minfreeseg; /* 328: segs reserved for cleaner */
int8_t dlfs_pad[176]; /* 332: round to 512 bytes */
/* Checksum -- last valid disk field. */
@ -353,6 +354,7 @@ struct lfs {
struct lock lfs_freelock;
pid_t lfs_rfpid; /* Process ID of roll-forward agent */
int lfs_nadirop; /* number of active dirop nodes */
long lfs_ravail; /* blocks pre-reserved for writing */
};
/*
@ -397,7 +399,7 @@ typedef struct _cleanerinfo {
u_int32_t clean; /* number of clean segments */
u_int32_t dirty; /* number of dirty segments */
u_int32_t bfree; /* disk blocks free */
int32_t avail; /* disk blocks available */
int32_t avail; /* disk blocks available */
} CLEANERINFO;
#define CLEANSIZE_SU(fs) \

View File

@ -1,4 +1,4 @@
/* $NetBSD: lfs_alloc.c,v 1.42 2000/07/05 22:25:43 perseant Exp $ */
/* $NetBSD: lfs_alloc.c,v 1.43 2000/09/09 04:49:54 perseant Exp $ */
/*-
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@ -97,6 +97,7 @@ extern struct lock ufs_hashlock;
/* Allocate a new inode. */
/* ARGSUSED */
/* VOP_BWRITE 2i times */
int
lfs_valloc(v)
void *v;
@ -160,7 +161,7 @@ lfs_valloc(v)
new_gen = ifp->if_version; /* version was updated by vfree */
#ifdef LFS_DEBUG_NEXTFREE
ifp->if_nextfree = 0;
VOP_BWRITE(bp);
(void) VOP_BWRITE(bp); /* Ifile */
#else
brelse(bp);
#endif
@ -199,7 +200,7 @@ lfs_valloc(v)
}
ifp--;
ifp->if_nextfree = LFS_UNUSED_INUM;
VOP_BWRITE(bp);
(void) VOP_BWRITE(bp); /* Ifile */
lfs_vunref(vp);
}
#ifdef DIAGNOSTIC
@ -261,7 +262,7 @@ lfs_valloc(v)
ifp->if_daddr = LFS_UNUSED_DADDR;
ifp->if_nextfree = fs->lfs_free;
fs->lfs_free = new_ino;
VOP_BWRITE(bp);
(void) VOP_BWRITE(bp); /* Ifile */
return (error);
}
@ -306,6 +307,7 @@ lfs_vcreate(mp, ino, vp)
/* Free an inode. */
/* ARGUSED */
/* VOP_BWRITE 2i times */
int
lfs_vfree(v)
void *v;
@ -367,7 +369,7 @@ lfs_vfree(v)
++ifp->if_version;
ifp->if_nextfree = fs->lfs_free;
fs->lfs_free = ino;
(void) VOP_BWRITE(bp);
(void) VOP_BWRITE(bp); /* Ifile */
#ifdef DIAGNOSTIC
if(fs->lfs_free == LFS_UNUSED_INUM) {
panic("inode 0 freed");
@ -377,13 +379,16 @@ lfs_vfree(v)
LFS_SEGENTRY(sup, fs, datosn(fs, old_iaddr), bp);
#ifdef DIAGNOSTIC
if (sup->su_nbytes < DINODE_SIZE) {
printf("lfs_vfree: negative byte count (segment %d short by %d)\n", datosn(fs, old_iaddr), (int)DINODE_SIZE - sup->su_nbytes);
printf("lfs_vfree: negative byte count"
" (segment %d short by %d)\n",
datosn(fs, old_iaddr),
(int)DINODE_SIZE - sup->su_nbytes);
panic("lfs_vfree: negative byte count");
sup->su_nbytes = DINODE_SIZE;
}
#endif
sup->su_nbytes -= DINODE_SIZE;
(void) VOP_BWRITE(bp);
(void) VOP_BWRITE(bp); /* Ifile */
}
/* Set superblock modified bit and decrement file count. */

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_balloc.c,v 1.24 2000/07/04 22:30:37 perseant Exp $ */
/* $NetBSD: lfs_balloc.c,v 1.25 2000/09/09 04:49:54 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -108,6 +108,7 @@ int lfs_fragextend __P((struct vnode *, int, int, ufs_daddr_t, struct buf **));
* to disk are given the new special disk address UNWRITTEN == -2, so that
* they can be differentiated from completely new blocks.
*/
/* VOP_BWRITE NIADDR+2 times */
int
lfs_balloc(v)
void *v;
@ -169,7 +170,7 @@ lfs_balloc(v)
ip->i_ffs_size = (lastblock + 1) * fs->lfs_bsize;
uvm_vnp_setsize(vp, ip->i_ffs_size);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
VOP_BWRITE(bp);
(void) VOP_BWRITE(bp);
}
}
@ -328,6 +329,7 @@ lfs_balloc(v)
return (0);
}
/* VOP_BWRITE 1 time */
int
lfs_fragextend(vp, osize, nsize, lbn, bpp)
struct vnode *vp;

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_bio.c,v 1.26 2000/07/05 22:25:43 perseant Exp $ */
/* $NetBSD: lfs_bio.c,v 1.27 2000/09/09 04:49:54 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -118,6 +118,57 @@ extern int lfs_dostats;
#define LFS_MAX_BYTES (((bufpages >> 2) - 10) * NBPG)
#define LFS_WAIT_BYTES (((bufpages >> 1) - (bufpages >> 3) - 10) * NBPG)
#define LFS_BUFWAIT 2
inline static int lfs_fits(struct lfs *, int);
/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored. If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Blocks reserved here are accounted in fs->lfs_ravail; the caller is
 * expected to call lfs_reserve() again with a negative nb to release the
 * reservation once the blocks have actually been allocated (or the
 * operation has been abandoned).
 *
 * Called with vp locked. (Note however that if nb < 0, vp is ignored.)
 *
 * Returns 0 on success, or the error from tsleep() (e.g. EINTR via
 * PCATCH) if the wait for free space was interrupted.
 */
int
lfs_reserve(fs, vp, nb)
struct lfs *fs;
struct vnode *vp;
int nb;
{
CLEANERINFO *cip;
struct buf *bp;
int error, slept;
slept = 0;
/*
 * Loop until the requested blocks -- on top of what is already
 * reserved -- fit in the available space.  A release (nb <= 0)
 * never waits.
 */
while (nb > 0 && !lfs_fits(fs, nb + fs->lfs_ravail)) {
/* Drop the vnode lock so the cleaner can make progress. */
VOP_UNLOCK(vp, 0);
if (!slept)
printf("lfs_reserve: waiting for %ld (bfree = %d,"
" est_bfree = %ld)\n",
nb + fs->lfs_ravail, fs->lfs_bfree,
LFS_EST_BFREE(fs));
++slept;
/* Wake up the cleaner */
LFS_CLEANERINFO(cip, fs, bp);
/*
 * Publish current accounting for the cleaner; avail is
 * reported net of outstanding reservations.
 */
cip->bfree = fs->lfs_bfree;
cip->avail = fs->lfs_avail - fs->lfs_ravail;
(void) VOP_BWRITE(bp); /* Ifile */
wakeup(&lfs_allclean_wakeup);
wakeup(&fs->lfs_nextseg);
/* Sleep on lfs_avail until the cleaner frees space. */
error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
0);
/* Re-acquire the vnode lock before returning or re-checking. */
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
if (error)
return error;
}
if (slept)
printf("lfs_reserve: woke up\n");
/* Record the reservation (or release it, if nb is negative). */
fs->lfs_ravail += nb;
return 0;
}
/*
*
* XXX we don't let meta-data writes run out of space because they can
@ -162,28 +213,17 @@ lfs_fits(struct lfs *fs, int db)
int needed;
needed = db + btodb(LFS_SUMMARY_SIZE) +
fsbtodb(fs, howmany(fs->lfs_uinodes, INOPB(fs)) +
fsbtodb(fs, howmany(fs->lfs_uinodes + 1, INOPB(fs)) +
fs->lfs_segtabsz + 1);
if (needed >= fs->lfs_avail) {
#ifdef DEBUG_LFS
#ifdef DEBUG
printf("lfs_fits: no fit: db = %d, uinodes = %d, "
"needed = %d, avail = %d\n",
db, fs->lfs_uinodes, needed, fs->lfs_avail);
#endif
return 0;
}
/*
* Also check the number of segments available for writing.
* If you don't do this here, it is possible for the *cleaner* to
* cause us to become starved of segments, by flushing the pending
* block list.
*
* XXX the old lfs_markv did not have this problem.
*/
if (fs->lfs_nclean <= fs->lfs_minfreeseg)
return 0;
return 1;
}
@ -228,19 +268,6 @@ lfs_bwrite_ext(bp, flags)
if (!(bp->b_flags & B_LOCKED)) {
fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
db = fragstodb(fs, numfrags(fs, bp->b_bcount));
#ifdef DEBUG_LFS
if(CANT_WAIT(bp,flags)) {
if(((db + (fs->lfs_uinodes + INOPB((fs))) / INOPB(fs)
+ fsbtodb(fs, 1)
+ LFS_SUMMARY_SIZE / DEV_BSIZE
+ fs->lfs_segtabsz)) >= fs->lfs_avail)
{
printf("A");
}
if (fs->lfs_nclean < fs->lfs_minfreeseg)
printf("M");
}
#endif
while (!lfs_fits(fs, db) && !CANT_WAIT(bp,flags)) {
/*
* Out of space, need cleaner to run.
@ -253,6 +280,9 @@ lfs_bwrite_ext(bp, flags)
cip->avail = fs->lfs_avail;
(void) VOP_BWRITE(cbp);
printf("lfs_bwrite: out of available space, "
"waiting on cleaner\n");
wakeup(&lfs_allclean_wakeup);
wakeup(&fs->lfs_nextseg);
error = tsleep(&fs->lfs_avail, PCATCH | PUSER,
@ -331,6 +361,7 @@ lfs_flush(fs, flags)
struct lfs *fs;
int flags;
{
int s;
struct mount *mp, *nmp;
if(lfs_dostats)
@ -357,7 +388,9 @@ lfs_flush(fs, flags)
}
simple_unlock(&mountlist_slock);
lfs_countlocked(&locked_queue_count,&locked_queue_bytes);
s = splbio();
lfs_countlocked(&locked_queue_count, &locked_queue_bytes);
splx(s);
wakeup(&locked_queue_count);
lfs_writing = 0;
@ -514,6 +547,7 @@ extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
/*
* Return a count of buffers on the "locked" queue.
* Don't count malloced buffers, since they don't detract from the total.
*/
void
lfs_countlocked(count, bytes)
@ -526,9 +560,27 @@ lfs_countlocked(count, bytes)
for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
bp = bp->b_freelist.tqe_next) {
#ifdef DEBUG_LOCKED_LIST
if (bp->b_flags & B_CALL) /* Malloced buffer */
continue;
#endif
n++;
size += bp->b_bufsize;
#ifdef DEBUG_LOCKED_LIST
if (n > nbuf)
panic("lfs_countlocked: this can't happen: more"
" buffers locked than exist");
#endif
}
#ifdef DEBUG_LFS
/* Theoretically this function never really does anything */
if (n != *count)
printf("lfs_countlocked: adjusted buf count from %d to %d\n",
*count, n);
if (size != *bytes)
printf("lfs_countlocked: adjusted byte count from %ld to %ld\n",
*bytes, size);
#endif
*count = n;
*bytes = size;
return;

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_cksum.c,v 1.12 2000/03/30 12:41:13 augustss Exp $ */
/* $NetBSD: lfs_cksum.c,v 1.13 2000/09/09 04:49:54 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_debug.c,v 1.10 2000/04/23 21:10:26 perseant Exp $ */
/* $NetBSD: lfs_debug.c,v 1.11 2000/09/09 04:49:54 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -263,7 +263,7 @@ lfs_check_bpp(fs, sp, file, line)
(*bpp)->b_blkno);
}
}
blkno += (*bpp)->b_bcount / DEV_BSIZE;
blkno += btodb((*bpp)->b_bcount);
}
}
#endif /* DEBUG */

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_extern.h,v 1.20 2000/07/05 22:25:43 perseant Exp $ */
/* $NetBSD: lfs_extern.h,v 1.21 2000/09/09 04:49:55 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -119,6 +119,7 @@ void lfs_flush __P((struct lfs *, int));
int lfs_check __P((struct vnode *, ufs_daddr_t, int));
void lfs_freebuf __P((struct buf *));
void lfs_countlocked __P((int *, long *));
int lfs_reserve __P((struct lfs *, struct vnode *, int));
/* lfs_cksum.c */
u_long cksum __P((void *, size_t));

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_inode.c,v 1.43 2000/09/09 03:47:05 perseant Exp $ */
/* $NetBSD: lfs_inode.c,v 1.44 2000/09/09 04:49:55 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -202,6 +202,7 @@ lfs_update(v)
* Truncate the inode oip to at most length size, freeing the
* disk blocks.
*/
/* VOP_BWRITE 1 + NIADDR + VOP_BALLOC == 2 + 2*NIADDR times */
int
lfs_truncate(v)
void *v;
@ -276,17 +277,23 @@ lfs_truncate(v)
aflags = B_CLRBUF;
if (ap->a_flags & IO_SYNC)
aflags |= B_SYNC;
error = lfs_reserve(fs, ovp, fsbtodb(fs, NIADDR + 2));
if (error)
return (error);
error = VOP_BALLOC(ovp, length - 1, 1, ap->a_cred, aflags, &bp);
lfs_reserve(fs, ovp, -fsbtodb(fs, NIADDR + 2));
if (error)
return (error);
oip->i_ffs_size = length;
uvm_vnp_setsize(ovp, length);
(void) uvm_vnp_uncache(ovp);
VOP_BWRITE(bp);
(void) VOP_BWRITE(bp);
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (VOP_UPDATE(ovp, NULL, NULL, 0));
}
if ((error = lfs_reserve(fs, ovp, fsbtodb(fs, 2 * NIADDR + 3))) != 0)
return (error);
/*
* Make sure no writes to this inode can happen while we're
* truncating. Otherwise, blocks which are accounted for on the
@ -321,8 +328,10 @@ lfs_truncate(v)
if (ap->a_flags & IO_SYNC)
aflags |= B_SYNC;
error = VOP_BALLOC(ovp, length - 1, 1, ap->a_cred, aflags, &bp);
if (error)
if (error) {
lfs_reserve(fs, ovp, -fsbtodb(fs, 2 * NIADDR + 3));
return (error);
}
oip->i_ffs_size = length;
size = blksize(fs, oip, lbn);
(void) uvm_vnp_uncache(ovp);
@ -330,7 +339,7 @@ lfs_truncate(v)
memset((char *)bp->b_data + offset, 0,
(u_int)(size - offset));
allocbuf(bp, size);
VOP_BWRITE(bp);
(void) VOP_BWRITE(bp);
}
uvm_vnp_setsize(ovp, length);
/*
@ -473,6 +482,7 @@ done:
#ifdef QUOTA
(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
lfs_reserve(fs, ovp, -fsbtodb(fs, 2 * NIADDR + 3));
return (allerror);
}
@ -515,7 +525,7 @@ lfs_update_seguse(struct lfs *fs, long lastseg, size_t num)
sup->su_nbytes = num;
}
sup->su_nbytes -= num;
return (VOP_BWRITE(bp));
return (VOP_BWRITE(bp)); /* Ifile */
}
/*

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_segment.c,v 1.57 2000/09/09 04:13:43 perseant Exp $ */
/* $NetBSD: lfs_segment.c,v 1.58 2000/09/09 04:49:55 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -277,7 +277,8 @@ lfs_vflush(vp)
if (vp->v_dirtyblkhd.lh_first == NULL) {
lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
} else if((ip->i_flag & IN_CLEANING) && (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
} else if((ip->i_flag & IN_CLEANING) &&
(fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
#ifdef DEBUG_LFS
ivndebug(vp,"vflush/clean");
#endif
@ -362,8 +363,10 @@ lfs_writevnodes(fs, mp, sp, op)
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
if (vp->v_mount != mp) {
printf("lfs_writevnodes: starting over\n");
goto loop;
}
ip = VTOI(vp);
if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
@ -398,9 +401,10 @@ lfs_writevnodes(fs, mp, sp, op)
if (vp != fs->lfs_ivnode &&
vp->v_lock.lk_lockholder != curproc->p_pid) {
#ifdef DEBUG_LFS
printf("lfs_writevnodes: not writing ino %d, locked by pid %d\n",
VTOI(vp)->i_number,
vp->v_lock.lk_lockholder);
printf("lfs_writevnodes: not writing ino %d,"
" locked by pid %d\n",
VTOI(vp)->i_number,
vp->v_lock.lk_lockholder);
#endif
lfs_vunref(vp);
continue;
@ -473,37 +477,33 @@ lfs_segwrite(mp, flags)
return EROFS;
lfs_imtime(fs);
#if 0
/*
* If we are not the cleaner, and we have fewer than lfs_minfreeseg
* clean segments, wait until cleaner writes.
* If we are not the cleaner, and there is no space available,
* wait until cleaner writes.
*/
if(!(flags & SEGM_CLEAN)
&& (!fs->lfs_seglock || !(fs->lfs_sp->seg_flags & SEGM_CLEAN)))
{
do {
if (fs->lfs_nclean <= fs->lfs_minfreeseg ||
fs->lfs_avail <= 0)
{
wakeup(&lfs_allclean_wakeup);
wakeup(&fs->lfs_nextseg);
error = tsleep(&fs->lfs_avail, PRIBIO + 1,
"lfs_avail", 0);
if (error) {
return (error);
}
while (fs->lfs_avail <= 0) {
wakeup(&lfs_allclean_wakeup);
wakeup(&fs->lfs_nextseg);
error = tsleep(&fs->lfs_avail, PRIBIO + 1, "lfs_av2",
0);
if (error) {
return (error);
}
} while (fs->lfs_nclean <= fs->lfs_minfreeseg ||
fs->lfs_avail <= 0);
}
}
#endif
/*
* Synchronize cleaner information
*/
LFS_CLEANERINFO(cip, fs, bp);
cip->bfree = fs->lfs_bfree;
cip->avail = fs->lfs_avail;
(void) VOP_BWRITE(bp);
cip->avail = fs->lfs_avail - fs->lfs_ravail;
(void) VOP_BWRITE(bp); /* Ifile */
/*
* Allocate a segment structure and enough space to hold pointers to
@ -563,7 +563,7 @@ lfs_segwrite(mp, flags)
(ibno-fs->lfs_cleansz))
segusep[datosn(fs, fs->lfs_curseg) %
fs->lfs_sepb].su_flags |= SEGUSE_ACTIVE;
error = VOP_BWRITE(bp);
error = VOP_BWRITE(bp); /* Ifile */
}
}
@ -814,7 +814,7 @@ lfs_writeinode(fs, sp, ip)
ip->i_number);
}
#endif
error = VOP_BWRITE(ibp);
error = VOP_BWRITE(ibp); /* Ifile */
}
/*
@ -822,9 +822,11 @@ lfs_writeinode(fs, sp, ip)
* address or if the last inode address is in the current
* partial segment.
*/
#ifdef DEBUG
if (daddr >= fs->lfs_lastpseg && daddr <= bp->b_blkno)
printf("lfs_writeinode: last inode addr in current pseg "
"(ino %d daddr 0x%x)\n", ino, daddr);
#endif
if (daddr != LFS_UNUSED_DADDR) {
LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
@ -840,7 +842,7 @@ lfs_writeinode(fs, sp, ip)
sup->su_nbytes -= DINODE_SIZE;
redo_ifile =
(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
error = VOP_BWRITE(bp);
error = VOP_BWRITE(bp); /* Ifile */
}
return (redo_ifile);
}
@ -950,7 +952,9 @@ loop: for (bp = vp->v_dirtyblkhd.lh_first; bp && bp->b_vnbufs.le_next != NULL;
if (!(bp->b_flags & B_DELWRI))
panic("lfs_gather: bp not B_DELWRI");
if (!(bp->b_flags & B_LOCKED)) {
printf("lfs_gather: lbn %d blk %d not B_LOCKED\n", bp->b_lblkno, bp->b_blkno);
printf("lfs_gather: lbn %d blk %d"
" not B_LOCKED\n", bp->b_lblkno,
bp->b_blkno);
VOP_PRINT(bp->b_vp);
panic("lfs_gather: bp not B_LOCKED");
}
@ -1025,7 +1029,9 @@ lfs_updatemeta(sp)
(*sp->start_bpp)->b_blkno = off = fs->lfs_offset;
if((*sp->start_bpp)->b_blkno == (*sp->start_bpp)->b_lblkno) {
printf("lfs_updatemeta: ino %d blk %d has same lbn and daddr\n", VTOI(vp)->i_number, off);
printf("lfs_updatemeta: ino %d blk %d"
" has same lbn and daddr\n",
VTOI(vp)->i_number, off);
}
bb = fragstodb(fs, numfrags(fs, (*sp->start_bpp)->b_bcount));
fs->lfs_offset += bb;
@ -1077,7 +1083,7 @@ lfs_updatemeta(sp)
if (ooff == UNWRITTEN)
ip->i_ffs_blocks += bb;
((ufs_daddr_t *)bp->b_data)[ap->in_off] = off;
VOP_BWRITE(bp);
(void) VOP_BWRITE(bp);
}
#ifdef DEBUG
if (daddr >= fs->lfs_lastpseg && daddr <= off) {
@ -1105,7 +1111,7 @@ lfs_updatemeta(sp)
}
#endif
sup->su_nbytes -= (*sp->start_bpp)->b_bcount;
error = VOP_BWRITE(bp);
error = VOP_BWRITE(bp); /* Ifile */
}
}
}
@ -1138,21 +1144,21 @@ lfs_initseg(fs)
repeat = 1;
fs->lfs_offset = fs->lfs_curseg;
sp->seg_number = datosn(fs, fs->lfs_curseg);
sp->seg_bytes_left = fs->lfs_dbpseg * DEV_BSIZE;
sp->seg_bytes_left = dbtob(fs->lfs_dbpseg);
/*
* If the segment contains a superblock, update the offset
* and summary address to skip over it.
*/
LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
if (sup->su_flags & SEGUSE_SUPERBLOCK) {
fs->lfs_offset += LFS_SBPAD / DEV_BSIZE;
fs->lfs_offset += btodb(LFS_SBPAD);
sp->seg_bytes_left -= LFS_SBPAD;
}
brelse(bp);
} else {
sp->seg_number = datosn(fs, fs->lfs_curseg);
sp->seg_bytes_left = (fs->lfs_dbpseg -
(fs->lfs_offset - fs->lfs_curseg)) * DEV_BSIZE;
sp->seg_bytes_left = dbtob(fs->lfs_dbpseg -
(fs->lfs_offset - fs->lfs_curseg));
}
fs->lfs_lastpseg = fs->lfs_offset;
@ -1168,7 +1174,7 @@ lfs_initseg(fs)
sp->segsum = (*sp->cbpp)->b_data;
bzero(sp->segsum, LFS_SUMMARY_SIZE);
sp->start_bpp = ++sp->cbpp;
fs->lfs_offset += LFS_SUMMARY_SIZE / DEV_BSIZE;
fs->lfs_offset += btodb(LFS_SUMMARY_SIZE);
/* Set point to SEGSUM, initialize it. */
ssp = sp->segsum;
@ -1205,13 +1211,13 @@ lfs_newseg(fs)
sup->su_nbytes = 0;
sup->su_nsums = 0;
sup->su_ninos = 0;
(void) VOP_BWRITE(bp);
(void) VOP_BWRITE(bp); /* Ifile */
LFS_CLEANERINFO(cip, fs, bp);
--cip->clean;
++cip->dirty;
fs->lfs_nclean = cip->clean;
(void) VOP_BWRITE(bp);
(void) VOP_BWRITE(bp); /* Ifile */
fs->lfs_lastseg = fs->lfs_curseg;
fs->lfs_curseg = fs->lfs_nextseg;
@ -1299,7 +1305,7 @@ lfs_writeseg(fs, sp)
fs->lfs_avail -= btodb(LFS_SUMMARY_SIZE);
do_again = !(bp->b_flags & B_GATHERED);
(void)VOP_BWRITE(bp);
(void)VOP_BWRITE(bp); /* Ifile */
/*
* Mark blocks B_BUSY, to prevent then from being changed between
* the checksum computation and the actual write.
@ -1414,7 +1420,7 @@ lfs_writeseg(fs, sp)
cksum(&ssp->ss_datasum, LFS_SUMMARY_SIZE - sizeof(ssp->ss_sumsum));
free(datap, M_SEGMENT);
fs->lfs_bfree -= (fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE);
fs->lfs_bfree -= (fsbtodb(fs, ninos) + btodb(LFS_SUMMARY_SIZE));
strategy = devvp->v_op[VOFFSET(vop_strategy)];
@ -1445,7 +1451,8 @@ lfs_writeseg(fs, sp)
cbp->b_bcount = 0;
#ifdef DIAGNOSTIC
if(datosn(fs,(*bpp)->b_blkno + ((*bpp)->b_bcount - 1)/DEV_BSIZE) != datosn(fs,cbp->b_blkno)) {
if(datosn(fs, (*bpp)->b_blkno + btodb(*bpp)->b_bcount - 1) !=
datosn(fs, cbp->b_blkno)) {
panic("lfs_writeseg: Segment overwrite");
}
#endif
@ -1553,7 +1560,9 @@ lfs_writeseg(fs, sp)
* doing a big write, we recalculate how many buffers are
* really still left on the locked queue.
*/
lfs_countlocked(&locked_queue_count,&locked_queue_bytes);
s = splbio();
lfs_countlocked(&locked_queue_count, &locked_queue_bytes);
splx(s);
wakeup(&locked_queue_count);
if(lfs_dostats) {
++lfs_stats.psegwrites;

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_subr.c,v 1.16 2000/06/27 20:57:16 perseant Exp $ */
/* $NetBSD: lfs_subr.c,v 1.17 2000/09/09 04:49:55 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -240,7 +240,7 @@ lfs_segunlock(fs)
ckp = sp->seg_flags & SEGM_CKP;
if (sp->bpp != sp->cbpp) {
/* Free allocated segment summary */
fs->lfs_offset -= LFS_SUMMARY_SIZE / DEV_BSIZE;
fs->lfs_offset -= btodb(LFS_SUMMARY_SIZE);
lfs_freebuf(*sp->bpp);
} else
printf ("unlock to 0 with no summary");

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_syscalls.c,v 1.48 2000/07/13 17:35:03 thorpej Exp $ */
/* $NetBSD: lfs_syscalls.c,v 1.49 2000/09/09 04:49:55 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -169,6 +169,7 @@ sys_lfs_markv(p, v, retval)
int j;
#endif
int numlocked=0, numrefed=0;
ino_t maxino;
if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
return (error);
@ -180,7 +181,10 @@ sys_lfs_markv(p, v, retval)
if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
return (error);
maxino = (dbtofsb(fs, VTOI(fs->lfs_ivnode)->i_ffs_blocks) -
fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;
origcnt = cnt = SCARG(uap, blkcnt);
start = malloc(cnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
error = copyin(SCARG(uap, blkiov), start, cnt * sizeof(BLOCK_INFO));
@ -240,6 +244,11 @@ sys_lfs_markv(p, v, retval)
}
}
#endif /* LFS_TRACK_IOS */
/* Bounds-check incoming data, avoid panic for failed VGET */
if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
error = EINVAL;
goto again;
}
/*
* Get the IFILE entry (only once) and see if the file still
* exists.
@ -779,9 +788,10 @@ sys_lfs_segclean(p, v, retval)
fs->lfs_avail += fsbtodb(fs, fs->lfs_ssize);
if (sup->su_flags & SEGUSE_SUPERBLOCK)
fs->lfs_avail -= btodb(LFS_SBPAD);
fs->lfs_bfree += (sup->su_nsums * LFS_SUMMARY_SIZE / DEV_BSIZE) +
sup->su_ninos * btodb(fs->lfs_bsize);
fs->lfs_dmeta -= sup->su_nsums + fsbtodb(fs, sup->su_ninos);
fs->lfs_bfree += sup->su_nsums * btodb(LFS_SUMMARY_SIZE) +
fsbtodb(fs, sup->su_ninos);
fs->lfs_dmeta -= sup->su_nsums * btodb(LFS_SUMMARY_SIZE) +
fsbtodb(fs, sup->su_ninos);
if (fs->lfs_dmeta < 0)
fs->lfs_dmeta = 0;
sup->su_flags &= ~SEGUSE_DIRTY;
@ -791,6 +801,8 @@ sys_lfs_segclean(p, v, retval)
++cip->clean;
--cip->dirty;
fs->lfs_nclean = cip->clean;
cip->bfree = fs->lfs_bfree;
cip->avail = fs->lfs_avail - fs->lfs_ravail;
(void) VOP_BWRITE(bp);
wakeup(&fs->lfs_avail);

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_vfsops.c,v 1.57 2000/07/05 22:25:44 perseant Exp $ */
/* $NetBSD: lfs_vfsops.c,v 1.58 2000/09/09 04:49:55 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -423,6 +423,7 @@ lfs_mountfs(devvp, mp, p)
fs->lfs_diropwait = 0;
fs->lfs_activesb = 0;
fs->lfs_uinodes = 0;
fs->lfs_ravail = 0;
#ifdef LFS_CANNOT_ROLLFW
fs->lfs_sbactive = 0;
#endif
@ -430,8 +431,6 @@ lfs_mountfs(devvp, mp, p)
for (i=0;i<LFS_THROTTLE;i++)
fs->lfs_pending[i] = LFS_UNUSED_DADDR;
#endif
if (fs->lfs_minfreeseg == 0)
fs->lfs_minfreeseg = MIN_FREE_SEGS;
/* Set up the ifile and lock aflags */
fs->lfs_doifile = 0;
@ -751,12 +750,12 @@ lfs_vget(mp, ino, vpp)
*vpp = NULL;
return (error);
}
#ifdef DIAGNOSTIC
if(vp->v_type == VNON) {
printf("lfs_vget: ino %d is type VNON! (ifmt %o)\n", ip->i_number, (ip->i_ffs_mode&IFMT)>>12);
#ifdef DDB
Debugger();
#endif
panic("lfs_vget: ino %d is type VNON! (ifmt %o)\n",
ip->i_number, (ip->i_ffs_mode & IFMT) >> 12);
}
#endif
/*
* Finish inode initialization now that aliasing has been resolved.
*/

View File

@ -1,7 +1,7 @@
/* $NetBSD: lfs_vnops.c,v 1.43 2000/07/05 22:25:44 perseant Exp $ */
/* $NetBSD: lfs_vnops.c,v 1.44 2000/09/09 04:49:55 perseant Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -331,6 +331,12 @@ static int lfs_set_dirop(vp)
int error;
fs = VTOI(vp)->i_lfs;
/*
* We might need one directory block plus supporting indirect blocks,
* plus an inode block and ifile page for the new vnode.
*/
if ((error = lfs_reserve(fs, vp, fsbtodb(fs, NIADDR + 3))) != 0)
return (error);
if (fs->lfs_dirops == 0)
lfs_check(vp, LFS_UNUSED_LBN, 0);
while (fs->lfs_writer || lfs_dirvcount > LFS_MAXDIROP) {
@ -345,10 +351,15 @@ static int lfs_set_dirop(vp)
if(lfs_dirvcount > LFS_MAXDIROP) {
#ifdef DEBUG_LFS
printf("lfs_set_dirop: sleeping with dirops=%d, dirvcount=%d\n",fs->lfs_dirops,lfs_dirvcount);
printf("lfs_set_dirop: sleeping with dirops=%d, "
"dirvcount=%d\n", fs->lfs_dirops,
lfs_dirvcount);
#endif
if((error = tsleep(&lfs_dirvcount, PCATCH|PUSER, "lfs_maxdirop", 0)) !=0)
if((error = tsleep(&lfs_dirvcount, PCATCH|PUSER,
"lfs_maxdirop", 0)) !=0) {
lfs_reserve(fs, vp, -fsbtodb(fs, NIADDR + 3));
return error;
}
}
}
++fs->lfs_dirops;
@ -367,6 +378,7 @@ static int lfs_set_dirop(vp)
wakeup(&(fs)->lfs_writer); \
lfs_check((vp),LFS_UNUSED_LBN,0); \
} \
lfs_reserve(fs, vp, -fsbtodb(fs, NIADDR + 3)); /* XXX */ \
}
#define MARK_VNODE(dvp) do { \

View File

@ -1,4 +1,4 @@
/* $NetBSD: ufs_readwrite.c,v 1.26 2000/05/27 00:19:55 perseant Exp $ */
/* $NetBSD: ufs_readwrite.c,v 1.27 2000/09/09 04:49:55 perseant Exp $ */
/*-
* Copyright (c) 1993
@ -271,7 +271,11 @@ WRITE(v)
error =
uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
#ifdef LFS_READWRITE
if (!error)
error = lfs_reserve(fs, vp, fsbtodb(fs, NIADDR + 1));
(void)VOP_BWRITE(bp);
if (!error)
lfs_reserve(fs, vp, fsbtodb(fs, -(NIADDR + 1)));
#else
if (ioflag & IO_SYNC)
(void)bwrite(bp);