ensure that vnodes with dirty pages are always on the syncer's queue.

- genfs_putpages: wait for i/o completion of PG_RELEASED/PG_PAGEOUT pages
  by setting "wasclean" to false when encountering them, as suggested by
  Stephan Uphoff in PR/24596 (1).
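
  A minimal userspace model of that rule (the flag values and the
  scan_was_clean() helper below are invented for illustration; only the
  "skipping an in-flight page must clear wasclean" logic mirrors the
  kernel change):

#include <stdbool.h>
#include <stdio.h>

#define PG_RELEASED	0x01	/* hypothetical stand-ins for the */
#define PG_PAGEOUT	0x02	/* real uvm page flags */

/*
 * Walk a range of page flags the way genfs_putpages walks pages:
 * a page already queued for pageout has i/o in flight, so merely
 * skipping it must not let us conclude the object "was clean".
 */
static bool
scan_was_clean(const int *pgflags, int npages)
{
	bool wasclean = true;
	int i;

	for (i = 0; i < npages; i++) {
		if (pgflags[i] & (PG_RELEASED | PG_PAGEOUT))
			wasclean = false;	/* must wait for that i/o */
	}
	return wasclean;
}

int
main(void)
{
	int flags[] = { 0, PG_PAGEOUT, 0 };

	/* prints 0: the in-flight page keeps the range "not clean" */
	printf("%d\n", scan_was_clean(flags, 3));
	return 0;
}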

- genfs_putpages: write-protect pages when cleaning them out, if
  we're going to take the vnode off the syncer's queue.
  uvm_fault: don't write-map pages unless their vnode is already on
  the syncer's queue.

  this fixes PR/24596 (3), but in a different way from the suggested fix
  (to keep our current behaviour, i.e. not requiring an explicit msync;
  discussed on tech-kern@).
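
  A rough, compilable model of that handshake (the struct layouts and
  helper names below are invented; the real code uses pmap_page_protect()
  in genfs_putpages and the object-fault path in uvm_fault.c): cleaning
  downgrades the mapping, so the next write must fault, and the fault
  handler re-queues the vnode before granting write access, keeping any
  dirty page visible to the syncer.

#include <stdbool.h>
#include <stdio.h>

struct vnode { bool onworklist; };	/* on the syncer's queue? */
struct page  { bool writable, dirty; };

/* putpages: clean the page and, since we hope to take the vnode off
 * the queue, revoke write access (pmap_page_protect(READ|EXEC)). */
static void
putpages(struct vnode *vp, struct page *pg)
{
	pg->dirty = false;
	pg->writable = false;
	vp->onworklist = false;
}

/* fault: refuse to write-map until the vnode is back on the queue. */
static void
write_fault(struct vnode *vp, struct page *pg)
{
	vp->onworklist = true;
	pg->writable = true;
	pg->dirty = true;
}

int
main(void)
{
	struct vnode vp = { true };
	struct page pg = { true, true };

	putpages(&vp, &pg);
	write_fault(&vp, &pg);
	/* prints 1 1: the re-dirtied page is back under the syncer's eye */
	printf("%d %d\n", vp.onworklist, pg.dirty);
	return 0;
}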

- genfs_putpages: avoid mistakenly taking a vnode off the queue,
  by introducing a generation number in genfs_node.
  genfs_getpages: increment the generation number.
  suggested by Stephan Uphoff in PR/24596 (2).
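
  The race being closed, in a compilable toy form (struct and function
  names are hypothetical; only the sample-then-compare use of the
  generation counter reflects the actual g_dirtygen logic):

#include <stdbool.h>
#include <stdio.h>

struct gnode { int dirtygen; bool onworklist; };

/* getpages(write): bump the generation before handing out a
 * writable mapping, so a concurrent cleaner notices us. */
static void
getpages_write(struct gnode *gp)
{
	gp->dirtygen++;
	gp->onworklist = true;
}

/* putpages: sample the generation before the scan; only leave the
 * syncer queue if nothing was dirtied while we were cleaning. */
static void
putpages_clean(struct gnode *gp, bool race)
{
	int dirtygen = gp->dirtygen;

	if (race)
		getpages_write(gp);	/* a writer sneaks in mid-scan */
	if (gp->dirtygen == dirtygen)
		gp->onworklist = false;
}

int
main(void)
{
	struct gnode gp = { 0, true };

	putpages_clean(&gp, true);
	/* prints 1: the racing writer kept the vnode on the queue */
	printf("%d\n", gp.onworklist);
	return 0;
}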

- add some assertions.
commit 8af42d8d3c
parent d36a483f83
yamt, 2005-07-17 12:27:47 +00:00
4 changed files with 50 additions and 13 deletions

Index: sys/miscfs/genfs/genfs_node.h

@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_node.h,v 1.6 2005/06/28 09:30:37 yamt Exp $ */
+/* $NetBSD: genfs_node.h,v 1.7 2005/07/17 12:27:47 yamt Exp $ */
 /*
  * Copyright (c) 2001 Chuck Silvers.
@@ -56,6 +56,7 @@ struct genfs_ops {
 struct genfs_node {
 	const struct genfs_ops *g_op;	/* ops vector */
 	struct lock g_glock;		/* getpages lock */
+	int g_dirtygen;
 };
 #define VTOG(vp) ((struct genfs_node *)(vp)->v_data)

Index: sys/miscfs/genfs/genfs_vnops.c

@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_vnops.c,v 1.100 2005/07/17 09:13:35 yamt Exp $ */
+/* $NetBSD: genfs_vnops.c,v 1.101 2005/07/17 12:27:47 yamt Exp $ */
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.100 2005/07/17 09:13:35 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.101 2005/07/17 12:27:47 yamt Exp $");
 #if defined(_KERNEL_OPT)
 #include "opt_nfsserver.h"
@@ -537,8 +537,11 @@ genfs_getpages(void *v)
 	/* uobj is locked */
-	if (write && (vp->v_flag & VONWORKLST) == 0) {
-		vn_syncer_add_to_worklist(vp, filedelay);
+	if (write) {
+		gp->g_dirtygen++;
+		if ((vp->v_flag & VONWORKLST) == 0) {
+			vn_syncer_add_to_worklist(vp, filedelay);
+		}
 	}
@@ -1089,6 +1092,8 @@ genfs_putpages(void *v)
 	boolean_t async = (flags & PGO_SYNCIO) == 0;
 	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
 	struct lwp *l = curlwp ? curlwp : &lwp0;
+	struct genfs_node *gp = VTOG(vp);
+	int dirtygen;
 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
@@ -1134,6 +1139,7 @@ genfs_putpages(void *v)
 	 * current last page.
 	 */
+	dirtygen = gp->g_dirtygen;
 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
 	curmp.uobject = uobj;
 	curmp.offset = (voff_t)-1;
@@ -1165,12 +1171,17 @@ genfs_putpages(void *v)
 			}
 			if (pg->offset < startoff || pg->offset >= endoff ||
 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
+				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
+					wasclean = FALSE;
+				}
 				pg = TAILQ_NEXT(pg, listq);
 				continue;
 			}
 			off = pg->offset;
-		} else if (pg == NULL ||
-		    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
+		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
+			if (pg != NULL) {
+				wasclean = FALSE;
+			}
 			off += PAGE_SIZE;
 			if (off < endoff) {
 				pg = uvm_pagelookup(uobj, off);
@@ -1225,7 +1236,20 @@ genfs_putpages(void *v)
 		if (flags & PGO_FREE) {
 			pmap_page_protect(pg, VM_PROT_NONE);
-		}
+		} else if (flags & PGO_CLEANIT) {
+			/*
+			 * if we still have some hope to pull this vnode off
+			 * from the syncer queue, write-protect the page.
+			 */
+			if (wasclean && gp->g_dirtygen == dirtygen &&
+			    startoff == 0 && endoff == trunc_page(LLONG_MAX)) {
+				pmap_page_protect(pg,
+				    VM_PROT_READ|VM_PROT_EXECUTE);
+			}
+		}
 		if (flags & PGO_CLEANIT) {
 			needs_clean = pmap_clear_modify(pg) ||
 			    (pg->flags & PG_CLEAN) == 0;
@@ -1242,6 +1266,7 @@ genfs_putpages(void *v)
 		 */
 		if (needs_clean) {
+			KDASSERT((vp->v_flag & VONWORKLST));
 			wasclean = FALSE;
 			memset(pgs, 0, sizeof(pgs));
 			pg->flags |= PG_BUSY;
@@ -1389,7 +1414,7 @@ genfs_putpages(void *v)
 	 */
 	s = splbio();
-	if ((flags & PGO_CLEANIT) && wasclean &&
+	if ((flags & PGO_CLEANIT) && wasclean && gp->g_dirtygen == dirtygen &&
 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
 	    (vp->v_flag & VONWORKLST)) {

Index: sys/uvm/uvm_fault.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.95 2005/06/27 02:19:48 thorpej Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.96 2005/07/17 12:27:47 yamt Exp $ */
 /*
  *
@@ -39,7 +39,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.95 2005/06/27 02:19:48 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.96 2005/07/17 12:27:47 yamt Exp $");
 #include "opt_uvmhist.h"
@@ -50,6 +50,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.95 2005/06/27 02:19:48 thorpej Exp $
 #include <sys/malloc.h>
 #include <sys/mman.h>
 #include <sys/user.h>
+#include <sys/vnode.h>
 #include <uvm/uvm.h>
@@ -961,8 +962,11 @@ ReFault:
 		 */
 		KASSERT((curpg->flags & PG_PAGEOUT) == 0);
 		KASSERT((curpg->flags & PG_RELEASED) == 0);
+		KASSERT(!UVM_OBJ_IS_CLEAN(uobj) ||
+		    (curpg->flags & PG_CLEAN) != 0);
 		readonly = (curpg->flags & PG_RDONLY)
-		    || (curpg->loan_count > 0);
+		    || (curpg->loan_count > 0)
+		    || UVM_OBJ_IS_CLEAN(uobj);
 		(void) pmap_enter(ufi.orig_map->pmap, currva,
 		    VM_PAGE_TO_PHYS(curpg),
@@ -1438,6 +1442,8 @@ Case2:
 	 * - at this point uobjpage could be PG_WANTED (handle later)
 	 */
+	KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobj) ||
+	    (uobjpage->flags & PG_CLEAN) != 0);
 	if (promote == FALSE) {
 		/*
@@ -1453,7 +1459,8 @@
 		anon = NULL;
 		uvmexp.flt_obj++;
-		if (UVM_ET_ISCOPYONWRITE(ufi.entry))
+		if (UVM_ET_ISCOPYONWRITE(ufi.entry) ||
+		    UVM_OBJ_IS_CLEAN(uobj))
 			enter_prot &= ~VM_PROT_WRITE;
 		pg = uobjpage;	/* map in the actual object */

Index: sys/uvm/uvm_object.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_object.h,v 1.18 2005/06/06 12:09:19 yamt Exp $ */
+/* $NetBSD: uvm_object.h,v 1.19 2005/07/17 12:27:47 yamt Exp $ */
 /*
  *
@@ -87,6 +87,10 @@ extern struct uvm_pagerops aobj_pager;
 	((uobj)->pgops == &uvm_vnodeops &&				\
 	    ((struct vnode *)uobj)->v_flag & VEXECMAP)
+#define UVM_OBJ_IS_CLEAN(uobj)						\
+	(UVM_OBJ_IS_VNODE(uobj) &&					\
+	    (((struct vnode *)uobj)->v_flag & VONWORKLST) == 0)
 #define UVM_OBJ_IS_AOBJ(uobj)						\
 	((uobj)->pgops == &aobj_pager)