Make uvm_pagemarkdirty() responsible for putting vnodes onto the syncer
work list.  Proposed on tech-kern@.
ad, 2020-03-14 20:45:23 +00:00
parent 37049e7a04
commit da3ef92bf6
9 changed files with 66 additions and 86 deletions
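
In outline: rather than each pager call site poking the syncer worklist by
hand (the genfs_markdirty() pattern removed below), uvm_pagemarkdirty() now
notices when an object gains its first dirty page and fires a new
pgo_markdirty pager op; the vnode pager's hook, uvn_markdirty(), does the
worklist insertion.  What follows is a minimal userspace sketch of that
edge-triggered design, not kernel code: a counter stands in for the radix
tree dirty tag, and names such as object_model and markdirty_model are
purely illustrative.

#include <stdio.h>

struct object_model;

/* Simplified pager ops: only the new, optional "object became dirty" hook. */
struct pagerops_model {
	void	(*pgo_markdirty)(struct object_model *);
};

/* Stand-in for a uvm_object; the counter models the radix tree dirty tag. */
struct object_model {
	const struct pagerops_model *pgops;
	unsigned int dirty_pages;
};

/* Models uvm_pagemarkdirty(): notify the pager only on the 0 -> 1 edge. */
static void
markdirty_model(struct object_model *obj)
{

	if (obj->dirty_pages == 0 && obj->pgops->pgo_markdirty != NULL)
		(*obj->pgops->pgo_markdirty)(obj);
	obj->dirty_pages++;
}

/* Models uvn_markdirty(): the real hook calls vn_syncer_add_to_worklist(). */
static void
vnode_markdirty_model(struct object_model *obj)
{

	(void)obj;	/* unused in the model */
	printf("first dirty page: add vnode to syncer worklist\n");
}

static const struct pagerops_model vnodeops_model = {
	.pgo_markdirty = vnode_markdirty_model,
};

int
main(void)
{
	struct object_model obj = { .pgops = &vnodeops_model, .dirty_pages = 0 };

	markdirty_model(&obj);	/* hook fires once */
	markdirty_model(&obj);	/* already dirty: hook stays quiet */
	return 0;
}

Since uvm_pagemarkdirty() already consults the dirty tag, the emptiness check
is cheap, and the worklist insertion happens once per clean-to-dirty
transition instead of on every write fault.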

external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c

@@ -6028,19 +6028,9 @@ zfs_netbsd_getpages(void *v)
 			pg->flags &= ~(PG_FAKE);
 		}
-		if (memwrite) {
-			if (uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
-				/* For write faults, start dirtiness tracking. */
-				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
-			}
-			mutex_enter(vp->v_interlock);
-			if ((vp->v_iflag & VI_ONWORKLST) == 0) {
-				vn_syncer_add_to_worklist(vp, filedelay);
-			}
-			if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
-				vp->v_iflag |= VI_WRMAPDIRTY;
-			}
-			mutex_exit(vp->v_interlock);
-		}
+		if (memwrite && uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
+			/* For write faults, start dirtiness tracking. */
+			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
+		}
 		rw_exit(rw);
 		ap->a_m[ap->a_centeridx] = pg;

sys/kern/vfs_subr.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_subr.c,v 1.483 2020/03/01 21:39:07 ad Exp $	*/
+/*	$NetBSD: vfs_subr.c,v 1.484 2020/03/14 20:45:23 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008, 2019, 2020
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.483 2020/03/01 21:39:07 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.484 2020/03/14 20:45:23 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -422,10 +422,8 @@ brelvp(struct buf *bp)
 		bufremvn(bp);
 	if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) == VI_ONWORKLST &&
-	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
-		KASSERT((vp->v_iflag & VI_WRMAPDIRTY) == 0);
+	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
 		vn_syncer_remove_from_worklist(vp);
-	}
 
 	bp->b_objlock = &buffer_lock;
 	bp->b_vp = NULL;
@@ -463,10 +461,8 @@ reassignbuf(struct buf *bp, struct vnode *vp)
 		listheadp = &vp->v_cleanblkhd;
 		if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) ==
 		    VI_ONWORKLST &&
-		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
-			KASSERT((vp->v_iflag & VI_WRMAPDIRTY) == 0);
+		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
 			vn_syncer_remove_from_worklist(vp);
-		}
 	} else {
 		listheadp = &vp->v_dirtyblkhd;
 		if ((vp->v_iflag & VI_ONWORKLST) == 0) {

sys/miscfs/genfs/genfs_io.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: genfs_io.c,v 1.92 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: genfs_io.c,v 1.93 2020/03/14 20:45:23 ad Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.92 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.93 2020/03/14 20:45:23 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -61,7 +61,6 @@ static int genfs_getpages_read(struct vnode *, struct vm_page **, int, off_t,
 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     void (*)(struct buf *));
 static void genfs_rel_pages(struct vm_page **, unsigned int);
-static void genfs_markdirty(struct vnode *);
 
 int genfs_maxdio = MAXPHYS;
@@ -83,22 +82,6 @@ genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
 	uvm_page_unbusy(pgs, npages);
 }
 
-static void
-genfs_markdirty(struct vnode *vp)
-{
-
-	KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
-
-	mutex_enter(vp->v_interlock);
-	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
-		vn_syncer_add_to_worklist(vp, filedelay);
-	}
-	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
-		vp->v_iflag |= VI_WRMAPDIRTY;
-	}
-	mutex_exit(vp->v_interlock);
-}
-
 /*
  * generic VM getpages routine.
  * Return PG_BUSY pages for the given range,
@@ -278,7 +261,6 @@ startover:
 				    UVM_PAGE_STATUS_UNKNOWN);
 			}
 		}
-		genfs_markdirty(vp);
 	}
 	goto out_err;
 }
@@ -547,9 +529,6 @@ out:
 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
 		}
 	}
-	if (memwrite) {
-		genfs_markdirty(vp);
-	}
 	rw_exit(uobj->vmobjlock);
 	if (ap->a_m != NULL) {
 		memcpy(ap->a_m, &pgs[ridx],
@@ -912,8 +891,6 @@ genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
 retry:
 	modified = false;
 	flags = origflags;
-	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
-	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
 
 	/*
 	 * shortcut if we have no pages to process.
@@ -921,10 +898,14 @@ retry:
 	nodirty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
 	    UVM_PAGE_DIRTY_TAG);
+#ifdef DIAGNOSTIC
+	mutex_enter(vp->v_interlock);
+	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty);
+	mutex_exit(vp->v_interlock);
+#endif
 	if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
 		mutex_enter(vp->v_interlock);
 		if (vp->v_iflag & VI_ONWORKLST) {
-			vp->v_iflag &= ~VI_WRMAPDIRTY;
 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
 				vn_syncer_remove_from_worklist(vp);
 		}
@@ -1150,7 +1131,6 @@ retry:
 		 */
 		if (needs_clean) {
-			KDASSERT((vp->v_iflag & VI_ONWORKLST));
 			wasclean = false;
 			memset(pgs, 0, sizeof(pgs));
 			pg->flags |= PG_BUSY;
@@ -1320,7 +1300,7 @@ retry:
 	 */
 	mutex_enter(vp->v_interlock);
-	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
+	if (modified && (vp->v_iflag & VI_WRMAP) != 0 &&
 	    (vp->v_type != VBLK ||
 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
@@ -1334,7 +1314,6 @@ retry:
 	if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
 	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
 	    UVM_PAGE_DIRTY_TAG)) {
-		vp->v_iflag &= ~VI_WRMAPDIRTY;
 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
 			vn_syncer_remove_from_worklist(vp);
 	}
@@ -1635,9 +1614,6 @@ genfs_compat_getpages(void *v)
 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
-		if (error == 0 && memwrite) {
-			genfs_markdirty(vp);
-		}
 		return error;
 	}
 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
@@ -1691,9 +1667,6 @@ genfs_compat_getpages(void *v)
 	if (error) {
 		uvm_page_unbusy(pgs, npages);
 	}
-	if (error == 0 && memwrite) {
-		genfs_markdirty(vp);
-	}
 	rw_exit(uobj->vmobjlock);
 	return error;
 }

sys/sys/vnode.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: vnode.h,v 1.292 2020/03/05 15:18:55 riastradh Exp $	*/
+/*	$NetBSD: vnode.h,v 1.293 2020/03/14 20:45:23 ad Exp $	*/
 
 /*-
  * Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -210,9 +210,8 @@ typedef struct vnode vnode_t;
 #define	VI_TEXT		0x00000100	/* vnode is a pure text prototype */
 #define	VI_EXECMAP	0x00000200	/* might have PROT_EXEC mappings */
 #define	VI_WRMAP	0x00000400	/* might have PROT_WRITE u. mappings */
-#define	VI_WRMAPDIRTY	0x00000800	/* might have dirty pages */
+#define	VI_PAGES	0x00000800	/* UVM object has >0 pages */
 #define	VI_ONWORKLST	0x00004000	/* On syncer work-list */
-#define	VI_PAGES	0x00008000	/* UVM object has >0 pages */
 
 /*
  * The third set are locked by the underlying file system.
@@ -221,7 +220,7 @@ typedef struct vnode vnode_t;
 #define	VNODE_FLAGBITS \
     "\20\1ROOT\2SYSTEM\3ISTTY\4MAPPED\5MPSAFE\6LOCKSWORK\11TEXT\12EXECMAP" \
-    "\13WRMAP\14WRMAPDIRTY\17ONWORKLST\31DIROP"
+    "\13WRMAP\14PAGES\17ONWORKLST\31DIROP"
 
 #define	VSIZENOTSET	((voff_t)-1)

sys/ufs/lfs/lfs_pages.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: lfs_pages.c,v 1.23 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: lfs_pages.c,v 1.24 2020/03/14 20:45:23 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2019 The NetBSD Foundation, Inc.
@@ -60,7 +60,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_pages.c,v 1.23 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_pages.c,v 1.24 2020/03/14 20:45:23 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -455,11 +455,12 @@ retry:
 	 * If there are no pages, don't do anything.
 	 */
 	if (vp->v_uobj.uo_npages == 0) {
+		mutex_enter(vp->v_interlock);
 		if ((vp->v_iflag & VI_ONWORKLST) &&
 		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
-			vp->v_iflag &= ~VI_WRMAPDIRTY;
 			vn_syncer_remove_from_worklist(vp);
 		}
+		mutex_exit(vp->v_interlock);
 		if (trans_mp)
 			fstrans_done(trans_mp);
 		rw_exit(vp->v_uobj.vmobjlock);

sys/uvm/uvm_object.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_object.h,v 1.37 2020/02/23 15:46:43 ad Exp $	*/
+/*	$NetBSD: uvm_object.h,v 1.38 2020/03/14 20:45:23 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -114,7 +114,7 @@ extern const struct uvm_pagerops aobj_pager;
  */
 
 #define	UVM_OBJ_NEEDS_WRITEFAULT(uobj)					\
-	(UVM_OBJ_IS_VNODE(uobj) && uvn_needs_writefault_p(uobj))
+	(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
 
 #define	UVM_OBJ_IS_AOBJ(uobj)						\
 	((uobj)->pgops == &aobj_pager)

sys/uvm/uvm_page_status.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page_status.c,v 1.3 2020/02/23 15:46:43 ad Exp $	*/
+/*	$NetBSD: uvm_page_status.c,v 1.4 2020/03/14 20:45:23 ad Exp $	*/
 
 /*-
  * Copyright (c)2011 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.3 2020/02/23 15:46:43 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.4 2020/03/14 20:45:23 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -109,6 +109,15 @@ uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
 			radix_tree_clear_tag(&uobj->uo_pages, idx,
 			    UVM_PAGE_DIRTY_TAG);
 		} else {
+			/*
+			 * on first dirty page, mark the object dirty.
+			 * for vnodes this inserts to the syncer worklist.
+			 */
+			if (radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
+			    UVM_PAGE_DIRTY_TAG) &&
+			    uobj->pgops->pgo_markdirty != NULL) {
+				(*uobj->pgops->pgo_markdirty)(uobj);
+			}
 			radix_tree_set_tag(&uobj->uo_pages, idx,
 			    UVM_PAGE_DIRTY_TAG);
 		}

sys/uvm/uvm_pager.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pager.h,v 1.45 2018/12/09 20:33:04 jdolecek Exp $	*/
+/*	$NetBSD: uvm_pager.h,v 1.46 2020/03/14 20:45:23 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -135,6 +135,9 @@ struct uvm_pagerops {
 	/* put/write pages */
 	int			(*pgo_put)(struct uvm_object *, voff_t, voff_t, int);
+
+	/* mark object dirty */
+	void			(*pgo_markdirty)(struct uvm_object *);
 };
 
 /* pager flags [mostly for flush] */

sys/uvm/uvm_vnode.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_vnode.c,v 1.109 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: uvm_vnode.c,v 1.110 2020/03/14 20:45:23 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,12 +45,13 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.109 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.110 2020/03/14 20:45:23 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
 #endif
 
+#include <sys/atomic.h>
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -80,6 +81,7 @@ static void uvn_alloc_ractx(struct uvm_object *);
 static void	uvn_detach(struct uvm_object *);
 static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
 		    int, vm_prot_t, int, int);
+static void	uvn_markdirty(struct uvm_object *);
 static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
 static void	uvn_reference(struct uvm_object *);
@@ -96,6 +98,7 @@ const struct uvm_pagerops uvm_vnodeops = {
 	.pgo_detach = uvn_detach,
 	.pgo_get = uvn_get,
 	.pgo_put = uvn_put,
+	.pgo_markdirty = uvn_markdirty,
 };
 
 /*
@@ -153,7 +156,6 @@ uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
 	return error;
 }
 
-
 /*
  * uvn_get: get pages (synchronously) from backing store
  *
@@ -194,6 +196,25 @@ uvn_get(struct uvm_object *uobj, voff_t offset,
 	return error;
 }
 
+/*
+ * uvn_markdirty: called when the object gains first dirty page
+ *
+ * => uobj must be write locked.
+ */
+static void
+uvn_markdirty(struct uvm_object *uobj)
+{
+	struct vnode *vp = (struct vnode *)uobj;
+
+	KASSERT(rw_write_held(uobj->vmobjlock));
+
+	mutex_enter(vp->v_interlock);
+	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
+		vn_syncer_add_to_worklist(vp, filedelay);
+	}
+	mutex_exit(vp->v_interlock);
+}
+
 /*
  * uvn_findpages:
@@ -462,12 +483,14 @@ bool
 uvn_text_p(struct uvm_object *uobj)
 {
 	struct vnode *vp = (struct vnode *)uobj;
+	int iflag;
 
 	/*
 	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
 	 * with the vmobjlock held too.
 	 */
-	return (vp->v_iflag & VI_EXECMAP) != 0;
+	iflag = atomic_load_relaxed(&vp->v_iflag);
+	return (iflag & VI_EXECMAP) != 0;
 }
 
 bool
@@ -478,20 +501,6 @@ uvn_clean_p(struct uvm_object *uobj)
 	    UVM_PAGE_DIRTY_TAG);
 }
 
-bool
-uvn_needs_writefault_p(struct uvm_object *uobj)
-{
-	struct vnode *vp = (struct vnode *)uobj;
-
-	/*
-	 * v_interlock is not held here, but VI_WRMAP and VI_WRMAPDIRTY are
-	 * only ever changed with the vmobjlock held too, or when it's known
-	 * the uvm_object contains no pages (VI_PAGES clear).
-	 */
-	return uvn_clean_p(uobj) ||
-	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
-}
-
 static void
 uvn_alloc_ractx(struct uvm_object *uobj)
 {
{