Centralize calls from UVM into the radix tree behind a few functions.

In those functions, assert that the object lock is held in
the correct mode.
This commit is contained in:
chs 2020-08-14 09:06:14 +00:00
parent 909a0d73c8
commit 19303cecfc
8 changed files with 155 additions and 68 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: genfs_io.c,v 1.99 2020/08/10 11:09:15 rin Exp $ */
/* $NetBSD: genfs_io.c,v 1.100 2020/08/14 09:06:14 chs Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.99 2020/08/10 11:09:15 rin Exp $");
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.100 2020/08/14 09:06:14 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -913,8 +913,7 @@ retry:
* shortcut if we have no pages to process.
*/
nodirty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
UVM_PAGE_DIRTY_TAG);
nodirty = uvm_obj_clean_p(uobj);
#ifdef DIAGNOSTIC
mutex_enter(vp->v_interlock);
KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty);
@ -922,8 +921,7 @@ retry:
#endif
if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
mutex_enter(vp->v_interlock);
if (vp->v_iflag & VI_ONWORKLST) {
if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
if (vp->v_iflag & VI_ONWORKLST && LIST_EMPTY(&vp->v_dirtyblkhd)) {
vn_syncer_remove_from_worklist(vp);
}
mutex_exit(vp->v_interlock);
@ -978,8 +976,7 @@ retry:
}
error = 0;
wasclean = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
UVM_PAGE_WRITEBACK_TAG);
wasclean = uvm_obj_nowriteback_p(uobj);
nextoff = startoff;
if (endoff == 0 || flags & PGO_ALLPAGES) {
endoff = trunc_page(LLONG_MAX);
@ -1030,8 +1027,7 @@ retry:
KASSERT(pg->offset >= nextoff);
KASSERT(!dirtyonly ||
uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
radix_tree_get_tag(&uobj->uo_pages,
pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
uvm_obj_page_writeback_p(pg));
if (pg->offset >= endoff) {
break;
@ -1245,9 +1241,7 @@ retry:
* mark pages as WRITEBACK so that concurrent
* fsync can find and wait for our activities.
*/
radix_tree_set_tag(&uobj->uo_pages,
pgs[i]->offset >> PAGE_SHIFT,
UVM_PAGE_WRITEBACK_TAG);
uvm_obj_page_set_writeback(pgs[i]);
}
if (tpg->offset < startoff || tpg->offset >= endoff)
continue;
@ -1332,10 +1326,8 @@ retry:
* syncer list.
*/
if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
UVM_PAGE_DIRTY_TAG)) {
if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
if ((vp->v_iflag & VI_ONWORKLST) != 0 && uvm_obj_clean_p(uobj) &&
LIST_EMPTY(&vp->v_dirtyblkhd)) {
vn_syncer_remove_from_worklist(vp);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.230 2020/06/14 22:25:15 ad Exp $ */
/* $NetBSD: uvm_extern.h,v 1.231 2020/08/14 09:06:15 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -776,6 +776,14 @@ void uvm_obj_destroy(struct uvm_object *, bool);
int uvm_obj_wirepages(struct uvm_object *, off_t, off_t,
struct pglist *);
void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t);
bool uvm_obj_clean_p(struct uvm_object *);
bool uvm_obj_nowriteback_p(struct uvm_object *);
bool uvm_obj_page_dirty_p(struct vm_page *);
void uvm_obj_page_set_dirty(struct vm_page *);
void uvm_obj_page_clear_dirty(struct vm_page *);
bool uvm_obj_page_writeback_p(struct vm_page *);
void uvm_obj_page_set_writeback(struct vm_page *);
void uvm_obj_page_clear_writeback(struct vm_page *);
/* uvm_page.c */
int uvm_availmem(bool);
@ -826,7 +834,6 @@ int uvn_findpages(struct uvm_object *, voff_t,
unsigned int *, struct vm_page **,
struct uvm_page_array *, unsigned int);
bool uvn_text_p(struct uvm_object *);
bool uvn_clean_p(struct uvm_object *);
bool uvn_needs_writefault_p(struct uvm_object *);
/* kern_malloc.c */

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $ */
/* $NetBSD: uvm_object.c,v 1.24 2020/08/14 09:06:15 chs Exp $ */
/*
* Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.24 2020/08/14 09:06:15 chs Exp $");
#ifdef _KERNEL_OPT
#include "opt_ddb.h"
@ -233,6 +233,103 @@ uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
rw_exit(uobj->vmobjlock);
}
/*
 * uvm_obj_notag_p: return true if no page in the object's radix tree
 * carries the given tag.
 *
 * => the object lock must be held (shared or exclusive); asserted here.
 */
static inline bool
uvm_obj_notag_p(struct uvm_object *uobj, int tag)
{
	bool empty;

	KASSERT(rw_lock_held(uobj->vmobjlock));
	empty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages, tag);
	return empty;
}
/*
 * uvm_obj_clean_p: return true if no page of the object is tagged dirty.
 *
 * => the object lock must be held (checked by uvm_obj_notag_p()).
 */
bool
uvm_obj_clean_p(struct uvm_object *uobj)
{
	const bool clean = uvm_obj_notag_p(uobj, UVM_PAGE_DIRTY_TAG);

	return clean;
}
/*
 * uvm_obj_nowriteback_p: return true if no page of the object is tagged
 * as being under writeback.
 *
 * => the object lock must be held (checked by uvm_obj_notag_p()).
 */
bool
uvm_obj_nowriteback_p(struct uvm_object *uobj)
{
	const bool idle = uvm_obj_notag_p(uobj, UVM_PAGE_WRITEBACK_TAG);

	return idle;
}
static inline bool
uvm_obj_page_tag_p(struct vm_page *pg, int tag)
{
struct uvm_object *uobj = pg->uobject;
int pgidx = pg->offset >> PAGE_SHIFT;
KASSERT(uobj != NULL);
KASSERT(rw_lock_held(uobj->vmobjlock));
return radix_tree_get_tag(&uobj->uo_pages, pgidx, tag) != 0;
}
static inline void
uvm_obj_page_set_tag(struct vm_page *pg, int tag)
{
struct uvm_object *uobj = pg->uobject;
int pgidx = pg->offset >> PAGE_SHIFT;
KASSERT(uobj != NULL);
KASSERT(rw_write_held(uobj->vmobjlock));
radix_tree_set_tag(&uobj->uo_pages, pgidx, tag);
}
static inline void
uvm_obj_page_clear_tag(struct vm_page *pg, int tag)
{
struct uvm_object *uobj = pg->uobject;
int pgidx = pg->offset >> PAGE_SHIFT;
KASSERT(uobj != NULL);
KASSERT(rw_write_held(uobj->vmobjlock));
radix_tree_clear_tag(&uobj->uo_pages, pgidx, tag);
}
/*
 * uvm_obj_page_dirty_p: return true if the page is tagged dirty in its
 * owning object's page tree.
 */
bool
uvm_obj_page_dirty_p(struct vm_page *pg)
{
	const bool dirty = uvm_obj_page_tag_p(pg, UVM_PAGE_DIRTY_TAG);

	return dirty;
}
/*
 * uvm_obj_page_set_dirty: tag the page as dirty in its owning object's
 * page tree.
 */
void
uvm_obj_page_set_dirty(struct vm_page *pg)
{

	uvm_obj_page_set_tag(pg, UVM_PAGE_DIRTY_TAG);
}
/*
 * uvm_obj_page_clear_dirty: remove the dirty tag from the page in its
 * owning object's page tree.
 */
void
uvm_obj_page_clear_dirty(struct vm_page *pg)
{

	uvm_obj_page_clear_tag(pg, UVM_PAGE_DIRTY_TAG);
}
/*
 * uvm_obj_page_writeback_p: return true if the page is tagged as being
 * under writeback in its owning object's page tree.
 */
bool
uvm_obj_page_writeback_p(struct vm_page *pg)
{
	const bool wb = uvm_obj_page_tag_p(pg, UVM_PAGE_WRITEBACK_TAG);

	return wb;
}
/*
 * uvm_obj_page_set_writeback: tag the page as being under writeback in
 * its owning object's page tree.
 */
void
uvm_obj_page_set_writeback(struct vm_page *pg)
{

	uvm_obj_page_set_tag(pg, UVM_PAGE_WRITEBACK_TAG);
}
/*
 * uvm_obj_page_clear_writeback: remove the writeback tag from the page
 * in its owning object's page tree.
 */
void
uvm_obj_page_clear_writeback(struct vm_page *pg)
{

	uvm_obj_page_clear_tag(pg, UVM_PAGE_WRITEBACK_TAG);
}
#if defined(DDB) || defined(DEBUGPRINT)
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_object.h,v 1.38 2020/03/14 20:45:23 ad Exp $ */
/* $NetBSD: uvm_object.h,v 1.39 2020/08/14 09:06:15 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -104,7 +104,7 @@ extern const struct uvm_pagerops aobj_pager;
(UVM_OBJ_IS_VNODE(uobj) && uvn_text_p(uobj))
#define UVM_OBJ_IS_CLEAN(uobj) \
(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
(UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
/*
* UVM_OBJ_NEEDS_WRITEFAULT: true if the uobj needs to detect modification.
@ -114,7 +114,7 @@ extern const struct uvm_pagerops aobj_pager;
*/
#define UVM_OBJ_NEEDS_WRITEFAULT(uobj) \
(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
(UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
#define UVM_OBJ_IS_AOBJ(uobj) \
((uobj)->pgops == &aobj_pager)

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $ */
/* $NetBSD: uvm_page.c,v 1.245 2020/08/14 09:06:15 chs Exp $ */
/*-
* Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@ -95,7 +95,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.245 2020/08/14 09:06:15 chs Exp $");
#include "opt_ddb.h"
#include "opt_uvm.h"
@ -240,15 +240,17 @@ uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
const uint64_t idx = pg->offset >> PAGE_SHIFT;
int error;
KASSERT(rw_write_held(uobj->vmobjlock));
error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
if (error != 0) {
return error;
}
if ((pg->flags & PG_CLEAN) == 0) {
radix_tree_set_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG);
uvm_obj_page_set_dirty(pg);
}
KASSERT(((pg->flags & PG_CLEAN) == 0) ==
radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
uvm_obj_page_dirty_p(pg));
return 0;
}
@ -297,6 +299,8 @@ uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
{
struct vm_page *opg __unused;
KASSERT(rw_write_held(uobj->vmobjlock));
opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
KASSERT(pg == opg);
}
@ -1363,11 +1367,9 @@ uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
KASSERT(pg == oldpg);
if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) {
if ((newpg->flags & PG_CLEAN) != 0) {
radix_tree_clear_tag(&uobj->uo_pages, idx,
UVM_PAGE_DIRTY_TAG);
uvm_obj_page_clear_dirty(newpg);
} else {
radix_tree_set_tag(&uobj->uo_pages, idx,
UVM_PAGE_DIRTY_TAG);
uvm_obj_page_set_dirty(newpg);
}
}
/*
@ -1788,8 +1790,13 @@ struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
struct vm_page *pg;
bool ddb = false;
#ifdef DDB
extern int db_active;
ddb = db_active != 0;
#endif
/* No - used from DDB. KASSERT(rw_lock_held(obj->vmobjlock)); */
KASSERT(ddb || rw_lock_held(obj->vmobjlock));
pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page_status.c,v 1.5 2020/05/15 22:25:18 ad Exp $ */
/* $NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $ */
/*-
* Copyright (c)2011 YAMAMOTO Takashi,
@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.5 2020/05/15 22:25:18 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -60,12 +60,11 @@ unsigned int
uvm_pagegetdirty(struct vm_page *pg)
{
struct uvm_object * const uobj __diagused = pg->uobject;
const uint64_t idx __diagused = pg->offset >> PAGE_SHIFT;
KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
KASSERT(uvm_page_owner_locked_p(pg, false));
KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
!!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
uvm_obj_page_dirty_p(pg));
return pg->flags & (PG_CLEAN|PG_DIRTY);
}
@ -85,7 +84,6 @@ void
uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
{
struct uvm_object * const uobj = pg->uobject;
const uint64_t idx = pg->offset >> PAGE_SHIFT;
const unsigned int oldstatus = uvm_pagegetdirty(pg);
enum cpu_count base;
@ -93,7 +91,7 @@ uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
KASSERT(uvm_page_owner_locked_p(pg, true));
KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
!!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
uvm_obj_page_dirty_p(pg));
if (oldstatus == newstatus) {
return;
@ -106,20 +104,17 @@ uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
if (uobj != NULL) {
if (newstatus == UVM_PAGE_STATUS_CLEAN) {
radix_tree_clear_tag(&uobj->uo_pages, idx,
UVM_PAGE_DIRTY_TAG);
uvm_obj_page_clear_dirty(pg);
} else if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
/*
* on first dirty page, mark the object dirty.
* for vnodes this inserts to the syncer worklist.
*/
if (radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
UVM_PAGE_DIRTY_TAG) &&
if (uvm_obj_clean_p(uobj) &&
uobj->pgops->pgo_markdirty != NULL) {
(*uobj->pgops->pgo_markdirty)(uobj);
}
radix_tree_set_tag(&uobj->uo_pages, idx,
UVM_PAGE_DIRTY_TAG);
uvm_obj_page_set_dirty(pg);
}
}
if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
@ -131,7 +126,7 @@ uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
pg->flags &= ~(PG_CLEAN|PG_DIRTY);
pg->flags |= newstatus;
KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
!!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
uvm_obj_page_dirty_p(pg));
if ((pg->flags & PG_STAT) != 0) {
if ((pg->flags & PG_SWAPBACKED) != 0) {
base = CPU_COUNT_ANONUNKNOWN;

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pager.c,v 1.128 2020/07/09 05:57:15 skrll Exp $ */
/* $NetBSD: uvm_pager.c,v 1.129 2020/08/14 09:06:15 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.128 2020/07/09 05:57:15 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.129 2020/08/14 09:06:15 chs Exp $");
#include "opt_uvmhist.h"
#include "opt_readahead.h"
@ -391,10 +391,8 @@ uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
#endif /* defined(VMSWAP) */
if (write && uobj != NULL) {
KASSERT(radix_tree_get_tag(&uobj->uo_pages,
pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
radix_tree_clear_tag(&uobj->uo_pages,
pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG);
KASSERT(uvm_obj_page_writeback_p(pg));
uvm_obj_page_clear_writeback(pg);
}
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.116 2020/08/14 09:06:15 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -45,7 +45,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.116 2020/08/14 09:06:15 chs Exp $");
#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
@ -316,10 +316,9 @@ uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
KASSERT(
((a->ar_flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
== (pg->offset < offset));
KASSERT(uvm_pagelookup(uobj, offset) == NULL
|| ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0
&& radix_tree_get_tag(&uobj->uo_pages,
offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
KASSERT(uvm_pagelookup(uobj, offset) == NULL ||
((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
!uvm_obj_page_dirty_p(pg)));
pg = NULL;
if ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
@ -501,14 +500,6 @@ uvn_text_p(struct uvm_object *uobj)
return (iflag & VI_EXECMAP) != 0;
}
bool
uvn_clean_p(struct uvm_object *uobj)
{
return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
UVM_PAGE_DIRTY_TAG);
}
static void
uvn_alloc_ractx(struct uvm_object *uobj)
{