Centralize calls from UVM to the radix tree into a few functions.

In those functions, assert that the object lock is held in
the correct mode.
This commit is contained in:
chs 2020-08-14 09:06:14 +00:00
parent 909a0d73c8
commit 19303cecfc
8 changed files with 155 additions and 68 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: genfs_io.c,v 1.99 2020/08/10 11:09:15 rin Exp $ */ /* $NetBSD: genfs_io.c,v 1.100 2020/08/14 09:06:14 chs Exp $ */
/* /*
* Copyright (c) 1982, 1986, 1989, 1993 * Copyright (c) 1982, 1986, 1989, 1993
@ -31,7 +31,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.99 2020/08/10 11:09:15 rin Exp $"); __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.100 2020/08/14 09:06:14 chs Exp $");
#include <sys/param.h> #include <sys/param.h>
#include <sys/systm.h> #include <sys/systm.h>
@ -913,8 +913,7 @@ retry:
* shortcut if we have no pages to process. * shortcut if we have no pages to process.
*/ */
nodirty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages, nodirty = uvm_obj_clean_p(uobj);
UVM_PAGE_DIRTY_TAG);
#ifdef DIAGNOSTIC #ifdef DIAGNOSTIC
mutex_enter(vp->v_interlock); mutex_enter(vp->v_interlock);
KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty); KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty);
@ -922,9 +921,8 @@ retry:
#endif #endif
if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) { if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
mutex_enter(vp->v_interlock); mutex_enter(vp->v_interlock);
if (vp->v_iflag & VI_ONWORKLST) { if (vp->v_iflag & VI_ONWORKLST && LIST_EMPTY(&vp->v_dirtyblkhd)) {
if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) vn_syncer_remove_from_worklist(vp);
vn_syncer_remove_from_worklist(vp);
} }
mutex_exit(vp->v_interlock); mutex_exit(vp->v_interlock);
if (trans_mp) { if (trans_mp) {
@ -978,8 +976,7 @@ retry:
} }
error = 0; error = 0;
wasclean = radix_tree_empty_tagged_tree_p(&uobj->uo_pages, wasclean = uvm_obj_nowriteback_p(uobj);
UVM_PAGE_WRITEBACK_TAG);
nextoff = startoff; nextoff = startoff;
if (endoff == 0 || flags & PGO_ALLPAGES) { if (endoff == 0 || flags & PGO_ALLPAGES) {
endoff = trunc_page(LLONG_MAX); endoff = trunc_page(LLONG_MAX);
@ -1030,8 +1027,7 @@ retry:
KASSERT(pg->offset >= nextoff); KASSERT(pg->offset >= nextoff);
KASSERT(!dirtyonly || KASSERT(!dirtyonly ||
uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN || uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
radix_tree_get_tag(&uobj->uo_pages, uvm_obj_page_writeback_p(pg));
pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
if (pg->offset >= endoff) { if (pg->offset >= endoff) {
break; break;
@ -1245,9 +1241,7 @@ retry:
* mark pages as WRITEBACK so that concurrent * mark pages as WRITEBACK so that concurrent
* fsync can find and wait for our activities. * fsync can find and wait for our activities.
*/ */
radix_tree_set_tag(&uobj->uo_pages, uvm_obj_page_set_writeback(pgs[i]);
pgs[i]->offset >> PAGE_SHIFT,
UVM_PAGE_WRITEBACK_TAG);
} }
if (tpg->offset < startoff || tpg->offset >= endoff) if (tpg->offset < startoff || tpg->offset >= endoff)
continue; continue;
@ -1332,11 +1326,9 @@ retry:
* syncer list. * syncer list.
*/ */
if ((vp->v_iflag & VI_ONWORKLST) != 0 && if ((vp->v_iflag & VI_ONWORKLST) != 0 && uvm_obj_clean_p(uobj) &&
radix_tree_empty_tagged_tree_p(&uobj->uo_pages, LIST_EMPTY(&vp->v_dirtyblkhd)) {
UVM_PAGE_DIRTY_TAG)) { vn_syncer_remove_from_worklist(vp);
if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
vn_syncer_remove_from_worklist(vp);
} }
#if !defined(DEBUG) #if !defined(DEBUG)

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.230 2020/06/14 22:25:15 ad Exp $ */ /* $NetBSD: uvm_extern.h,v 1.231 2020/08/14 09:06:15 chs Exp $ */
/* /*
* Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -776,6 +776,14 @@ void uvm_obj_destroy(struct uvm_object *, bool);
int uvm_obj_wirepages(struct uvm_object *, off_t, off_t, int uvm_obj_wirepages(struct uvm_object *, off_t, off_t,
struct pglist *); struct pglist *);
void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t); void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t);
bool uvm_obj_clean_p(struct uvm_object *);
bool uvm_obj_nowriteback_p(struct uvm_object *);
bool uvm_obj_page_dirty_p(struct vm_page *);
void uvm_obj_page_set_dirty(struct vm_page *);
void uvm_obj_page_clear_dirty(struct vm_page *);
bool uvm_obj_page_writeback_p(struct vm_page *);
void uvm_obj_page_set_writeback(struct vm_page *);
void uvm_obj_page_clear_writeback(struct vm_page *);
/* uvm_page.c */ /* uvm_page.c */
int uvm_availmem(bool); int uvm_availmem(bool);
@ -826,7 +834,6 @@ int uvn_findpages(struct uvm_object *, voff_t,
unsigned int *, struct vm_page **, unsigned int *, struct vm_page **,
struct uvm_page_array *, unsigned int); struct uvm_page_array *, unsigned int);
bool uvn_text_p(struct uvm_object *); bool uvn_text_p(struct uvm_object *);
bool uvn_clean_p(struct uvm_object *);
bool uvn_needs_writefault_p(struct uvm_object *); bool uvn_needs_writefault_p(struct uvm_object *);
/* kern_malloc.c */ /* kern_malloc.c */

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $ */ /* $NetBSD: uvm_object.c,v 1.24 2020/08/14 09:06:15 chs Exp $ */
/* /*
* Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc. * Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.24 2020/08/14 09:06:15 chs Exp $");
#ifdef _KERNEL_OPT #ifdef _KERNEL_OPT
#include "opt_ddb.h" #include "opt_ddb.h"
@ -233,6 +233,103 @@ uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
rw_exit(uobj->vmobjlock); rw_exit(uobj->vmobjlock);
} }
/*
 * uvm_obj_notag_p: return true if no page in the object's radix tree
 * carries the given tag.
 *
 * => the object lock must be held (read or write) by the caller.
 */
static inline bool
uvm_obj_notag_p(struct uvm_object *uobj, int tag)
{
KASSERT(rw_lock_held(uobj->vmobjlock));
return radix_tree_empty_tagged_tree_p(&uobj->uo_pages, tag);
}
/*
 * uvm_obj_clean_p: return true if the object has no pages tagged dirty.
 *
 * => the object lock must be held by the caller (asserted in
 *    uvm_obj_notag_p()).
 */
bool
uvm_obj_clean_p(struct uvm_object *uobj)
{
return uvm_obj_notag_p(uobj, UVM_PAGE_DIRTY_TAG);
}
/*
 * uvm_obj_nowriteback_p: return true if the object has no pages tagged
 * as being under writeback.
 *
 * => the object lock must be held by the caller (asserted in
 *    uvm_obj_notag_p()).
 */
bool
uvm_obj_nowriteback_p(struct uvm_object *uobj)
{
return uvm_obj_notag_p(uobj, UVM_PAGE_WRITEBACK_TAG);
}
static inline bool
uvm_obj_page_tag_p(struct vm_page *pg, int tag)
{
struct uvm_object *uobj = pg->uobject;
int pgidx = pg->offset >> PAGE_SHIFT;
KASSERT(uobj != NULL);
KASSERT(rw_lock_held(uobj->vmobjlock));
return radix_tree_get_tag(&uobj->uo_pages, pgidx, tag) != 0;
}
static inline void
uvm_obj_page_set_tag(struct vm_page *pg, int tag)
{
struct uvm_object *uobj = pg->uobject;
int pgidx = pg->offset >> PAGE_SHIFT;
KASSERT(uobj != NULL);
KASSERT(rw_write_held(uobj->vmobjlock));
radix_tree_set_tag(&uobj->uo_pages, pgidx, tag);
}
static inline void
uvm_obj_page_clear_tag(struct vm_page *pg, int tag)
{
struct uvm_object *uobj = pg->uobject;
int pgidx = pg->offset >> PAGE_SHIFT;
KASSERT(uobj != NULL);
KASSERT(rw_write_held(uobj->vmobjlock));
radix_tree_clear_tag(&uobj->uo_pages, pgidx, tag);
}
/*
 * uvm_obj_page_dirty_p: return true if the page is tagged dirty in its
 * owning object's radix tree.
 *
 * => lock requirements as for uvm_obj_page_tag_p().
 */
bool
uvm_obj_page_dirty_p(struct vm_page *pg)
{
return uvm_obj_page_tag_p(pg, UVM_PAGE_DIRTY_TAG);
}
/*
 * uvm_obj_page_set_dirty: tag the page dirty in its owning object's
 * radix tree.
 *
 * => lock requirements as for uvm_obj_page_set_tag() (write-held).
 */
void
uvm_obj_page_set_dirty(struct vm_page *pg)
{
uvm_obj_page_set_tag(pg, UVM_PAGE_DIRTY_TAG);
}
/*
 * uvm_obj_page_clear_dirty: clear the dirty tag on the page in its
 * owning object's radix tree.
 *
 * => lock requirements as for uvm_obj_page_clear_tag() (write-held).
 */
void
uvm_obj_page_clear_dirty(struct vm_page *pg)
{
uvm_obj_page_clear_tag(pg, UVM_PAGE_DIRTY_TAG);
}
/*
 * uvm_obj_page_writeback_p: return true if the page is tagged as being
 * under writeback in its owning object's radix tree.
 *
 * => lock requirements as for uvm_obj_page_tag_p().
 */
bool
uvm_obj_page_writeback_p(struct vm_page *pg)
{
return uvm_obj_page_tag_p(pg, UVM_PAGE_WRITEBACK_TAG);
}
/*
 * uvm_obj_page_set_writeback: tag the page as being under writeback in
 * its owning object's radix tree.
 *
 * => lock requirements as for uvm_obj_page_set_tag() (write-held).
 */
void
uvm_obj_page_set_writeback(struct vm_page *pg)
{
uvm_obj_page_set_tag(pg, UVM_PAGE_WRITEBACK_TAG);
}
/*
 * uvm_obj_page_clear_writeback: clear the writeback tag on the page in
 * its owning object's radix tree.
 *
 * => lock requirements as for uvm_obj_page_clear_tag() (write-held).
 */
void
uvm_obj_page_clear_writeback(struct vm_page *pg)
{
uvm_obj_page_clear_tag(pg, UVM_PAGE_WRITEBACK_TAG);
}
#if defined(DDB) || defined(DEBUGPRINT) #if defined(DDB) || defined(DEBUGPRINT)
/* /*

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_object.h,v 1.38 2020/03/14 20:45:23 ad Exp $ */ /* $NetBSD: uvm_object.h,v 1.39 2020/08/14 09:06:15 chs Exp $ */
/* /*
* Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -104,7 +104,7 @@ extern const struct uvm_pagerops aobj_pager;
(UVM_OBJ_IS_VNODE(uobj) && uvn_text_p(uobj)) (UVM_OBJ_IS_VNODE(uobj) && uvn_text_p(uobj))
#define UVM_OBJ_IS_CLEAN(uobj) \ #define UVM_OBJ_IS_CLEAN(uobj) \
(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj)) (UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
/* /*
* UVM_OBJ_NEEDS_WRITEFAULT: true if the uobj needs to detect modification. * UVM_OBJ_NEEDS_WRITEFAULT: true if the uobj needs to detect modification.
@ -114,7 +114,7 @@ extern const struct uvm_pagerops aobj_pager;
*/ */
#define UVM_OBJ_NEEDS_WRITEFAULT(uobj) \ #define UVM_OBJ_NEEDS_WRITEFAULT(uobj) \
(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj)) (UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
#define UVM_OBJ_IS_AOBJ(uobj) \ #define UVM_OBJ_IS_AOBJ(uobj) \
((uobj)->pgops == &aobj_pager) ((uobj)->pgops == &aobj_pager)

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $ */ /* $NetBSD: uvm_page.c,v 1.245 2020/08/14 09:06:15 chs Exp $ */
/*- /*-
* Copyright (c) 2019, 2020 The NetBSD Foundation, Inc. * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@ -95,7 +95,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.245 2020/08/14 09:06:15 chs Exp $");
#include "opt_ddb.h" #include "opt_ddb.h"
#include "opt_uvm.h" #include "opt_uvm.h"
@ -240,15 +240,17 @@ uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
const uint64_t idx = pg->offset >> PAGE_SHIFT; const uint64_t idx = pg->offset >> PAGE_SHIFT;
int error; int error;
KASSERT(rw_write_held(uobj->vmobjlock));
error = radix_tree_insert_node(&uobj->uo_pages, idx, pg); error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
if (error != 0) { if (error != 0) {
return error; return error;
} }
if ((pg->flags & PG_CLEAN) == 0) { if ((pg->flags & PG_CLEAN) == 0) {
radix_tree_set_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG); uvm_obj_page_set_dirty(pg);
} }
KASSERT(((pg->flags & PG_CLEAN) == 0) == KASSERT(((pg->flags & PG_CLEAN) == 0) ==
radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG)); uvm_obj_page_dirty_p(pg));
return 0; return 0;
} }
@ -297,6 +299,8 @@ uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
{ {
struct vm_page *opg __unused; struct vm_page *opg __unused;
KASSERT(rw_write_held(uobj->vmobjlock));
opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT); opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
KASSERT(pg == opg); KASSERT(pg == opg);
} }
@ -1363,11 +1367,9 @@ uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
KASSERT(pg == oldpg); KASSERT(pg == oldpg);
if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) { if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) {
if ((newpg->flags & PG_CLEAN) != 0) { if ((newpg->flags & PG_CLEAN) != 0) {
radix_tree_clear_tag(&uobj->uo_pages, idx, uvm_obj_page_clear_dirty(newpg);
UVM_PAGE_DIRTY_TAG);
} else { } else {
radix_tree_set_tag(&uobj->uo_pages, idx, uvm_obj_page_set_dirty(newpg);
UVM_PAGE_DIRTY_TAG);
} }
} }
/* /*
@ -1788,8 +1790,13 @@ struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off) uvm_pagelookup(struct uvm_object *obj, voff_t off)
{ {
struct vm_page *pg; struct vm_page *pg;
bool ddb = false;
#ifdef DDB
extern int db_active;
ddb = db_active != 0;
#endif
/* No - used from DDB. KASSERT(rw_lock_held(obj->vmobjlock)); */ KASSERT(ddb || rw_lock_held(obj->vmobjlock));
pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT); pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page_status.c,v 1.5 2020/05/15 22:25:18 ad Exp $ */ /* $NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $ */
/*- /*-
* Copyright (c)2011 YAMAMOTO Takashi, * Copyright (c)2011 YAMAMOTO Takashi,
@ -27,7 +27,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.5 2020/05/15 22:25:18 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $");
#include <sys/param.h> #include <sys/param.h>
#include <sys/systm.h> #include <sys/systm.h>
@ -60,12 +60,11 @@ unsigned int
uvm_pagegetdirty(struct vm_page *pg) uvm_pagegetdirty(struct vm_page *pg)
{ {
struct uvm_object * const uobj __diagused = pg->uobject; struct uvm_object * const uobj __diagused = pg->uobject;
const uint64_t idx __diagused = pg->offset >> PAGE_SHIFT;
KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0); KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
KASSERT(uvm_page_owner_locked_p(pg, false)); KASSERT(uvm_page_owner_locked_p(pg, false));
KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) == KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
!!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG)); uvm_obj_page_dirty_p(pg));
return pg->flags & (PG_CLEAN|PG_DIRTY); return pg->flags & (PG_CLEAN|PG_DIRTY);
} }
@ -85,7 +84,6 @@ void
uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus) uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
{ {
struct uvm_object * const uobj = pg->uobject; struct uvm_object * const uobj = pg->uobject;
const uint64_t idx = pg->offset >> PAGE_SHIFT;
const unsigned int oldstatus = uvm_pagegetdirty(pg); const unsigned int oldstatus = uvm_pagegetdirty(pg);
enum cpu_count base; enum cpu_count base;
@ -93,7 +91,7 @@ uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0); KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
KASSERT(uvm_page_owner_locked_p(pg, true)); KASSERT(uvm_page_owner_locked_p(pg, true));
KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) == KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
!!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG)); uvm_obj_page_dirty_p(pg));
if (oldstatus == newstatus) { if (oldstatus == newstatus) {
return; return;
@ -106,20 +104,17 @@ uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
if (uobj != NULL) { if (uobj != NULL) {
if (newstatus == UVM_PAGE_STATUS_CLEAN) { if (newstatus == UVM_PAGE_STATUS_CLEAN) {
radix_tree_clear_tag(&uobj->uo_pages, idx, uvm_obj_page_clear_dirty(pg);
UVM_PAGE_DIRTY_TAG);
} else if (oldstatus == UVM_PAGE_STATUS_CLEAN) { } else if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
/* /*
* on first dirty page, mark the object dirty. * on first dirty page, mark the object dirty.
* for vnodes this inserts to the syncer worklist. * for vnodes this inserts to the syncer worklist.
*/ */
if (radix_tree_empty_tagged_tree_p(&uobj->uo_pages, if (uvm_obj_clean_p(uobj) &&
UVM_PAGE_DIRTY_TAG) &&
uobj->pgops->pgo_markdirty != NULL) { uobj->pgops->pgo_markdirty != NULL) {
(*uobj->pgops->pgo_markdirty)(uobj); (*uobj->pgops->pgo_markdirty)(uobj);
} }
radix_tree_set_tag(&uobj->uo_pages, idx, uvm_obj_page_set_dirty(pg);
UVM_PAGE_DIRTY_TAG);
} }
} }
if (newstatus == UVM_PAGE_STATUS_UNKNOWN) { if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
@ -131,7 +126,7 @@ uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
pg->flags &= ~(PG_CLEAN|PG_DIRTY); pg->flags &= ~(PG_CLEAN|PG_DIRTY);
pg->flags |= newstatus; pg->flags |= newstatus;
KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) == KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
!!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG)); uvm_obj_page_dirty_p(pg));
if ((pg->flags & PG_STAT) != 0) { if ((pg->flags & PG_STAT) != 0) {
if ((pg->flags & PG_SWAPBACKED) != 0) { if ((pg->flags & PG_SWAPBACKED) != 0) {
base = CPU_COUNT_ANONUNKNOWN; base = CPU_COUNT_ANONUNKNOWN;

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pager.c,v 1.128 2020/07/09 05:57:15 skrll Exp $ */ /* $NetBSD: uvm_pager.c,v 1.129 2020/08/14 09:06:15 chs Exp $ */
/* /*
* Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -32,7 +32,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.128 2020/07/09 05:57:15 skrll Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.129 2020/08/14 09:06:15 chs Exp $");
#include "opt_uvmhist.h" #include "opt_uvmhist.h"
#include "opt_readahead.h" #include "opt_readahead.h"
@ -391,10 +391,8 @@ uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
#endif /* defined(VMSWAP) */ #endif /* defined(VMSWAP) */
if (write && uobj != NULL) { if (write && uobj != NULL) {
KASSERT(radix_tree_get_tag(&uobj->uo_pages, KASSERT(uvm_obj_page_writeback_p(pg));
pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG)); uvm_obj_page_clear_writeback(pg);
radix_tree_clear_tag(&uobj->uo_pages,
pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG);
} }
/* /*

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $ */ /* $NetBSD: uvm_vnode.c,v 1.116 2020/08/14 09:06:15 chs Exp $ */
/* /*
* Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -45,7 +45,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.116 2020/08/14 09:06:15 chs Exp $");
#ifdef _KERNEL_OPT #ifdef _KERNEL_OPT
#include "opt_uvmhist.h" #include "opt_uvmhist.h"
@ -316,10 +316,9 @@ uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
KASSERT( KASSERT(
((a->ar_flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0) ((a->ar_flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
== (pg->offset < offset)); == (pg->offset < offset));
KASSERT(uvm_pagelookup(uobj, offset) == NULL KASSERT(uvm_pagelookup(uobj, offset) == NULL ||
|| ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
&& radix_tree_get_tag(&uobj->uo_pages, !uvm_obj_page_dirty_p(pg)));
offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
pg = NULL; pg = NULL;
if ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) { if ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
UVMHIST_LOG(ubchist, "dense", 0,0,0,0); UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
@ -501,14 +500,6 @@ uvn_text_p(struct uvm_object *uobj)
return (iflag & VI_EXECMAP) != 0; return (iflag & VI_EXECMAP) != 0;
} }
bool
uvn_clean_p(struct uvm_object *uobj)
{
return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
UVM_PAGE_DIRTY_TAG);
}
static void static void
uvn_alloc_ractx(struct uvm_object *uobj) uvn_alloc_ractx(struct uvm_object *uobj)
{ {