- Alter the convention for uvm_page_array slightly, so the basic search
  parameters can't change part way through a search: move the "uobj" and
  "flags" arguments over to uvm_page_array_init() and store those with the
  array.

- With that, detect when it's not possible to find any more pages in the
  tree with the given search parameters, and avoid repeated tree lookups if
  the caller loops over uvm_page_array_fill_and_peek().
ad 2020-05-25 21:15:10 +00:00
parent 1cb93ae5ea
commit 4bfe043955
8 changed files with 101 additions and 75 deletions
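
As a rough sketch of the new calling convention described above (modelled on the usage comment in uvm_page_array.h below; "uobj", "off" and do_something() are placeholders for a caller's own object, starting offset and per-page work):

	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t off = 0;

	/* the object and fill flags are now bound to the array up front */
	uvm_page_array_init(&a, uobj, 0);

	/* fill_and_peek() no longer takes uobj/flags, only the offset hint */
	while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
		off = pg->offset + PAGE_SIZE;	/* may only move forward here */
		do_something(pg);
		uvm_page_array_advance(&a);
	}
	uvm_page_array_fini(&a);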

pmap.c

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.112 2020/04/30 06:16:47 skrll Exp $ */
/* $NetBSD: pmap.c,v 1.113 2020/05/25 21:15:10 ad Exp $ */
/*-
* Copyright (c) 2001, 2002, 2020 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.112 2020/04/30 06:16:47 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.113 2020/05/25 21:15:10 ad Exp $");
#include "opt_cputype.h"
@@ -1253,11 +1253,10 @@ pmap_destroy(pmap_t pmap)
return;
#ifdef DIAGNOSTIC
uvm_page_array_init(&a);
uvm_page_array_init(&a, &pmap->pm_obj, 0);
off = 0;
rw_enter(pmap->pm_lock, RW_WRITER);
while ((pg = uvm_page_array_fill_and_peek(&a, &pmap->pm_obj, off, 0, 0))
!= NULL) {
while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
pt_entry_t *pde, *epde;
struct vm_page *spg;
struct pv_entry *pv, *npv;

genfs_io.c

@@ -1,4 +1,4 @@
/* $NetBSD: genfs_io.c,v 1.96 2020/05/17 19:38:16 ad Exp $ */
/* $NetBSD: genfs_io.c,v 1.97 2020/05/25 21:15:10 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.96 2020/05/17 19:38:16 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.97 2020/05/25 21:15:10 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -1005,7 +1005,8 @@ retry:
cleanall = true;
freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
uvm_page_array_init(&a);
uvm_page_array_init(&a, uobj, dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
(!async ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
for (;;) {
bool pgprotected;
@@ -1017,9 +1018,7 @@ retry:
* wait on pages being written back by other threads as well.
*/
pg = uvm_page_array_fill_and_peek(&a, uobj, nextoff, 0,
dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
(!async ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
pg = uvm_page_array_fill_and_peek(&a, nextoff, 0);
if (pg == NULL) {
break;
}

nfs_subs.c

@@ -1,4 +1,4 @@
/* $NetBSD: nfs_subs.c,v 1.239 2020/04/04 07:07:20 mlelstv Exp $ */
/* $NetBSD: nfs_subs.c,v 1.240 2020/05/25 21:15:10 ad Exp $ */
/*
* Copyright (c) 1989, 1993
@@ -70,7 +70,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v 1.239 2020/04/04 07:07:20 mlelstv Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v 1.240 2020/05/25 21:15:10 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_nfs.h"
@@ -1798,10 +1798,10 @@ nfs_clearcommit(struct mount *mp)
np->n_pushedhi = 0;
np->n_commitflags &=
~(NFS_COMMIT_PUSH_VALID | NFS_COMMIT_PUSHED_VALID);
uvm_page_array_init(&a);
uvm_page_array_init(&a, &vp->v_uobj, 0);
off = 0;
while ((pg = uvm_page_array_fill_and_peek(&a, &vp->v_uobj, off,
0, 0)) != NULL) {
while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) !=
NULL) {
pg->flags &= ~PG_NEEDCOMMIT;
uvm_page_array_advance(&a);
off = pg->offset + PAGE_SIZE;

uvm_aobj.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_aobj.c,v 1.145 2020/05/25 20:13:00 ad Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.146 2020/05/25 21:15:10 ad Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.145 2020/05/25 20:13:00 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.146 2020/05/25 21:15:10 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
@@ -618,10 +618,9 @@ uao_detach(struct uvm_object *uobj)
* involved in is complete), release any swap resources and free
* the page itself.
*/
uvm_page_array_init(&a);
uvm_page_array_init(&a, uobj, 0);
rw_enter(uobj->vmobjlock, RW_WRITER);
while ((pg = uvm_page_array_fill_and_peek(&a, uobj, 0, 0, 0))
!= NULL) {
while ((pg = uvm_page_array_fill_and_peek(&a, 0, 0)) != NULL) {
uvm_page_array_advance(&a);
pmap_page_protect(pg, VM_PROT_NONE);
if (pg->flags & PG_BUSY) {
@@ -705,10 +704,9 @@ uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
}
/* locked: uobj */
uvm_page_array_init(&a);
uvm_page_array_init(&a, uobj, 0);
curoff = start;
while ((pg = uvm_page_array_fill_and_peek(&a, uobj, curoff, 0, 0)) !=
NULL) {
while ((pg = uvm_page_array_fill_and_peek(&a, curoff, 0)) != NULL) {
if (pg->offset >= stop) {
break;
}
@@ -838,11 +836,11 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* time through).
*/
uvm_page_array_init(&a);
uvm_page_array_init(&a, uobj, 0);
gotpages = 0; /* # of pages we got so far */
for (lcv = 0; lcv < maxpages; lcv++) {
ptmp = uvm_page_array_fill_and_peek(&a, uobj,
offset + (lcv << PAGE_SHIFT), maxpages, 0);
ptmp = uvm_page_array_fill_and_peek(&a,
offset + (lcv << PAGE_SHIFT), maxpages);
if (ptmp == NULL) {
break;
}

uvm_object.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_object.c,v 1.22 2020/05/19 22:22:15 ad Exp $ */
/* $NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $ */
/*
* Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.22 2020/05/19 22:22:15 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_ddb.h"
@@ -258,10 +258,9 @@ uvm_object_printit(struct uvm_object *uobj, bool full,
return;
}
(*pr)(" PAGES <pg,offset>:\n ");
uvm_page_array_init(&a);
uvm_page_array_init(&a, uobj, 0);
off = 0;
while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0, 0))
!= NULL) {
while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
cnt++;
(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
if ((cnt % 3) == 0) {

uvm_page_array.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_page_array.c,v 1.5 2020/03/17 00:30:17 ad Exp $ */
/* $NetBSD: uvm_page_array.c,v 1.6 2020/05/25 21:15:10 ad Exp $ */
/*-
* Copyright (c)2011 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.5 2020/03/17 00:30:17 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.6 2020/05/25 21:15:10 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -42,10 +42,14 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.5 2020/03/17 00:30:17 ad Exp $"
*/
void
uvm_page_array_init(struct uvm_page_array *ar)
uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj,
unsigned int flags)
{
ar->ar_idx = ar->ar_npages = 0;
ar->ar_idx = 0;
ar->ar_npages = 0;
ar->ar_uobj = uobj;
ar->ar_flags = flags;
}
/*
@@ -78,7 +82,8 @@ uvm_page_array_clear(struct uvm_page_array *ar)
{
KASSERT(ar->ar_idx <= ar->ar_npages);
uvm_page_array_init(ar);
ar->ar_idx = 0;
ar->ar_npages = 0;
}
/*
@@ -124,14 +129,15 @@ uvm_page_array_advance(struct uvm_page_array *ar)
*/
int
uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
voff_t off, unsigned int nwant, unsigned int flags)
uvm_page_array_fill(struct uvm_page_array *ar, voff_t off, unsigned int nwant)
{
unsigned int npages;
#if defined(DEBUG)
unsigned int i;
#endif /* defined(DEBUG) */
unsigned int maxpages = __arraycount(ar->ar_pages);
struct uvm_object *uobj = ar->ar_uobj;
const int flags = ar->ar_flags;
const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;
@@ -161,8 +167,26 @@ uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
maxpages, dense);
}
if (npages == 0) {
uvm_page_array_clear(ar);
return ENOENT;
if (flags != 0) {
/*
* if dense or looking for tagged entries (or
* working backwards), fail right away.
*/
uvm_page_array_clear(ar);
return ENOENT;
} else {
/*
* there's nothing else to be found with the current
* set of arguments, in the current version of the
* tree.
*
* minimize repeated tree lookups by "finding" some
* null pointers, in case the caller keeps looping
* (a common use case).
*/
npages = maxpages;
memset(ar->ar_pages, 0, sizeof(ar->ar_pages[0]) * npages);
}
}
KASSERT(npages <= maxpages);
ar->ar_npages = npages;
@@ -171,6 +195,9 @@ uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
for (i = 0; i < ar->ar_npages; i++) {
struct vm_page * const pg = ar->ar_pages[i];
if (!dense && pg == NULL) {
continue;
}
KDASSERT(pg != NULL);
KDASSERT(pg->uobject == uobj);
if (backward) {
@@ -194,8 +221,8 @@ uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
*/
struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *a, struct uvm_object *uobj,
voff_t off, unsigned int nwant, unsigned int flags)
uvm_page_array_fill_and_peek(struct uvm_page_array *a, voff_t off,
unsigned int nwant)
{
struct vm_page *pg;
int error;
@@ -204,11 +231,10 @@ uvm_page_array_fill_and_peek(struct uvm_page_array *a, struct uvm_object *uobj,
if (pg != NULL) {
return pg;
}
error = uvm_page_array_fill(a, uobj, off, nwant, flags);
error = uvm_page_array_fill(a, off, nwant);
if (error != 0) {
return NULL;
}
pg = uvm_page_array_peek(a);
KASSERT(pg != NULL);
return pg;
}

uvm_page_array.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_page_array.h,v 1.2 2019/12/15 21:11:35 ad Exp $ */
/* $NetBSD: uvm_page_array.h,v 1.3 2020/05/25 21:15:10 ad Exp $ */
/*-
* Copyright (c)2011 YAMAMOTO Takashi,
@@ -40,33 +40,38 @@
*
* struct uvm_page_array a;
*
* uvm_page_array_init(&a);
* while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, ....))
* != NULL) {
* uvm_page_array_init(&a, uobj, ...);
* while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
* off = pg->offset + PAGE_SIZE;
* do_something(pg);
* uvm_page_array_advance(&a);
* }
* uvm_page_array_fini(&a);
*
* if scanning forwards the "off" argument may not go backwards.
* if scanning backwards, the "off" argument may not go forwards.
*/
struct vm_page;
struct uvm_page_array {
struct vm_page *ar_pages[16]; /* XXX tune */
unsigned int ar_npages; /* valid elements in ar_pages */
unsigned int ar_idx; /* index in ar_pages */
struct uvm_object *ar_uobj;
unsigned int ar_flags;
voff_t ar_lastoff;
struct vm_page *ar_pages[16]; /* XXX tune */
};
void uvm_page_array_init(struct uvm_page_array *);
void uvm_page_array_init(struct uvm_page_array *, struct uvm_object *,
unsigned int);
void uvm_page_array_fini(struct uvm_page_array *);
void uvm_page_array_clear(struct uvm_page_array *);
struct vm_page *uvm_page_array_peek(struct uvm_page_array *);
void uvm_page_array_advance(struct uvm_page_array *);
int uvm_page_array_fill(struct uvm_page_array *, struct uvm_object *,
voff_t, unsigned int, unsigned int);
int uvm_page_array_fill(struct uvm_page_array *, voff_t, unsigned int);
struct vm_page *uvm_page_array_fill_and_peek(struct uvm_page_array *,
struct uvm_object *, voff_t, unsigned int, unsigned int);
voff_t, unsigned int);
/*
* flags for uvm_page_array_fill and uvm_page_array_fill_and_peek

uvm_vnode.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_vnode.c,v 1.113 2020/05/19 22:22:15 ad Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.114 2020/05/25 21:15:10 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.113 2020/05/19 22:22:15 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.114 2020/05/25 21:15:10 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
@@ -231,8 +231,20 @@ uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
struct uvm_page_array a_store;
if (a == NULL) {
/*
* XXX fragile API
* note that the array can be the one supplied by the caller of
* uvn_findpages. in that case, fillflags used by the caller
* might not match strictly with ours.
* in particular, the caller might have filled the array
* without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
*/
const unsigned int fillflags =
((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
((flags & UFP_DIRTYONLY) ?
(UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
a = &a_store;
uvm_page_array_init(a);
uvm_page_array_init(a, uobj, fillflags);
}
count = found = 0;
npages = *npagesp;
@@ -278,10 +290,6 @@ uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
struct vm_page *pg;
const unsigned int fillflags =
((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
((flags & UFP_DIRTYONLY) ?
(UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
0, 0);
@@ -303,26 +311,18 @@ uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
for (;;) {
/*
* look for an existing page.
*
* XXX fragile API
* note that the array can be the one supplied by the caller of
* uvn_findpages. in that case, fillflags used by the caller
* might not match strictly with ours.
* in particular, the caller might have filled the array
* without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
*/
pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
fillflags);
pg = uvm_page_array_fill_and_peek(a, offset, nleft);
if (pg != NULL && pg->offset != offset) {
KASSERT(
((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
((a->ar_flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
== (pg->offset < offset));
KASSERT(uvm_pagelookup(uobj, offset) == NULL
|| ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
radix_tree_get_tag(&uobj->uo_pages,
|| ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0
&& radix_tree_get_tag(&uobj->uo_pages,
offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
pg = NULL;
if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
if ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
return 0;
}
@@ -408,7 +408,7 @@ uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
if (pg->offset == offset) {
uvm_page_array_advance(a);
} else {
KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
KASSERT((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
}
}
return 0;
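
For quick reference, the prototype change visible in the uvm_page_array.h hunk above, old versus new (a restatement of the diff, not additional API):

	/* old */
	void uvm_page_array_init(struct uvm_page_array *);
	int uvm_page_array_fill(struct uvm_page_array *, struct uvm_object *,
	    voff_t, unsigned int, unsigned int);
	struct vm_page *uvm_page_array_fill_and_peek(struct uvm_page_array *,
	    struct uvm_object *, voff_t, unsigned int, unsigned int);

	/* new: uobj and flags move to init and are dropped from fill/peek */
	void uvm_page_array_init(struct uvm_page_array *, struct uvm_object *,
	    unsigned int);
	int uvm_page_array_fill(struct uvm_page_array *, voff_t, unsigned int);
	struct vm_page *uvm_page_array_fill_and_peek(struct uvm_page_array *,
	    voff_t, unsigned int);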