cosmetics. no functional changes.

- constify
- wrap long lines
- assertions
- comments
This commit is contained in:
yamt 2010-12-17 22:00:43 +00:00
parent 9f266c104e
commit 473b1715a5
1 changed file with 86 additions and 75 deletions
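Most of the "constify" changes below turn `struct uvm_faultctx *` parameters into `const struct uvm_faultctx *` for helpers that only read the fault context. A minimal sketch of what that buys, with hypothetical names (not the real uvm structures):

	struct faultctx {
		int npages;	/* filled in once during the check phase */
	};

	/* writer keeps the mutable pointer */
	static void
	fault_check(struct faultctx *ctx)
	{
		ctx->npages = 1;
	}

	/* read-only consumers take const; accidental writes no longer compile */
	static int
	fault_enter(const struct faultctx *ctx)
	{
		/* ctx->npages = 0; -- error: assignment of read-only location */
		return ctx->npages;
	}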

@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_fault.c,v 1.176 2010/12/15 13:44:17 pooka Exp $	*/
+/*	$NetBSD: uvm_fault.c,v 1.177 2010/12/17 22:00:43 yamt Exp $	*/
 
 /*
  *
@ -39,7 +39,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.176 2010/12/15 13:44:17 pooka Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.177 2010/12/17 22:00:43 yamt Exp $");
#include "opt_uvmhist.h" #include "opt_uvmhist.h"
@@ -98,7 +98,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.176 2010/12/15 13:44:17 pooka Exp $"
  * the code is structured as follows:
  *
  * - init the "IN" params in the ufi structure
- * ReFault:
+ * ReFault: (ERESTART returned to the loop in uvm_fault_internal)
  * - do lookups [locks maps], check protection, handle needs_copy
  * - check for case 0 fault (error)
  * - establish "range" of fault
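The amended comment points at the retry loop in uvm_fault_internal: helpers return ERESTART to redo the fault from the lookup step, which is what the old "ReFault:" label marked. A self-contained sketch of that loop shape (toy step functions; ERESTART's value is assumed here, NetBSD defines it in <sys/errno.h>):

	#define ERESTART (-3)	/* assumed; kernel-internal "restart" errno on NetBSD */

	/* toy stand-ins for uvm_fault_check() and the upper/lower handlers */
	static int fault_check(void)  { return 0; }
	static int fault_handle(void) { return 0; }

	static int
	fault(void)
	{
		int error = ERESTART;

		/* the "ReFault:" loop: run again only while a step asks to restart */
		while (error == ERESTART) {
			error = fault_check();
			if (error != 0)
				continue;	/* exits the loop unless ERESTART */
			error = fault_handle();
		}
		return error;
	}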
@@ -702,23 +702,22 @@ struct uvm_faultctx {
 	bool wire_mapping;
 	bool narrow;
 	bool wire_paging;
-	bool maxprot;
 	bool cow_now;
 	bool promote;
 };
 static inline int uvm_fault_check(
 	struct uvm_faultinfo *, struct uvm_faultctx *,
-	struct vm_anon ***, struct vm_page ***);
+	struct vm_anon ***, bool);
 static int uvm_fault_upper(
 	struct uvm_faultinfo *, struct uvm_faultctx *,
 	struct vm_anon **);
 static inline int uvm_fault_upper_lookup(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
 	struct vm_anon **, struct vm_page **);
 static inline void uvm_fault_upper_neighbor(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
 	vaddr_t, struct vm_page *, bool);
 static inline int uvm_fault_upper_loan(
 	struct uvm_faultinfo *, struct uvm_faultctx *,
@@ -730,25 +729,24 @@ static inline int uvm_fault_upper_direct(
 	struct uvm_faultinfo *, struct uvm_faultctx *,
 	struct uvm_object *, struct vm_anon *);
 static int uvm_fault_upper_enter(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
 	struct uvm_object *, struct vm_anon *,
 	struct vm_page *, struct vm_anon *);
 static inline void uvm_fault_upper_done(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
-	struct uvm_object *, struct vm_anon *,
-	struct vm_page *);
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
+	struct vm_anon *, struct vm_page *);
 static int uvm_fault_lower(
 	struct uvm_faultinfo *, struct uvm_faultctx *,
 	struct vm_page **);
 static inline void uvm_fault_lower_lookup(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
 	struct vm_page **);
 static inline void uvm_fault_lower_neighbor(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
 	vaddr_t, struct vm_page *, bool);
 static inline int uvm_fault_lower_io(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
 	struct uvm_object **, struct vm_page **);
 static inline int uvm_fault_lower_direct(
 	struct uvm_faultinfo *, struct uvm_faultctx *,
@@ -761,14 +759,13 @@ static inline int uvm_fault_lower_promote(
 	struct uvm_faultinfo *, struct uvm_faultctx *,
 	struct uvm_object *, struct vm_page *);
 static int uvm_fault_lower_enter(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
 	struct uvm_object *,
 	struct vm_anon *, struct vm_page *,
 	struct vm_page *);
 static inline void uvm_fault_lower_done(
-	struct uvm_faultinfo *, struct uvm_faultctx *,
-	struct uvm_object *, struct vm_anon *,
-	struct vm_page *);
+	struct uvm_faultinfo *, const struct uvm_faultctx *,
+	struct uvm_object *, struct vm_page *);
 
 int
 uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
@@ -784,9 +781,8 @@ uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
 		/* "wire" fault causes wiring of both mapping and paging */
 		.wire_mapping = (fault_flag & UVM_FAULT_WIRE) != 0,
 		.wire_paging = (fault_flag & UVM_FAULT_WIRE) != 0,
-		.maxprot = (fault_flag & UVM_FAULT_MAXPROT) != 0,
 	};
+	const bool maxprot = (fault_flag & UVM_FAULT_MAXPROT) != 0;
 	struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
 	struct vm_page *pages_store[UVM_MAXRANGE], **pages;
 	int error;
@@ -810,7 +806,7 @@ uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
 		anons = anons_store;
 		pages = pages_store;
-		error = uvm_fault_check(&ufi, &flt, &anons, &pages);
+		error = uvm_fault_check(&ufi, &flt, &anons, maxprot);
 		if (error != 0)
 			continue;
@@ -821,7 +817,8 @@ uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
 		if (pages[flt.centeridx] == PGO_DONTCARE)
 			error = uvm_fault_upper(&ufi, &flt, anons);
 		else {
-			struct uvm_object * const uobj = ufi.entry->object.uvm_obj;
+			struct uvm_object * const uobj =
+			    ufi.entry->object.uvm_obj;
 
 			if (uobj && uobj->pgops->pgo_fault != NULL) {
 				/*
@@ -834,10 +831,14 @@ uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
 				    flt.centeridx, flt.access_type,
 				    PGO_LOCKED|PGO_SYNCIO);
-				/* locked: nothing, pgo_fault has unlocked everything */
+				/*
+				 * locked: nothing, pgo_fault has unlocked
+				 * everything
+				 */
 				/*
-				 * object fault routine responsible for pmap_update().
+				 * object fault routine responsible for
+				 * pmap_update().
 				 */
 			} else {
 				error = uvm_fault_lower(&ufi, &flt, pages);
@@ -865,12 +866,13 @@ uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
  *
  * => called with nothing locked.
  * => if we fail (result != 0) we unlock everything.
+ * => initialize/adjust many members of flt.
  */
 
 static int
 uvm_fault_check(
 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
-	struct vm_anon ***ranons, struct vm_page ***rpages)
+	struct vm_anon ***ranons, bool maxprot)
 {
 	struct vm_amap *amap;
 	struct uvm_object *uobj;
@@ -883,7 +885,8 @@ uvm_fault_check(
 	 */
 	if (uvmfault_lookup(ufi, false) == false) {
-		UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", ufi->orig_rvaddr, 0,0,0);
+		UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", ufi->orig_rvaddr,
+		    0,0,0);
 		return EFAULT;
 	}
 	/* locked: maps(read) */
@@ -902,7 +905,7 @@ uvm_fault_check(
 	 * check protection
 	 */
-	check_prot = flt->maxprot ?
+	check_prot = maxprot ?
 	    ufi->entry->max_protection : ufi->entry->protection;
 	if ((check_prot & flt->access_type) != flt->access_type) {
 		UVMHIST_LOG(maphist,
@@ -941,7 +944,7 @@ uvm_fault_check(
 	if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
 		if (flt->cow_now || (ufi->entry->object.uvm_obj == NULL)) {
-			KASSERT(!flt->maxprot);
+			KASSERT(!maxprot);
 			/* need to clear */
 			UVMHIST_LOG(maphist,
 			    "  need to clear needs_copy and refault",0,0,0,0);
@@ -992,15 +995,15 @@ uvm_fault_check(
 	KASSERT(uvmadvice[ufi->entry->advice].advice ==
 	    ufi->entry->advice);
 	nback = MIN(uvmadvice[ufi->entry->advice].nback,
 	    (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
 	flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
-	nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
-	    ((ufi->entry->end - ufi->orig_rvaddr) >>
-	    PAGE_SHIFT) - 1);
 	/*
 	 * note: "-1" because we don't want to count the
 	 * faulting page as forw
 	 */
+	nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
+	    ((ufi->entry->end - ufi->orig_rvaddr) >>
+	    PAGE_SHIFT) - 1);
 	flt->npages = nback + nforw + 1;
 	flt->centeridx = nback;
@@ -1072,7 +1075,9 @@ uvm_fault_check(
 	 * => startva is fixed
 	 * => npages is fixed
 	 */
+	KASSERT(flt->startva <= ufi->orig_rvaddr);
+	KASSERT(ufi->orig_rvaddr + ufi->orig_size <=
+	    flt->startva + (flt->npages << PAGE_SHIFT));
 
 	return 0;
 }
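The two new KASSERTs state the invariant of the neighbor-range computation in the previous hunk: the faulting address lies inside [startva, startva + npages * PAGE_SIZE). A worked example with hypothetical numbers (4 KiB pages; uvmadvice limits of 3 back / 4 forward):

	#include <assert.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		const unsigned long page_shift = 12;		/* 4 KiB pages */
		const unsigned long page_size = 1UL << page_shift;
		const unsigned long entry_start = 0x100000;	/* 8-page map entry */
		const unsigned long entry_end = 0x108000;
		const unsigned long orig_rvaddr = 0x102000;	/* fault 2 pages in */
		const unsigned long adv_nback = 3, adv_nforw = 4;

		/* backward neighbors, clipped at the start of the entry */
		unsigned long nback = MIN(adv_nback,
		    (orig_rvaddr - entry_start) >> page_shift);		/* -> 2 */
		unsigned long startva = orig_rvaddr - (nback << page_shift);

		/* forward neighbors; "-1" keeps the faulting page out of the count */
		unsigned long nforw = MIN(adv_nforw,
		    ((entry_end - orig_rvaddr) >> page_shift) - 1);	/* -> 4 */

		unsigned long npages = nback + nforw + 1;	/* 2 + 4 + 1 = 7 */
		unsigned long centeridx = nback;		/* faulting page is index 2 */

		/* the invariants the new KASSERTs encode (orig_size = 1 page here) */
		assert(startva <= orig_rvaddr);
		assert(orig_rvaddr + page_size <= startva + (npages << page_shift));
		return (int)centeridx;
	}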
@@ -1089,7 +1094,7 @@ uvm_fault_check(
 static int
 uvm_fault_upper_lookup(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 	struct vm_anon **anons, struct vm_page **pages)
 {
 	struct vm_amap *amap = ufi->entry->aref.ar_amap;
@@ -1111,8 +1116,8 @@ uvm_fault_upper_lookup(
 	shadowed = false;
 	for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
 		/*
-		 * dont play with VAs that are already mapped
-		 * except for center)
+		 * don't play with VAs that are already mapped
+		 * (except for center)
 		 */
 		if (lcv != flt->centeridx &&
 		    pmap_extract(ufi->orig_map->pmap, currva, NULL)) {
@@ -1133,7 +1138,7 @@ uvm_fault_upper_lookup(
 		 */
 		pages[lcv] = PGO_DONTCARE;
-		if (lcv == flt->centeridx) {	/* save center for later! */
+		if (lcv == flt->centeridx) {	/* save center for later! */
 			shadowed = true;
 		} else {
 			struct vm_anon *anon = anons[lcv];
@@ -1175,7 +1180,7 @@ uvm_fault_upper_lookup(
 static void
 uvm_fault_upper_neighbor(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 	vaddr_t currva, struct vm_page *pg, bool readonly)
 {
 	UVMHIST_FUNC("uvm_fault_upper_neighbor"); UVMHIST_CALLED(maphist);
@@ -1315,7 +1320,7 @@ uvm_fault_upper(
 /*
  * uvm_fault_upper_loan: handle loaned upper page.
  *
- * 1. if not cow'ing now, just mark enter_prot as read-only.
+ * 1. if not cow'ing now, simply adjust flt->enter_prot.
  * 2. if cow'ing now, and if ref count is 1, break loan.
  */
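"Adjust flt->enter_prot" here means dropping VM_PROT_WRITE so the loaned page stays mapped read-only; the same wording change appears below for uvm_fault_lower_direct and uvm_fault_lower_direct_loan. A schematic of the two cases (simplified fields; VM_PROT_WRITE's value assumed for illustration):

	#define VM_PROT_WRITE	0x02	/* assumed, for illustration */

	struct ctx {
		unsigned enter_prot;	/* protection the mapping is entered with */
		int cow_now;
	};

	static void
	handle_loaned_page(struct ctx *flt, int anon_refcnt)
	{
		if (!flt->cow_now) {
			/* case 1: not cow'ing -- just enter the mapping read-only */
			flt->enter_prot &= ~VM_PROT_WRITE;
		} else if (anon_refcnt == 1) {
			/* case 2: cow'ing with a sole reference -- break the loan
			 * by copying to a freshly allocated page (elided) */
		}
	}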
@@ -1389,8 +1394,8 @@ uvm_fault_upper_promote(
 	UVMHIST_LOG(maphist, "  case 1B: COW fault",0,0,0,0);
 	uvmexp.flt_acow++;
-	error = uvmfault_promote(ufi, oanon, PGO_DONTCARE,
-	    &anon, &flt->anon_spare);
+	error = uvmfault_promote(ufi, oanon, PGO_DONTCARE, &anon,
+	    &flt->anon_spare);
 	switch (error) {
 	case 0:
 		break;
@@ -1446,7 +1451,7 @@ uvm_fault_upper_direct(
 static int
 uvm_fault_upper_enter(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 	struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg,
 	struct vm_anon *oanon)
 {
@@ -1462,11 +1467,13 @@ uvm_fault_upper_enter(
 	 * now map the page in.
 	 */
-	UVMHIST_LOG(maphist, "  MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
+	UVMHIST_LOG(maphist,
+	    "  MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
 	    ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote);
-	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr, VM_PAGE_TO_PHYS(pg),
-	    flt->enter_prot, flt->access_type | PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0))
-	    != 0) {
+	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
+	    VM_PAGE_TO_PHYS(pg),
+	    flt->enter_prot, flt->access_type | PMAP_CANFAIL |
+	    (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
 
 		/*
 		 * No need to undo what we did; we can simply think of
@@ -1490,7 +1497,7 @@ uvm_fault_upper_enter(
 		return ERESTART;
 	}
-	uvm_fault_upper_done(ufi, flt, uobj, anon, pg);
+	uvm_fault_upper_done(ufi, flt, anon, pg);
 
 	/*
 	 * done case 1!  finish up by unlocking everything and returning success
@@ -1510,8 +1517,8 @@ uvm_fault_upper_enter(
 static void
 uvm_fault_upper_done(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
-	struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg)
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
+	struct vm_anon *anon, struct vm_page *pg)
 {
 	const bool wire_paging = flt->wire_paging;
@@ -1658,11 +1665,12 @@ uvm_fault_lower(
 	 * - at this point uobjpage could be PG_WANTED (handle later)
 	 */
+	KASSERT(uobjpage != NULL);
 	KASSERT(uobj == NULL || uobj == uobjpage->uobject);
 	KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
 	    (uobjpage->flags & PG_CLEAN) != 0);
-	if (flt->promote == false) {
+	if (!flt->promote) {
 		error = uvm_fault_lower_direct(ufi, flt, uobj, uobjpage);
 	} else {
 		error = uvm_fault_lower_promote(ufi, flt, uobj, uobjpage);
@@ -1680,7 +1688,7 @@ uvm_fault_lower(
 static void
 uvm_fault_lower_lookup(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 	struct vm_page **pages)
 {
 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
@@ -1748,7 +1756,7 @@ uvm_fault_lower_lookup(
 static void
 uvm_fault_lower_neighbor(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 	vaddr_t currva, struct vm_page *pg, bool readonly)
 {
 	UVMHIST_FUNC("uvm_fault_lower_neighor"); UVMHIST_CALLED(maphist);
@@ -1802,7 +1810,7 @@ uvm_fault_lower_neighbor(
 static int
 uvm_fault_lower_io(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 	struct uvm_object **ruobj, struct vm_page **ruobjpage)
 {
 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
@@ -1933,7 +1941,7 @@ uvm_fault_lower_io(
 /*
  * uvm_fault_lower_direct: fault lower center page
  *
- * 1. adjust h/w mapping protection.
+ * 1. adjust flt->enter_prot.
  * 2. if page is loaned, resolve.
 */
@@ -1978,14 +1986,15 @@ uvm_fault_lower_direct(
 /*
  * uvm_fault_lower_direct_loan: resolve loaned page.
  *
- * 1. if not cow'ing, adjust h/w mapping protection.
+ * 1. if not cow'ing, adjust flt->enter_prot.
  * 2. if cow'ing, break loan.
  */
 
 static int
 uvm_fault_lower_direct_loan(
 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
-	struct uvm_object *uobj, struct vm_page **rpg, struct vm_page **ruobjpage)
+	struct uvm_object *uobj, struct vm_page **rpg,
+	struct vm_page **ruobjpage)
 {
 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
 	struct vm_page *pg;
@@ -2123,7 +2132,7 @@ uvm_fault_lower_promote(
 int
 uvm_fault_lower_enter(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 	struct uvm_object *uobj,
 	struct vm_anon *anon, struct vm_page *pg, struct vm_page *uobjpage)
 {
@@ -2154,9 +2163,12 @@ uvm_fault_lower_enter(
 	    ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote);
 	KASSERT((flt->access_type & VM_PROT_WRITE) == 0 ||
 	    (pg->flags & PG_RDONLY) == 0);
-	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr, VM_PAGE_TO_PHYS(pg),
-	    pg->flags & PG_RDONLY ? flt->enter_prot & ~VM_PROT_WRITE : flt->enter_prot,
-	    flt->access_type | PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
+	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
+	    VM_PAGE_TO_PHYS(pg),
+	    (pg->flags & PG_RDONLY) != 0 ?
+	    flt->enter_prot & ~VM_PROT_WRITE : flt->enter_prot,
+	    flt->access_type | PMAP_CANFAIL |
+	    (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
 
 		/*
 		 * No need to undo what we did; we can simply think of
@@ -2191,7 +2203,17 @@ uvm_fault_lower_enter(
 		return ERESTART;
 	}
-	uvm_fault_lower_done(ufi, flt, uobj, anon, pg);
+	uvm_fault_lower_done(ufi, flt, uobj, pg);
+
+	/*
+	 * note that pg can't be PG_RELEASED since we did not drop the object
+	 * lock since the last time we checked.
+	 */
+	KASSERT((pg->flags & PG_RELEASED) == 0);
+	if (pg->flags & PG_WANTED)
+		wakeup(pg);
+	pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
+	UVM_PAGE_OWN(pg, NULL);
 
 	pmap_update(ufi->orig_map->pmap);
 	uvmfault_unlockall(ufi, amap, uobj, anon);
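The block moved into uvm_fault_lower_enter wakes sleepers and un-busies the page while the object lock is still held, instead of doing it later in uvm_fault_lower_done. A schematic of that release protocol (flag bits are placeholders, not the real uvm_page.h values):

	#define PG_BUSY		0x01	/* placeholder flag values */
	#define PG_WANTED	0x02
	#define PG_FAKE		0x04

	struct page { unsigned flags; };

	static void wakeup(void *chan) { (void)chan; }	/* stand-in for kernel wakeup() */

	static void
	release_busy_page(struct page *pg)
	{
		/* wake anyone who slept on the busy page, then clear the flags;
		 * must happen before the object lock is dropped */
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		pg->flags &= ~(PG_BUSY | PG_FAKE | PG_WANTED);
	}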
@@ -2206,8 +2228,8 @@ uvm_fault_lower_enter(
 void
 uvm_fault_lower_done(
-	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
-	struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg)
+	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
+	struct uvm_object *uobj, struct vm_page *pg)
 {
 	bool dropswap = false;
@@ -2237,17 +2259,6 @@ uvm_fault_lower_done(
 	if (dropswap) {
 		uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
 	}
-
-	if (pg->flags & PG_WANTED)
-		wakeup(pg);
-
-	/*
-	 * note that pg can't be PG_RELEASED since we did not drop the object
-	 * lock since the last time we checked.
-	 */
-	KASSERT((pg->flags & PG_RELEASED) == 0);
-	pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
-	UVM_PAGE_OWN(pg, NULL);
 }
@@ -2283,7 +2294,7 @@ uvm_fault_wire(struct vm_map *map, vaddr_t start, vaddr_t end,
 	for (va = start; va < end; va += PAGE_SIZE) {
 		error = uvm_fault_internal(map, va, access_type,
-		    (maxprot ? UVM_FAULT_MAXPROT : 0) | UVM_FAULT_WIRE);
+		    (maxprot ? UVM_FAULT_MAXPROT : 0) | UVM_FAULT_WIRE);
 		if (error) {
 			if (va != start) {
 				uvm_fault_unwire(map, start, va);
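For reference, the uvm_fault_wire hunk above is a standard partial-rollback loop: wire page by page and, on failure, unwire only the prefix that succeeded. The same shape in isolation (hypothetical wire/unwire stand-ins):

	static int wire_one(unsigned long va) { (void)va; return 0; }
	static void unwire_range(unsigned long start, unsigned long end)
	{ (void)start; (void)end; }

	static int
	wire_range(unsigned long start, unsigned long end, unsigned long page_size)
	{
		unsigned long va;
		int error;

		for (va = start; va < end; va += page_size) {
			error = wire_one(va);
			if (error) {
				if (va != start)
					unwire_range(start, va);	/* undo the wired prefix */
				return error;
			}
		}
		return 0;
	}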