Sprinkle some assertions:

amap_free(): Assert that the amap is locked.
amap_share_protect(): Assert that the amap is locked.
amap_wipeout(): Assert that the amap is locked.
uvm_anfree(): Assert that the anon has a reference count of 0 and is
              not locked.
uvm_anon_lockloanpg(): Assert that the anon is locked.
anon_pagein(): Assert that the anon is locked.
uvmfault_anonget(): Assert that the anon is locked.
uvm_pagealloc_strat(): Assert that the uobj or the anon, if one was
              passed, is locked (see the sketch after this list).
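
Every assertion above follows the same idiom: a function whose locking
protocol requires the caller to hold a simple lock asserts
simple_lock_held() on entry (uvm_anfree() asserts the negation, since it
expects an unlocked, unreferenced anon).  The following is a minimal
userland sketch of that idiom only -- the struct layouts and the stand-ins
for LOCK_ASSERT(), simple_lock_held(), simple_lock(), and simple_unlock()
are illustrative assumptions, not the kernel code (see the diffs below for
the real thing):

/*
 * Hedged sketch: a userland mock-up of the LOCK_ASSERT() idiom.
 */
#include <assert.h>

struct simplelock { int held; };	/* stand-in for the kernel type */

#define LOCK_ASSERT(ex)	assert(ex)	/* a real check under LOCKDEBUG */

static int  simple_lock_held(struct simplelock *l) { return (l->held); }
static void simple_lock(struct simplelock *l)      { l->held = 1; }
static void simple_unlock(struct simplelock *l)    { l->held = 0; }

struct vm_amap { struct simplelock am_l; /* ... */ };

/* amap_free() now documents and enforces its locking protocol. */
static void
amap_free(struct vm_amap *amap)
{
	LOCK_ASSERT(simple_lock_held(&amap->am_l));
	/* ... free am_slots, am_bckptr, am_anon ... */
}

int
main(void)
{
	struct vm_amap a = { { 0 } };

	simple_lock(&a.am_l);	/* honor the protocol: lock, then free */
	amap_free(&a);
	simple_unlock(&a.am_l);
	return (0);
}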

And fix the problems these have uncovered:
amap_cow_now(): Lock the new anon after allocating it, and unref and
                unlock it (rather than lock!) before freeing it in case
                of an error condition.  This should fix a problem reported
                by Dan Carosone when using cdrecord on an i386 MP kernel
                (the corrected pattern is sketched below).
uvm_fault(): Case1B -- Lock the new anon after allocating it, and unlock
             it later when we unlock the old anon.
             Case2 -- Lock the new anon after allocating it, and unlock
             it later by passing it to uvmfault_unlockall() (we set anon
             to NULL if we're not doing a promote fault).
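
All of these fixes enforce one discipline: lock the new anon as soon as
uvm_analloc() returns, and on an allocation failure drop the reference and
the lock before calling uvm_anfree(), which now asserts an unlocked anon
with no references.  Condensed from the amap_cow_now() hunk below (an
illustrative fragment, not compilable on its own):

	nanon = uvm_analloc();
	if (nanon) {
		/* lock the new anon immediately after allocating it */
		simple_lock(&nanon->an_lock);
		npg = uvm_pagealloc(NULL, 0, nanon, 0);
	} else
		npg = NULL;

	if (nanon == NULL || npg == NULL) {
		if (nanon) {
			/* unref and unlock (not lock!) before freeing */
			nanon->an_ref--;
			simple_unlock(&nanon->an_lock);
			uvm_anfree(nanon);
		}
		/* ... back out and wait for more memory ... */
	}
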
thorpej 2001-01-23 01:56:16 +00:00
parent 5704d1e2b8
commit 13759f5310
4 changed files with 62 additions and 22 deletions

sys/uvm/uvm_amap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.28 2001/01/23 01:56:16 thorpej Exp $ */
 
 /*
  *
@@ -260,6 +260,8 @@ amap_free(amap)
 		panic("amap_free");
 #endif
 
+	LOCK_ASSERT(simple_lock_held(&amap->am_l));
+
 	free(amap->am_slots, M_UVMAMAP);
 	free(amap->am_bckptr, M_UVMAMAP);
 	free(amap->am_anon, M_UVMAMAP);
@@ -465,6 +467,8 @@ amap_share_protect(entry, prot)
 	struct vm_amap *amap = entry->aref.ar_amap;
 	int slots, lcv, slot, stop;
 
+	LOCK_ASSERT(simple_lock_held(&amap->am_l));
+
 	AMAP_B2SLOT(slots, (entry->end - entry->start));
 	stop = entry->aref.ar_pageoff + slots;
@@ -508,6 +512,8 @@ amap_wipeout(amap)
 	UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);
 
+	LOCK_ASSERT(simple_lock_held(&amap->am_l));
+
 	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
 		int refs;
@@ -794,9 +800,10 @@ ReStart:
 		 * ok, time to do a copy-on-write to a new anon
 		 */
 		nanon = uvm_analloc();
-		if (nanon)
+		if (nanon) {
+			simple_lock(&nanon->an_lock);
 			npg = uvm_pagealloc(NULL, 0, nanon, 0);
-		else
+		} else
 			npg = NULL;	/* XXX: quiet gcc warning */
 
 		if (nanon == NULL || npg == NULL) {
@@ -806,7 +813,8 @@
 			 * we can't ...
 			 */
 			if (nanon) {
-				simple_lock(&nanon->an_lock);
+				nanon->an_ref--;
+				simple_unlock(&nanon->an_lock);
 				uvm_anfree(nanon);
 			}
 			simple_unlock(&anon->an_lock);
@@ -833,6 +841,7 @@
 			uvm_lock_pageq();
 			uvm_pageactivate(npg);
 			uvm_unlock_pageq();
+			simple_unlock(&nanon->an_lock);
 		}
 		simple_unlock(&anon->an_lock);

sys/uvm/uvm_anon.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_anon.c,v 1.11 2000/12/27 09:17:04 chs Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.12 2001/01/23 01:56:16 thorpej Exp $ */
 
 /*
  *
@@ -185,6 +185,9 @@ uvm_anfree(anon)
 	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);
 
+	KASSERT(anon->an_ref == 0);
+	LOCK_ASSERT(simple_lock_held(&anon->an_lock) == 0);
+
 	/*
 	 * get page
 	 */
@@ -274,9 +277,9 @@ uvm_anon_dropswap(anon)
 	struct vm_anon *anon;
 {
 	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
 
-	if (anon->an_swslot == 0) {
+	if (anon->an_swslot == 0)
 		return;
-	}
+
 	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
 	    anon, anon->an_swslot, 0, 0);
@@ -315,6 +318,8 @@ uvm_anon_lockloanpg(anon)
 	struct vm_page *pg;
 	boolean_t locked = FALSE;
 
+	LOCK_ASSERT(simple_lock_held(&anon->an_lock));
+
 	/*
 	 * loop while we have a resident page that has a non-zero loan count.
 	 * if we successfully get our lock, we will "break" the loop.
@@ -469,7 +474,10 @@ anon_pagein(anon)
 	int rv;
 
 	/* locked: anon */
+	LOCK_ASSERT(simple_lock_held(&anon->an_lock));
+
 	rv = uvmfault_anonget(NULL, NULL, anon);
+
 	/*
 	 * if rv == VM_PAGER_OK, anon is still locked, else anon
 	 * is unlocked

sys/uvm/uvm_fault.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.52 2000/11/27 08:40:03 chs Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.53 2001/01/23 01:56:16 thorpej Exp $ */
 
 /*
  *
@@ -300,6 +300,8 @@ uvmfault_anonget(ufi, amap, anon)
 	int result;
 	UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);
 
+	LOCK_ASSERT(simple_lock_held(&anon->an_lock));
+
 	result = 0;	/* XXX shut up gcc */
 	uvmexp.fltanget++;
 	/* bump rusage counters */
@@ -1180,13 +1182,18 @@ ReFault:
 		uvmexp.flt_acow++;
 		oanon = anon;	/* oanon = old, locked anon */
 		anon = uvm_analloc();
-		if (anon)
+		if (anon) {
+			simple_lock(&anon->an_lock);
 			pg = uvm_pagealloc(NULL, 0, anon, 0);
+		}
 
 		/* check for out of RAM */
 		if (anon == NULL || pg == NULL) {
-			if (anon)
+			if (anon) {
+				anon->an_ref--;
+				simple_unlock(&anon->an_lock);
 				uvm_anfree(anon);
+			}
 			uvmfault_unlockall(&ufi, amap, uobj, oanon);
 			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
 			if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
@@ -1211,11 +1218,11 @@ ReFault:
 		/* deref: can not drop to zero here by defn! */
 		oanon->an_ref--;
 
 		/*
-		 * note: oanon still locked.  anon is _not_ locked, but we
-		 * have the sole references to in from amap which _is_ locked.
-		 * thus, no one can get at it until we are done with it.
+		 * note: oanon is still locked, as is the new anon.  we
+		 * need to check for this later when we unlock oanon; if
+		 * oanon != anon, we'll have to unlock anon, too.
 		 */
 	} else {
@@ -1228,7 +1235,7 @@
 	}
 
-	/* locked: maps(read), amap, oanon */
+	/* locked: maps(read), amap, oanon, anon (if different from oanon) */
 
 	/*
 	 * now map the page in ...
@@ -1249,6 +1256,8 @@
 		 * We do, however, have to go through the ReFault path,
 		 * as the map may change while we're asleep.
 		 */
+		if (anon != oanon)
+			simple_unlock(&anon->an_lock);
 		uvmfault_unlockall(&ufi, amap, uobj, oanon);
 		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
 		if (uvmexp.swpgonly == uvmexp.swpages) {
@@ -1291,6 +1300,8 @@
 	 * done case 1!  finish up by unlocking everything and returning success
 	 */
+	if (anon != oanon)
+		simple_unlock(&anon->an_lock);
 	uvmfault_unlockall(&ufi, amap, uobj, oanon);
 	return (KERN_SUCCESS);
@@ -1470,6 +1481,9 @@ Case2:
 	 * set "pg" to the page we want to map in (uobjpage, usually)
 	 */
 
+	/* no anon in this case. */
+	anon = NULL;
+
 	uvmexp.flt_obj++;
 	if (UVM_ET_ISCOPYONWRITE(ufi.entry))
 		enter_prot &= ~VM_PROT_WRITE;
@@ -1577,6 +1591,7 @@ Case2:
 			 * a zero'd, dirty page, so have
 			 * uvm_pagealloc() do that for us.
 			 */
+			simple_lock(&anon->an_lock);
 			pg = uvm_pagealloc(NULL, 0, anon,
 			    (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
 		}
@@ -1586,6 +1601,12 @@
 		 */
 		if (anon == NULL || pg == NULL) {
+			if (anon != NULL) {
+				anon->an_ref--;
+				simple_unlock(&anon->an_lock);
+				uvm_anfree(anon);
+			}
+
 			/*
 			 * arg! must unbusy our page and fail or sleep.
 			 */
@@ -1613,8 +1634,6 @@
 			UVMHIST_LOG(maphist, "  out of RAM, waiting for more",
 			    0,0,0,0);
-			anon->an_ref--;
-			uvm_anfree(anon);
 			uvmexp.fltnoram++;
 			uvm_wait("flt_noram5");
 			goto ReFault;
@@ -1670,12 +1689,12 @@
 		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
 		    anon, 0);
 	}
 
 	/*
 	 * locked:
-	 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
+	 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj),
+	 * anon(if !null), pg(if anon)
 	 *
 	 * note: pg is either the uobjpage or the new page in the new anon
 	 */
@@ -1712,7 +1731,7 @@
 		pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
 		UVM_PAGE_OWN(pg, NULL);
-		uvmfault_unlockall(&ufi, amap, uobj, NULL);
+		uvmfault_unlockall(&ufi, amap, uobj, anon);
 		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
 		if (uvmexp.swpgonly == uvmexp.swpages) {
 			UVMHIST_LOG(maphist,
@@ -1757,7 +1776,7 @@
 	pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
 	UVM_PAGE_OWN(pg, NULL);
-	uvmfault_unlockall(&ufi, amap, uobj, NULL);
+	uvmfault_unlockall(&ufi, amap, uobj, anon);
 	UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
 	return (KERN_SUCCESS);

sys/uvm/uvm_page.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.47 2001/01/14 02:10:02 thorpej Exp $ */
+/* $NetBSD: uvm_page.c,v 1.48 2001/01/23 01:56:17 thorpej Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -889,6 +889,10 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
 	KASSERT(obj == NULL || anon == NULL);
 	KASSERT(off == trunc_page(off));
+
+	LOCK_ASSERT(obj == NULL || simple_lock_held(&obj->vmobjlock));
+	LOCK_ASSERT(anon == NULL || simple_lock_held(&anon->an_lock));
+
 	s = uvm_lock_fpageq();
 
 	/*