lots of cleanup:
use queue.h macros and KASSERT().
address amap offsets in pages instead of bytes.
make amap_ref() and amap_unref() take an amap, offset and length
instead of a vm_map_entry_t.
improve whitespace and comments.
parent 9f9181afde
commit 2ed28d2c7a
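The conversions described in the message are mechanical, and a small userland sketch may make them concrete. Assuming nothing beyond <sys/queue.h> and the standard C library, the example below walks a list with LIST_FOREACH() instead of an open-coded lh_first/le_next loop, uses assert() as a userland stand-in for the kernel's KASSERT(), and expresses a length in pages (len >> PAGE_SHIFT) rather than bytes, mirroring the amap_ref()/amap_unref() change to take an amap, a page offset and a page count instead of a vm_map_entry_t. The struct name, field names and the page-shift constant here are made up for illustration and are not taken from the NetBSD sources.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Toy element type; names are illustrative only. */
struct elt {
	int tag;
	LIST_ENTRY(elt) list;
};

LIST_HEAD(elthead, elt);

#define TOY_PAGE_SHIFT 12	/* stand-in for the MD PAGE_SHIFT */

int
main(void)
{
	struct elthead head = LIST_HEAD_INITIALIZER(head);
	struct elt *e, *found = NULL;
	size_t len_bytes = 5 * 4096;
	int i;

	for (i = 0; i < 4; i++) {
		e = malloc(sizeof(*e));
		assert(e != NULL);	/* userland stand-in for KASSERT() */
		e->tag = i;
		LIST_INSERT_HEAD(&head, e, list);
	}

	/*
	 * Open-coded walk, as in the old code:
	 *   for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next)
	 * queue.h macro, as in the new code:
	 *   LIST_FOREACH(elt, swhash, list)
	 */
	LIST_FOREACH(e, &head, list) {
		if (e->tag == 2) {
			found = e;
			break;
		}
	}
	assert(found != NULL);

	/* lengths expressed in pages rather than bytes */
	printf("tag %d, %zu pages\n", found->tag,
	    len_bytes >> TOY_PAGE_SHIFT);

	while ((e = LIST_FIRST(&head)) != NULL) {
		LIST_REMOVE(e, list);
		free(e);
	}
	return 0;
}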
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap.c,v 1.26 2000/08/03 00:47:02 thorpej Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */

/*
*
@@ -49,6 +49,7 @@
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#define UVM_AMAP_C /* ensure disabled inlines are in */
@@ -322,7 +323,7 @@ amap_extend(entry, addsize)
if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
amap_pp_adjref(amap, slotoff + slotmapped, addsize, 1);
amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1);
}
#endif
amap_unlock(amap);
@@ -340,8 +341,8 @@ amap_extend(entry, addsize)
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if ((slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)) <<
PAGE_SHIFT, 1);
(amap->am_nslot - (slotoff + slotmapped)),
1);
pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
}
@@ -419,8 +420,7 @@ amap_extend(entry, addsize)
amap->am_ppref = newppref;
if ((slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)) <<
PAGE_SHIFT, 1);
(amap->am_nslot - (slotoff + slotmapped)), 1);
pp_setreflen(newppref, amap->am_nslot, 1, slotadded);
}
#endif
@@ -568,7 +568,8 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
int slots, lcv;
vaddr_t chunksize;
UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (map=%p, entry=%p, waitf=%d)", map, entry, waitf, 0);
UVMHIST_LOG(maphist, " (map=%p, entry=%p, waitf=%d)",
map, entry, waitf, 0);

/*
* is there a map to copy? if not, create one from scratch.
@@ -686,7 +687,7 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
#ifdef UVM_AMAP_PPREF
if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
entry->end - entry->start, -1);
(entry->end - entry->start) >> PAGE_SHIFT, -1);
}
#endif

@@ -804,8 +805,10 @@ ReStart:
* XXXCDC: we should cause fork to fail, but
* we can't ...
*/
if (nanon)
if (nanon) {
simple_lock(&nanon->an_lock);
uvm_anfree(nanon);
}
simple_unlock(&anon->an_lock);
amap_unlock(amap);
uvm_wait("cownowpage");
@@ -855,7 +858,6 @@ amap_splitref(origref, splitref, offset)
vaddr_t offset;
{
int leftslots;
UVMHIST_FUNC("amap_splitref"); UVMHIST_CALLED(maphist);

AMAP_B2SLOT(leftslots, offset);
if (leftslots == 0)
@@ -927,21 +929,20 @@ amap_pp_establish(amap)
* => caller must check that ppref != PPREF_NONE before calling
*/
void
amap_pp_adjref(amap, curslot, bytelen, adjval)
amap_pp_adjref(amap, curslot, slotlen, adjval)
struct vm_amap *amap;
int curslot;
vsize_t bytelen;
vsize_t slotlen;
int adjval;
{
int slots, stopslot, *ppref, lcv;
int stopslot, *ppref, lcv;
int ref, len;

/*
* get init values
*/

AMAP_B2SLOT(slots, bytelen);
stopslot = curslot + slots;
stopslot = curslot + slotlen;
ppref = amap->am_ppref;

/*
@@ -996,7 +997,6 @@ amap_wiperange(amap, slotoff, slots)
{
int byanon, lcv, stop, curslot, ptr;
struct vm_anon *anon;
UVMHIST_FUNC("amap_wiperange"); UVMHIST_CALLED(maphist);

/*
* we can either traverse the amap by am_anon or by am_slots depending
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap.h,v 1.12 1999/07/07 05:31:40 thorpej Exp $ */
/* $NetBSD: uvm_amap.h,v 1.13 2000/11/25 06:27:59 chs Exp $ */

/*
*
@@ -83,7 +83,7 @@ struct vm_amap;
AMAP_INLINE
void amap_add /* add an anon to an amap */
__P((struct vm_aref *, vaddr_t,
struct vm_anon *, int));
struct vm_anon *, boolean_t));
struct vm_amap *amap_alloc /* allocate a new amap */
__P((vaddr_t, vaddr_t, int));
void amap_copy /* clear amap needs-copy flag */
@@ -110,7 +110,7 @@ void amap_lookups /* lookup multiple anons */
struct vm_anon **, int));
AMAP_INLINE
void amap_ref /* add a reference to an amap */
__P((vm_map_entry_t, int));
__P((struct vm_amap *, vaddr_t, vsize_t, int));
int amap_refs /* get number of references of amap */
__P((struct vm_amap *));
void amap_share_protect /* protect pages in a shared amap */
@@ -125,7 +125,7 @@ void amap_unlock /* unlock amap */
__P((struct vm_amap *));
AMAP_INLINE
void amap_unref /* drop reference to an amap */
__P((vm_map_entry_t, int));
__P((struct vm_amap *, vaddr_t, vsize_t, int));
void amap_wipeout /* remove all anons from amap */
__P((struct vm_amap *));
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap_i.h,v 1.14 1999/09/12 01:17:34 chs Exp $ */
/* $NetBSD: uvm_amap_i.h,v 1.15 2000/11/25 06:27:59 chs Exp $ */

/*
*
@@ -37,8 +37,6 @@
#ifndef _UVM_UVM_AMAP_I_H_
#define _UVM_UVM_AMAP_I_H_

#include "opt_uvmhist.h"

/*
* uvm_amap_i.h
*/
@@ -120,7 +118,7 @@ amap_add(aref, offset, anon, replace)
struct vm_aref *aref;
vaddr_t offset;
struct vm_anon *anon;
int replace;
boolean_t replace;
{
int slot;
struct vm_amap *amap = aref->ar_amap;
@@ -196,14 +194,16 @@ amap_unadd(aref, offset)
* amap_ref: gain a reference to an amap
*
* => amap must not be locked (we will lock)
* => "offset" and "len" are in units of pages
* => called at fork time to gain the child's reference
*/
AMAP_INLINE void
amap_ref(entry, flags)
vm_map_entry_t entry;
amap_ref(amap, offset, len, flags)
struct vm_amap *amap;
vaddr_t offset;
vsize_t len;
int flags;
{
struct vm_amap *amap = entry->aref.ar_amap;
UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist);

amap_lock(amap);
@@ -212,14 +212,13 @@ amap_ref(entry, flags)
amap->am_flags |= AMAP_SHARED;
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 &&
(entry->start - entry->end) >> PAGE_SHIFT != amap->am_nslot)
len != amap->am_nslot)
amap_pp_establish(amap);
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if (flags & AMAP_REFALL)
amap_pp_adjref(amap, 0, amap->am_nslot << PAGE_SHIFT, 1);
amap_pp_adjref(amap, 0, amap->am_nslot, 1);
else
amap_pp_adjref(amap, entry->aref.ar_pageoff,
entry->end - entry->start, 1);
amap_pp_adjref(amap, offset, len, 1);
}
#endif
amap_unlock(amap);
@@ -236,20 +235,20 @@ amap_ref(entry, flags)
* => amap must be unlocked (we will lock it).
*/
AMAP_INLINE void
amap_unref(entry, all)
vm_map_entry_t entry;
int all;
amap_unref(amap, offset, len, all)
struct vm_amap *amap;
vaddr_t offset;
vsize_t len;
boolean_t all;
{
struct vm_amap *amap = entry->aref.ar_amap;
UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist);

/*
* lock it
*/
amap_lock(amap);

UVMHIST_LOG(maphist,"(entry=0x%x) amap=0x%x refs=%d, nused=%d",
entry, amap, amap->am_ref, amap->am_nused);
UVMHIST_LOG(maphist," amap=0x%x refs=%d, nused=%d",
amap, amap->am_ref, amap->am_nused, 0);

/*
* if we are the last reference, free the amap and return.
@@ -269,15 +268,13 @@ amap_unref(entry, all)
if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref == NULL && all == 0 &&
(entry->start - entry->end) >> PAGE_SHIFT != amap->am_nslot)
if (amap->am_ppref == NULL && all == 0 && len != amap->am_nslot)
amap_pp_establish(amap);
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if (all)
amap_pp_adjref(amap, 0, amap->am_nslot << PAGE_SHIFT, -1);
amap_pp_adjref(amap, 0, amap->am_nslot, -1);
else
amap_pp_adjref(amap, entry->aref.ar_pageoff,
entry->end - entry->start, -1);
amap_pp_adjref(amap, offset, len, -1);
}
#endif
amap_unlock(amap);
@@ -1,4 +1,4 @@
/* $NetBSD: uvm_anon.c,v 1.9 2000/08/06 00:21:57 thorpej Exp $ */
/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */

/*
*
@@ -213,19 +213,12 @@ uvm_anfree(anon)
*/

if (pg->uobject) {

/* kill loan */
uvm_lock_pageq();
#ifdef DIAGNOSTIC
if (pg->loan_count < 1)
panic("uvm_anfree: obj owned page "
"with no loan count");
#endif
KASSERT(pg->loan_count > 0);
pg->loan_count--;
pg->uanon = NULL;
uvm_unlock_pageq();
simple_unlock(&pg->uobject->vmobjlock);

} else {

/*
@@ -245,13 +238,11 @@ uvm_anfree(anon)
anon, pg, 0, 0);
return;
}

pmap_page_protect(pg, VM_PROT_NONE);
uvm_lock_pageq(); /* lock out pagedaemon */
uvm_pagefree(pg); /* bye bye */
uvm_unlock_pageq(); /* free the daemon */

UVMHIST_LOG(maphist," anon 0x%x, page 0x%x: freed now!",
UVMHIST_LOG(maphist,"anon 0x%x, page 0x%x: freed now!",
anon, pg, 0, 0);
}
}
@@ -363,12 +354,14 @@ uvm_anon_lockloanpg(anon)

if (!locked) {
simple_unlock(&anon->an_lock);

/*
* someone locking the object has a chance to
* lock us right now
*/

simple_lock(&anon->an_lock);
continue; /* start over */
continue;
}
}

@@ -387,13 +380,9 @@ uvm_anon_lockloanpg(anon)
/*
* we did it! break the loop
*/

break;
}

/*
* done!
*/

return(pg);
}

@@ -478,7 +467,6 @@ anon_pagein(anon)
struct vm_page *pg;
struct uvm_object *uobj;
int rv;
UVMHIST_FUNC("anon_pagein"); UVMHIST_CALLED(pdhist);

/* locked: anon */
rv = uvmfault_anonget(NULL, NULL, anon);
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_aobj.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
|
||||
/* $NetBSD: uvm_aobj.c,v 1.37 2000/11/25 06:27:59 chs Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
|
||||
@ -50,6 +50,7 @@
|
||||
#include <sys/systm.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/malloc.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/pool.h>
|
||||
#include <sys/kernel.h>
|
||||
|
||||
@ -183,8 +184,6 @@ static boolean_t uao_releasepg __P((struct vm_page *,
|
||||
static boolean_t uao_pagein __P((struct uvm_aobj *, int, int));
|
||||
static boolean_t uao_pagein_page __P((struct uvm_aobj *, int));
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* aobj_pager
|
||||
*
|
||||
@ -243,7 +242,7 @@ uao_find_swhash_elt(aobj, pageidx, create)
|
||||
/*
|
||||
* now search the bucket for the requested tag
|
||||
*/
|
||||
for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
|
||||
LIST_FOREACH(elt, swhash, list) {
|
||||
if (elt->tag == page_tag)
|
||||
return(elt);
|
||||
}
|
||||
@ -375,7 +374,6 @@ uao_set_swslot(uobj, pageidx, slot)
|
||||
pool_put(&uao_swhash_elt_pool, elt);
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
/* we are using an array */
|
||||
oldslot = aobj->u_swslots[pageidx];
|
||||
@ -418,17 +416,18 @@ uao_free(aobj)
|
||||
for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
|
||||
int slot = elt->slots[j];
|
||||
|
||||
if (slot) {
|
||||
uvm_swap_free(slot, 1);
|
||||
|
||||
/*
|
||||
* this page is no longer
|
||||
* only in swap.
|
||||
*/
|
||||
simple_lock(&uvm.swap_data_lock);
|
||||
uvmexp.swpgonly--;
|
||||
simple_unlock(&uvm.swap_data_lock);
|
||||
if (slot == 0) {
|
||||
continue;
|
||||
}
|
||||
uvm_swap_free(slot, 1);
|
||||
|
||||
/*
|
||||
* this page is no longer
|
||||
* only in swap.
|
||||
*/
|
||||
simple_lock(&uvm.swap_data_lock);
|
||||
uvmexp.swpgonly--;
|
||||
simple_unlock(&uvm.swap_data_lock);
|
||||
}
|
||||
|
||||
next = LIST_NEXT(elt, list);
|
||||
@ -852,7 +851,7 @@ uao_flush(uobj, start, stop, flags)
|
||||
for ( ; (by_list && pp != NULL) ||
|
||||
(!by_list && curoff < stop) ; pp = ppnext) {
|
||||
if (by_list) {
|
||||
ppnext = pp->listq.tqe_next;
|
||||
ppnext = TAILQ_NEXT(pp, listq);
|
||||
|
||||
/* range check */
|
||||
if (pp->offset < start || pp->offset >= stop)
|
||||
@ -972,7 +971,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
|
||||
|
||||
UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
|
||||
aobj, offset, flags,0);
|
||||
|
||||
|
||||
/*
|
||||
* get number of pages
|
||||
*/
|
||||
@ -1251,7 +1250,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
|
||||
* => returns TRUE if page's object is still alive, FALSE if we
|
||||
* killed the page's object. if we return TRUE, then we
|
||||
* return with the object locked.
|
||||
* => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
|
||||
* => if (nextpgp != NULL) => we return the next page on the queue, and return
|
||||
* with the page queues locked [for pagedaemon]
|
||||
* => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
|
||||
* => we kill the aobj if it is not referenced and we are suppose to
|
||||
@ -1276,7 +1275,7 @@ uao_releasepg(pg, nextpgp)
|
||||
uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
|
||||
uvm_lock_pageq();
|
||||
if (nextpgp)
|
||||
*nextpgp = pg->pageq.tqe_next; /* next page for daemon */
|
||||
*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
|
||||
uvm_pagefree(pg);
|
||||
if (!nextpgp)
|
||||
uvm_unlock_pageq(); /* keep locked for daemon */
|
||||
@ -1286,11 +1285,7 @@ uao_releasepg(pg, nextpgp)
|
||||
*/
|
||||
if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
|
||||
return TRUE;
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if (aobj->u_obj.uo_refs)
|
||||
panic("uvm_km_releasepg: kill flag set on referenced object!");
|
||||
#endif
|
||||
KASSERT(aobj->u_obj.uo_refs == 0);
|
||||
|
||||
/*
|
||||
* if there are still pages in the object, we're done for now.
|
||||
@ -1494,7 +1489,6 @@ uao_pagein_page(aobj, pageidx)
|
||||
{
|
||||
struct vm_page *pg;
|
||||
int rv, slot, npages;
|
||||
UVMHIST_FUNC("uao_pagein_page"); UVMHIST_CALLED(pdhist);
|
||||
|
||||
pg = NULL;
|
||||
npages = 1;
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_ddb.h,v 1.4 2000/11/24 07:25:52 chs Exp $ */
|
||||
/* $NetBSD: uvm_ddb.h,v 1.5 2000/11/25 06:27:59 chs Exp $ */
|
||||
|
||||
/*
|
||||
*
|
||||
@ -40,13 +40,10 @@
|
||||
#ifdef _KERNEL
|
||||
|
||||
#ifdef DDB
|
||||
void uvm_map_print __P((vm_map_t, boolean_t));
|
||||
void uvm_map_printit __P((vm_map_t, boolean_t,
|
||||
void (*) __P((const char *, ...))));
|
||||
void uvm_object_print __P((struct uvm_object *, boolean_t));
|
||||
void uvm_object_printit __P((struct uvm_object *, boolean_t,
|
||||
void (*) __P((const char *, ...))));
|
||||
void uvm_page_print __P((struct vm_page *, boolean_t));
|
||||
void uvm_page_printit __P((struct vm_page *, boolean_t,
|
||||
void (*) __P((const char *, ...))));
|
||||
void uvmexp_print(void (*)(const char *, ...));
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_device.c,v 1.29 2000/11/24 20:34:01 chs Exp $ */
|
||||
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
|
||||
|
||||
/*
|
||||
*
|
||||
@ -119,7 +119,7 @@ udv_attach(arg, accessprot, off, size)
|
||||
voff_t off; /* used only for access check */
|
||||
vsize_t size; /* used only for access check */
|
||||
{
|
||||
dev_t device = *((dev_t *) arg);
|
||||
dev_t device = *((dev_t *)arg);
|
||||
struct uvm_device *udv, *lcv;
|
||||
paddr_t (*mapfn) __P((dev_t, off_t, int));
|
||||
UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);
|
||||
@ -132,13 +132,14 @@ udv_attach(arg, accessprot, off, size)
|
||||
|
||||
mapfn = cdevsw[major(device)].d_mmap;
|
||||
if (mapfn == NULL ||
|
||||
mapfn == (paddr_t (*) __P((dev_t, off_t, int))) enodev ||
|
||||
mapfn == (paddr_t (*) __P((dev_t, off_t, int))) nullop)
|
||||
mapfn == (paddr_t (*) __P((dev_t, off_t, int))) enodev ||
|
||||
mapfn == (paddr_t (*) __P((dev_t, off_t, int))) nullop)
|
||||
return(NULL);
|
||||
|
||||
/*
|
||||
* Negative offsets on the object are not allowed.
|
||||
*/
|
||||
|
||||
if (off < 0)
|
||||
return(NULL);
|
||||
|
||||
@ -160,14 +161,14 @@ udv_attach(arg, accessprot, off, size)
|
||||
* keep looping until we get it
|
||||
*/
|
||||
|
||||
while (1) {
|
||||
for (;;) {
|
||||
|
||||
/*
|
||||
* first, attempt to find it on the main list
|
||||
*/
|
||||
|
||||
simple_lock(&udv_lock);
|
||||
for (lcv = udv_list.lh_first ; lcv != NULL ; lcv = lcv->u_list.le_next) {
|
||||
LIST_FOREACH(lcv, &udv_list, u_list) {
|
||||
if (device == lcv->u_device)
|
||||
break;
|
||||
}
|
||||
@ -201,7 +202,7 @@ udv_attach(arg, accessprot, off, size)
|
||||
simple_lock(&lcv->u_obj.vmobjlock);
|
||||
lcv->u_obj.uo_refs++;
|
||||
simple_unlock(&lcv->u_obj.vmobjlock);
|
||||
|
||||
|
||||
simple_lock(&udv_lock);
|
||||
if (lcv->u_flags & UVM_DEVICE_WANTED)
|
||||
wakeup(lcv);
|
||||
@ -216,7 +217,8 @@ udv_attach(arg, accessprot, off, size)
|
||||
|
||||
simple_unlock(&udv_lock);
|
||||
/* NOTE: we could sleep in the following malloc() */
|
||||
MALLOC(udv, struct uvm_device *, sizeof(*udv), M_TEMP, M_WAITOK);
|
||||
MALLOC(udv, struct uvm_device *, sizeof(*udv), M_TEMP,
|
||||
M_WAITOK);
|
||||
simple_lock(&udv_lock);
|
||||
|
||||
/*
|
||||
@ -224,14 +226,14 @@ udv_attach(arg, accessprot, off, size)
|
||||
* to the list while we were sleeping...
|
||||
*/
|
||||
|
||||
for (lcv = udv_list.lh_first ; lcv != NULL ;
|
||||
lcv = lcv->u_list.le_next) {
|
||||
LIST_FOREACH(lcv, &udv_list, u_list) {
|
||||
if (device == lcv->u_device)
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* did we lose a race to someone else? free our memory and retry.
|
||||
* did we lose a race to someone else?
|
||||
* free our memory and retry.
|
||||
*/
|
||||
|
||||
if (lcv) {
|
||||
@ -247,18 +249,15 @@ udv_attach(arg, accessprot, off, size)
|
||||
|
||||
simple_lock_init(&udv->u_obj.vmobjlock);
|
||||
udv->u_obj.pgops = &uvm_deviceops;
|
||||
TAILQ_INIT(&udv->u_obj.memq); /* not used, but be safe */
|
||||
TAILQ_INIT(&udv->u_obj.memq);
|
||||
udv->u_obj.uo_npages = 0;
|
||||
udv->u_obj.uo_refs = 1;
|
||||
udv->u_flags = 0;
|
||||
udv->u_device = device;
|
||||
LIST_INSERT_HEAD(&udv_list, udv, u_list);
|
||||
simple_unlock(&udv_lock);
|
||||
|
||||
return(&udv->u_obj);
|
||||
|
||||
} /* while(1) loop */
|
||||
|
||||
}
|
||||
/*NOTREACHED*/
|
||||
}
|
||||
|
||||
@ -281,7 +280,7 @@ udv_reference(uobj)
|
||||
simple_lock(&uobj->vmobjlock);
|
||||
uobj->uo_refs++;
|
||||
UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
|
||||
uobj, uobj->uo_refs,0,0);
|
||||
uobj, uobj->uo_refs,0,0);
|
||||
simple_unlock(&uobj->vmobjlock);
|
||||
}
|
||||
|
||||
@ -297,37 +296,28 @@ static void
|
||||
udv_detach(uobj)
|
||||
struct uvm_object *uobj;
|
||||
{
|
||||
struct uvm_device *udv = (struct uvm_device *) uobj;
|
||||
struct uvm_device *udv = (struct uvm_device *)uobj;
|
||||
UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);
|
||||
|
||||
|
||||
/*
|
||||
* loop until done
|
||||
*/
|
||||
again:
|
||||
simple_lock(&uobj->vmobjlock);
|
||||
|
||||
if (uobj->uo_refs > 1) {
|
||||
uobj->uo_refs--; /* drop ref! */
|
||||
uobj->uo_refs--;
|
||||
simple_unlock(&uobj->vmobjlock);
|
||||
UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
|
||||
uobj,uobj->uo_refs,0,0);
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if (uobj->uo_npages || !TAILQ_EMPTY(&uobj->memq))
|
||||
panic("udv_detach: pages in a device object?");
|
||||
#endif
|
||||
|
||||
/*
|
||||
* now lock udv_lock
|
||||
*/
|
||||
simple_lock(&udv_lock);
|
||||
KASSERT(uobj->uo_npages == 0 && TAILQ_EMPTY(&uobj->memq));
|
||||
|
||||
/*
|
||||
* is it being held? if so, wait until others are done.
|
||||
*/
|
||||
|
||||
simple_lock(&udv_lock);
|
||||
if (udv->u_flags & UVM_DEVICE_HOLD) {
|
||||
udv->u_flags |= UVM_DEVICE_WANTED;
|
||||
simple_unlock(&uobj->vmobjlock);
|
||||
@ -338,15 +328,14 @@ again:
|
||||
/*
|
||||
* got it! nuke it now.
|
||||
*/
|
||||
|
||||
LIST_REMOVE(udv, u_list);
|
||||
if (udv->u_flags & UVM_DEVICE_WANTED)
|
||||
wakeup(udv);
|
||||
simple_unlock(&udv_lock);
|
||||
simple_unlock(&uobj->vmobjlock);
|
||||
FREE(udv, M_TEMP);
|
||||
|
||||
UVMHIST_LOG(maphist," <- done, freed uobj=0x%x", uobj,0,0,0);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@ -356,7 +345,8 @@ again:
|
||||
* flush pages out of a uvm object. a no-op for devices.
|
||||
*/
|
||||
|
||||
static boolean_t udv_flush(uobj, start, stop, flags)
|
||||
static boolean_t
|
||||
udv_flush(uobj, start, stop, flags)
|
||||
struct uvm_object *uobj;
|
||||
voff_t start, stop;
|
||||
int flags;
|
||||
@ -403,13 +393,6 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
|
||||
UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
|
||||
UVMHIST_LOG(maphist," flags=%d", flags,0,0,0);
|
||||
|
||||
/*
|
||||
* XXX: !PGO_LOCKED calls are currently not allowed (or used)
|
||||
*/
|
||||
|
||||
if ((flags & PGO_LOCKED) == 0)
|
||||
panic("udv_fault: !PGO_LOCKED fault");
|
||||
|
||||
/*
|
||||
* we do not allow device mappings to be mapped copy-on-write
|
||||
* so we kill any attempt to do so here.
|
||||
@ -425,6 +408,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
|
||||
/*
|
||||
* get device map function.
|
||||
*/
|
||||
|
||||
device = udv->u_device;
|
||||
mapfn = cdevsw[major(device)].d_mmap;
|
||||
|
||||
@ -434,6 +418,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
|
||||
* for pmap_enter (even if we have a submap). since virtual
|
||||
* addresses in a submap must match the main map, this is ok.
|
||||
*/
|
||||
|
||||
/* udv offset = (offset from start of entry) + entry's offset */
|
||||
curr_offset = entry->offset + (vaddr - entry->start);
|
||||
/* pmap va = vaddr (virtual address of pps[0]) */
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_glue.c,v 1.42 2000/10/11 17:27:59 thorpej Exp $ */
|
||||
/* $NetBSD: uvm_glue.c,v 1.43 2000/11/25 06:27:59 chs Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
||||
@ -121,7 +121,7 @@ uvm_kernacc(addr, len, rw)
|
||||
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
|
||||
|
||||
saddr = trunc_page((vaddr_t)addr);
|
||||
eaddr = round_page((vaddr_t)addr+len);
|
||||
eaddr = round_page((vaddr_t)addr + len);
|
||||
vm_map_lock_read(kernel_map);
|
||||
rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
|
||||
vm_map_unlock_read(kernel_map);
|
||||
@ -162,7 +162,7 @@ uvm_useracc(addr, len, rw)
|
||||
|
||||
vm_map_lock_read(map);
|
||||
rv = uvm_map_checkprot(map, trunc_page((vaddr_t)addr),
|
||||
round_page((vaddr_t)addr+len), prot);
|
||||
round_page((vaddr_t)addr + len), prot);
|
||||
vm_map_unlock_read(map);
|
||||
|
||||
return(rv);
|
||||
@ -247,8 +247,8 @@ uvm_vsunlock(p, addr, len)
|
||||
caddr_t addr;
|
||||
size_t len;
|
||||
{
|
||||
uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
|
||||
round_page((vaddr_t)addr+len));
|
||||
uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
|
||||
round_page((vaddr_t)addr + len));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -304,11 +304,11 @@ uvm_fork(p1, p2, shared, stack, stacksize, func, arg)
|
||||
*/
|
||||
p2->p_stats = &up->u_stats;
|
||||
memset(&up->u_stats.pstat_startzero, 0,
|
||||
(unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
|
||||
(caddr_t)&up->u_stats.pstat_startzero));
|
||||
((caddr_t)&up->u_stats.pstat_endzero -
|
||||
(caddr_t)&up->u_stats.pstat_startzero));
|
||||
memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,
|
||||
((caddr_t)&up->u_stats.pstat_endcopy -
|
||||
(caddr_t)&up->u_stats.pstat_startcopy));
|
||||
((caddr_t)&up->u_stats.pstat_endcopy -
|
||||
(caddr_t)&up->u_stats.pstat_startcopy));
|
||||
|
||||
/*
|
||||
* cpu_fork() copy and update the pcb, and make the child ready
|
||||
@ -332,9 +332,12 @@ void
|
||||
uvm_exit(p)
|
||||
struct proc *p;
|
||||
{
|
||||
vaddr_t va = (vaddr_t)p->p_addr;
|
||||
|
||||
uvmspace_free(p->p_vmspace);
|
||||
uvm_km_free(kernel_map, (vaddr_t)p->p_addr, USPACE);
|
||||
p->p_flag &= ~P_INMEM;
|
||||
uvm_fault_unwire(kernel_map, va, va + USPACE);
|
||||
uvm_km_free(kernel_map, va, USPACE);
|
||||
p->p_addr = NULL;
|
||||
}
|
||||
|
||||
@ -415,17 +418,16 @@ uvm_scheduler()
|
||||
int pri;
|
||||
struct proc *pp;
|
||||
int ppri;
|
||||
UVMHIST_FUNC("uvm_scheduler"); UVMHIST_CALLED(maphist);
|
||||
|
||||
loop:
|
||||
#ifdef DEBUG
|
||||
while (!enableswap)
|
||||
tsleep((caddr_t)&proc0, PVM, "noswap", 0);
|
||||
tsleep(&proc0, PVM, "noswap", 0);
|
||||
#endif
|
||||
pp = NULL; /* process to choose */
|
||||
ppri = INT_MIN; /* its priority */
|
||||
proclist_lock_read();
|
||||
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
|
||||
LIST_FOREACH(p, &allproc, p_list) {
|
||||
|
||||
/* is it a runnable swapped out process? */
|
||||
if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
|
||||
@ -451,7 +453,7 @@ loop:
|
||||
* Nothing to do, back to sleep
|
||||
*/
|
||||
if ((p = pp) == NULL) {
|
||||
tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
|
||||
tsleep(&proc0, PVM, "scheduler", 0);
|
||||
goto loop;
|
||||
}
|
||||
|
||||
@ -528,7 +530,7 @@ uvm_swapout_threads()
|
||||
outp = outp2 = NULL;
|
||||
outpri = outpri2 = 0;
|
||||
proclist_lock_read();
|
||||
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
|
||||
LIST_FOREACH(p, &allproc, p_list) {
|
||||
if (!swappable(p))
|
||||
continue;
|
||||
switch (p->p_stat) {
|
||||
@ -543,7 +545,7 @@ uvm_swapout_threads()
|
||||
case SSLEEP:
|
||||
case SSTOP:
|
||||
if (p->p_slptime >= maxslp) {
|
||||
uvm_swapout(p); /* zap! */
|
||||
uvm_swapout(p);
|
||||
didswap++;
|
||||
} else if (p->p_slptime > outpri) {
|
||||
outp = p;
|
||||
@ -570,6 +572,7 @@ uvm_swapout_threads()
|
||||
if (p)
|
||||
uvm_swapout(p);
|
||||
}
|
||||
pmap_update();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -600,13 +603,6 @@ uvm_swapout(p)
|
||||
*/
|
||||
cpu_swapout(p);
|
||||
|
||||
/*
|
||||
* Unwire the to-be-swapped process's user struct and kernel stack.
|
||||
*/
|
||||
addr = (vaddr_t)p->p_addr;
|
||||
uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !P_INMEM */
|
||||
pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
|
||||
|
||||
/*
|
||||
* Mark it as (potentially) swapped out.
|
||||
*/
|
||||
@ -617,5 +613,12 @@ uvm_swapout(p)
|
||||
SCHED_UNLOCK(s);
|
||||
p->p_swtime = 0;
|
||||
++uvmexp.swapouts;
|
||||
|
||||
/*
|
||||
* Unwire the to-be-swapped process's user struct and kernel stack.
|
||||
*/
|
||||
addr = (vaddr_t)p->p_addr;
|
||||
uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !P_INMEM */
|
||||
pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_map.c,v 1.84 2000/10/16 23:17:54 thorpej Exp $ */
|
||||
/* $NetBSD: uvm_map.c,v 1.85 2000/11/25 06:27:59 chs Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
||||
@ -182,6 +182,8 @@ static vm_map_entry_t uvm_mapent_alloc __P((vm_map_t));
|
||||
static void uvm_mapent_copy __P((vm_map_entry_t,vm_map_entry_t));
|
||||
static void uvm_mapent_free __P((vm_map_entry_t));
|
||||
static void uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
|
||||
static void uvm_map_reference_amap __P((vm_map_entry_t, int));
|
||||
static void uvm_map_unreference_amap __P((vm_map_entry_t, int));
|
||||
|
||||
/*
|
||||
* local inlines
|
||||
@ -281,6 +283,33 @@ uvm_map_entry_unwire(map, entry)
|
||||
uvm_fault_unwire_locked(map, entry->start, entry->end);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* wrapper for calling amap_ref()
|
||||
*/
|
||||
static __inline void
|
||||
uvm_map_reference_amap(entry, flags)
|
||||
vm_map_entry_t entry;
|
||||
int flags;
|
||||
{
|
||||
amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
|
||||
(entry->end - entry->start) >> PAGE_SHIFT, flags);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* wrapper for calling amap_unref()
|
||||
*/
|
||||
static __inline void
|
||||
uvm_map_unreference_amap(entry, flags)
|
||||
vm_map_entry_t entry;
|
||||
int flags;
|
||||
{
|
||||
amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
|
||||
(entry->end - entry->start) >> PAGE_SHIFT, flags);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* uvm_map_init: init mapping system at boot time. note that we allocate
|
||||
* and init the static pool of vm_map_entry_t's for the kernel here.
|
||||
@ -366,7 +395,7 @@ void uvm_map_clip_start(map, entry, start)
|
||||
|
||||
new_entry = uvm_mapent_alloc(map);
|
||||
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
|
||||
|
||||
|
||||
new_entry->end = start;
|
||||
new_adj = start - new_entry->start;
|
||||
if (entry->object.uvm_obj)
|
||||
@ -378,7 +407,7 @@ void uvm_map_clip_start(map, entry, start)
|
||||
}
|
||||
|
||||
uvm_map_entry_link(map, entry->prev, new_entry);
|
||||
|
||||
|
||||
if (UVM_ET_ISSUBMAP(entry)) {
|
||||
/* ... unlikely to happen, but play it safe */
|
||||
uvm_map_reference(new_entry->object.sub_map);
|
||||
@ -550,11 +579,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
|
||||
uoffset = 0;
|
||||
} else {
|
||||
if (uoffset == UVM_UNKNOWN_OFFSET) {
|
||||
#ifdef DIAGNOSTIC
|
||||
if (UVM_OBJ_IS_KERN_OBJECT(uobj) == 0)
|
||||
panic("uvm_map: unknown offset with "
|
||||
"non-kernel object");
|
||||
#endif
|
||||
KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
|
||||
uoffset = *startp - vm_map_min(kernel_map);
|
||||
}
|
||||
}
|
||||
@ -594,12 +619,12 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
|
||||
* look at refs since we don't care about its exact value.
|
||||
* if it is one (i.e. we have only reference) it will stay there
|
||||
*/
|
||||
|
||||
|
||||
if (prev_entry->aref.ar_amap &&
|
||||
amap_refs(prev_entry->aref.ar_amap) != 1) {
|
||||
goto step3;
|
||||
}
|
||||
|
||||
|
||||
/* got it! */
|
||||
|
||||
UVMCNT_INCR(map_backmerge);
|
||||
@ -816,22 +841,17 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
|
||||
UVMHIST_CALLED(maphist);
|
||||
|
||||
UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
|
||||
map, hint, length, flags);
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if ((align & (align - 1)) != 0)
|
||||
panic("uvm_map_findspace: alignment not power of 2");
|
||||
if ((flags & UVM_FLAG_FIXED) != 0 && align != 0)
|
||||
panic("uvm_map_findslace: fixed and alignment both specified");
|
||||
#endif
|
||||
map, hint, length, flags);
|
||||
KASSERT((align & (align - 1)) == 0);
|
||||
KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
|
||||
|
||||
/*
|
||||
* remember the original hint. if we are aligning, then we
|
||||
* may have to try again with no alignment constraint if
|
||||
* we fail the first time.
|
||||
*/
|
||||
orig_hint = hint;
|
||||
|
||||
orig_hint = hint;
|
||||
if (hint < map->min_offset) { /* check ranges ... */
|
||||
if (flags & UVM_FLAG_FIXED) {
|
||||
UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
|
||||
@ -1017,11 +1037,7 @@ uvm_unmap_remove(map, start, end, entry_list)
|
||||
*/
|
||||
if (UVM_ET_ISOBJ(entry) &&
|
||||
UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
|
||||
#ifdef DIAGNOSTIC
|
||||
if (vm_map_pmap(map) != pmap_kernel())
|
||||
panic("uvm_unmap_remove: kernel object "
|
||||
"mapped by non-kernel map");
|
||||
#endif
|
||||
KASSERT(vm_map_pmap(map) == pmap_kernel());
|
||||
|
||||
/*
|
||||
* note: kernel object mappings are currently used in
|
||||
@ -1118,24 +1134,15 @@ uvm_unmap_remove(map, start, end, entry_list)
|
||||
*/
|
||||
|
||||
void
|
||||
uvm_unmap_detach(first_entry, amap_unref_flags)
|
||||
uvm_unmap_detach(first_entry, flags)
|
||||
vm_map_entry_t first_entry;
|
||||
int amap_unref_flags;
|
||||
int flags;
|
||||
{
|
||||
vm_map_entry_t next_entry;
|
||||
UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
|
||||
|
||||
while (first_entry) {
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
/*
|
||||
* sanity check
|
||||
*/
|
||||
/* was part of vm_map_entry_delete() */
|
||||
if (VM_MAPENT_ISWIRED(first_entry))
|
||||
panic("unmap: still wired!");
|
||||
#endif
|
||||
|
||||
KASSERT(!VM_MAPENT_ISWIRED(first_entry));
|
||||
UVMHIST_LOG(maphist,
|
||||
" detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
|
||||
first_entry, first_entry->aref.ar_amap,
|
||||
@ -1147,12 +1154,12 @@ uvm_unmap_detach(first_entry, amap_unref_flags)
|
||||
*/
|
||||
|
||||
if (first_entry->aref.ar_amap)
|
||||
amap_unref(first_entry, amap_unref_flags);
|
||||
uvm_map_unreference_amap(first_entry, flags);
|
||||
|
||||
/*
|
||||
* drop reference to our backing object, if we've got one
|
||||
*/
|
||||
|
||||
|
||||
if (UVM_ET_ISSUBMAP(first_entry)) {
|
||||
/* ... unlikely to happen, but play it safe */
|
||||
uvm_map_deallocate(first_entry->object.sub_map);
|
||||
@ -1163,19 +1170,11 @@ uvm_unmap_detach(first_entry, amap_unref_flags)
|
||||
pgo_detach(first_entry->object.uvm_obj);
|
||||
}
|
||||
|
||||
/*
|
||||
* next entry
|
||||
*/
|
||||
next_entry = first_entry->next;
|
||||
uvm_mapent_free(first_entry);
|
||||
first_entry = next_entry;
|
||||
}
|
||||
|
||||
/*
|
||||
* done!
|
||||
*/
|
||||
UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1201,25 +1200,25 @@ uvm_map_reserve(map, size, offset, align, raddr)
|
||||
vaddr_t *raddr; /* IN:hint, OUT: reserved VA */
|
||||
{
|
||||
UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
|
||||
|
||||
|
||||
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
|
||||
map,size,offset,raddr);
|
||||
|
||||
|
||||
size = round_page(size);
|
||||
if (*raddr < vm_map_min(map))
|
||||
*raddr = vm_map_min(map); /* hint */
|
||||
|
||||
|
||||
/*
|
||||
* reserve some virtual space.
|
||||
*/
|
||||
|
||||
|
||||
if (uvm_map(map, raddr, size, NULL, offset, 0,
|
||||
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
|
||||
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
|
||||
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
|
||||
return (FALSE);
|
||||
}
|
||||
|
||||
|
||||
UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
|
||||
return (TRUE);
|
||||
}
|
||||
@ -1243,17 +1242,15 @@ uvm_map_replace(map, start, end, newents, nnewents)
|
||||
int nnewents;
|
||||
{
|
||||
vm_map_entry_t oldent, last;
|
||||
UVMHIST_FUNC("uvm_map_replace");
|
||||
UVMHIST_CALLED(maphist);
|
||||
|
||||
/*
|
||||
* first find the blank map entry at the specified address
|
||||
*/
|
||||
|
||||
|
||||
if (!uvm_map_lookup_entry(map, start, &oldent)) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* check to make sure we have a proper blank entry
|
||||
*/
|
||||
@ -1368,23 +1365,20 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
vsize_t elen;
|
||||
int nchain, error, copy_ok;
|
||||
UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
|
||||
|
||||
UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
|
||||
len,0);
|
||||
UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
/*
|
||||
* step 0: sanity check: start must be on a page boundary, length
|
||||
* must be page sized. can't ask for CONTIG/QREF if you asked for
|
||||
* REMOVE.
|
||||
*/
|
||||
if ((start & PAGE_MASK) || (len & PAGE_MASK))
|
||||
panic("uvm_map_extract1");
|
||||
if (flags & UVM_EXTRACT_REMOVE)
|
||||
if (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF))
|
||||
panic("uvm_map_extract2");
|
||||
#endif
|
||||
|
||||
KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
|
||||
KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
|
||||
(flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
|
||||
|
||||
/*
|
||||
* step 1: reserve space in the target map for the extracted area
|
||||
@ -1396,7 +1390,6 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
*dstaddrp = dstaddr; /* pass address back to caller */
|
||||
UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
|
||||
|
||||
|
||||
/*
|
||||
* step 2: setup for the extraction process loop by init'ing the
|
||||
* map entry chain, locking src map, and looking up the first useful
|
||||
@ -1413,6 +1406,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
|
||||
/* "start" is within an entry */
|
||||
if (flags & UVM_EXTRACT_QREF) {
|
||||
|
||||
/*
|
||||
* for quick references we don't clip the entry, so
|
||||
* the entry may map space "before" the starting
|
||||
@ -1420,19 +1414,21 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
* (which can be non-zero only the first time
|
||||
* through the "while" loop in step 3).
|
||||
*/
|
||||
|
||||
fudge = start - entry->start;
|
||||
} else {
|
||||
|
||||
/*
|
||||
* normal reference: we clip the map to fit (thus
|
||||
* fudge is zero)
|
||||
*/
|
||||
|
||||
UVM_MAP_CLIP_START(srcmap, entry, start);
|
||||
SAVE_HINT(srcmap, srcmap->hint, entry->prev);
|
||||
fudge = 0;
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
|
||||
/* "start" is not within an entry ... skip to next entry */
|
||||
if (flags & UVM_EXTRACT_CONTIG) {
|
||||
error = EINVAL;
|
||||
@ -1442,18 +1438,18 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
entry = entry->next;
|
||||
fudge = 0;
|
||||
}
|
||||
|
||||
/* save values from srcmap for step 6 */
|
||||
orig_entry = entry;
|
||||
orig_fudge = fudge;
|
||||
|
||||
|
||||
/*
|
||||
* step 3: now start looping through the map entries, extracting
|
||||
* as we go.
|
||||
*/
|
||||
|
||||
while (entry->start < end && entry != &srcmap->header) {
|
||||
|
||||
|
||||
/* if we are not doing a quick reference, clip it */
|
||||
if ((flags & UVM_EXTRACT_QREF) == 0)
|
||||
UVM_MAP_CLIP_END(srcmap, entry, end);
|
||||
@ -1469,6 +1465,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
error = ENOMEM;
|
||||
goto bad;
|
||||
}
|
||||
|
||||
/* amap_copy could clip (during chunk)! update fudge */
|
||||
if (fudge) {
|
||||
fudge = fudge - (entry->start - oldstart);
|
||||
@ -1513,7 +1510,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
if (newentry->aref.ar_amap) {
|
||||
newentry->aref.ar_pageoff =
|
||||
entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
|
||||
amap_ref(newentry, AMAP_SHARED |
|
||||
uvm_map_reference_amap(newentry, AMAP_SHARED |
|
||||
((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
|
||||
} else {
|
||||
newentry->aref.ar_pageoff = 0;
|
||||
@ -1540,7 +1537,6 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
fudge = 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* step 4: close off chain (in format expected by uvm_map_replace)
|
||||
*/
|
||||
@ -1548,16 +1544,14 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
if (chain)
|
||||
chain->prev = endchain;
|
||||
|
||||
|
||||
/*
|
||||
* step 5: attempt to lock the dest map so we can pmap_copy.
|
||||
* note usage of copy_ok:
|
||||
* 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
|
||||
* 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
|
||||
*/
|
||||
|
||||
if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
|
||||
|
||||
if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
|
||||
copy_ok = 1;
|
||||
if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
|
||||
nchain)) {
|
||||
@ -1566,15 +1560,11 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
error = EIO;
|
||||
goto bad;
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
copy_ok = 0;
|
||||
/* replace defered until step 7 */
|
||||
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* step 6: traverse the srcmap a second time to do the following:
|
||||
* - if we got a lock on the dstmap do pmap_copy
|
||||
@ -1596,7 +1586,6 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
|
||||
|
||||
while (entry->start < end && entry != &srcmap->header) {
|
||||
|
||||
if (copy_ok) {
|
||||
oldoffset = (entry->start + fudge) - start;
|
||||
elen = min(end, entry->end) -
|
||||
@ -1628,6 +1617,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
* unlock dstmap. we will dispose of deadentry in
|
||||
* step 7 if needed
|
||||
*/
|
||||
|
||||
if (copy_ok && srcmap != dstmap)
|
||||
vm_map_unlock(dstmap);
|
||||
|
||||
@ -1657,10 +1647,6 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
|
||||
goto bad2;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* done!
|
||||
*/
|
||||
return(0);
|
||||
|
||||
/*
|
||||
@ -1694,7 +1680,7 @@ bad2: /* src already unlocked */
|
||||
* => submap must have been init'd and have a zero reference count.
|
||||
* [need not be locked as we don't actually reference it]
|
||||
*/
|
||||
|
||||
|
||||
int
|
||||
uvm_map_submap(map, start, end, submap)
|
||||
vm_map_t map, submap;
|
||||
@ -1702,12 +1688,10 @@ uvm_map_submap(map, start, end, submap)
|
||||
{
|
||||
vm_map_entry_t entry;
|
||||
int result;
|
||||
UVMHIST_FUNC("uvm_map_submap"); UVMHIST_CALLED(maphist);
|
||||
|
||||
vm_map_lock(map);
|
||||
|
||||
VM_MAP_RANGE_CHECK(map, start, end);
|
||||
|
||||
|
||||
if (uvm_map_lookup_entry(map, start, &entry)) {
|
||||
UVM_MAP_CLIP_START(map, entry, start);
|
||||
UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
|
||||
@ -1720,10 +1704,6 @@ uvm_map_submap(map, start, end, submap)
|
||||
entry->start == start && entry->end == end &&
|
||||
entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
|
||||
!UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
|
||||
|
||||
/*
|
||||
* doit!
|
||||
*/
|
||||
entry->etype |= UVM_ET_SUBMAP;
|
||||
entry->object.sub_map = submap;
|
||||
entry->offset = 0;
|
||||
@ -1733,7 +1713,6 @@ uvm_map_submap(map, start, end, submap)
|
||||
result = KERN_INVALID_ARGUMENT;
|
||||
}
|
||||
vm_map_unlock(map);
|
||||
|
||||
return(result);
|
||||
}
|
||||
|
||||
@ -1760,12 +1739,12 @@ uvm_map_protect(map, start, end, new_prot, set_max)
|
||||
int rv = KERN_SUCCESS;
|
||||
UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
|
||||
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
|
||||
map, start, end, new_prot);
|
||||
|
||||
map, start, end, new_prot);
|
||||
|
||||
vm_map_lock(map);
|
||||
|
||||
VM_MAP_RANGE_CHECK(map, start, end);
|
||||
|
||||
|
||||
if (uvm_map_lookup_entry(map, start, &entry)) {
|
||||
UVM_MAP_CLIP_START(map, entry, start);
|
||||
} else {
|
||||
@ -1795,7 +1774,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
|
||||
|
||||
while ((current != &map->header) && (current->start < end)) {
|
||||
vm_prot_t old_prot;
|
||||
|
||||
|
||||
UVM_MAP_CLIP_END(map, current, end);
|
||||
|
||||
old_prot = current->protection;
|
||||
@ -1848,7 +1827,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
|
||||
|
||||
current = current->next;
|
||||
}
|
||||
|
||||
|
||||
out:
|
||||
vm_map_unlock(map);
|
||||
UVMHIST_LOG(maphist, "<- done, rv=%d",rv,0,0,0);
|
||||
@ -1889,21 +1868,18 @@ uvm_map_inherit(map, start, end, new_inheritance)
|
||||
}
|
||||
|
||||
vm_map_lock(map);
|
||||
|
||||
|
||||
VM_MAP_RANGE_CHECK(map, start, end);
|
||||
|
||||
|
||||
if (uvm_map_lookup_entry(map, start, &temp_entry)) {
|
||||
entry = temp_entry;
|
||||
UVM_MAP_CLIP_START(map, entry, start);
|
||||
} else {
|
||||
entry = temp_entry->next;
|
||||
}
|
||||
|
||||
while ((entry != &map->header) && (entry->start < end)) {
|
||||
UVM_MAP_CLIP_END(map, entry, end);
|
||||
|
||||
entry->inheritance = new_inheritance;
|
||||
|
||||
entry = entry->next;
|
||||
}
|
||||
|
||||
@ -1931,9 +1907,7 @@ uvm_map_advice(map, start, end, new_advice)
|
||||
map, start, end, new_advice);
|
||||
|
||||
vm_map_lock(map);
|
||||
|
||||
VM_MAP_RANGE_CHECK(map, start, end);
|
||||
|
||||
if (uvm_map_lookup_entry(map, start, &temp_entry)) {
|
||||
entry = temp_entry;
|
||||
UVM_MAP_CLIP_START(map, entry, start);
|
||||
@ -1960,10 +1934,7 @@ uvm_map_advice(map, start, end, new_advice)
|
||||
UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
|
||||
return (KERN_INVALID_ARGUMENT);
|
||||
}
|
||||
|
||||
|
||||
entry->advice = new_advice;
|
||||
|
||||
entry = entry->next;
|
||||
}
|
||||
|
||||
@ -2000,12 +1971,8 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
#endif
|
||||
UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
|
||||
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
|
||||
map, start, end, new_pageable);
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if ((map->flags & VM_MAP_PAGEABLE) == 0)
|
||||
panic("uvm_map_pageable: map %p not pageable", map);
|
||||
#endif
|
||||
map, start, end, new_pageable);
|
||||
KASSERT(map->flags & VM_MAP_PAGEABLE);
|
||||
|
||||
if ((lockflags & UVM_LK_ENTER) == 0)
|
||||
vm_map_lock(map);
|
||||
@ -2023,7 +1990,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
|
||||
if ((lockflags & UVM_LK_EXIT) == 0)
|
||||
vm_map_unlock(map);
|
||||
|
||||
|
||||
UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
|
||||
return (KERN_INVALID_ADDRESS);
|
||||
}
|
||||
@ -2035,10 +2002,12 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
|
||||
if (new_pageable) { /* unwire */
|
||||
UVM_MAP_CLIP_START(map, entry, start);
|
||||
|
||||
/*
|
||||
* unwiring. first ensure that the range to be unwired is
|
||||
* really wired down and that there are no holes.
|
||||
*/
|
||||
|
||||
while ((entry != &map->header) && (entry->start < end)) {
|
||||
if (entry->wired_count == 0 ||
|
||||
(entry->end < end &&
|
||||
@ -2058,6 +2027,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
* regardless of the number of mlock calls made on that
|
||||
* region.
|
||||
*/
|
||||
|
||||
entry = start_entry;
|
||||
while ((entry != &map->header) && (entry->start < end)) {
|
||||
UVM_MAP_CLIP_END(map, entry, end);
|
||||
@ -2069,10 +2039,6 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
vm_map_unlock(map);
|
||||
UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
|
||||
return(KERN_SUCCESS);
|
||||
|
||||
/*
|
||||
* end of unwire case!
|
||||
*/
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2098,13 +2064,15 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
|
||||
while ((entry != &map->header) && (entry->start < end)) {
|
||||
if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
|
||||
/*
|
||||
|
||||
/*
|
||||
* perform actions of vm_map_lookup that need the
|
||||
* write lock on the map: create an anonymous map
|
||||
* for a copy-on-write region, or an anonymous map
|
||||
* for a zero-fill region. (XXXCDC: submap case
|
||||
* ok?)
|
||||
*/
|
||||
|
||||
if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
|
||||
if (UVM_ET_ISNEEDSCOPY(entry) &&
|
||||
((entry->protection & VM_PROT_WRITE) ||
|
||||
@ -2122,14 +2090,17 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
/*
|
||||
* Check for holes
|
||||
*/
|
||||
|
||||
if (entry->protection == VM_PROT_NONE ||
|
||||
(entry->end < end &&
|
||||
(entry->next == &map->header ||
|
||||
entry->next->start > entry->end))) {
|
||||
|
||||
/*
|
||||
* found one. amap creation actions do not need to
|
||||
* be undone, but the wired counts need to be restored.
|
||||
*/
|
||||
|
||||
while (entry != &map->header && entry->end > start) {
|
||||
entry->wired_count--;
|
||||
entry = entry->prev;
|
||||
@ -2171,9 +2142,11 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
}
|
||||
|
||||
if (rv) { /* failed? */
|
||||
|
||||
/*
|
||||
* Get back to an exclusive (write) lock.
|
||||
*/
|
||||
|
||||
vm_map_upgrade(map);
|
||||
vm_map_unbusy(map);
|
||||
|
||||
@ -2186,6 +2159,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
* first drop the wiring count on all the entries
|
||||
* which haven't actually been wired yet.
|
||||
*/
|
||||
|
||||
failed_entry = entry;
|
||||
while (entry != &map->header && entry->start < end) {
|
||||
entry->wired_count--;
|
||||
@ -2196,6 +2170,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
* now, unwire all the entries that were successfully
|
||||
* wired above.
|
||||
*/
|
||||
|
||||
entry = start_entry;
|
||||
while (entry != failed_entry) {
|
||||
entry->wired_count--;
|
||||
@ -2214,9 +2189,11 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
|
||||
vm_map_unbusy(map);
|
||||
vm_map_unlock_read(map);
|
||||
} else {
|
||||
|
||||
/*
|
||||
* Get back to an exclusive (write) lock.
|
||||
*/
|
||||
|
||||
vm_map_upgrade(map);
|
||||
vm_map_unbusy(map);
|
||||
}
|
||||
@ -2249,10 +2226,7 @@ uvm_map_pageable_all(map, flags, limit)
|
||||
UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
|
||||
UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if ((map->flags & VM_MAP_PAGEABLE) == 0)
|
||||
panic("uvm_map_pageable_all: map %p not pageable", map);
|
||||
#endif
|
||||
KASSERT(map->flags & VM_MAP_PAGEABLE);
|
||||
|
||||
vm_map_lock(map);
|
||||
|
||||
@ -2483,13 +2457,11 @@ uvm_map_clean(map, start, end, flags)
|
||||
vsize_t size;
|
||||
int rv, error, refs;
|
||||
UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
|
||||
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
|
||||
map, start, end, flags);
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == (PGO_FREE|PGO_DEACTIVATE))
|
||||
panic("uvm_map_clean: FREE and DEACTIVATE");
|
||||
#endif
|
||||
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
|
||||
map, start, end, flags);
|
||||
KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
|
||||
(PGO_FREE|PGO_DEACTIVATE));
|
||||
|
||||
vm_map_lock_read(map);
|
||||
VM_MAP_RANGE_CHECK(map, start, end);
|
||||
@ -2501,6 +2473,7 @@ uvm_map_clean(map, start, end, flags)
|
||||
/*
|
||||
* Make a first pass to check for holes.
|
||||
*/
|
||||
|
||||
for (current = entry; current->start < end; current = current->next) {
|
||||
if (UVM_ET_ISSUBMAP(current)) {
|
||||
vm_map_unlock_read(map);
|
||||
@ -2518,11 +2491,7 @@ uvm_map_clean(map, start, end, flags)
|
||||
for (current = entry; current->start < end; current = current->next) {
|
||||
amap = current->aref.ar_amap; /* top layer */
|
||||
uobj = current->object.uvm_obj; /* bottom layer */
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if (start < current->start)
|
||||
panic("uvm_map_clean: hole");
|
||||
#endif
|
||||
KASSERT(start >= current->start);
|
||||
|
||||
/*
|
||||
* No amap cleaning necessary if:
|
||||
@ -2531,6 +2500,7 @@ uvm_map_clean(map, start, end, flags)
|
||||
*
|
||||
* (2) We're not deactivating or freeing pages.
|
||||
*/
|
||||
|
||||
if (amap == NULL ||
|
||||
(flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
|
||||
goto flush_object;
|
||||
@ -2560,12 +2530,14 @@ uvm_map_clean(map, start, end, flags)
|
||||
}
|
||||
|
||||
switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
|
||||
|
||||
/*
|
||||
* XXX In these first 3 cases, we always just
|
||||
* XXX deactivate the page. We may want to
|
||||
* XXX handle the different cases more
|
||||
* XXX specifically, in the future.
|
||||
*/
|
||||
|
||||
case PGO_CLEANIT|PGO_FREE:
|
||||
case PGO_CLEANIT|PGO_DEACTIVATE:
|
||||
case PGO_DEACTIVATE:
|
||||
@ -2584,23 +2556,14 @@ uvm_map_clean(map, start, end, flags)
|
||||
* by the anon (may simply be loaned to the
|
||||
* anon).
|
||||
*/
|
||||
|
||||
if ((pg->pqflags & PQ_ANON) == 0) {
|
||||
#ifdef DIAGNOSTIC
|
||||
if (pg->uobject != NULL)
|
||||
panic("uvm_map_clean: "
|
||||
"page anon vs. object "
|
||||
"inconsistency");
|
||||
#endif
|
||||
KASSERT(pg->uobject == NULL);
|
||||
uvm_unlock_pageq();
|
||||
simple_unlock(&anon->an_lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if (pg->uanon != anon)
|
||||
panic("uvm_map_clean: anon "
|
||||
"inconsistency");
|
||||
#endif
|
||||
KASSERT(pg->uanon == anon);
|
||||
|
||||
/* zap all mappings for the page. */
|
||||
pmap_page_protect(pg, VM_PROT_NONE);
|
||||
@ -2613,10 +2576,12 @@ uvm_map_clean(map, start, end, flags)
|
||||
continue;
|
||||
|
||||
case PGO_FREE:
|
||||
|
||||
/*
|
||||
* If there are multiple references to
|
||||
* the amap, just deactivate the page.
|
||||
*/
|
||||
|
||||
if (amap_refs(amap) > 1)
|
||||
goto deactivate_it;
|
||||
|
||||
@ -2661,7 +2626,6 @@ uvm_map_clean(map, start, end, flags)
|
||||
}
|
||||
start += size;
|
||||
}
|
||||
|
||||
vm_map_unlock_read(map);
|
||||
return (error);
|
||||
}
|
||||
@ -2686,14 +2650,12 @@ uvm_map_checkprot(map, start, end, protection)
|
||||
if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
entry = tmp_entry;
|
||||
|
||||
while (start < end) {
|
||||
if (entry == &map->header) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* no holes allowed
|
||||
*/
|
||||
@ -2711,7 +2673,7 @@ uvm_map_checkprot(map, start, end, protection)
|
||||
}
|
||||
|
||||
/* go to next entry */
|
||||
|
||||
|
||||
start = entry->end;
|
||||
entry = entry->next;
|
||||
}
|
||||
@ -2794,11 +2756,11 @@ uvmspace_unshare(p)
|
||||
struct proc *p;
|
||||
{
|
||||
struct vmspace *nvm, *ovm = p->p_vmspace;
|
||||
|
||||
|
||||
if (ovm->vm_refcnt == 1)
|
||||
/* nothing to do: vmspace isn't shared in the first place */
|
||||
return;
|
||||
|
||||
|
||||
/* make a new vmspace, still holding old one */
|
||||
nvm = uvmspace_fork(ovm);
|
||||
|
||||
@ -3007,7 +2969,7 @@ uvmspace_fork(vm1)
|
||||
*/
|
||||
if (new_entry->aref.ar_amap)
|
||||
/* share reference */
|
||||
amap_ref(new_entry, AMAP_SHARED);
|
||||
uvm_map_reference_amap(new_entry, AMAP_SHARED);
|
||||
|
||||
if (new_entry->object.uvm_obj &&
|
||||
new_entry->object.uvm_obj->pgops->pgo_reference)
|
||||
@ -3046,7 +3008,7 @@ uvmspace_fork(vm1)
|
||||
uvm_mapent_copy(old_entry, new_entry);
|
||||
|
||||
if (new_entry->aref.ar_amap)
|
||||
amap_ref(new_entry, 0);
|
||||
uvm_map_reference_amap(new_entry, 0);
|
||||
|
||||
if (new_entry->object.uvm_obj &&
|
||||
new_entry->object.uvm_obj->pgops->pgo_reference)
|
||||
@ -3060,7 +3022,7 @@ uvmspace_fork(vm1)
(UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
uvm_map_entry_link(new_map, new_map->header.prev,
new_entry);

/*
* the new entry will need an amap. it will either
* need to be copied from the old entry or created
@ -3104,7 +3066,7 @@ uvmspace_fork(vm1)
/* XXXCDC: M_WAITOK ... ok? */
}
}

/*
* if the parent's entry is wired down, then the
* parent process does not want page faults on
@ -3186,7 +3148,7 @@ uvmspace_fork(vm1)
new_entry->start,
(old_entry->end - old_entry->start),
old_entry->start);

/*
* protect the child's mappings if necessary
*/
@ -3226,19 +3188,6 @@ uvmspace_fork(vm1)
* DDB hooks
*/

/*
* uvm_map_print: print out a map
*/

void
uvm_map_print(map, full)
vm_map_t map;
boolean_t full;
{

uvm_map_printit(map, full, printf);
}

/*
* uvm_map_printit: actually prints the map
*/
@ -3268,9 +3217,11 @@ uvm_map_printit(map, full, pr)
entry = entry->next) {
(*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
entry, entry->start, entry->end, entry->object.uvm_obj,
(long long)entry->offset, entry->aref.ar_amap, entry->aref.ar_pageoff);
(long long)entry->offset, entry->aref.ar_amap,
entry->aref.ar_pageoff);
(*pr)(
"\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, wc=%d, adv=%d\n",
"\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
"wc=%d, adv=%d\n",
(entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
(entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
(entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
@ -3279,19 +3230,6 @@ uvm_map_printit(map, full, pr)
}
}

/*
* uvm_object_print: print out an object
*/

void
uvm_object_print(uobj, full)
struct uvm_object *uobj;
boolean_t full;
{

uvm_object_printit(uobj, full, printf);
}

/*
* uvm_object_printit: actually prints the object
*/
@ -3319,7 +3257,7 @@ uvm_object_printit(uobj, full, pr)
for (pg = TAILQ_FIRST(&uobj->memq);
pg != NULL;
pg = TAILQ_NEXT(pg, listq), cnt++) {
(*pr)("<%p,0x%lx> ", pg, pg->offset);
(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
if ((cnt % 3) == 2) {
(*pr)("\n ");
}
@ -3335,19 +3273,6 @@ const char page_flagbits[] =
const char page_pqflagbits[] =
"\20\1FREE\2INACTIVE\3ACTIVE\4LAUNDRY\5ANON\6AOBJ";

/*
* uvm_page_print: print out a page
*/

void
uvm_page_print(pg, full)
struct vm_page *pg;
boolean_t full;
{

uvm_page_printit(pg, full, printf);
}

/*
* uvm_page_printit: actually print the page
*/
@ -3358,7 +3283,7 @@ uvm_page_printit(pg, full, pr)
boolean_t full;
void (*pr) __P((const char *, ...));
{
struct vm_page *lcv;
struct vm_page *tpg;
struct uvm_object *uobj;
struct pglist *pgl;
char pgbuf[128];
@ -3388,7 +3313,7 @@ uvm_page_printit(pg, full, pr)
if ((pg->pqflags & PQ_FREE) == 0) {
if (pg->pqflags & PQ_ANON) {
if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
(*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
(*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
(pg->uanon) ? pg->uanon->u.an_page : NULL);
else
(*pr)(" anon backpointer is OK\n");
@ -3396,11 +3321,12 @@ uvm_page_printit(pg, full, pr)
uobj = pg->uobject;
if (uobj) {
(*pr)(" checking object list\n");
for (lcv = uobj->memq.tqh_first ; lcv ;
lcv = lcv->listq.tqe_next) {
if (lcv == pg) break;
TAILQ_FOREACH(tpg, &uobj->memq, listq) {
if (tpg == pg) {
break;
}
}
if (lcv)
if (tpg)
(*pr)(" page found on object list\n");
else
(*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
@ -3413,21 +3339,23 @@ uvm_page_printit(pg, full, pr)
int fl = uvm_page_lookup_freelist(pg);
pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
PGFL_ZEROS : PGFL_UNKNOWN];
}
else if (pg->pqflags & PQ_INACTIVE)
pgl = (pg->pqflags & PQ_SWAPBACKED) ?
} else if (pg->pqflags & PQ_INACTIVE) {
pgl = (pg->pqflags & PQ_SWAPBACKED) ?
&uvm.page_inactive_swp : &uvm.page_inactive_obj;
else if (pg->pqflags & PQ_ACTIVE)
} else if (pg->pqflags & PQ_ACTIVE) {
pgl = &uvm.page_active;
else
} else {
pgl = NULL;
}

if (pgl) {
(*pr)(" checking pageq list\n");
for (lcv = pgl->tqh_first ; lcv ; lcv = lcv->pageq.tqe_next) {
if (lcv == pg) break;
TAILQ_FOREACH(tpg, pgl, pageq) {
if (tpg == pg) {
break;
}
}
if (lcv)
if (tpg)
(*pr)(" page found on pageq list\n");
else
(*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
@ -1,4 +1,4 @@
/* $NetBSD: uvm_pager_i.h,v 1.9 2000/05/08 23:13:42 thorpej Exp $ */
/* $NetBSD: uvm_pager_i.h,v 1.10 2000/11/25 06:28:00 chs Exp $ */

/*
*
@ -56,11 +56,15 @@ PAGER_INLINE struct vm_page *
uvm_pageratop(kva)
vaddr_t kva;
{
struct vm_page *pg;
paddr_t pa;
boolean_t rv;

if (__predict_false(pmap_extract(pmap_kernel(), kva, &pa) == FALSE))
panic("uvm_pageratop");
return (PHYS_TO_VM_PAGE(pa));
rv = pmap_extract(pmap_kernel(), kva, &pa);
KASSERT(rv);
pg = PHYS_TO_VM_PAGE(pa);
KASSERT(pg != NULL);
return (pg);
}

#endif /* defined(UVM_PAGER_INLINE) || defined(UVM_PAGER) */
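The unconditional panic() on pmap_extract() failure becomes a pair of KASSERT()s, so the checks only cost anything in kernels built with assertions enabled, and the success case picks up an extra sanity check on the PHYS_TO_VM_PAGE() result. NetBSD's KASSERT() behaves roughly like the sketch below; the real macro lives in the libkern/systm headers and formats a more detailed panic message, so treat this as an approximation:

#ifdef DIAGNOSTIC
#define	KASSERT(e)							\
	do {								\
		if (!(e))						\
			panic("kernel assertion \"%s\" failed: "	\
			    "file %s, line %d", #e, __FILE__, __LINE__);\
	} while (0)
#else
#define	KASSERT(e)	/* nothing */
#endif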
@ -1,4 +1,4 @@
/* $NetBSD: uvm_pglist.c,v 1.11 2000/06/27 17:29:34 mrg Exp $ */
/* $NetBSD: uvm_pglist.c,v 1.12 2000/11/25 06:28:00 chs Exp $ */

/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@ -127,18 +127,19 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
/*
* Block all memory allocation and lock the free list.
*/
s = uvm_lock_fpageq(); /* lock free page queue */
s = uvm_lock_fpageq();

/* Are there even any free pages? */
if (uvmexp.free <= (uvmexp.reserve_pagedaemon +
uvmexp.reserve_kernel))
if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
goto out;

for (;; try += alignment) {
if (try + size > high) {

/*
* We've run past the allowable range.
*/

goto out;
}

@ -158,39 +159,34 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
/*
* Found a suitable starting page. See of the range is free.
*/

for (; idx < end; idx++) {
if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
/*
* Page not available.
*/
break;
}

idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);

if (idx > tryidx) {
lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);

if ((lastidxpa + PAGE_SIZE) != idxpa) {

/*
* Region not contiguous.
*/

break;
}
if (boundary != 0 &&
((lastidxpa ^ idxpa) & pagemask) != 0) {

/*
* Region crosses boundary.
*/

break;
}
}
}

if (idx == end) {
/*
* Woo hoo! Found one.
*/
break;
}
}
@ -209,7 +205,7 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
pgflidx = (m->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
for (tp = TAILQ_FIRST(&uvm.page_free[
free_list].pgfl_queues[pgflidx]);
free_list].pgfl_queues[pgflidx]);
tp != NULL;
tp = TAILQ_NEXT(tp, pageq)) {
if (tp == m)
@ -227,8 +223,7 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
m->pqflags = 0;
m->uobject = NULL;
m->uanon = NULL;
m->wire_count = 0;
m->loan_count = 0;
m->version++;
TAILQ_INSERT_TAIL(rlist, m, pageq);
idx++;
STAT_INCR(uvm_pglistalloc_npages);
@ -236,18 +231,18 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
error = 0;

out:
uvm_unlock_fpageq(s);

/*
* check to see if we need to generate some free pages waking
* the pagedaemon.
* XXX: we read uvm.free without locking
*/

if (uvmexp.free < uvmexp.freemin ||
(uvmexp.free < uvmexp.freetarg &&
uvmexp.inactive < uvmexp.inactarg))
if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
(uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
uvmexp.inactive < uvmexp.inactarg)) {
wakeup(&uvm.pagedaemon);
}

uvm_unlock_fpageq(s);

return (error);
}
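Two things change in the epilogue above: the low-memory test now also counts pages currently being paged out (uvmexp.paging), and the free-page queue lock is held across the test and the pagedaemon wakeup instead of being dropped at the out: label. Collecting only the new lines, the epilogue reads approximately as follows (blank lines and indentation approximated):

out:
	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */

	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmexp.inactive < uvmexp.inactarg)) {
		/* wake the pagedaemon while still holding the fpageq lock */
		wakeup(&uvm.pagedaemon);
	}

	uvm_unlock_fpageq(s);

	return (error);
}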