Just garbage-collect the #if 0'd pmap_transfer() stuff.

thorpej 2001-01-04 00:17:43 +00:00
parent 8981b09426
commit d04f6f6d57
2 changed files with 22 additions and 746 deletions

@@ -1,4 +1,4 @@
- /* $NetBSD: pmap.c,v 1.116 2001/01/01 22:13:54 thorpej Exp $ */
+ /* $NetBSD: pmap.c,v 1.117 2001/01/04 00:17:43 thorpej Exp $ */
/*
*
@@ -151,16 +151,10 @@
* is a void function.
*
* [B] new page tables pages (PTP)
- * - plan 1: call uvm_pagealloc()
+ * call uvm_pagealloc()
* => success: zero page, add to pm_pdir
- * => failure: we are out of free vm_pages
- * - plan 2: using a linked LIST of active pmaps we attempt
- * to "steal" a PTP from another process. we lock
- * the target pmap with simple_lock_try so that if it is
- * busy we do not block.
- * => success: remove old mappings, zero, add to pm_pdir
- * => failure: highly unlikely
- * - plan 3: panic
+ * => failure: we are out of free vm_pages, let pmap_enter()
+ * tell UVM about it.
*
* note: for kernel PTPs, we start with NKPTP of them. as we map
* kernel memory (at uvm_map time) we check to see if we've grown
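
A minimal sketch of the single surviving PTP plan described in the comment
above, mirroring the pmap_alloc_ptp() change later in this diff (the name
ptp_alloc_sketch is hypothetical; the other names appear elsewhere in this
file):

	/*
	 * Sketch only: one uvm_pagealloc() attempt, no steal fallback.
	 * UVM_PGA_ZERO performs the "zero page" step; a NULL return
	 * means we are out of free vm_pages and the caller decides
	 * what to do about it.
	 */
	static struct vm_page *
	ptp_alloc_sketch(struct pmap *pmap, int pde_index)
	{
		struct vm_page *ptp;

		ptp = uvm_pagealloc(&pmap->pm_obj, ptp_i2o(pde_index),
		    NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
		if (ptp == NULL)
			return (NULL);	/* out of free vm_pages */
		return (ptp);	/* success: caller adds it to pm_pdir */
	}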
@@ -183,14 +177,7 @@
* => success: map it in, free the pv_entry's, DONE!
* => failure: kmem_object locked, no free vm_pages, etc.
* save VA for later call to [a], go to plan 3.
- * - plan 3: using the pv_entry/pv_head lists find a pv_entry
- * structure that is part of a non-kernel lockable pmap
- * and "steal" that pv_entry by removing the mapping
- * and reusing that pv_entry.
- * => success: done
- * => failure: highly unlikely: unable to lock and steal
- * pv_entry
- * - plan 4: we panic.
+ * If we fail, we simply let pmap_enter() tell UVM about it.
*/
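
With the steal plans gone, a pv_entry or PTP shortage simply surfaces as a
pmap_enter() failure that UVM can observe. A hypothetical caller sketch,
assuming only the PMAP_CANFAIL flag and KERN_RESOURCE_SHORTAGE return shown
in the pmap_enter() hunk near the end of this file (enter_mapping_sketch is
not a function in the tree):

	/*
	 * Sketch only: a caller passing PMAP_CANFAIL sees a resource
	 * shortage as an error return instead of a steal or a panic,
	 * and can wait for free pages and retry.
	 */
	static int
	enter_mapping_sketch(struct pmap *pmap, vaddr_t va, paddr_t pa)
	{
		int error;

		error = pmap_enter(pmap, va, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_CANFAIL);
		if (error)
			return (error);	/* e.g. KERN_RESOURCE_SHORTAGE */
		return (0);
	}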
/*
@@ -350,7 +337,6 @@ static vaddr_t pv_cachedva; /* cached VA for later use */
*/
static struct pmap_head pmaps;
- static struct pmap *pmaps_hand = NULL; /* used by pmap_steal_ptp */
/*
* pool that pmap structures are allocated from
@@ -392,7 +378,7 @@ extern vaddr_t pentium_idt_vaddr;
*/
static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
- static struct vm_page *pmap_alloc_ptp __P((struct pmap *, int, boolean_t));
+ static struct vm_page *pmap_alloc_ptp __P((struct pmap *, int));
static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
#define ALLOCPV_NEED 0 /* need PV now */
#define ALLOCPV_TRY 1 /* just try to allocate, don't steal */
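
The codes above are the caller's policy knob for pmap_alloc_pv(); the
deleted pmap_transfer_ptes() further down used ALLOCPV_TRY for exactly this
purpose. A hypothetical usage sketch (optional_pv_sketch is not a function
in the tree):

	/*
	 * Sketch only: an optional operation asks for a pv_entry with
	 * ALLOCPV_TRY; a NULL return is acceptable and the caller
	 * simply punts rather than reaching for a fallback.
	 */
	static struct pv_entry *
	optional_pv_sketch(struct pmap *pmap)
	{
		struct pv_entry *pve;

		pve = pmap_alloc_pv(pmap, ALLOCPV_TRY);
		return (pve);	/* NULL: punt, mapping not entered */
	}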
@@ -405,7 +391,7 @@ static void pmap_free_pv __P((struct pmap *, struct pv_entry *));
static void pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void pmap_free_pv_doit __P((struct pv_entry *));
static void pmap_free_pvpage __P((void));
- static struct vm_page *pmap_get_ptp __P((struct pmap *, int, boolean_t));
+ static struct vm_page *pmap_get_ptp __P((struct pmap *, int));
static boolean_t pmap_is_curpmap __P((struct pmap *));
static pt_entry_t *pmap_map_ptes __P((struct pmap *));
static struct pv_entry *pmap_remove_pv __P((struct pv_head *, struct pmap *,
@@ -420,22 +406,10 @@ static void pmap_remove_ptes __P((struct pmap *,
vaddr_t, vaddr_t, int));
#define PMAP_REMOVE_ALL 0 /* remove all mappings */
#define PMAP_REMOVE_SKIPWIRED 1 /* skip wired mappings */
- static struct vm_page *pmap_steal_ptp __P((struct uvm_object *,
- vaddr_t));
static vaddr_t pmap_tmpmap_pa __P((paddr_t));
static pt_entry_t *pmap_tmpmap_pvepte __P((struct pv_entry *));
static void pmap_tmpunmap_pa __P((void));
static void pmap_tmpunmap_pvepte __P((struct pv_entry *));
- #if 0
- static boolean_t pmap_transfer_ptes __P((struct pmap *,
- struct pmap_transfer_location *,
- struct pmap *,
- struct pmap_transfer_location *,
- int, boolean_t));
- #endif
- static boolean_t pmap_try_steal_pv __P((struct pv_head *,
- struct pv_entry *,
- struct pv_entry *));
static void pmap_unmap_ptes __P((struct pmap *));
/*
@@ -1105,8 +1079,8 @@ pmap_alloc_pvpage(pmap, mode)
{
struct vm_page *pg;
struct pv_page *pvpage;
- int lcv, idx, npg, s;
- struct pv_entry *pv, *cpv, *prevpv;
+ struct pv_entry *pv;
+ int s;
/*
* if we need_entry and we've got unused pv_pages, allocate from there
@@ -1143,7 +1117,7 @@ pmap_alloc_pvpage(pmap, mode)
PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
if (pv_cachedva == 0) {
splx(s);
- goto steal_one;
+ return (NULL);
}
}
@@ -1154,7 +1128,7 @@ pmap_alloc_pvpage(pmap, mode)
if (!simple_lock_try(&uvmexp.kmem_object->vmobjlock)) {
splx(s);
- goto steal_one;
+ return (NULL);
}
pg = uvm_pagealloc(uvmexp.kmem_object, pv_cachedva -
@@ -1168,7 +1142,7 @@ pmap_alloc_pvpage(pmap, mode)
/* splimp now dropped */
if (pg == NULL)
- goto steal_one;
+ return (NULL);
/*
* add a mapping for our new pv_page and free its entrys (save one!)
@@ -1180,110 +1154,7 @@ pmap_alloc_pvpage(pmap, mode)
pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
pvpage = (struct pv_page *) pv_cachedva;
pv_cachedva = 0;
return(pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
steal_one:
/*
* if we don't really need a pv_entry right now, we can just return.
*/
if (mode != ALLOCPV_NEED)
return(NULL);
/*
* last ditch effort! we couldn't allocate a free page to make
* more pv_entrys so we try and steal one from someone else.
*/
pv = NULL;
for (lcv = 0 ; pv == NULL && lcv < vm_nphysseg ; lcv++) {
npg = vm_physmem[lcv].end - vm_physmem[lcv].start;
for (idx = 0 ; idx < npg ; idx++) {
struct pv_head *pvhead = vm_physmem[lcv].pmseg.pvhead;
if (pvhead->pvh_list == NULL)
continue; /* spot check */
if (!simple_lock_try(&pvhead->pvh_lock))
continue;
cpv = prevpv = pvhead->pvh_list;
while (cpv) {
if (pmap_try_steal_pv(pvhead, cpv, prevpv))
break;
prevpv = cpv;
cpv = cpv->pv_next;
}
simple_unlock(&pvhead->pvh_lock);
/* got one? break out of the loop! */
if (cpv) {
pv = cpv;
break;
}
}
}
return(pv);
}
/*
* pmap_try_steal_pv: try and steal a pv_entry from a pmap
*
* => return true if we did it!
*/
static boolean_t
pmap_try_steal_pv(pvh, cpv, prevpv)
struct pv_head *pvh;
struct pv_entry *cpv, *prevpv;
{
pt_entry_t *ptep; /* pointer to a PTE */
/*
* we never steal kernel mappings or mappings from pmaps we can't lock
*/
if (cpv->pv_pmap == pmap_kernel() ||
!simple_lock_try(&cpv->pv_pmap->pm_obj.vmobjlock))
return(FALSE);
/*
* yes, we can try and steal it. first we need to remove the
* mapping from the pmap.
*/
ptep = pmap_tmpmap_pvepte(cpv);
if (*ptep & PG_W) {
ptep = NULL; /* wired page, avoid stealing this one */
} else {
*ptep = 0; /* zap! */
if (pmap_is_curpmap(cpv->pv_pmap))
pmap_update_pg(cpv->pv_va);
pmap_tmpunmap_pvepte(cpv);
}
if (ptep == NULL) {
simple_unlock(&cpv->pv_pmap->pm_obj.vmobjlock);
return(FALSE); /* wired page, abort! */
}
cpv->pv_pmap->pm_stats.resident_count--;
if (cpv->pv_ptp && cpv->pv_ptp->wire_count)
/* drop PTP's wired count */
cpv->pv_ptp->wire_count--;
/*
* XXX: if wire_count goes to one the PTP could be freed, however,
* we'd have to lock the page queues (etc.) to do that and it could
* cause deadlock headaches. besides, the pmap we just stole from
* may want the mapping back anyway, so leave the PTP around.
*/
/*
* now we need to remove the entry from the pvlist
*/
if (cpv == pvh->pvh_list)
pvh->pvh_list = cpv->pv_next;
else
prevpv->pv_next = cpv->pv_next;
return(TRUE);
return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}
/*
@@ -1546,33 +1417,19 @@ pmap_remove_pv(pvh, pmap, va)
* => we use the ptp's wire_count to count the number of active mappings
* in the PTP (we start it at one to prevent any chance this PTP
* will ever leak onto the active/inactive queues)
- * => we should not be holding any pv_head locks (in case we are forced
- * to call pmap_steal_ptp())
- * => we may need to lock pv_head's if we have to steal a PTP
- * => just_try: true if we want a PTP, but not enough to steal one
- * from another pmap (e.g. during optional functions like pmap_copy)
*/
__inline static struct vm_page *
- pmap_alloc_ptp(pmap, pde_index, just_try)
+ pmap_alloc_ptp(pmap, pde_index)
struct pmap *pmap;
int pde_index;
- boolean_t just_try;
{
struct vm_page *ptp;
ptp = uvm_pagealloc(&pmap->pm_obj, ptp_i2o(pde_index), NULL,
UVM_PGA_USERESERVE|UVM_PGA_ZERO);
- if (ptp == NULL) {
- if (just_try)
- return(NULL);
- ptp = pmap_steal_ptp(&pmap->pm_obj, ptp_i2o(pde_index));
- if (ptp == NULL) {
- return (NULL);
- }
- /* stole one; zero it. */
- pmap_zero_page(VM_PAGE_TO_PHYS(ptp));
- }
+ if (ptp == NULL)
+ return(NULL);
/* got one! */
ptp->flags &= ~PG_BUSY; /* never busy */
@@ -1584,112 +1441,6 @@ pmap_alloc_ptp(pmap, pde_index, just_try)
return(ptp);
}
/*
* pmap_steal_ptp: steal a PTP from any pmap that we can access
*
* => obj is locked by caller.
* => we can throw away mappings at this level (except in the kernel's pmap)
* => stolen PTP is placed in <obj,offset> pmap
* => we lock pv_head's
* => hopefully, this function will be seldom used [much better to have
* enough free pages around for us to allocate off the free page list]
*/
static struct vm_page *
pmap_steal_ptp(obj, offset)
struct uvm_object *obj;
vaddr_t offset;
{
struct vm_page *ptp = NULL;
struct pmap *firstpmap;
struct uvm_object *curobj;
pt_entry_t *ptes;
int idx, lcv;
boolean_t caller_locked, we_locked;
simple_lock(&pmaps_lock);
if (pmaps_hand == NULL)
pmaps_hand = LIST_FIRST(&pmaps);
firstpmap = pmaps_hand;
do { /* while we haven't looped back around to firstpmap */
curobj = &pmaps_hand->pm_obj;
we_locked = FALSE;
caller_locked = (curobj == obj);
if (!caller_locked) {
we_locked = simple_lock_try(&curobj->vmobjlock);
}
if (caller_locked || we_locked) {
ptp = curobj->memq.tqh_first;
for (/*null*/; ptp != NULL; ptp = ptp->listq.tqe_next) {
/*
* might have found a PTP we can steal
* (unless it has wired pages).
*/
idx = ptp_o2i(ptp->offset);
#ifdef DIAGNOSTIC
if (VM_PAGE_TO_PHYS(ptp) !=
(pmaps_hand->pm_pdir[idx] & PG_FRAME))
panic("pmap_steal_ptp: PTP mismatch!");
#endif
ptes = (pt_entry_t *)
pmap_tmpmap_pa(VM_PAGE_TO_PHYS(ptp));
for (lcv = 0 ; lcv < PTES_PER_PTP ; lcv++)
if ((ptes[lcv] & (PG_V|PG_W)) ==
(PG_V|PG_W))
break;
if (lcv == PTES_PER_PTP)
pmap_remove_ptes(pmaps_hand, NULL, ptp,
(vaddr_t)ptes,
ptp_i2v(idx),
ptp_i2v(idx+1),
PMAP_REMOVE_ALL);
pmap_tmpunmap_pa();
if (lcv != PTES_PER_PTP)
/* wired, try next PTP */
continue;
/*
* got it!!!
*/
pmaps_hand->pm_pdir[idx] = 0; /* zap! */
pmaps_hand->pm_stats.resident_count--;
if (pmap_is_curpmap(pmaps_hand))
tlbflush();
else if (pmap_valid_entry(*APDP_PDE) &&
(*APDP_PDE & PG_FRAME) ==
pmaps_hand->pm_pdirpa) {
pmap_update_pg(((vaddr_t)APTE_BASE) +
ptp->offset);
}
/* put it in our pmap! */
uvm_pagerealloc(ptp, obj, offset);
break; /* break out of "for" loop */
}
if (we_locked) {
simple_unlock(&curobj->vmobjlock);
}
}
/* advance the pmaps_hand */
pmaps_hand = LIST_NEXT(pmaps_hand, pm_list);
if (pmaps_hand == NULL) {
pmaps_hand = LIST_FIRST(&pmaps);
}
} while (ptp == NULL && pmaps_hand != firstpmap);
simple_unlock(&pmaps_lock);
return(ptp);
}
/*
* pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
*
@@ -1698,10 +1449,9 @@ pmap_steal_ptp(obj, offset)
*/
static struct vm_page *
- pmap_get_ptp(pmap, pde_index, just_try)
+ pmap_get_ptp(pmap, pde_index)
struct pmap *pmap;
int pde_index;
- boolean_t just_try;
{
struct vm_page *ptp;
@@ -1723,7 +1473,7 @@ pmap_get_ptp(pmap, pde_index, just_try)
}
/* allocate a new PTP (updates ptphint) */
- return(pmap_alloc_ptp(pmap, pde_index, just_try));
+ return(pmap_alloc_ptp(pmap, pde_index));
}
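
pmap_get_ptp() is now a plain get-or-allocate with no just_try escape
hatch. A compressed sketch of that flow, assuming the names used elsewhere
in this file (get_ptp_sketch is hypothetical, and the pm_ptphint cache
handling the real function performs is elided):

	/*
	 * Sketch only: if the PDE is already valid, look up and return
	 * the existing PTP; otherwise fall through to pmap_alloc_ptp(),
	 * whose NULL return propagates up to pmap_enter().
	 */
	static struct vm_page *
	get_ptp_sketch(struct pmap *pmap, int pde_index)
	{
		struct vm_page *ptp;

		if (pmap_valid_entry(pmap->pm_pdir[pde_index])) {
			/* PTP already exists: find its vm_page */
			ptp = PHYS_TO_VM_PAGE(pmap->pm_pdir[pde_index] &
			    PG_FRAME);
			return (ptp);
		}
		/* no PTP yet: allocate a new one (may fail) */
		return (pmap_alloc_ptp(pmap, pde_index));
	}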
/*
@@ -1854,8 +1604,6 @@ pmap_destroy(pmap)
*/
simple_lock(&pmaps_lock);
- if (pmap == pmaps_hand)
- pmaps_hand = LIST_NEXT(pmaps_hand, pm_list);
LIST_REMOVE(pmap, pm_list);
simple_unlock(&pmaps_lock);
@@ -3039,450 +2787,6 @@ pmap_collect(pmap)
PMAP_REMOVE_SKIPWIRED);
}
#if 0
/*
* pmap_transfer: transfer (move or copy) mapping from one pmap
* to another.
*
* => this function is optional, it doesn't have to do anything
* => we assume that the mapping in the src pmap is valid (i.e. that
* it doesn't run off the end of the map's virtual space).
* => we assume saddr, daddr, and len are page aligned/lengthed
*/
void
pmap_transfer(dstpmap, srcpmap, daddr, len, saddr, move)
struct pmap *dstpmap, *srcpmap;
vaddr_t daddr, saddr;
vsize_t len;
boolean_t move;
{
/* base address of PTEs, dst could be NULL */
pt_entry_t *srcptes, *dstptes;
struct pmap_transfer_location srcl, dstl;
int dstvalid; /* # of PTEs left in dst's current PTP */
struct pmap *mapped_pmap; /* the pmap we passed to pmap_map_ptes */
vsize_t blklen;
int blkpgs, toxfer;
boolean_t ok;
#ifdef DIAGNOSTIC
/*
* sanity check: let's make sure our len doesn't overflow our dst
* space.
*/
if (daddr < VM_MAXUSER_ADDRESS) {
if (VM_MAXUSER_ADDRESS - daddr < len) {
printf("pmap_transfer: no room in user pmap "
"(addr=0x%lx, len=0x%lx)\n", daddr, len);
return;
}
} else if (daddr < VM_MIN_KERNEL_ADDRESS ||
daddr >= VM_MAX_KERNEL_ADDRESS) {
printf("pmap_transfer: invalid transfer address 0x%lx\n",
daddr);
} else {
if (VM_MAX_KERNEL_ADDRESS - daddr < len) {
printf("pmap_transfer: no room in kernel pmap "
"(addr=0x%lx, len=0x%lx)\n", daddr, len);
return;
}
}
#endif
/*
* ideally we would like to have either src or dst pmap's be the
* current pmap so that we can map the other one in APTE space
* (if needed... one of the maps could be the kernel's pmap).
*
* however, if we can't get this, then we have to use the tmpmap
* (alternately we could punt).
*/
if (!pmap_is_curpmap(dstpmap) && !pmap_is_curpmap(srcpmap)) {
dstptes = NULL; /* dstptes NOT mapped */
srcptes = pmap_map_ptes(srcpmap); /* let's map the source */
mapped_pmap = srcpmap;
} else {
if (!pmap_is_curpmap(srcpmap)) {
srcptes = pmap_map_ptes(srcpmap); /* possible APTE */
dstptes = PTE_BASE;
mapped_pmap = srcpmap;
} else {
dstptes = pmap_map_ptes(dstpmap); /* possible APTE */
srcptes = PTE_BASE;
mapped_pmap = dstpmap;
}
}
/*
* at this point we know that the srcptes are mapped. the dstptes
* are mapped if (dstptes != NULL). if (dstptes == NULL) then we
* will have to map the dst PTPs page at a time using the tmpmap.
* [XXX: is it worth the effort, or should we just punt?]
*/
srcl.addr = saddr;
srcl.pte = &srcptes[i386_btop(srcl.addr)];
srcl.ptp = NULL;
dstl.addr = daddr;
if (dstptes)
dstl.pte = &dstptes[i386_btop(dstl.addr)];
else
dstl.pte = NULL; /* we map page at a time */
dstl.ptp = NULL;
dstvalid = 0; /* force us to load a new dst PTP to start */
while (len) {
/*
* compute the size of this block.
*/
/* length in bytes */
blklen = i386_round_pdr(srcl.addr+1) - srcl.addr;
if (blklen > len)
blklen = len;
blkpgs = i386_btop(blklen);
/*
* if the block is not valid in the src pmap,
* then we can skip it!
*/
if (!pmap_valid_entry(srcpmap->pm_pdir[pdei(srcl.addr)])) {
len = len - blklen;
srcl.pte = srcl.pte + blkpgs;
srcl.addr += blklen;
dstl.addr += blklen;
if (blkpgs > dstvalid) {
dstvalid = 0;
dstl.ptp = NULL;
} else {
dstvalid = dstvalid - blkpgs;
}
if (dstptes == NULL && (len == 0 || dstvalid == 0)) {
if (dstl.pte) {
pmap_tmpunmap_pa();
dstl.pte = NULL;
}
} else {
dstl.pte += blkpgs;
}
continue;
}
/*
* we have a valid source block of "blkpgs" PTEs to transfer.
* if we don't have any dst PTEs ready, then get some.
*/
if (dstvalid == 0) {
if (!pmap_valid_entry(dstpmap->
pm_pdir[pdei(dstl.addr)])) {
#ifdef DIAGNOSTIC
if (dstl.addr >= VM_MIN_KERNEL_ADDRESS)
panic("pmap_transfer: missing kernel "
"PTP at 0x%lx", dstl.addr);
#endif
dstl.ptp = pmap_get_ptp(dstpmap,
pdei(dstl.addr), TRUE);
if (dstl.ptp == NULL) /* out of RAM? punt. */
break;
} else {
dstl.ptp = NULL;
}
dstvalid = i386_btop(i386_round_pdr(dstl.addr+1) -
dstl.addr);
if (dstptes == NULL) {
dstl.pte = (pt_entry_t *)
pmap_tmpmap_pa(dstpmap->
pm_pdir[pdei(dstl.addr)]
& PG_FRAME);
dstl.pte = dstl.pte + (PTES_PER_PTP - dstvalid);
}
}
/*
* we have a valid source block of "blkpgs" PTEs to transfer.
* we have a valid dst block of "dstvalid" PTEs ready.
* thus we can transfer min(blkpgs, dstvalid) PTEs now.
*/
srcl.ptp = NULL; /* don't know source PTP yet */
if (dstvalid < blkpgs)
toxfer = dstvalid;
else
toxfer = blkpgs;
if (toxfer > 0) {
ok = pmap_transfer_ptes(srcpmap, &srcl, dstpmap, &dstl,
toxfer, move);
if (!ok) /* memory shortage? punt. */
break;
dstvalid -= toxfer;
blkpgs -= toxfer;
len -= i386_ptob(toxfer);
if (blkpgs == 0) /* out of src PTEs? restart */
continue;
}
/*
* we have a valid source block of "blkpgs" PTEs left
* to transfer. we have just used up our "dstvalid"
* PTEs, and thus must obtain more dst PTEs to finish
* off the src block. since we are now going to
* obtain a brand new dst PTP, we know we can finish
* the src block in one more transfer.
*/
#ifdef DIAGNOSTIC
if (dstvalid)
panic("pmap_transfer: dstvalid non-zero after drain");
if ((dstl.addr & (NBPD-1)) != 0)
panic("pmap_transfer: dstaddr not on PD boundary "
"(0x%lx)\n", dstl.addr);
#endif
if (dstptes == NULL && dstl.pte != NULL) {
/* dispose of old PT mapping */
pmap_tmpunmap_pa();
dstl.pte = NULL;
}
/*
* get new dst PTP
*/
if (!pmap_valid_entry(dstpmap->pm_pdir[pdei(dstl.addr)])) {
#ifdef DIAGNOSTIC
if (dstl.addr >= VM_MIN_KERNEL_ADDRESS)
panic("pmap_transfer: missing kernel PTP at "
"0x%lx", dstl.addr);
#endif
dstl.ptp = pmap_get_ptp(dstpmap, pdei(dstl.addr), TRUE);
if (dstl.ptp == NULL) /* out of free RAM? punt. */
break;
} else {
dstl.ptp = NULL;
}
dstvalid = PTES_PER_PTP; /* new PTP */
/*
* if the dstptes are un-mapped, then we need to tmpmap in the
* dstl.ptp.
*/
if (dstptes == NULL) {
dstl.pte = (pt_entry_t *)
pmap_tmpmap_pa(dstpmap->pm_pdir[pdei(dstl.addr)]
& PG_FRAME);
}
/*
* we have a valid source block of "blkpgs" PTEs left
* to transfer. we just got a brand new dst PTP to
* receive these PTEs.
*/
#ifdef DIAGNOSTIC
if (dstvalid < blkpgs)
panic("pmap_transfer: too many blkpgs?");
#endif
toxfer = blkpgs;
ok = pmap_transfer_ptes(srcpmap, &srcl, dstpmap, &dstl, toxfer,
move);
if (!ok) /* memory shortage? punt. */
break;
dstvalid -= toxfer;
blkpgs -= toxfer;
len -= i386_ptob(toxfer);
/*
* done src pte block
*/
}
if (dstptes == NULL && dstl.pte != NULL)
pmap_tmpunmap_pa(); /* dst PTP still mapped? */
pmap_unmap_ptes(mapped_pmap);
}
/*
* pmap_transfer_ptes: transfer PTEs from one pmap to another
*
* => we assume that the needed PTPs are mapped and that we will
* not cross a block boundary.
* => we return TRUE if we transfered all PTEs, FALSE if we were
* unable to allocate a pv_entry
*/
static boolean_t
pmap_transfer_ptes(srcpmap, srcl, dstpmap, dstl, toxfer, move)
struct pmap *srcpmap, *dstpmap;
struct pmap_transfer_location *srcl, *dstl;
int toxfer;
boolean_t move;
{
pt_entry_t dstproto, opte;
int bank, off;
struct pv_head *pvh;
struct pv_entry *pve, *lpve;
/*
* generate "prototype" dst PTE
*/
if (dstl->addr < VM_MAX_ADDRESS)
dstproto = PG_u; /* "user" page */
else
dstproto = pmap_pg_g; /* kernel page */
/*
* ensure we have dst PTP for user addresses.
*/
if (dstl->ptp == NULL && dstl->addr < VM_MAXUSER_ADDRESS)
dstl->ptp = PHYS_TO_VM_PAGE(dstpmap->pm_pdir[pdei(dstl->addr)] &
PG_FRAME);
/*
* main loop over range
*/
for (/*null*/; toxfer > 0 ; toxfer--,
srcl->addr += PAGE_SIZE, dstl->addr += PAGE_SIZE,
srcl->pte++, dstl->pte++) {
if (!pmap_valid_entry(*srcl->pte)) /* skip invalid entrys */
continue;
#ifdef DIAGNOSTIC
if (pmap_valid_entry(*dstl->pte))
panic("pmap_transfer_ptes: attempt to overwrite "
"active entry");
#endif
/*
* let's not worry about non-pvlist mappings (typically device
* pager mappings).
*/
opte = *srcl->pte;
if ((opte & PG_PVLIST) == 0)
continue;
/*
* if we are moving the mapping, then we can just adjust the
* current pv_entry. if we are copying the mapping, then we
* need to allocate a new pv_entry to account for it.
*/
if (move == FALSE) {
pve = pmap_alloc_pv(dstpmap, ALLOCPV_TRY);
if (pve == NULL)
return(FALSE); /* punt! */
} else {
pve = NULL; /* XXX: quiet gcc warning */
}
/*
* find the pv_head for this mapping. since our mapping is
* on the pvlist (PG_PVLIST), there must be a pv_head.
*/
bank = vm_physseg_find(atop(opte & PG_FRAME), &off);
#ifdef DIAGNOSTIC
if (bank == -1)
panic("pmap_transfer_ptes: PG_PVLIST PTE and "
"no pv_head!");
#endif
pvh = &vm_physmem[bank].pmseg.pvhead[off];
/*
* now lock down the pvhead and find the current entry (there
* must be one).
*/
simple_lock(&pvh->pvh_lock);
for (lpve = pvh->pvh_list ; lpve ; lpve = lpve->pv_next)
if (lpve->pv_pmap == srcpmap &&
lpve->pv_va == srcl->addr)
break;
#ifdef DIAGNOSTIC
if (lpve == NULL)
panic("pmap_transfer_ptes: PG_PVLIST PTE, but "
"entry not found");
#endif
/*
* update src ptp. if the ptp is null in the pventry, then
* we are not counting valid entrys for this ptp (this is only
* true for kernel PTPs).
*/
if (srcl->ptp == NULL)
srcl->ptp = lpve->pv_ptp;
#ifdef DIAGNOSTIC
if (srcl->ptp &&
(srcpmap->pm_pdir[pdei(srcl->addr)] & PG_FRAME) !=
VM_PAGE_TO_PHYS(srcl->ptp))
panic("pmap_transfer_ptes: pm_pdir - pv_ptp mismatch!");
#endif
/*
* for move, update the pve we just found (lpve) to
* point to its new mapping. for copy, init the new
* pve and put it in the list.
*/
if (move == TRUE) {
pve = lpve;
}
pve->pv_pmap = dstpmap;
pve->pv_va = dstl->addr;
pve->pv_ptp = dstl->ptp;
if (move == FALSE) { /* link in copy */
pve->pv_next = lpve->pv_next;
lpve->pv_next = pve;
}
/*
* sync the R/M bits while we are here.
*/
vm_physmem[bank].pmseg.attrs[off] |= (opte & (PG_U|PG_M));
/*
* now actually update the ptes and unlock the pvlist.
*/
if (move) {
*srcl->pte = 0; /* zap! */
if (pmap_is_curpmap(srcpmap))
pmap_update_pg(srcl->addr);
if (srcl->ptp)
/* don't bother trying to free PTP */
srcl->ptp->wire_count--;
srcpmap->pm_stats.resident_count--;
if (opte & PG_W)
srcpmap->pm_stats.wired_count--;
}
*dstl->pte = (opte & ~(PG_u|PG_U|PG_M|PG_G|PG_W)) | dstproto;
dstpmap->pm_stats.resident_count++;
if (dstl->ptp)
dstl->ptp->wire_count++;
simple_unlock(&pvh->pvh_lock);
}
return(TRUE);
}
/*
* pmap_copy: copy mappings from one pmap to another
*
@@ -3491,21 +2795,9 @@ pmap_transfer_ptes(srcpmap, srcl, dstpmap, dstl, toxfer, move)
*/
/*
- * defined as macro call to pmap_transfer in pmap.h
+ * defined as macro call in pmap.h
*/
- /*
- * pmap_move: move mappings from one pmap to another
- *
- * => optional function
- * void pmap_move(dst_pmap, src_pmap, dst_addr, len, src_addr)
- */
- /*
- * defined as macro call to pmap_transfer in pmap.h
- */
- #endif
/*
* pmap_enter: enter a mapping into a pmap
*
@@ -3553,7 +2845,7 @@ pmap_enter(pmap, va, pa, prot, flags)
if (pmap == pmap_kernel()) {
ptp = NULL;
} else {
- ptp = pmap_get_ptp(pmap, pdei(va), FALSE);
+ ptp = pmap_get_ptp(pmap, pdei(va));
if (ptp == NULL) {
if (flags & PMAP_CANFAIL) {
return (KERN_RESOURCE_SHORTAGE);
@@ -3759,7 +3051,7 @@ pmap_growkernel(maxkvaddr)
* INVOKED WHILE pmap_init() IS RUNNING!
*/
- if (pmap_alloc_ptp(kpm, PDSLOT_KERN + nkpde, FALSE) == NULL) {
+ if (pmap_alloc_ptp(kpm, PDSLOT_KERN + nkpde) == NULL) {
panic("pmap_growkernel: alloc ptp failed");
}

@@ -1,4 +1,4 @@
- /* $NetBSD: pmap.h,v 1.52 2001/01/01 22:13:53 thorpej Exp $ */
+ /* $NetBSD: pmap.h,v 1.53 2001/01/04 00:17:43 thorpej Exp $ */
/*
*
@@ -322,20 +322,6 @@ struct pmap_remove_record {
vaddr_t prr_vas[PMAP_RR_MAX];
};
- #if 0
- /*
- * pmap_transfer_location: used to pass the current location in the
- * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
- * a pmap_copy].
- */
- struct pmap_transfer_location {
- vaddr_t addr; /* the address (page-aligned) */
- pt_entry_t *pte; /* the PTE that maps address */
- struct vm_page *ptp; /* the PTP that the PTE lives in */
- };
- #endif
/*
* global kernel variables
*/
@@ -380,8 +366,6 @@ static void pmap_protect __P((struct pmap *, vaddr_t,
vaddr_t, vm_prot_t));
void pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t pmap_test_attrs __P((struct vm_page *, int));
- void pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
- vsize_t, vaddr_t, boolean_t));
static void pmap_update_pg __P((vaddr_t));
static void pmap_update_2pg __P((vaddr_t,vaddr_t));
void pmap_write_protect __P((struct pmap *, vaddr_t,