diff --git a/sys/arch/arm/arm32/pmap.c b/sys/arch/arm/arm32/pmap.c
index 88ce05139036..2f47c9a3d81b 100644
--- a/sys/arch/arm/arm32/pmap.c
+++ b/sys/arch/arm/arm32/pmap.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.119 2002/11/11 08:58:05 chris Exp $	*/
+/*	$NetBSD: pmap.c,v 1.120 2002/11/11 09:34:44 chris Exp $	*/
 
 /*
  * Copyright (c) 2002 Wasabi Systems, Inc.
@@ -143,7 +143,7 @@
 #include
 #include
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.119 2002/11/11 08:58:05 chris Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.120 2002/11/11 09:34:44 chris Exp $");
 
 #ifdef PMAP_DEBUG
 #define	PDEBUG(_lev_,_stat_) \
@@ -2354,6 +2354,7 @@ pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
 	paddr_t pa;
 	int pmap_active;
 	struct vm_page *pg;
+	struct pv_entry *pv_tofree = NULL;
 
 	/* Exit quick if there is no pmap */
 	if (!pmap)
@@ -2467,9 +2468,12 @@ pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
 				struct pv_entry *pve;
 				simple_lock(&pg->mdpage.pvh_slock);
 				pve = pmap_remove_pv(pg, pmap, sva);
-				pmap_free_pv(pmap, pve);
 				pmap_vac_me_harder(pmap, pg, ptes, FALSE);
 				simple_unlock(&pg->mdpage.pvh_slock);
+				if (pve != NULL) {
+					pve->pv_next = pv_tofree;
+					pv_tofree = pve;
+				}
 			}
 		} else if (pmap_active == 0)
 			PTE_FLUSH(pte);
@@ -2499,6 +2503,10 @@ pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
 		}
 	}
 
+	/* Delete pv entries */
+	if (pv_tofree != NULL)
+		pmap_free_pvs(pmap, pv_tofree);
+
 	pmap_unmap_ptes(pmap);
 
 	PMAP_MAP_TO_HEAD_UNLOCK();
@@ -2708,6 +2716,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
 	struct vm_page *pg;
 	struct pv_entry *pve;
 	int error, nflags;
+	struct vm_page *ptp = NULL;
 
 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
 	    va, pa, pmap, prot, wired));
@@ -2743,12 +2752,9 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
 	 * address, allocate one.
 	 */
 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
-	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
-		struct vm_page *ptp;
-
-		/* kernel should be pre-grown */
-		KASSERT(pmap != pmap_kernel());
-
+	/* kernel should be pre-grown */
+	if (pmap != pmap_kernel())
+	{
 		/* if failure is allowed then don't try too hard */
 		ptp = pmap_get_ptp(pmap, va & PD_FRAME);
 		if (ptp == NULL) {
@@ -2812,7 +2818,10 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
 	} else {
 		opa = 0;
 		pve = NULL;
-		pmap_pte_addref(pmap, va);
+
+		/* bump ptp ref */
+		if (ptp != NULL)
+			ptp->wire_count++;
 
 		/* pte is not valid so we must be hooking in a new page */
 		++pmap->pm_stats.resident_count;
@@ -2836,7 +2845,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
 			}
 		}
 		/* enter_pv locks pvh when adding */
-		pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
+		pmap_enter_pv(pg, pve, pmap, va, ptp, nflags);
 	} else {
 		if (pve != NULL)
 			pmap_free_pv(pmap, pve);
@@ -3597,14 +3606,15 @@ static struct vm_page *
 pmap_get_ptp(struct pmap *pmap, vaddr_t va)
 {
 	struct vm_page *ptp;
+	pd_entry_t *pde;
 
 	KASSERT((va & PD_OFFSET) == 0);	/* XXX KDASSERT */
 
-	if (pmap_pde_page(pmap_pde(pmap, va))) {
-
+	pde = pmap_pde(pmap, va);
+	if (pmap_pde_v(pde)) {
 		/* valid... check hint (saves us a PA->PG lookup) */
 		if (pmap->pm_ptphint &&
-		    (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
+		    ((*pde) & L2_S_FRAME) ==
 		    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
			return (pmap->pm_ptphint);
 		ptp = uvm_pagelookup(&pmap->pm_obj, va);
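
Illustrative sketch (not part of the patch): the pmap_remove() hunks above stop freeing each pv entry inside the removal loop, while pg->mdpage.pvh_slock is held, and instead chain the unlinked entries onto a local pv_tofree list, releasing the whole chain in one pmap_free_pvs() call once the loop is done. A minimal stand-alone version of that defer-then-batch-free pattern, using hypothetical names (struct node, defer_free, free_all) as stand-ins for struct pv_entry and pmap_free_pvs(), could look like this:

#include <stdlib.h>

/* Hypothetical stand-in for struct pv_entry and its pv_next linkage. */
struct node {
	struct node *next;
};

/*
 * Unlink-time helper: push an entry onto a caller-owned to-free list.
 * Cheap enough to run while a lock is held; no allocator calls here.
 */
static void
defer_free(struct node **tofree, struct node *n)
{
	if (n != NULL) {
		n->next = *tofree;
		*tofree = n;
	}
}

/*
 * Once the removal loop (and its per-page locking) is finished, release
 * the whole chain in one pass; this is the role pmap_free_pvs() plays
 * in the patched pmap_remove().
 */
static void
free_all(struct node *tofree)
{
	struct node *next;

	while (tofree != NULL) {
		next = tofree->next;
		free(tofree);
		tofree = next;
	}
}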