-Subtract vm_map_min(kernel_map) from kernel virtual addresses to get offsets
 into kernel_object where this was missing (important here because
 VM_MIN_KERNEL_ADDRESS != 0); see the sketch below.
-add some diagnostics
-eliminate some differences from other Utah-derived pmaps
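
An illustrative note, not part of the commit: pages in kernel_object are
indexed by their offset from the base of the kernel map, so a kernel virtual
address must have vm_map_min(kernel_map) subtracted before it is used for a
lookup or an allocation. On ports where VM_MIN_KERNEL_ADDRESS is 0 the raw VA
happens to equal the offset, which is how the missing subtraction went
unnoticed elsewhere. A minimal standalone sketch of the conversion, with an
assumed base address and a hypothetical helper name:

	#include <assert.h>
	#include <stdint.h>

	/*
	 * Assumed example base; the real value is vm_map_min(kernel_map),
	 * i.e. VM_MIN_KERNEL_ADDRESS, which is nonzero on this port.
	 */
	#define KMEM_BASE	0xf0000000UL

	/* Kernel VA -> offset into kernel_object (hypothetical helper). */
	static uintptr_t
	kva_to_offset(uintptr_t kva)
	{
		assert(kva >= KMEM_BASE);  /* VA must lie inside the kernel map */
		return kva - KMEM_BASE;
	}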
This commit is contained in:
drochner 2002-05-22 14:34:26 +00:00
parent 60b6f587bc
commit 7a829b7e85
1 changed file with 37 additions and 19 deletions


@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.12 2002/03/08 20:48:30 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.13 2002/05/22 14:34:26 drochner Exp $ */
 /*
  * Copyright (c) 1991, 1993
@@ -779,8 +779,10 @@ pmap_release(pmap)
 	if (pmap->pm_ptab) {
 		pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
 		    (vaddr_t)pmap->pm_ptab + HP_MAX_PTSIZE);
-		uvm_km_pgremove(uvm.kernel_object, (vaddr_t)pmap->pm_ptab,
-		    (vaddr_t)pmap->pm_ptab + HP_MAX_PTSIZE);
+		uvm_km_pgremove(uvm.kernel_object,
+		    (vaddr_t)pmap->pm_ptab - vm_map_min(kernel_map),
+		    (vaddr_t)pmap->pm_ptab + HP_MAX_PTSIZE
+		    - vm_map_min(kernel_map));
 		uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
 		    HP_MAX_PTSIZE);
 	}
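
Note (illustrative, not from the commit): uvm_km_pgremove() takes start and
end offsets into kernel_object, so both bounds of the page-table range get
the same bias. A compilable sketch of the range conversion, with an assumed
base and a hypothetical helper:

	#include <assert.h>
	#include <stdint.h>

	#define KMEM_BASE	0xf0000000UL  /* stand-in for vm_map_min(kernel_map) */

	/* Convert [kva, kva + len) to [start, end) offsets in kernel_object. */
	static void
	kva_range_to_offsets(uintptr_t kva, uintptr_t len,
	    uintptr_t *start, uintptr_t *end)
	{
		assert(kva >= KMEM_BASE);
		*start = kva - KMEM_BASE;
		*end = kva + len - KMEM_BASE;  /* same subtraction on the end bound */
	}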
@@ -1267,6 +1269,19 @@ pmap_enter(pmap, va, pa, prot, flags)
 		}
 #endif
 	}
+
+	/*
+	 * Speed pmap_is_referenced() or pmap_is_modified() based
+	 * on the hint provided in access_type.
+	 */
+#ifdef DIAGNOSTIC
+	if ((flags & VM_PROT_ALL) & ~prot)
+		panic("pmap_enter: access_type exceeds prot");
+#endif
+	if (flags & VM_PROT_WRITE)
+		*pa_to_attribute(pa) |= (PG_U|PG_M);
+	else if (flags & VM_PROT_ALL)
+		*pa_to_attribute(pa) |= PG_U;
 	splx(s);
 }
 /*
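
The block added above is one of the commit's new diagnostics plus a fast-path
hint: access_type (passed in flags) must be a subset of prot, and the
referenced/modified attribute bits are pre-seeded from the hint so that later
pmap_is_referenced()/pmap_is_modified() queries can be answered without
scanning PTEs. A standalone model of the invariant and the bit logic; the
flag values are assumed for illustration, the real ones come from the VM
headers and the port's PTE definitions:

	#include <assert.h>

	/* Assumed illustrative values, not the real header definitions. */
	#define VM_PROT_READ	0x01
	#define VM_PROT_WRITE	0x02
	#define VM_PROT_EXECUTE	0x04
	#define VM_PROT_ALL	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
	#define PG_U		0x08  /* page referenced ("used") */
	#define PG_M		0x10  /* page modified */

	/* Hypothetical helper mirroring the added attribute-hint logic. */
	static unsigned
	seed_attributes(unsigned prot, unsigned flags, unsigned attr)
	{
		/* The DIAGNOSTIC check: access_type may not exceed prot. */
		assert(((flags & VM_PROT_ALL) & ~prot) == 0);

		if (flags & VM_PROT_WRITE)
			attr |= PG_U | PG_M;  /* write implies referenced+modified */
		else if (flags & VM_PROT_ALL)
			attr |= PG_U;         /* any other access implies referenced */
		return attr;
	}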
@@ -1760,14 +1775,12 @@ ok:
  * machine dependent page at a time.
  *
  * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
- * (Actually, we go to splvm(), and since we don't
- * support multiple processors, this is sufficient.)
  */
 void
 pmap_zero_page(phys)
 	paddr_t phys;
 {
-	int s, npte;
+	int npte;
 
 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
@@ -1793,8 +1806,6 @@ pmap_zero_page(phys)
 	}
 #endif
 
-	s = splvm();
-
 	*caddr1_pte = npte;
 	TBIS((vaddr_t)CADDR1);
@@ -1804,8 +1815,6 @@ pmap_zero_page(phys)
 	*caddr1_pte = PG_NV;
 	TBIS((vaddr_t)CADDR1);
 #endif
-
-	splx(s);
 }
 /*
@@ -1816,14 +1825,12 @@ pmap_zero_page(phys)
  * dependent page at a time.
  *
  * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
- * (Actually, we go to splvm(), and since we don't
- * support multiple processors, this is sufficient.)
  */
 void
 pmap_copy_page(src, dst)
 	paddr_t src, dst;
 {
-	int s, npte1, npte2;
+	int npte1, npte2;
 
 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
@@ -1852,8 +1859,6 @@ pmap_copy_page(src, dst)
 	}
 #endif
 
-	s = splvm();
-
 	*caddr1_pte = npte1;
 	TBIS((vaddr_t)CADDR1);
@@ -1869,8 +1874,6 @@ pmap_copy_page(src, dst)
 	*caddr2_pte = PG_NV;
 	TBIS((vaddr_t)CADDR2);
 #endif
-
-	splx(s);
 }
 /*
@@ -2561,8 +2564,9 @@ pmap_enter_ptpage(pmap, va)
 		pmap->pm_sref++;
 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
 		    ("enter: about to alloc UPT pg at %lx\n", va));
-		while ((pg = uvm_pagealloc(uvm.kernel_object, va, NULL,
-		    UVM_PGA_ZERO)) == NULL) {
+		while ((pg = uvm_pagealloc(uvm.kernel_object,
+		    va - vm_map_min(kernel_map),
+		    NULL, UVM_PGA_ZERO)) == NULL) {
 			uvm_wait("ptpage");
 		}
 		pg->flags &= ~(PG_BUSY|PG_FAKE);
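
Same fix as in pmap_release(): uvm_pagealloc() hashes the new page into
kernel_object at the given offset, so the VA gets the vm_map_min(kernel_map)
bias removed. The surrounding loop is UVM's usual allocate-or-wait pattern; a
compilable model with hypothetical stand-ins for the UVM calls:

	#include <stdlib.h>

	#define PAGE_SIZE	4096
	#define KMEM_BASE	0xf0000000UL  /* assumed vm_map_min(kernel_map) */

	/* Stand-in for uvm_pagealloc(obj, off, NULL, UVM_PGA_ZERO). */
	static void *
	alloc_zeroed_page(unsigned long objoff)
	{
		return calloc(1, PAGE_SIZE);
	}

	/*
	 * Stand-in for uvm_wait(); the kernel sleeps here until the
	 * pagedaemon has freed some memory.
	 */
	static void
	wait_for_free_pages(const char *wmesg)
	{
	}

	static void *
	alloc_ptpage(unsigned long va)
	{
		void *pg;

		/* Retry until a zeroed page arrives at offset va - KMEM_BASE. */
		while ((pg = alloc_zeroed_page(va - KMEM_BASE)) == NULL)
			wait_for_free_pages("ptpage");
		return pg;
	}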
@@ -2666,7 +2670,14 @@ pmap_ptpage_addref(ptpva)
 	simple_lock(&uvm.kernel_object->vmobjlock);
 	pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+#ifdef DEBUG
+	if (!pg)
+		panic("pmap_ptpage_addref(%lx): no page\n", ptpva);
+#endif
 	pg->wire_count++;
+	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+	    ("ptpage addref: pg %p now %d\n",
+	    pg, pg->wire_count));
 	simple_unlock(&uvm.kernel_object->vmobjlock);
 }
@@ -2684,7 +2695,14 @@ pmap_ptpage_delref(ptpva)
 	simple_lock(&uvm.kernel_object->vmobjlock);
 	pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+#ifdef DEBUG
+	if (!pg)
+		panic("pmap_ptpage_delref(%lx): no page\n", ptpva);
+#endif
 	rv = --pg->wire_count;
+	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+	    ("ptpage delref: pg %p now %d\n",
+	    pg, pg->wire_count));
 	simple_unlock(&uvm.kernel_object->vmobjlock);
 	return (rv);
 }
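
The two functions above implement per-PT-page reference counting on
wire_count, with the new DEBUG panics catching a lookup miss that would
otherwise be a NULL dereference. A minimal userland model of the pattern,
with hypothetical names; the real code additionally holds kernel_object's
vmobjlock across each operation:

	#include <assert.h>

	/* Minimal model of a PT page; the real struct vm_page has far more. */
	struct ptpage {
		int wire_count;  /* number of wired references to this PT page */
	};

	static void
	ptpage_addref(struct ptpage *pg)
	{
		assert(pg != NULL);  /* models the added DEBUG panic */
		pg->wire_count++;
	}

	/* Returns the new count so the caller can free the page at zero. */
	static int
	ptpage_delref(struct ptpage *pg)
	{
		assert(pg != NULL && pg->wire_count > 0);
		return --pg->wire_count;
	}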