Sync with mvme68k.

minoura 2001-12-06 04:13:39 +00:00
parent bf3ea26688
commit da36b95207
1 changed file with 105 additions and 256 deletions


@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.71 2001/11/25 15:42:23 minoura Exp $ */
/* $NetBSD: pmap.c,v 1.72 2001/12/06 04:13:39 minoura Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -198,8 +198,7 @@ int dokwriteback = 1; /* 68040/060: enable writeback caching of kernel AS */
#define pmap_ste(m, v) \
(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
#define pmap_ste_v(m, v) \
((*pmap_ste1(m, v) & SG_V) && \
(*pmap_ste2(m, v) & SG_V))
((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
#endif
#else
#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
@@ -224,7 +223,7 @@ int dokwriteback = 1; /* 68040/060: enable writeback caching of kernel AS */
/*
* Given a map and a machine independent protection code,
* convert to an x68k protection code.
* convert to an m68k protection code.
*/
#define pte_prot(m, p) (protection_codes[p])
int protection_codes[8];
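pte_prot() resolves a MI protection to a PTE protection with a single table lookup: the three VM_PROT_READ/WRITE/EXECUTE bits form an index 0..7 into protection_codes[]. A minimal sketch of how such a table is populated (the placement, typically pmap_bootstrap, and the exact PG_RO/PG_RW assignments are assumptions, not shown in this diff):

/* Sketch, not from this commit: any writable protection yields a
 * read/write PTE; anything merely readable or executable yields a
 * read-only PTE (the m68k MMUs have no separate execute permission). */
protection_codes[VM_PROT_NONE] = 0;
protection_codes[VM_PROT_READ] = PG_RO;
protection_codes[VM_PROT_EXECUTE] = PG_RO;
protection_codes[VM_PROT_READ | VM_PROT_EXECUTE] = PG_RO;
protection_codes[VM_PROT_WRITE] = PG_RW;
protection_codes[VM_PROT_WRITE | VM_PROT_READ] = PG_RW;
protection_codes[VM_PROT_WRITE | VM_PROT_EXECUTE] = PG_RW;
protection_codes[VM_PROT_WRITE | VM_PROT_READ | VM_PROT_EXECUTE] = PG_RW;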
@@ -271,9 +270,6 @@ char *pmap_attributes; /* reference and modify bits */
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
#ifdef M68K_MMU_HP
int pmap_aliasmask; /* separation at which VA aliasing ok */
#endif
#if defined(M68040) || defined(M68060)
int protostfree; /* prototype (default) free ST map */
#endif
@@ -316,7 +312,8 @@ int pmap_mapmulti __P((pmap_t, vaddr_t));
* Internal routines
*/
void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
boolean_t pmap_testbit __P((paddr_t, int));
void pmap_do_remove __P((pmap_t, vaddr_t, vaddr_t, int));
boolean_t pmap_testbit __P((paddr_t, int));
boolean_t pmap_changebit __P((paddr_t, int, int));
void pmap_enter_ptpage __P((pmap_t, vaddr_t));
void pmap_ptpage_addref __P((vaddr_t));
@@ -443,7 +440,7 @@ pmap_init()
s += page_cnt * sizeof(struct pv_entry); /* pv table */
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
addr = (vaddr_t) uvm_km_zalloc(kernel_map, s);
addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: can't allocate data structures");
@@ -498,7 +495,7 @@ pmap_init()
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
addr = (vaddr_t) uvm_km_zalloc(kernel_map, s);
addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: cannot allocate KPT free list");
s = ptoa(npages);
@@ -561,7 +558,10 @@ pmap_init()
* use the va's?
*/
#if defined (M68060)
if (cputype == CPU_68060) {
#if defined(M68020) || defined(M68030) || defined(M68040)
if (cputype == CPU_68060)
#endif
{
struct kpt_page *kptp = kpt_free_list;
while (kptp) {
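This #if shuffle is the commit's recurring idiom: the runtime cputype test is compiled only when another CPU type is also configured, so a 68060-only kernel runs the block unconditionally with no test at all. Reduced to its skeleton:

#if defined(M68020) || defined(M68030) || defined(M68040)
	if (cputype == CPU_68060)	/* runtime check, multi-CPU kernels only */
#endif
	{
		/* 68060-specific work; the braces bind to the if when it is
		 * compiled in and form a plain block when it is not. */
	}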
@@ -762,8 +762,7 @@ pmap_create()
("pmap_create()\n"));
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
bzero(pmap, sizeof(*pmap));
memset(pmap, 0, sizeof(*pmap));
pmap_pinit(pmap);
return (pmap);
}
@@ -811,16 +810,17 @@ void
pmap_destroy(pmap)
pmap_t pmap;
{
int count;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
if (pmap->pm_count == 1) {
simple_lock(&pmap->pm_lock);
count = --pmap->pm_count;
simple_unlock(&pmap->pm_lock);
if (count == 0) {
pmap_release(pmap);
pool_put(&pmap_pmap_pool, pmap);
} else {
simple_lock(&pmap->pm_lock);
pmap->pm_count--;
simple_unlock(&pmap->pm_lock);
}
}
}
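The rewrite closes a race in the old ordering, which tested pm_count before taking the lock. A sketch of the hazard, and of why the decrement now happens under pm_lock with the result latched in a local:

/* Two contexts destroying the same pmap, pm_count == 2, old code:
 *
 *	A: if (pm_count == 1) ...	B: if (pm_count == 1) ...
 *
 * Both see 2, both take the decrement-only branch, and the pmap is
 * never released.  Decrementing under pm_lock and testing the saved
 * count guarantees exactly one caller observes 0 and frees the pmap,
 * without touching *pmap after the lock is dropped. */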
/*
@@ -837,7 +837,9 @@ pmap_release(pmap)
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
#ifdef DIAGNOSTIC
#ifdef notdef /* DIAGNOSTIC */
/* count would be 0 from pmap_destroy... */
simple_lock(&pmap->pm_lock);
if (pmap->pm_count != 1)
panic("pmap_release count");
#endif
@@ -920,17 +922,23 @@ void
pmap_remove(pmap, sva, eva)
pmap_t pmap;
vaddr_t sva, eva;
{
pmap_do_remove(pmap, sva, eva, 1);
}
void
pmap_do_remove(pmap, sva, eva, remove_wired)
pmap_t pmap;
vaddr_t sva, eva;
int remove_wired;
{
vaddr_t nssva;
pt_entry_t *pte;
boolean_t firstpage, needcflush;
int flags;
PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
firstpage = TRUE;
needcflush = FALSE;
flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
while (sva < eva) {
nssva = x68k_trunc_seg(sva) + X68K_SEG_SIZE;
@@ -939,6 +947,7 @@ pmap_remove(pmap, sva, eva)
/*
* Invalidate every valid mapping within this segment.
* If remove_wired is zero, skip the wired pages.
*/
pte = pmap_pte(pmap, sva);
@@ -953,58 +962,15 @@ pmap_remove(pmap, sva, eva)
sva = nssva;
break;
}
if (pmap_pte_v(pte)) {
#ifdef M68K_MMU_HP
if (pmap_aliasmask) {
/*
* Purge kernel side of VAC to ensure
* we get the correct state of any
* hardware maintained bits.
*/
if (firstpage) {
DCIS();
}
/*
* Remember if we may need to
* flush the VAC due to a non-CI
* mapping.
*/
if (!needcflush && !pmap_pte_ci(pte))
needcflush = TRUE;
}
#endif
if (pmap_pte_v(pte) &&
(remove_wired || !pmap_pte_w(pte))) {
pmap_remove_mapping(pmap, sva, pte, flags);
firstpage = FALSE;
}
pte++;
sva += NBPG;
}
}
/*
* Didn't do anything, no need for cache flushes
*/
if (firstpage)
return;
#ifdef M68K_MMU_HP
/*
* In a couple of cases, we don't need to worry about flushing
* the VAC:
* 1. if this is a kernel mapping,
* we have already done it
* 2. if it is a user mapping not for the current process,
* it won't be there
*/
if (pmap_aliasmask && !active_user_pmap(pmap))
needcflush = FALSE;
if (needcflush) {
if (pmap == pmap_kernel()) {
DCIS();
} else {
DCIU();
}
}
#endif
}
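pmap_remove() keeps its historical meaning, removing everything in the range, while the new fourth argument lets a caller preserve wired mappings. Usage, as this same commit does further down:

/* Ordinary removal: drop every mapping, wired or not. */
pmap_do_remove(pmap, sva, eva, 1);

/* pmap_collect()-style reclaim: leave wired mappings intact. */
pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS, 0);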
/*
@@ -1025,8 +991,7 @@ pmap_page_protect(pg, prot)
#ifdef DEBUG
if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
(prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
printf("pmap_page_protect(%p, %x)\n", pg, prot);
printf("pmap_page_protect(%lx, %x)\n", pa, prot);
#endif
switch (prot) {
@@ -1080,7 +1045,7 @@ pmap_page_protect(pg, prot)
void
pmap_protect(pmap, sva, eva, prot)
pmap_t pmap;
vaddr_t sva, eva;
vaddr_t sva, eva;
vm_prot_t prot;
{
vaddr_t nssva;
@@ -1092,6 +1057,9 @@ pmap_protect(pmap, sva, eva, prot)
("pmap_protect(%p, %lx, %lx, %x)\n",
pmap, sva, eva, prot));
#ifdef PMAPSTATS
protect_stats.calls++;
#endif
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
@@ -1118,18 +1086,6 @@ pmap_protect(pmap, sva, eva, prot)
pte = pmap_pte(pmap, sva);
while (sva < nssva) {
if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
#ifdef M68K_MMU_HP
/*
* Purge kernel side of VAC to ensure we
* get the correct state of any hardware
* maintained bits.
*
* XXX do we need to clear the VAC in
* general to reflect the new protection?
*/
if (firstpage && pmap_aliasmask)
DCIS();
#endif
#if defined(M68040) || defined(M68060)
/*
* Clear caches if making RO (see section
@@ -1276,6 +1232,7 @@ pmap_enter(pmap, va, pa, prot, flags)
pv = pa_to_pvh(pa);
s = splvm();
PMAP_DPRINTF(PDB_ENTER,
("enter: pv at %p: %lx/%p/%p\n",
pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
@@ -1308,49 +1265,6 @@ pmap_enter(pmap, va, pa, prot, flags)
npv->pv_ptpmap = NULL;
npv->pv_flags = 0;
pv->pv_next = npv;
#ifdef M68K_MMU_HP
/*
* Since there is another logical mapping for the
* same page we may need to cache-inhibit the
* descriptors on those CPUs with external VACs.
* We don't need to CI if:
*
* - No two mappings belong to the same user pmaps.
* Since the cache is flushed on context switches
* there is no problem between user processes.
*
* - Mappings within a single pmap are a certain
* magic distance apart. VAs at these appropriate
* boundaries map to the same cache entries or
* otherwise don't conflict.
*
* To keep it simple, we only check for these special
* cases if there are only two mappings, otherwise we
* punt and always CI.
*
* Note that there are no aliasing problems with the
* on-chip data-cache when the WA bit is set.
*/
if (pmap_aliasmask) {
if (pv->pv_flags & PV_CI) {
PMAP_DPRINTF(PDB_CACHE,
("enter: pa %lx already CI'ed\n",
pa));
checkpv = cacheable = FALSE;
} else if (npv->pv_next ||
((pmap == pv->pv_pmap ||
pmap == pmap_kernel() ||
pv->pv_pmap == pmap_kernel()) &&
((pv->pv_va & pmap_aliasmask) !=
(va & pmap_aliasmask)))) {
PMAP_DPRINTF(PDB_CACHE,
("enter: pa %lx CI'ing all\n",
pa));
cacheable = FALSE;
pv->pv_flags |= PV_CI;
}
}
#endif
}
/*
@@ -1384,14 +1298,6 @@ pmap_enter(pmap, va, pa, prot, flags)
pmap->pm_stats.wired_count++;
validate:
#ifdef M68K_MMU_HP
/*
* Purge kernel side of VAC to ensure we get correct state
* of HW bits so we don't clobber them.
*/
if (pmap_aliasmask)
DCIS();
#endif
/*
* Build the new PTE.
*/
@@ -1429,10 +1335,11 @@ validate:
wired = ((*pte ^ npte) == PG_W);
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
if (mmutype == MMU_68040 && !wired) {
if (mmutype == MMU_68040 && !wired)
#else
if (!wired) {
if (!wired)
#endif
{
DCFP(pa);
ICPP(pa);
}
@@ -1440,36 +1347,14 @@ validate:
*pte = npte;
if (!wired && active_pmap(pmap))
TBIS(va);
#ifdef M68K_MMU_HP
/*
* The following is executed if we are entering a second
* (or greater) mapping for a physical page and the mappings
* may create an aliasing problem. In this case we must
* cache inhibit the descriptors involved and flush any
* external VAC.
*/
if (checkpv && !cacheable) {
pmap_changebit(pa, PG_CI, ~0);
DCIA();
#ifdef DEBUG
if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
(PDB_CACHE|PDB_PVDUMP))
pmap_pvdump(pa);
#endif
}
#endif
#ifdef DEBUG
if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
#endif
return 0;
}
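The wired test above is terse: (*pte ^ npte) == PG_W is true exactly when the old and new PTEs differ in nothing but the wired bit, in which case the 68040/060 cache push can be skipped. Spelled out:

/* XOR exposes the differing bits; if PG_W is the only one, the frame,
 * protection and cache mode are unchanged and the caches hold nothing
 * stale.  Otherwise push the page out of both on-chip caches. */
wired = ((*pte ^ npte) == PG_W);
if (!wired) {
	DCFP(pa);	/* push and invalidate the data cache page */
	ICPP(pa);	/* invalidate the instruction cache page */
}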
/*
* pmap_k* parts are original to x68k.
* We do not support VAC machines, i.e., M68K_MMU_HP. (mi)
*/
/*
* pmap_kenter_pa: [ INTERFACE ]
*
@@ -1532,6 +1417,7 @@ pmap_kenter_pa(va, pa, prot)
#endif
*pte = npte;
TBIS(va);
}
/*
@@ -1654,7 +1540,7 @@ pmap_extract(pmap, va, pap)
PMAP_DPRINTF(PDB_FOLLOW,
("pmap_extract(%p, %lx) -> ", pmap, va));
if (pmap && pmap_ste_v(pmap, va)) {
if (pmap_ste_v(pmap, va)) {
pte = *(u_int *)pmap_pte(pmap, va);
if (pte) {
pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
@@ -1734,8 +1620,8 @@ pmap_collect(pmap)
* entire address space. Note: pmap_remove() performs
* all necessary locking.
*/
pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
pmap_update(pmap_kernel());
pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS, 0);
pmap_update(pmap);
}
#ifdef notyet
@@ -1856,29 +1742,22 @@ ok:
* pmap_zero_page: [ INTERFACE ]
*
* Zero the specified (machine independent) page by mapping the page
* into virtual memory and using bzero to clear its contents, one
* into virtual memory and using memset to clear its contents, one
* machine dependent page at a time.
*
* Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
* (Actually, we go to splvm(), and since we don't
* support multiple processors, this is sufficient.)
*/
void
pmap_zero_page(phys)
paddr_t phys;
{
int npte;
int s, npte;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
npte = phys | PG_V;
#ifdef M68K_MMU_HP
if (pmap_aliasmask) {
/*
* Cache-inhibit the mapping on VAC machines, as we would
* be wasting the cache load.
*/
npte |= PG_CI;
}
#endif
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
@@ -1894,6 +1773,8 @@ pmap_zero_page(phys)
}
#endif
s = splvm();
*caddr1_pte = npte;
TBIS((vaddr_t)CADDR1);
@@ -1903,6 +1784,8 @@ pmap_zero_page(phys)
*caddr1_pte = PG_NV;
TBIS((vaddr_t)CADDR1);
#endif
splx(s);
}
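The zeroing works through a temporary window: a PTE for the target page is installed at the reserved kernel VA CADDR1, the page is cleared through that VA, and the mapping is torn down; the new splvm()/splx() bracket keeps anything else from reusing CADDR1 in between. Condensed from the surrounding code (memset stands in for the file's page-zeroing primitive, and the 68040 cache-mode handling is omitted):

s = splvm();			/* one shared window, so block reentry */
*caddr1_pte = phys | PG_V;	/* map the physical page at CADDR1 */
TBIS((vaddr_t)CADDR1);		/* shoot down any stale TLB entry */
memset(CADDR1, 0, NBPG);	/* zero one MD page through the window */
*caddr1_pte = PG_NV;		/* unmap */
TBIS((vaddr_t)CADDR1);
splx(s);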
/*
@@ -1916,18 +1799,21 @@ boolean_t
pmap_zero_page_uncached(phys)
paddr_t phys;
{
int npte;
int s, npte;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page_uncached(%lx)\n", phys));
#if defined(M68040) || defined(M68060)
if (mmutype == MMU_68040) {
DCPP(phys);
}
#if defined(M68020) || defined(M68030)
if (mmutype == MMU_68040)
#endif
DCPP_40(phys);
#endif
npte = phys | PG_V | PG_CI;
s = splvm();
*caddr1_pte = npte;
TBIS((vaddr_t)CADDR1);
@@ -1937,6 +1823,9 @@ pmap_zero_page_uncached(phys)
*caddr1_pte = PG_NV;
TBIS((vaddr_t)CADDR1);
#endif
splx(s);
return (TRUE);
}
@@ -1948,27 +1837,19 @@ pmap_zero_page_uncached(phys)
* dependent page at a time.
*
* Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
* (Actually, we go to splvm(), and since we don't
* support multiple processors, this is sufficient.)
*/
void
pmap_copy_page(src, dst)
paddr_t src, dst;
{
int npte1, npte2;
int s, npte1, npte2;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
npte1 = src | PG_RO | PG_V;
npte2 = dst | PG_V;
#ifdef M68K_MMU_HP
if (pmap_aliasmask) {
/*
* Cache-inhibit the mapping on VAC machines, as we would
* be wasting the cache load.
*/
npte1 |= PG_CI;
npte2 |= PG_CI;
}
#endif
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
@@ -1985,6 +1866,8 @@ pmap_copy_page(src, dst)
}
#endif
s = splvm();
*caddr1_pte = npte1;
TBIS((vaddr_t)CADDR1);
@@ -2000,6 +1883,8 @@ pmap_copy_page(src, dst)
*caddr2_pte = PG_NV;
TBIS((vaddr_t)CADDR2);
#endif
splx(s);
}
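pmap_copy_page is the two-window variant: the source mapped read-only at CADDR1, the destination writable at CADDR2, both inside the new splvm() bracket. A condensed sketch (memcpy stands in for the file's page-copy primitive):

s = splvm();
*caddr1_pte = src | PG_RO | PG_V;	/* read-only window on the source */
TBIS((vaddr_t)CADDR1);
*caddr2_pte = dst | PG_V;		/* writable window on the destination */
TBIS((vaddr_t)CADDR2);
memcpy(CADDR2, CADDR1, NBPG);		/* copy one MD page */
*caddr1_pte = PG_NV;			/* tear both windows down */
TBIS((vaddr_t)CADDR1);
*caddr2_pte = PG_NV;
TBIS((vaddr_t)CADDR2);
splx(s);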
/*
@@ -2007,7 +1892,7 @@ pmap_copy_page(src, dst)
*
* Clear the modify bits on the specified physical page.
*/
int
boolean_t
pmap_clear_modify(pg)
struct vm_page *pg;
{
@@ -2023,7 +1908,7 @@ pmap_clear_modify(pg)
*
* Clear the reference bit on the specified physical page.
*/
int
boolean_t
pmap_clear_reference(pg)
struct vm_page *pg;
{
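Both functions now return boolean_t, reporting whether the bit was set before it was cleared, which is what the UVM interface expects of a pmap. A hypothetical caller (the uvm_pageactivate/uvm_pagedeactivate policy here is illustrative, not part of this commit):

/* Illustrative only: react to whether the page was referenced
 * since the last scan. */
if (pmap_clear_reference(pg))
	uvm_pageactivate(pg);	/* recently used: keep it active */
else
	uvm_pagedeactivate(pg);	/* idle: candidate for reclaim */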
@@ -2180,23 +2065,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
if (*pte == PG_NV)
return;
}
#ifdef M68K_MMU_HP
if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
/*
* Purge kernel side of VAC to ensure we get the correct
* state of any hardware maintained bits.
*/
DCIS();
/*
* If this is a non-CI user mapping for the current process,
* flush the VAC. Note that the kernel side was flushed
* above so we don't worry about non-CI kernel mappings.
*/
if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
DCIU();
}
}
#endif
pa = pmap_pte_pa(pte);
#ifdef DEBUG
opte = *pte;
@@ -2208,6 +2077,16 @@ pmap_remove_mapping(pmap, va, pte, flags)
pmap->pm_stats.wired_count--;
pmap->pm_stats.resident_count--;
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
if (mmutype == MMU_68040)
#endif
if ((flags & PRM_CFLUSH)) {
DCFP(pa);
ICPP(pa);
}
#endif
/*
* Invalidate the PTE after saving the reference modify info.
*/
@@ -2303,23 +2182,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
pmap_free_pv(npv);
pv = pa_to_pvh(pa);
}
#ifdef M68K_MMU_HP
/*
* If only one mapping left we no longer need to cache inhibit
*/
if (pmap_aliasmask &&
pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
PMAP_DPRINTF(PDB_CACHE,
("remove: clearing CI for pa %lx\n", pa));
pv->pv_flags &= ~PV_CI;
pmap_changebit(pa, 0, ~PG_CI);
#ifdef DEBUG
if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
(PDB_CACHE|PDB_PVDUMP))
pmap_pvdump(pa);
#endif
}
#endif
/*
* If this was a PT page we must also remove the
* mapping from the associated segment table.
@@ -2409,7 +2271,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
pv->pv_flags &= ~PV_PTPAGE;
ptpmap->pm_ptpages--;
}
/*
* Update saved attributes for managed page
*/
@@ -2443,21 +2304,11 @@ pmap_testbit(pa, bit)
splx(s);
return(TRUE);
}
#ifdef M68K_MMU_HP
/*
* Flush VAC to get correct state of any hardware maintained bits.
*/
if (pmap_aliasmask && (bit & (PG_U|PG_M)))
DCIS();
#endif
/*
* Not found. Check current mappings, returning immediately if
* found. Cache a hit to speed future lookups.
*/
if (pv->pv_pmap != NULL) {
for (; pv; pv = pv->pv_next) {
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
@@ -2489,7 +2340,7 @@ pmap_changebit(pa, set, mask)
vaddr_t va;
char *attrp;
int s;
#if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060)
#if defined(M68040) || defined(M68060)
boolean_t firstpage = TRUE;
#endif
boolean_t r;
@@ -2512,7 +2363,6 @@ pmap_changebit(pa, set, mask)
* Loop over all current mappings, setting/clearing as apropos.
* If setting RO, do we need to clear the VAC?
*/
if (pv->pv_pmap != NULL) {
#ifdef DEBUG
int toflush = 0;
@@ -2523,16 +2373,6 @@ pmap_changebit(pa, set, mask)
#endif
va = pv->pv_va;
pte = pmap_pte(pv->pv_pmap, va);
#ifdef M68K_MMU_HP
/*
* Flush VAC to ensure we get correct state of HW bits
* so we don't clobber them.
*/
if (firstpage && pmap_aliasmask) {
firstpage = FALSE;
DCIS();
}
#endif
npte = (*pte | set) & mask;
if (*pte != npte) {
r = TRUE;
@@ -2605,7 +2445,9 @@ pmap_enter_ptpage(pmap, va)
if (dowriteback && dokwriteback)
#endif
/* Assume segment table size <= 4096 */
pmap_changebit((paddr_t)pmap->pm_stpa, PG_CI, ~PG_CCB);
if (pmap_changebit((paddr_t)pmap->pm_stpa,
PG_CI, ~PG_CCB))
DCIS();
pmap->pm_stfree = protostfree;
}
#endif
@@ -2639,7 +2481,7 @@ pmap_enter_ptpage(pmap, va)
panic("enter: out of address space"); /* XXX */
pmap->pm_stfree &= ~l2tobm(ix);
addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
*ste = (u_int)addr | SG_RW | SG_U | SG_V;
#if 0 /* XXX should be superfluous here: defined(M68060) */
@@ -2693,7 +2535,7 @@ pmap_enter_ptpage(pmap, va)
kpt->kpt_next = kpt_used_list;
kpt_used_list = kpt;
ptpa = kpt->kpt_pa;
bzero((caddr_t)kpt->kpt_va, NBPG);
memset((caddr_t)kpt->kpt_va, 0, NBPG);
pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
pmap_update(pmap);
@@ -2753,7 +2595,8 @@ pmap_enter_ptpage(pmap, va)
pmap == pmap_kernel() ? "Kernel" : "User",
va, ptpa, pte, *pte);
#endif
pmap_changebit(ptpa, PG_CI, ~PG_CCB);
if (pmap_changebit(ptpa, PG_CI, ~PG_CCB))
DCIS();
}
#endif
/*
@@ -2789,22 +2632,29 @@ pmap_enter_ptpage(pmap, va)
* release them. We also avoid the overhead of vm_map_pageable.
*/
#if defined(M68040) || defined(M68060)
if (mmutype == MMU_68040) {
#if defined(M68020) || defined(M68030)
if (mmutype == MMU_68040)
#endif
{
st_entry_t *este;
for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
*ste = ptpa | SG_U | SG_RW | SG_V;
ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
}
} else
}
#if defined(M68020) || defined(M68030)
else
*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
#endif
#else
*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
#endif
if (pmap != pmap_kernel()) {
PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
("enter: stab %p refcnt %d\n",
pmap->pm_stab, pmap->pm_sref));
}
#if 0
/*
* Flush stale TLB info.
*/
@@ -2812,7 +2662,6 @@ pmap_enter_ptpage(pmap, va)
TBIAS();
else
TBIAU();
#endif
pmap->pm_ptpages++;
splx(s);
}
@@ -2874,7 +2723,6 @@ pmap_pvdump(pa)
printf("\n");
}
/* static */
/*
* pmap_check_wiring:
*
@@ -2882,6 +2730,7 @@ pmap_pvdump(pa)
* and ensure that it is consistent with the number of wirings
* to that page that the VM system has.
*/
/* static */
void
pmap_check_wiring(str, va)
char *str;