If TLB entries need to be flushed, make sure to do this after any necessary cache flushes, since on VIPT machines the cache flush may induce a TLB load.
pk 1999-05-20 10:03:12 +00:00
parent 4655ae1230
commit 1040b9e209
1 changed file with 67 additions and 45 deletions
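In short: on a virtually-indexed cache, flushing a page's lines is done by virtual address, so the cache flush itself may take a TLB miss and reload the very translation being torn down; the TLB flush therefore has to come last. A minimal sketch of that ordering rule (plain C, not part of this commit; the stub bodies and main() are hypothetical stand-ins for the pmap macros of the same names):

#include <stdio.h>

typedef unsigned long vaddr_t;

/*
 * On a VIPT cache, flushing by virtual address may itself take a TLB
 * miss and reload the translation we are about to invalidate.
 */
static void
cache_flush_page(vaddr_t va)
{
	printf("cache flush va 0x%lx (may pull va back into the TLB)\n", va);
}

static void
tlb_flush_page(vaddr_t va)
{
	printf("tlb flush   va 0x%lx\n", va);
}

/* Tearing down a mapping: cache flush first, TLB flush second. */
static void
demap_page(vaddr_t va)
{
	cache_flush_page(va);	/* may load a TLB entry ... */
	tlb_flush_page(va);	/* ... so invalidate the TLB afterwards */
}

int
main(void)
{
	demap_page(0x2000);
	return (0);
}

Every hunk below applies this same cache-before-TLB rule, whether a page is being write-protected (pmap_protect4m), removed (pmap_rmu4m), or a temporary vpage mapping is being torn down (pmap_zero_page4m, pmap_copy_page4m).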

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.144 1999/05/16 16:48:59 pk Exp $ */
+/* $NetBSD: pmap.c,v 1.145 1999/05/20 10:03:12 pk Exp $ */
/*
* Copyright (c) 1996
@@ -524,7 +524,7 @@ void (*pmap_rmu_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
* Macros which implement SRMMU TLB flushing/invalidation
*/
#define tlb_flush_page(va) \
-sta(((va) & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0)
+sta(((vaddr_t)(va) & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0)
#define tlb_flush_segment(vr, vs) \
sta(((vr)<<RGSHIFT) | ((vs)<<SGSHIFT) | ASI_SRMMUFP_L2, ASI_SRMMUFP,0)
@@ -628,6 +628,7 @@ setpgt4m(ptep, pte)
#endif
}
/*
* Set the page table entry for va to pte. Only affects software MMU page-
* tables (the in-core pagetables read by the MMU). Ignores TLB, and
@@ -1714,11 +1715,17 @@ mmu_pagein(pm, va, prot)
#ifdef DEBUG
if (pm == pmap_kernel())
printf("mmu_pagein: kernel wants map at va 0x%x, vr %d, vs %d\n", va, vr, vs);
#endif
+#if 0
+#if defined(SUN4_MMU3L)
+printf("mmu_pagein: pm=%p, va 0x%x, vr %d, vs %d, rp=%p, segmap=%p\n", pm, va, vr, vs, rp, rp->rg_segmap);
+#endif
+#endif
/* return 0 if we have no PMEGs to load */
if (rp->rg_segmap == NULL)
return (0);
#if defined(SUN4_MMU3L)
if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
smeg_t smeg;
@@ -2472,8 +2479,13 @@ pv_syncflags4m(pv0)
flags |= MR4M(tpte);
if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
-cache_flush_page(va); /* XXX: do we need this?*/
-tlb_flush_page(va); /* paranoid? */
+/* Only do this for write-back caches? */
+cache_flush_page(va);
+/*
+ * VIPT caches might use the TLB when
+ * flushing, so we flush the TLB again.
+ */
+tlb_flush_page(va);
}
/* Clear mod/ref bits from PTE and write it back */
@@ -2641,6 +2653,10 @@ pv_flushcache(pv)
if (pm->pm_ctx) {
setcontext(pm->pm_ctxnum);
cache_flush_page(pv->pv_va);
+#if defined(SUN4M)
+if (CPU_ISSUN4M)
+tlb_flush_page(pv->pv_va);
+#endif
}
pv = pv->pv_next;
if (pv == NULL)
@@ -4229,7 +4245,7 @@ pmap_rmk4m(pm, va, endva, vr, vs)
}
/* if we're done with a region, leave it wired */
}
-#endif /* sun4m */
+#endif /* SUN4M */
/*
* Just like pmap_rmk_magic, but we have a different threshold.
* Note that this may well deserve further tuning work.
@@ -4459,9 +4475,6 @@ pmap_rmu4m(pm, va, endva, vr, vs)
for (; va < endva; va += NBPG) {
int tpte;
-if (pm->pm_ctx)
-tlb_flush_page(va);
tpte = pte0[VA_SUN4M_VPG(va)];
if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
@@ -4492,6 +4505,9 @@
panic("pmap_rmu: too many PTEs in segment; "
"va 0x%lx; endva 0x%lx", va, endva);
#endif
+if (pm->pm_ctx)
+tlb_flush_page(va);
setpgt4m(&pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
}
@@ -5072,8 +5088,8 @@ pmap_protect4m(pm, sva, eva, prot)
vaddr_t sva, eva;
vm_prot_t prot;
{
-int va, nva, vr, vs;
-int s, ctx;
+vaddr_t va, nva;
+int s, ctx, vr, vs;
struct regmap *rp;
struct segmap *sp;
@@ -5085,6 +5101,13 @@ pmap_protect4m(pm, sva, eva, prot)
return;
}
+#ifdef DEBUG
+if (pmapdebug & PDB_CHANGEPROT)
+printf("pmap_protect[curpid %d, ctx %d](%lx, %lx, %x)\n",
+curproc==NULL ? -1 : curproc->p_pid,
+pm->pm_ctx ? pm->pm_ctxnum : -1, sva, eva, prot);
+#endif
write_user_windows();
ctx = getcontext4m();
s = splpmap();
@@ -5116,7 +5139,9 @@ pmap_protect4m(pm, sva, eva, prot)
if (sp->sg_pte == NULL)
panic("pmap_protect: no pages");
#endif
-/* pages loaded: take away write bits from MMU PTEs */
+/*
+ * pages loaded: take away write bits from MMU PTEs
+ */
if (pm->pm_ctx)
setcontext4m(pm->pm_ctxnum);
@@ -5124,11 +5149,6 @@ pmap_protect4m(pm, sva, eva, prot)
for (; va < nva; va += NBPG) {
int tpte;
-if (pm->pm_ctx) {
-/* Flush TLB entry */
-tlb_flush_page(va);
-}
tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
/*
* Flush cache so that any existing cache
@@ -5140,6 +5160,8 @@ pmap_protect4m(pm, sva, eva, prot)
pmap_stats.ps_npg_prot_actual++;
if (pm->pm_ctx) {
cache_flush_page(va);
+/* Flush TLB entry */
+tlb_flush_page(va);
}
setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
tpte & ~PPROT_WRITE);
@@ -5188,14 +5210,7 @@ pmap_changeprot4m(pm, va, prot, wired)
rp = &pm->pm_regmap[VA_VREG(va)];
sp = &rp->rg_segmap[VA_VSEG(va)];
-ctx = getcontext4m();
-if (pm->pm_ctx) {
-/* Flush TLB entry */
-setcontext4m(pm->pm_ctxnum);
-tlb_flush_page(va);
-}
pte = sp->sg_pte[VA_SUN4M_VPG(va)];
if ((pte & SRMMU_PROT_MASK) == newprot) {
/* only wiring changed, and we ignore wiring */
pmap_stats.ps_useless_changeprots++;
@@ -5208,18 +5223,24 @@ pmap_changeprot4m(pm, va, prot, wired)
* Flush cache if page has been referenced to
* avoid stale protection bits in the cache tags.
*/
+ctx = getcontext4m();
setcontext4m(pm->pm_ctxnum);
if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
(SRMMU_PG_C|PG_SUN4M_OBMEM))
cache_flush_page(va);
+tlb_flush_page(va);
+setcontext4m(ctx);
}
setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
(pte & ~SRMMU_PROT_MASK) | newprot);
out:
-setcontext4m(ctx);
splx(s);
}
-#endif /* 4m */
+#endif /* SUN4M */
/*
* Insert (MI) physical page pa at virtual address va in the given pmap.
@@ -5473,6 +5494,7 @@ printf("pmap_enter: segment filled during sleep\n"); /* can this happen? */
rp->rg_nsegmap = 0;
for (i = NSEGRG; --i >= 0;)
sp++->sg_pmeg = seginval;
#if defined(SUN4_MMU3L)
/*
* XXX - preallocate the region MMU cookies.
@@ -5640,7 +5662,10 @@ pmap_enter4m(pm, va, pa, prot, wired, access_type)
#ifdef DEBUG
if (pmapdebug & PDB_ENTER)
printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
printf("pmap_enter[curpid %d, ctx %d]"
"(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
curproc==NULL ? -1 : curproc->p_pid,
pm->pm_ctx==NULL ? -1 : pm->pm_ctxnum,
pm, va, pa, prot, wired);
#endif
@@ -5928,7 +5953,7 @@ printf("%s[%d]: pmap_enu: changing existing va 0x%x: pte 0x%x=>0x%x\n",
splx(s);
}
-#endif /* sun4m */
+#endif /* SUN4M */
/*
* Change the wiring attribute for a map/virtual-address pair.
@@ -6045,7 +6070,7 @@ pmap_extract4m(pm, va)
if ((rm = pm->pm_regmap) == NULL) {
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_extract: no regmap entry");
printf("pmap_extract: no regmap entry\n");
#endif
return (0);
}
@@ -6054,7 +6079,7 @@ pmap_extract4m(pm, va)
if ((sm = rm->rg_segmap) == NULL) {
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
panic("pmap_extract: no segmap");
printf("pmap_extract: no segmap\n");
#endif
return (0);
}
@@ -6063,7 +6088,7 @@ pmap_extract4m(pm, va)
if (sm->sg_pte == NULL) {
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
panic("pmap_extract: no ptes");
printf("pmap_extract: no ptes\n");
#endif
return (0);
}
@@ -6410,7 +6435,7 @@ pmap_copy_page4_4c(src, dst)
setpte4(sva, spte);
setpte4(dva, dpte);
qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
-cache_flush_page((int)sva);
+cache_flush_page((vaddr_t)sva);
setpte4(sva, 0);
setpte4(dva, 0);
}
@@ -6423,6 +6448,7 @@ pmap_copy_page4_4c(src, dst)
* We avoid stomping on the cache.
* XXX might be faster to use destination's context and allow cache to fill?
*/
+int xxxdebug = 0;
void
pmap_zero_page4m(pa)
paddr_t pa;
@@ -6439,18 +6465,15 @@ pmap_zero_page4m(pa)
if (CACHEINFO.c_vactype != VAC_NONE)
pv_flushcache(pvhead(pa));
}
-pte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
-(atop(pa) << SRMMU_PPNSHIFT));
+pte = SRMMU_TEPTE | PPROT_N_RWX | (atop(pa) << SRMMU_PPNSHIFT);
if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
pte |= SRMMU_PG_C;
else
pte &= ~SRMMU_PG_C;
va = vpage[0];
setpgt4m(vpage_pte[0], pte);
qzero(va, NBPG);
/* Remove temporary mapping */
-tlb_flush_page((int)va);
+tlb_flush_page(va);
setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
}
@@ -6474,7 +6497,8 @@ pmap_copy_page4m(src, dst)
if (CACHEINFO.c_vactype == VAC_WRITEBACK)
pv_flushcache(pvhead(src));
}
-spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_S |
+spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX |
(atop(src) << SRMMU_PPNSHIFT);
if (managed(dst)) {
@@ -6482,22 +6506,20 @@ pmap_copy_page4m(src, dst)
if (CACHEINFO.c_vactype != VAC_NONE)
pv_flushcache(pvhead(dst));
}
-dpte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
-(atop(dst) << SRMMU_PPNSHIFT));
+dpte = SRMMU_TEPTE | PPROT_N_RWX | (atop(dst) << SRMMU_PPNSHIFT);
if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
dpte |= SRMMU_PG_C;
else
dpte &= ~SRMMU_PG_C;
sva = vpage[0];
dva = vpage[1];
setpgt4m(vpage_pte[0], spte);
setpgt4m(vpage_pte[1], dpte);
qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
-cache_flush_page((int)sva);
-tlb_flush_page((int)sva);
+cache_flush_page((vaddr_t)sva);
+tlb_flush_page(sva);
setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
-tlb_flush_page((int)dva);
+tlb_flush_page(dva);
setpgt4m(vpage_pte[1], SRMMU_TEINVALID);
}
#endif /* SUN4M */
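
A closing note on the pmap_changeprot4m hunks above: the rewritten function borrows the target pmap's MMU context only for the duration of the flushes, orders the cache flush before the TLB flush, and restores the caller's context before rewriting the software page table. A minimal sketch of that pattern, under the same caveat as before (plain C; all names are hypothetical stand-ins for the sparc pmap routines they mimic, not NetBSD code):

#include <stdio.h>

typedef unsigned long vaddr_t;

static int cur_ctx;	/* hypothetical "current MMU context" register */

static int getcontext4m(void) { return (cur_ctx); }
static void setcontext4m(int c) { cur_ctx = c; }

static void
cache_flush_page(vaddr_t va)
{
	printf("cache flush 0x%lx (ctx %d)\n", va, cur_ctx);
}

static void
tlb_flush_page(vaddr_t va)
{
	printf("tlb   flush 0x%lx (ctx %d)\n", va, cur_ctx);
}

static void
setpgt4m(int *ptep, int pte)
{
	*ptep = pte;	/* software PTE is updated only after the flushes */
}

/* Change protection on one page of a pmap running in context ctxnum. */
static void
changeprot_sketch(int ctxnum, vaddr_t va, int *ptep, int newprot)
{
	int ctx = getcontext4m();	/* remember the caller's context */

	setcontext4m(ctxnum);		/* switch to the pmap's context */
	cache_flush_page(va);		/* cache first: may load a TLB entry */
	tlb_flush_page(va);		/* TLB second, after the cache */
	setcontext4m(ctx);		/* restore before touching the PTE */

	setpgt4m(ptep, newprot);
}

int
main(void)
{
	int pte = 0;

	changeprot_sketch(3, 0x2000, &pte, 1);
	return (0);
}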