Use pmap_remove_all() to flush the cache by context and set a flag to avoid
subsequent user space cache flushes by page or segment in pmap_remove().
pk 2003-08-12 15:13:11 +00:00
parent 46fc66f7d8
commit fe30a2e6c1
2 changed files with 40 additions and 11 deletions
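For context, the sketch below illustrates the intended calling pattern: when an
entire user address space is about to be torn down, pmap_remove_all() is called
once to flush the cache for the whole context and set PMAP_USERCACHECLEAN, so
that the pmap_remove() calls that follow can skip their per-page and per-segment
cache flushes. The caller name example_teardown and the sva/eva range are
hypothetical and only show the ordering; they are not part of this commit.

/*
 * Hypothetical caller, for illustration only (not part of this commit).
 */
static void
example_teardown(struct pmap *pm, vaddr_t sva, vaddr_t eva)
{

	/* One whole-context cache flush; also sets PMAP_USERCACHECLEAN. */
	pmap_remove_all(pm);

	/* The per-page/per-segment cache flushes below are now skipped. */
	pmap_remove(pm, sva, eva);
	pmap_update(pm);
}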

pmap.h

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.68 2003/05/10 21:10:40 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.69 2003/08/12 15:13:13 pk Exp $ */
/*
* Copyright (c) 1996
@@ -158,6 +158,8 @@ struct pmap {
	int		pm_gap_end;		/* no valid mapping until here */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	u_int		pm_flags;
#define PMAP_USERCACHECLEAN 1
};
struct regmap {
@@ -265,18 +267,16 @@ void pmap_reference __P((pmap_t));
void pmap_remove __P((pmap_t, vaddr_t, vaddr_t));
#define pmap_update(pmap) /* nothing (yet) */
void pmap_virtual_space __P((vaddr_t *, vaddr_t *));
#ifdef PMAP_GROWKERNEL
vaddr_t pmap_growkernel __P((vaddr_t));
#endif
void pmap_redzone __P((void));
void kvm_uncache __P((caddr_t, int));
struct user;
int mmu_pagein __P((struct pmap *pm, vaddr_t, int));
void pmap_writetext __P((unsigned char *, int));
void pmap_globalize_boot_cpuinfo __P((struct cpu_info *));
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}
void pmap_remove_all(struct pmap *pm);
/* SUN4/SUN4C SPECIFIC DECLARATIONS */

pmap.c

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.263 2003/07/15 00:05:08 lukem Exp $ */
/* $NetBSD: pmap.c,v 1.264 2003/08/12 15:13:11 pk Exp $ */
/*
* Copyright (c) 1996
@@ -56,7 +56,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.263 2003/07/15 00:05:08 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.264 2003/08/12 15:13:11 pk Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
@@ -4268,6 +4268,29 @@ static void pgt_lvl23_remove4m(struct pmap *pm, struct regmap *rp,
}
#endif /* SUN4M || SUN4D */
void pmap_remove_all(struct pmap *pm)
{
	if (pm->pm_ctx == NULL)
		return;
#if defined(SUN4) || defined(SUN4C)
	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		int ctx = getcontext4();
		setcontext4(pm->pm_ctxnum);
		cache_flush_context(pm->pm_ctxnum);
		setcontext4(ctx);
	}
#endif
#if defined(SUN4M) || defined(SUN4D)
	if (CPU_HAS_SRMMU) {
		cache_flush_context(pm->pm_ctxnum);
	}
#endif
	pm->pm_flags |= PMAP_USERCACHECLEAN;
}
/*
* Remove the given range of mapping entries.
* The starting and ending addresses are already rounded to pages.
@@ -4572,7 +4595,9 @@ pmap_rmu4_4c(pm, va, endva, vr, vs)
/* process has a context, must flush cache */
npg = (endva - va) >> PGSHIFT;
setcontext4(pm->pm_ctxnum);
if (npg > PMAP_SFL_THRESHOLD) {
if ((pm->pm_flags & PMAP_USERCACHECLEAN) != 0)
perpage = 0;
else if (npg > PMAP_SFL_THRESHOLD) {
perpage = 0; /* flush the whole segment */
cache_flush_segment(vr, vs, pm->pm_ctxnum);
} else
@@ -4675,7 +4700,7 @@ pmap_rmu4m(pm, va, endva, vr, vs)
/*
* Invalidate PTE in MMU pagetables. Flush cache if necessary.
*/
if (pm->pm_ctx) {
if (pm->pm_ctx && (pm->pm_flags & PMAP_USERCACHECLEAN) == 0) {
/* process has a context, must flush cache */
if (CACHEINFO.c_vactype != VAC_NONE) {
npg = (endva - va) >> PGSHIFT;
@@ -5563,6 +5588,8 @@ pmap_enu4_4c(pm, va, prot, flags, pg, pteproto)
struct regmap *rp;
struct segmap *sp;
pm->pm_flags &= ~PMAP_USERCACHECLEAN;
vr = VA_VREG(va);
vs = VA_VSEG(va);
rp = &pm->pm_regmap[vr];
@@ -6154,6 +6181,8 @@ pmap_enu4m(pm, va, prot, flags, pg, pteproto)
panic("pmap_enu4m: can't enter va 0x%lx above KERNBASE", va);
#endif
pm->pm_flags &= ~PMAP_USERCACHECLEAN;
vr = VA_VREG(va);
vs = VA_VSEG(va);
rp = &pm->pm_regmap[vr];