Re-arrange code to flush physically indexed caches:

* replace the `flush by line' function with a `flush by page' function,
  which also takes an argument to indicate that write-back caches need
  not validate their backing memory (a condensed sketch of the new
  interface follows below).

* use this function when allocating page table memory to flush the cache
  before mapping it into kernel virtual space.

* also use it in pmap_{zero,copy}_page(), so we can safely use non-cacheable
  access in there.
pk 2000-06-05 20:38:24 +00:00
parent 95c4357e1b
commit 862a955e34
5 changed files with 207 additions and 137 deletions
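
For reference, the new per-page hook and its call path, condensed from the
diffs below (all names are taken from the patch itself; this is a sketch,
not the complete code):

    /* cache.h: flush/invalidate all cache lines caching physical page `pa' */
    void viking_pcache_flush_page __P((paddr_t, int));
    #define pcache_flush_page(pa,flag) cpuinfo.pcache_flush_page(pa,flag)

    /* caller, modeled on the new pgt_page_alloc(): prepare a freshly
       allocated page-table page for non-cacheable use */
    pa = VM_PAGE_TO_PHYS(pg);
    if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
        pcache_flush_page(pa, 1);   /* `1': invalidate only, no write-back */
        pa |= PMAP_NC;              /* and map it non-cacheable */
    }
    pmap_enter(pmap_kernel(), va, pa,
        VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);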


@ -1,4 +1,4 @@
/* $NetBSD: cache.c,v 1.50 2000/05/22 22:03:32 pk Exp $ */
/* $NetBSD: cache.c,v 1.51 2000/06/05 20:38:24 pk Exp $ */
/*
* Copyright (c) 1996
@ -82,7 +82,7 @@ int cache_alias_bits;
void
sun4_cache_enable()
{
register u_int i, lim, ls, ts;
u_int i, lim, ls, ts;
cache_alias_bits = CPU_ISSUN4
? CACHE_ALIAS_BITS_SUN4
@ -335,8 +335,8 @@ turbosparc_cache_enable()
void
sun4_vcache_flush_context()
{
register char *p;
register int i, ls;
char *p;
int i, ls;
cachestats.cs_ncxflush++;
p = (char *)0; /* addresses 0..cacheinfo.c_totalsize will do fine */
@ -365,10 +365,10 @@ sun4_vcache_flush_context()
*/
void
sun4_vcache_flush_region(vreg)
register int vreg;
int vreg;
{
register int i, ls;
register char *p;
int i, ls;
char *p;
cachestats.cs_nrgflush++;
p = (char *)VRTOVA(vreg); /* reg..reg+sz rather than 0..sz */
@ -389,10 +389,10 @@ sun4_vcache_flush_region(vreg)
*/
void
sun4_vcache_flush_segment(vreg, vseg)
register int vreg, vseg;
int vreg, vseg;
{
register int i, ls;
register char *p;
int i, ls;
char *p;
cachestats.cs_nsgflush++;
p = (char *)VSTOVA(vreg, vseg); /* seg..seg+sz rather than 0..sz */
@ -418,8 +418,8 @@ void
sun4_vcache_flush_page(va)
int va;
{
register int i, ls;
register char *p;
int i, ls;
char *p;
#ifdef DEBUG
if (va & PGOFSET)
@ -451,10 +451,10 @@ sun4_vcache_flush_page(va)
void
sun4_cache_flush(base, len)
caddr_t base;
register u_int len;
u_int len;
{
register int i, ls, baseoff;
register char *p;
int i, ls, baseoff;
char *p;
if (CACHEINFO.c_vactype == VAC_NONE)
return;
@ -528,8 +528,8 @@ sun4_cache_flush(base, len)
void
srmmu_vcache_flush_context()
{
register char *p;
register int i, ls;
char *p;
int i, ls;
cachestats.cs_ncxflush++;
p = (char *)0; /* addresses 0..cacheinfo.c_totalsize will do fine */
@ -548,10 +548,10 @@ srmmu_vcache_flush_context()
*/
void
srmmu_vcache_flush_region(vreg)
register int vreg;
int vreg;
{
register int i, ls;
register char *p;
int i, ls;
char *p;
cachestats.cs_nrgflush++;
p = (char *)VRTOVA(vreg); /* reg..reg+sz rather than 0..sz */
@ -572,10 +572,10 @@ srmmu_vcache_flush_region(vreg)
*/
void
srmmu_vcache_flush_segment(vreg, vseg)
register int vreg, vseg;
int vreg, vseg;
{
register int i, ls;
register char *p;
int i, ls;
char *p;
cachestats.cs_nsgflush++;
p = (char *)VSTOVA(vreg, vseg); /* seg..seg+sz rather than 0..sz */
@ -594,8 +594,8 @@ void
srmmu_vcache_flush_page(va)
int va;
{
register int i, ls;
register char *p;
int i, ls;
char *p;
#ifdef DEBUG
if (va & PGOFSET)
@ -726,6 +726,17 @@ ms1_cache_flush(base, len)
*
* Note: we don't bother to compare the actual tags
* since that would require looking up physical addresses.
*
* The format of the tags we read from ASI_DCACHE control
* space is:
*
* 31     27 26            11 10         1 0
* +--------+----------------+------------+-+
* |  xxx   |   PA[26-11]    |    xxx     |V|
* +--------+----------------+------------+-+
*
* PA: bits 11-26 of the physical address
* V: line valid bit
*/
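/*
 * For illustration: a line caching physical address 0x03fe1800 carries
 * PA[26-11] = 0x7fc3 in tag bits 26-11, so with the valid bit set the
 * tag word reads (0x7fc3 << 11) | 1 = 0x03fe1801.
 */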
int tagaddr = ((u_int)base & 0x7f0);
@ -786,7 +797,7 @@ cypress_cache_flush_all()
void
viking_cache_flush(base, len)
caddr_t base;
register u_int len;
u_int len;
{
/*
* Although physically tagged, we still need to flush the
@ -797,73 +808,102 @@ viking_cache_flush(base, len)
}
void
viking_pcache_flush_line(va, pa)
int va;
int pa;
viking_pcache_flush_page(pa, invalidate_only)
paddr_t pa;
int invalidate_only;
{
/*
* Flush cache line corresponding to virtual address `va'
* which is mapped at physical address `pa'.
*/
extern char etext[];
static char *base;
int i;
char *v;
int set, i;
/*
* Construct a virtual address that hits the same cache line
* as PA, then read from 2*ASSOCIATIVITY-1 different physical
* locations (all different from PA).
* The viking's on-chip data cache is 4-way set associative,
* consisting of 128 sets, each holding 4 lines of 32 bytes.
* Note that one 4096 byte page exactly covers all 128 sets
* in the cache.
*/
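/*
 * That is: 128 sets * 32 bytes/line = 4096 bytes, so stepping through
 * one page in line-sized steps touches every set exactly once.
 */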
if (invalidate_only) {
u_int pa_tag = (pa >> 12);
u_int tagaddr;
u_int64_t tag;
#if 0
if (base == 0) {
cshift = CACHEINFO.ic_l2linesize;
csize = CACHEINFO.ic_nlines << cshift;
cmask = csize - 1;
base = (char *)roundup((int)etext, csize);
/*
* Loop over all sets and invalidate all entries tagged
* with the given physical address by resetting the cache
* tag in ASI_DCACHETAG control space.
*
* The address format for accessing a tag is:
*
*  31  30       27  26                   11     5 4  3 2   0
* +------+-----+------+-------//--------+--------+----+-----+
* | type | xxx | line |       xxx       |  set   | xx |  0  |
* +------+-----+------+-------//--------+--------+----+-----+
*
* set: the cache set tag to be read (0-127)
* line: the line within the set (0-3)
* type: 1: read set tag; 2: read physical tag
*
* The (type 2) tag read from this address is a 64-bit word
* formatted as follows:
*
*          5         4         4
* 63       6         8         0             23             0
* +-------+-+-------+-+-------+-+-----------+----------------+
* |  xxx  |V|  xxx  |D|  xxx  |S|    xxx    |   PA[35-12]    |
* +-------+-+-------+-+-------+-+-----------+----------------+
*
* PA: bits 12-35 of the physical address
* S: line shared bit
* D: line dirty bit
* V: line valid bit
*/
#define VIKING_DCACHETAG_S 0x0000010000000000ULL /* line shared bit */
#define VIKING_DCACHETAG_D 0x0001000000000000ULL /* line dirty bit */
#define VIKING_DCACHETAG_V 0x0100000000000000ULL /* line valid bit */
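/*
 * For example, reading the physical (type 2) tag of line 2 in set 5
 * uses the tag address (2 << 30) | (2 << 26) | (5 << 5) = 0x880000a0.
 */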
for (set = 0; set < 128; set++) {
/* Set set number and access type */
tagaddr = (set << 5) | (2 << 30);
/* Examine the tag for each line in the set */
for (i = 0 ; i < 4; i++) {
tag = ldda(tagaddr | (i << 26), ASI_DCACHETAG);
/*
* If this is a valid tag and the PA field
* matches clear the tag.
*/
if ((tag & 0x000fffff) == pa_tag &&
(tag & VIKING_DCACHETAG_V) != 0)
stda(tagaddr | (i << 26),
ASI_DCACHETAG, 0);
}
}
} else {
extern char kernel_text[];
/*
* Force the cache to validate its backing memory
* by displacing all cache lines with known read-only
* content from the start of kernel text.
*
* Note that this thrashes the entire cache. However,
* we currently only need to call upon this code
* once at boot time.
*/
for (set = 0; set < 128; set++) {
int *v = (int *)(kernel_text + (set << 5));
/*
* We need to read (2*associativity-1) different
* locations to be sure to displace the entire set.
*/
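/*
 * The locations read below are one page apart, so they all index the
 * same cache set (the set index comes from address bits 11-5) while
 * presenting different physical tags.
 */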
i = 2 * 4 - 1;
while (i--) {
(*(volatile int *)v);
v += 4096;
}
}
}
v = base + (((va & cmask) >> cshift) << cshift);
i = CACHEINFO.dc_associativity * 2 - 1;
while (i--) {
(*(volatile int *)v);
v += csize;
}
#else
#define cshift 5 /* CACHEINFO.ic_l2linesize */
#define csize (128 << cshift) /* CACHEINFO.ic_nlines << cshift */
#define cmask (csize - 1)
#define cass 4 /* CACHEINFO.dc_associativity */
if (base == 0)
base = (char *)roundup((unsigned int)etext, csize);
v = base + (((pa & cmask) >> cshift) << cshift);
i = 2 * cass - 1;
while (i--) {
(*(volatile int *)v);
v += csize;
}
#undef cass
#undef cmask
#undef csize
#undef cshift
#endif
}
void
srmmu_pcache_flush_line(va, pa)
int va;
int pa;
{
/*
* Flush cache line corresponding to virtual address `va'
* which is mapped at physical address `pa'.
*/
sta(va, ASI_IDCACHELFP, 0);
}
#endif /* SUN4M */


@ -1,4 +1,4 @@
/* $NetBSD: cache.h,v 1.20 1998/10/09 10:48:14 pk Exp $ */
/* $NetBSD: cache.h,v 1.21 2000/06/05 20:38:24 pk Exp $ */
/*
* Copyright (c) 1996
@ -171,7 +171,7 @@ void srmmu_cache_flush __P((caddr_t, u_int));/* flush region */
void ms1_cache_flush __P((caddr_t, u_int));
void viking_cache_flush __P((caddr_t, u_int));
void viking_pcache_flush_line __P((int, int));
void viking_pcache_flush_page __P((paddr_t, int));
void srmmu_pcache_flush_line __P((int, int));
void hypersparc_pure_vcache_flush __P((void));
@ -192,7 +192,7 @@ extern void sparc_noop __P((void));
(void (*)__P((int))) sparc_noop
#define noop_cache_flush \
(void (*)__P((caddr_t, u_int))) sparc_noop
#define noop_pcache_flush_line \
#define noop_pcache_flush_page \
(void (*)__P((int, int))) sparc_noop
#define noop_pure_vcache_flush \
(void (*)__P((void))) sparc_noop
@ -215,6 +215,8 @@ void smp_cache_flush __P((caddr_t, u_int)); /* flush region */
#define cache_flush_region(vr) cpuinfo.vcache_flush_region(vr)
#define cache_flush_context() cpuinfo.vcache_flush_context()
#define pcache_flush_page(pa,flag) cpuinfo.pcache_flush_page(pa,flag)
/*
* Cache control information.
*/


@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.103 2000/06/03 09:56:35 pk Exp $ */
/* $NetBSD: cpu.c,v 1.104 2000/06/05 20:38:25 pk Exp $ */
/*
* Copyright (c) 1996
@ -671,7 +671,7 @@ struct module_info module_sun4 = {
sun4_vcache_flush_segment,
sun4_vcache_flush_region,
sun4_vcache_flush_context,
noop_pcache_flush_line,
noop_pcache_flush_page,
noop_pure_vcache_flush,
noop_cache_flush_all,
0,
@ -799,7 +799,7 @@ struct module_info module_sun4c = {
sun4_vcache_flush_segment,
sun4_vcache_flush_region,
sun4_vcache_flush_context,
noop_pcache_flush_line,
noop_pcache_flush_page,
noop_pure_vcache_flush,
noop_cache_flush_all,
0,
@ -999,7 +999,7 @@ struct module_info module_ms1 = {
noop_vcache_flush_segment,
noop_vcache_flush_region,
noop_vcache_flush_context,
noop_pcache_flush_line,
noop_pcache_flush_page,
noop_pure_vcache_flush,
ms1_cache_flush_all,
memerr4m,
@ -1029,7 +1029,7 @@ struct module_info module_ms2 = { /* UNTESTED */
srmmu_vcache_flush_segment,
srmmu_vcache_flush_region,
srmmu_vcache_flush_context,
noop_pcache_flush_line,
noop_pcache_flush_page,
noop_pure_vcache_flush,
srmmu_cache_flush_all,
memerr4m,
@ -1054,7 +1054,7 @@ struct module_info module_swift = {
srmmu_vcache_flush_segment,
srmmu_vcache_flush_region,
srmmu_vcache_flush_context,
srmmu_pcache_flush_line,
noop_pcache_flush_page,
noop_pure_vcache_flush,
srmmu_cache_flush_all,
memerr4m,
@ -1095,7 +1095,7 @@ struct module_info module_viking = {
noop_vcache_flush_segment,
noop_vcache_flush_region,
noop_vcache_flush_context,
viking_pcache_flush_line,
viking_pcache_flush_page,
noop_pure_vcache_flush,
noop_cache_flush_all,
viking_memerr,
@ -1180,7 +1180,7 @@ struct module_info module_hypersparc = {
srmmu_vcache_flush_segment,
srmmu_vcache_flush_region,
srmmu_vcache_flush_context,
srmmu_pcache_flush_line,
noop_pcache_flush_page,
hypersparc_pure_vcache_flush,
hypersparc_cache_flush_all,
hypersparc_memerr,
@ -1230,7 +1230,7 @@ struct module_info module_cypress = {
srmmu_vcache_flush_segment,
srmmu_vcache_flush_region,
srmmu_vcache_flush_context,
srmmu_pcache_flush_line,
noop_pcache_flush_page,
noop_pure_vcache_flush,
cypress_cache_flush_all,
memerr4m,
@ -1255,7 +1255,7 @@ struct module_info module_turbosparc = {
srmmu_vcache_flush_segment,
srmmu_vcache_flush_region,
srmmu_vcache_flush_context,
srmmu_pcache_flush_line,
noop_pcache_flush_page,
noop_pure_vcache_flush,
srmmu_cache_flush_all,
memerr4m,
@ -1295,7 +1295,7 @@ cpumatch_turbosparc(sc, mp, node)
sc->sp_vcache_flush_segment = 0;
sc->sp_vcache_flush_region = 0;
sc->sp_vcache_flush_context = 0;
sc->pcache_flush_line = 0;
sc->pcache_flush_page = 0;
}
void
@ -1480,7 +1480,7 @@ getcpuinfo(sc, node)
MPCOPY(sp_vcache_flush_segment);
MPCOPY(sp_vcache_flush_region);
MPCOPY(sp_vcache_flush_context);
MPCOPY(pcache_flush_line);
MPCOPY(pcache_flush_page);
MPCOPY(pure_vcache_flush);
MPCOPY(cache_flush_all);
MPCOPY(memerr);


@ -1,4 +1,4 @@
/* $NetBSD: cpuvar.h,v 1.26 2000/05/31 05:28:28 thorpej Exp $ */
/* $NetBSD: cpuvar.h,v 1.27 2000/06/05 20:38:25 pk Exp $ */
/*
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -78,7 +78,7 @@ struct module_info {
void (*sp_vcache_flush_segment)__P((int, int));
void (*sp_vcache_flush_region)__P((int));
void (*sp_vcache_flush_context)__P((void));
void (*pcache_flush_line)__P((int, int));
void (*pcache_flush_page)__P((int, int));
void (*pure_vcache_flush)__P((void));
void (*cache_flush_all)__P((void));
void (*memerr)__P((unsigned, u_int, u_int, struct trapframe *));
@ -280,7 +280,7 @@ struct cpu_info {
void (*vcache_flush_context)__P((void));
void (*sp_vcache_flush_context)__P((void));
void (*pcache_flush_line)__P((int, int));
void (*pcache_flush_page)__P((int, int));
void (*pure_vcache_flush)__P((void));
void (*cache_flush_all)__P((void));


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.166 2000/06/02 10:43:59 pk Exp $ */
/* $NetBSD: pmap.c,v 1.167 2000/06/05 20:38:26 pk Exp $ */
/*
* Copyright (c) 1996
@ -570,11 +570,6 @@ setpgt4m(ptep, pte)
int pte;
{
swap(ptep, pte);
#if 0
/* XXX - uncaching in pgt_page_alloc() below is not yet quite Okay */
if (cpuinfo.cpu_type == CPUTYP_SS1_MBUS_NOMXCC)
cpuinfo.pcache_flush_line((int)ptep, VA2PA((caddr_t)ptep));
#endif
}
/* Set the page table entry for va to pte. */
@ -613,18 +608,6 @@ setpte4m(va, pte)
setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
}
void pcache_flush __P((caddr_t, caddr_t, int));
void
pcache_flush(va, pa, n)
caddr_t va, pa;
int n;
{
void (*f)__P((int,int)) = cpuinfo.pcache_flush_line;
while ((n -= 4) >= 0)
(*f)((u_int)va+n, (u_int)pa+n);
}
/*
* Page table pool back-end.
*/
@ -634,16 +617,38 @@ pgt_page_alloc(sz, flags, mtype)
int flags;
int mtype;
{
caddr_t p;
int cacheit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0;
struct vm_page *pg;
vaddr_t va;
paddr_t pa;
p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
(vsize_t)sz, UVM_KMF_NOWAIT);
/* Allocate a page of physical memory */
if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
return (NULL);
if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
pcache_flush(p, (caddr_t)VA2PA(p), sz);
kvm_uncache(p, sz/NBPG);
/* Allocate virtual memory */
va = uvm_km_valloc(kernel_map, PAGE_SIZE);
if (va == 0) {
uvm_pagefree(pg);
return (NULL);
}
return (p);
/*
* On systems with a physical data cache we need to flush this page
* from the cache if the pagetables cannot be cached.
* On systems with a virtually indexed data cache, we only need
* to map it non-cacheable, since the page is not currently mapped.
*/
pa = VM_PAGE_TO_PHYS(pg);
if (cacheit == 0)
pcache_flush_page(pa, 1);
/* Map the page */
pmap_enter(pmap_kernel(), va, pa | (cacheit ? 0 : PMAP_NC),
VM_PROT_READ|VM_PROT_WRITE,
VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
return ((void *)va);
}
void
@ -3066,6 +3071,7 @@ pmap_bootstrap4m(void)
int reg, seg;
unsigned int ctxtblsize;
caddr_t pagetables_start, pagetables_end;
paddr_t pagetables_start_pa;
extern char end[];
extern char etext[];
extern caddr_t reserve_dumppages(caddr_t);
@ -3153,7 +3159,7 @@ pmap_bootstrap4m(void)
get_phys_mem();
/* Allocate physical memory for pv_table[] */
p += pv_table_map((paddr_t)p - KERNBASE, 0);
p += pv_table_map((paddr_t)(p - KERNBASE), 0);
/*
* Reserve memory for MMU pagetables. Some of these have severe
@ -3162,6 +3168,7 @@ pmap_bootstrap4m(void)
*/
pagetables_start = p;
pagetables_start_pa = (paddr_t)(p - KERNBASE);
/*
* Allocate context table.
@ -3169,7 +3176,7 @@ pmap_bootstrap4m(void)
*/
ctxtblsize = max(ncontext,1024) * sizeof(int);
cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
cpuinfo.ctx_tbl_pa = (paddr_t)cpuinfo.ctx_tbl - KERNBASE;
cpuinfo.ctx_tbl_pa = (paddr_t)(cpuinfo.ctx_tbl - KERNBASE);
p = (caddr_t)((u_int)cpuinfo.ctx_tbl + ctxtblsize);
/*
@ -3197,7 +3204,7 @@ pmap_bootstrap4m(void)
/* Round to next page and mark end of pre-wired kernel space */
p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
pagetables_end = p;
avail_start = (paddr_t)p - KERNBASE;
avail_start = (paddr_t)(p - KERNBASE);
/*
* Now wire the region and segment tables of the kernel map.
@ -3342,8 +3349,22 @@ pmap_bootstrap4m(void)
* Flush it now, and don't touch it again until we
* switch to our own tables (will be done immediately below).
*/
pcache_flush(pagetables_start, (caddr_t)VA2PA(pagetables_start),
pagetables_end - pagetables_start);
int size = pagetables_end - pagetables_start;
if (CACHEINFO.c_vactype != VAC_NONE) {
int va = (vaddr_t)pagetables_start;
while (size != 0) {
cache_flush_page(va);
va += NBPG;
size -= NBPG;
}
} else if (cpuinfo.pcache_flush_page != NULL) {
int pa = pagetables_start_pa;
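/*
 * Flag 0: these page tables were just written through a cacheable
 * mapping, so dirty lines must be written back before the MMU
 * fetches the tables from physical memory.
 */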
while (size != 0) {
pcache_flush_page(pa, 0);
pa += NBPG;
size -= NBPG;
}
}
}
/*
@ -6396,12 +6417,17 @@ pmap_zero_page4m(pa)
if (((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0) && managed(pa)) {
/*
* The following might not be necessary since the page
* is being cleared because it is about to be allocated,
* The following VAC flush might not be necessary since the
* page is being cleared because it is about to be allocated,
* i.e., is in use by no one.
* In the case of a physical cache, a flush (or just an
* invalidate, if possible) is usually necessary when using
* uncached access to clear it.
*/
if (CACHEINFO.c_vactype != VAC_NONE)
pv_flushcache(pvhead(pa));
else
pcache_flush_page(pa, 1);
}
pte = SRMMU_TEPTE | PPROT_N_RWX | (atop(pa) << SRMMU_PPNSHIFT);
if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
@ -6507,6 +6533,8 @@ pmap_copy_page4m(src, dst)
/* similar `might not be necessary' comment applies */
if (CACHEINFO.c_vactype != VAC_NONE)
pv_flushcache(pvhead(dst));
else
pcache_flush_page(dst, 1);
}
dpte = SRMMU_TEPTE | PPROT_N_RWX | (atop(dst) << SRMMU_PPNSHIFT);