Only SH7750 and SH7750S require updates to memory-mapped data cache
arrays to be performed while running on P2.  Don't penalize other CPUs
that can do it from P1.
uwe 2008-03-16 19:17:53 +00:00
parent 04e0ee32b2
commit f0757531d4
1 changed file with 22 additions and 15 deletions
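
Background for the change: on SH-3/SH-4, P1 (0x80000000-0x9fffffff) and
P2 (0xa0000000-0xbfffffff) map the same physical memory untranslated, but
code fetched through P1 is cached while code fetched through P2 bypasses
the cache, and on SH7750/SH7750S the CPU must be executing from P2 while
it updates the memory-mapped data cache arrays.  Rather than switching
segments inside every routine, the commit makes only those two CPUs call
the routines through their P2 alias, which is plain address arithmetic.
A minimal sketch of the idea, assuming the standard SH segment layout;
P1_TO_P2, dcache_wbinv_all_op, needs_p2, and cache_config_sketch are
illustrative names, not the kernel's:

	#include <stdint.h>

	/* The P2 alias of a P1 address differs only in bit 29. */
	#define P1_TO_P2(addr)	((addr) | 0x20000000u)

	extern void sh4_dcache_wbinv_all(void);

	static void (*dcache_wbinv_all_op)(void);

	static void
	cache_config_sketch(int needs_p2)
	{
		if (needs_p2) {
			/* SH7750/SH7750S: execute the routine uncached via P2 */
			dcache_wbinv_all_op = (void (*)(void))
			    P1_TO_P2((uintptr_t)sh4_dcache_wbinv_all);
		} else {
			/* other SH-4 variants run it cached from P1 */
			dcache_wbinv_all_op = sh4_dcache_wbinv_all;
		}
	}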

@@ -1,4 +1,4 @@
-/* $NetBSD: cache_sh4.c,v 1.18 2008/03/15 22:48:58 uwe Exp $ */
+/* $NetBSD: cache_sh4.c,v 1.19 2008/03/16 19:17:53 uwe Exp $ */
 
 /*-
  * Copyright (c) 2002 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cache_sh4.c,v 1.18 2008/03/15 22:48:58 uwe Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cache_sh4.c,v 1.19 2008/03/16 19:17:53 uwe Exp $");
 
 #include "opt_cache.h"
@@ -164,6 +164,16 @@ sh4_cache_config(void)
 	sh_cache_ops._dcache_wb_range = sh4_dcache_wb_range;
 
 	switch (cpu_product) {
+	case CPU_PRODUCT_7750: /* FALLTHROUGH */
+	case CPU_PRODUCT_7750S:
+		/* memory mapped d$ can only be accessed from p2 */
+		sh_cache_ops._dcache_wbinv_all
+		    = (void *)SH3_P1SEG_TO_P2SEG(sh4_dcache_wbinv_all);
+		sh_cache_ops._dcache_wbinv_range_index
+		    = (void *)SH3_P1SEG_TO_P2SEG(sh4_dcache_wbinv_range_index);
+		break;
+
+#if !defined(SH4_CACHE_DISABLE_EMODE)
 	case CPU_PRODUCT_7750R:
 	case CPU_PRODUCT_7751R:
 		if (!(r & SH4_CCR_EMODE)) {
@@ -174,6 +184,7 @@ sh4_cache_config(void)
 		sh_cache_ops._dcache_wbinv_all = sh4_emode_dcache_wbinv_all;
 		sh_cache_ops._dcache_wbinv_range_index = sh4_emode_dcache_wbinv_range_index;
 		break;
+#endif
 	}
 }
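
The redirection above works because sh3 cache maintenance is dispatched
through the sh_cache_ops function-pointer table, so patching one entry at
configuration time changes how every caller reaches the routine.  A
cut-down sketch of that dispatch pattern, with an illustrative struct and
wrapper rather than the exact <sh3/cache.h> definitions:

	/* two of the entries redirected by this commit */
	struct cache_ops_sketch {
		void (*_dcache_wbinv_all)(void);
		void (*_dcache_wbinv_range_index)(unsigned long, unsigned long);
	};

	static struct cache_ops_sketch ops_sketch;

	/* callers never know (or care) which segment the op runs from */
	static inline void
	dcache_wbinv_all_sketch(void)
	{
		(*ops_sketch._dcache_wbinv_all)();
	}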
@@ -225,7 +236,7 @@ sh4_icache_sync_all(void)
 		cache_sh4_op_8lines_32(va, SH4_CCIA, CCIA_ENTRY_MASK, CCIA_V);
 		va += 32 * 8;
 	}
-	RUN_P1;
+	PAD_P1_SWITCH;
 }
 
 void
@@ -244,7 +255,7 @@ sh4_icache_sync_range(vaddr_t va, vsize_t sz)
 		_reg_write_4(ccia, va & CCIA_TAGADDR_MASK); /* V = 0 */
 		va += 32;
 	}
-	RUN_P1;
+	PAD_P1_SWITCH;
 }
 
 void
@@ -265,7 +276,7 @@ sh4_icache_sync_range_index(vaddr_t va, vsize_t sz)
 		cache_sh4_op_line_32(va, SH4_CCIA, CCIA_ENTRY_MASK, CCIA_V);
 		va += 32;
 	}
-	RUN_P1;
+	PAD_P1_SWITCH;
 }
 
 void
@@ -274,13 +285,13 @@ sh4_dcache_wbinv_all(void)
 	vaddr_t va = 0;
 	vaddr_t eva = SH4_DCACHE_SIZE;
 
-	RUN_P2;
+	/* RUN_P2; */ /* called via P2 address if necessary */
 	while (va < eva) {
 		cache_sh4_op_8lines_32(va, SH4_CCDA, CCDA_ENTRY_MASK,
 		    (CCDA_U | CCDA_V));
 		va += 32 * 8;
 	}
-	RUN_P1;
+	PAD_P1_SWITCH;
 }
 
 void
@@ -301,7 +312,7 @@ sh4_dcache_wbinv_range_index(vaddr_t va, vsize_t sz)
 	vaddr_t eva = round_line(va + sz);
 	va = trunc_line(va);
 
-	RUN_P2;
+	/* RUN_P2; */ /* called via P2 address if necessary */
 	while ((eva - va) >= (8 * 32)) {
 		cache_sh4_op_8lines_32(va, SH4_CCDA, CCDA_ENTRY_MASK,
 		    (CCDA_U | CCDA_V));
@@ -313,7 +324,7 @@ sh4_dcache_wbinv_range_index(vaddr_t va, vsize_t sz)
 		    (CCDA_U | CCDA_V));
 		va += 32;
 	}
-	RUN_P1;
+	PAD_P1_SWITCH;
 }
 
 void
@@ -415,7 +426,7 @@ sh4_emode_icache_sync_all(void)
 		    CCIA_V, 13);
 		va += 32 * 8;
 	}
-	RUN_P1;
+	PAD_P1_SWITCH;
 }
 
 void
@@ -438,7 +449,7 @@ sh4_emode_icache_sync_range_index(vaddr_t va, vsize_t sz)
 		    CCIA_V, 13);
 		va += 32;
 	}
-	RUN_P1;
+	PAD_P1_SWITCH;
 }
 
 void
@@ -447,13 +458,11 @@ sh4_emode_dcache_wbinv_all(void)
 	vaddr_t va = 0;
 	vaddr_t eva = SH4_EMODE_DCACHE_SIZE;
 
-	RUN_P2;
 	while (va < eva) {
 		cache_sh4_emode_op_8lines_32(va, SH4_CCDA, CCDA_ENTRY_MASK,
 		    (CCDA_U | CCDA_V), 14);
 		va += 32 * 8;
 	}
-	RUN_P1;
 }
 
 void
@@ -462,7 +471,6 @@ sh4_emode_dcache_wbinv_range_index(vaddr_t va, vsize_t sz)
 	vaddr_t eva = round_line(va + sz);
 	va = trunc_line(va);
 
-	RUN_P2;
 	while ((eva - va) >= (8 * 32)) {
 		cache_sh4_emode_op_8lines_32(va, SH4_CCDA, CCDA_ENTRY_MASK,
 		    (CCDA_U | CCDA_V), 14);
@@ -474,5 +482,4 @@ sh4_emode_dcache_wbinv_range_index(vaddr_t va, vsize_t sz)
 		    (CCDA_U | CCDA_V), 14);
 		va += 32;
 	}
-	RUN_P1;
 }
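
Note the asymmetry in the last three hunks: the emode routines, which
only ever run on SH7750R/SH7751R, lose RUN_P2/RUN_P1 outright, since per
the commit message those CPUs may operate on the cache arrays from P1.
The non-emode routines instead end in PAD_P1_SWITCH, because they may
have been entered through a P2 alias: the switch back to P1 then happens
implicitly when the routine returns to its P1 caller, and the SH7750
manual requires several instructions to execute after a P2 access to the
memory-mapped arrays before control branches back to a cached area.
Assuming PAD_P1_SWITCH is a nop-padding macro (a guess at its shape, not
the tree's definition), something like this would satisfy that rule for
a routine that simply returns to its caller:

	/* pad the pipeline before the rts carries us back to cached P1 */
	#define PAD_P1_SWITCH_SKETCH					\
		__asm volatile("nop;nop;nop;nop;nop;nop;nop;nop")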