Switch the ARM9 to using the Dcache in write-back mode. Avoid an
unknown problem with dcache_inv_range by using a wbinv for now
(similarly for ARM10).

When setting the ARM9 system control register, use the computed
cpuctrlmask value (not 0xffffffff) so that the clocking-mode bits are
not reset to FastBus mode (which isn't very fast).
This commit is contained in:
rearnsha 2004-01-26 15:54:16 +00:00
parent f678f5ebbb
commit 8e61df4a12
3 changed files with 198 additions and 71 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
/* $NetBSD: cpufunc.c,v 1.66 2004/01/26 15:54:16 rearnsha Exp $ */
/*
* arm7tdmi support code Copyright (c) 2001 John Fremlin
@ -46,7 +46,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.66 2004/01/26 15:54:16 rearnsha Exp $");
#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
@ -425,17 +425,16 @@ struct cpu_functions arm9_cpufuncs = {
/* Cache operations */
arm9_cache_syncI, /* icache_sync_all */
arm9_cache_syncI_rng, /* icache_sync_range */
arm9_icache_sync_all, /* icache_sync_all */
arm9_icache_sync_range, /* icache_sync_range */
/* ...cache in write-though mode... */
arm9_cache_flushD, /* dcache_wbinv_all */
arm9_cache_flushD_rng, /* dcache_wbinv_range */
arm9_cache_flushD_rng, /* dcache_inv_range */
(void *)cpufunc_nullop, /* dcache_wb_range */
arm9_dcache_wbinv_all, /* dcache_wbinv_all */
arm9_dcache_wbinv_range, /* dcache_wbinv_range */
/*XXX*/ arm9_dcache_wbinv_range, /* dcache_inv_range */
arm9_dcache_wb_range, /* dcache_wb_range */
arm9_cache_flushID, /* idcache_wbinv_all */
arm9_cache_flushID_rng, /* idcache_wbinv_range */
arm9_idcache_wbinv_all, /* idcache_wbinv_all */
arm9_idcache_wbinv_range, /* idcache_wbinv_range */
/* Other functions */
@ -489,7 +488,7 @@ struct cpu_functions arm10_cpufuncs = {
arm10_dcache_wbinv_all, /* dcache_wbinv_all */
arm10_dcache_wbinv_range, /* dcache_wbinv_range */
arm10_dcache_inv_range, /* dcache_inv_range */
/*XXX*/ arm10_dcache_wbinv_range, /* dcache_inv_range */
arm10_dcache_wb_range, /* dcache_wb_range */
arm10_idcache_wbinv_all, /* idcache_wbinv_all */
@ -972,7 +971,13 @@ set_cpufuncs()
cpufuncs = arm9_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
get_cachetype_cp15();
pmap_pte_init_arm9();
arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
arm9_dcache_sets_max =
(1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
arm9_dcache_sets_inc;
arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
pmap_pte_init_generic();
return 0;
}
#endif /* CPU_ARM9 */
@ -1848,8 +1853,8 @@ arm9_setup(args)
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
| CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
| CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
| CPU_CONTROL_CPCLK;
| CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
| CPU_CONTROL_ROUNDROBIN;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
@ -1866,7 +1871,7 @@ arm9_setup(args)
/* Set the control register */
curcpu()->ci_ctrl = cpuctrl;
cpu_control(0xffffffff, cpuctrl);
cpu_control(cpuctrlmask, cpuctrl);
}
#endif /* CPU_ARM9 */
@ -1891,7 +1896,7 @@ arm10_setup(args)
int cpuctrl, cpuctrlmask;
cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE

View File

@ -1,7 +1,7 @@
/* $NetBSD: cpufunc_asm_arm9.S,v 1.2 2002/01/29 15:27:29 rearnsha Exp $ */
/* $NetBSD: cpufunc_asm_arm9.S,v 1.3 2004/01/26 15:54:16 rearnsha Exp $ */
/*
* Copyright (c) 2001 ARM Limited
* Copyright (c) 2001, 2004 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -41,12 +41,9 @@
* addresses that are about to change.
*/
ENTRY(arm9_setttb)
/*
* Since we use the caches in write-through mode, we only have to
* drain the write buffers and flush the caches.
*/
mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
stmfd sp!, {r0, lr}
bl _C_LABEL(arm9_idcache_wbinv_all)
ldmfd sp!, {r0, lr}
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
@ -62,54 +59,156 @@ ENTRY(arm9_tlb_flushID_SE)
mov pc, lr
/*
* Cache functions
* Cache operations. For the entire cache we use the set/index
* operations.
*/
ENTRY(arm9_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
s_max .req r0
i_max .req r1
s_inc .req r2
i_inc .req r3
/*
 * arm9_icache_sync_range(va, len)
 *
 * In:   r0 = start VA, r1 = length in bytes
 * Uses: r2, r3, ip (r0/r1 consumed)
 *
 * Clean the D cache and invalidate the I cache for [va, va+len),
 * one line at a time.  Ranges of 0x4000 bytes or more fall back to
 * the whole-cache sync instead of walking the range.
 */
ENTRY_NP(arm9_icache_sync_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000 /* range too big? sync the whole cache */
bcs .Larm9_icache_sync_all
ldr ip, [ip] /* ip = cache line size (arm_pdcache_line_size) */
sub r3, ip, #1 /* r3 = line-offset mask */
and r2, r0, r3 /* r2 = va's offset within its line */
add r1, r1, r2 /* extend length to cover the partial head line */
bic r0, r0, r3 /* align va down to a line boundary */
.Larm9_sync_next:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm9_sync_next
mov pc, lr
ENTRY(arm9_cache_flushID_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
/*
 * arm9_icache_sync_all(void)
 *
 * Invalidate the entire I cache, then clean (write back, without
 * invalidating) the entire D cache by Set/Index.  Clobbers ip and
 * r0-r3 via the s_max/i_max/s_inc/i_inc register aliases declared
 * above.  .Larm9_dcache_wb is also the large-range fallback target
 * of arm9_dcache_wb_range.
 */
ENTRY_NP(arm9_icache_sync_all)
.Larm9_icache_sync_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache cleaning code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to clean Dcache. */
.Larm9_dcache_wb:
ldr ip, .Larm9_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc} /* loop bounds computed in set_cpufuncs() */
.Lnext_set:
orr ip, s_max, i_max /* ip = current set | highest index */
.Lnext_index:
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
sub ip, ip, i_inc
tst ip, i_max /* Index 0 is last one */
bne .Lnext_index /* Next index */
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
subs s_max, s_max, s_inc
bpl .Lnext_set /* Next set */
mov pc, lr
ENTRY(arm9_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
.Larm9_line_size:
.word _C_LABEL(arm_pdcache_line_size)
ENTRY(arm9_cache_flushI_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
/*
 * arm9_dcache_wb_range(va, len)
 *
 * In:   r0 = start VA, r1 = length in bytes
 * Uses: r2, r3, ip (r0/r1 consumed)
 *
 * Clean (write back, without invalidating) the D cache lines
 * covering [va, va+len).  Ranges of 0x4000 bytes or more are
 * handled by cleaning the whole D cache instead.
 */
ENTRY(arm9_dcache_wb_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000 /* range too big? clean the whole Dcache */
bcs .Larm9_dcache_wb
ldr ip, [ip] /* ip = cache line size */
sub r3, ip, #1 /* r3 = line-offset mask */
and r2, r0, r3
add r1, r1, r2 /* extend length to cover the partial head line */
bic r0, r0, r3 /* align va down to a line boundary */
.Larm9_wb_next:
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm9_wb_next
mov pc, lr
ENTRY(arm9_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
/*
 * arm9_dcache_wbinv_range(va, len)
 *
 * In:   r0 = start VA, r1 = length in bytes
 * Uses: r2, r3, ip (r0/r1 consumed)
 *
 * Clean and invalidate (purge) the D cache lines covering
 * [va, va+len).  Ranges of 0x4000 bytes or more purge the whole
 * D cache instead.
 */
ENTRY(arm9_dcache_wbinv_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000 /* range too big? purge the whole Dcache */
bcs .Larm9_dcache_wbinv_all
ldr ip, [ip] /* ip = cache line size */
sub r3, ip, #1 /* r3 = line-offset mask */
and r2, r0, r3
add r1, r1, r2 /* extend length to cover the partial head line */
bic r0, r0, r3 /* align va down to a line boundary */
.Larm9_wbinv_next:
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm9_wbinv_next
mov pc, lr
ENTRY(arm9_cache_flushD_SE)
mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
mov pc, lr
ENTRY(arm9_cache_cleanID)
mcr p15, 0, r0, c7, c10, 4
mov pc, lr
/*
* Soft functions
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
ENTRY(arm9_cache_syncI)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
/*
 * arm9_dcache_inv_range(va, len)
 *
 * In:   r0 = start VA, r1 = length in bytes
 * Uses: r2, r3, ip (r0/r1 consumed)
 *
 * Invalidate (without cleaning) the D cache lines covering
 * [va, va+len).  Per the note above, a range of 0x4000 bytes or
 * more must NOT invalidate the whole cache -- that would discard
 * unrelated dirty lines -- so it falls back to a full write-back-
 * invalidate instead.
 */
ENTRY(arm9_dcache_inv_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000 /* range too big? wbinv the whole Dcache */
bcs .Larm9_dcache_wbinv_all
ldr ip, [ip] /* ip = cache line size */
sub r3, ip, #1 /* r3 = line-offset mask */
and r2, r0, r3
add r1, r1, r2 /* extend length to cover the partial head line */
bic r0, r0, r3 /* align va down to a line boundary */
.Larm9_inv_next:
mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm9_inv_next
mov pc, lr
ENTRY_NP(arm9_cache_flushID_rng)
b _C_LABEL(arm9_cache_flushID)
/*
 * arm9_idcache_wbinv_range(va, len)
 *
 * In:   r0 = start VA, r1 = length in bytes
 * Uses: r2, r3, ip (r0/r1 consumed)
 *
 * Invalidate the I cache and purge (clean+invalidate) the D cache
 * for [va, va+len).  Ranges of 0x4000 bytes or more fall back to
 * the whole-cache I+D write-back-invalidate.
 */
ENTRY(arm9_idcache_wbinv_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000 /* range too big? wbinv both caches entirely */
bcs .Larm9_idcache_wbinv_all
ldr ip, [ip] /* ip = cache line size */
sub r3, ip, #1 /* r3 = line-offset mask */
and r2, r0, r3
add r1, r1, r2 /* extend length to cover the partial head line */
bic r0, r0, r3 /* align va down to a line boundary */
.Larm9_id_wbinv_next:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm9_id_wbinv_next
mov pc, lr
ENTRY_NP(arm9_cache_flushD_rng)
/* Same as above, but D cache only */
b _C_LABEL(arm9_cache_flushD)
ENTRY_NP(arm9_idcache_wbinv_all)
.Larm9_idcache_wbinv_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache purging code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to purge Dcache. */
ENTRY_NP(arm9_cache_syncI_rng)
/* Similarly, for I cache sync */
b _C_LABEL(arm9_cache_syncI)
/*
 * arm9_dcache_wbinv_all(void)
 *
 * Purge (clean and invalidate) the entire D cache by Set/Index.
 * Clobbers ip and r0-r3 via the s_max/i_max/s_inc/i_inc aliases.
 * Also the fall-through target of arm9_idcache_wbinv_all and the
 * large-range fallback of the wbinv/inv range routines above.
 */
ENTRY(arm9_dcache_wbinv_all)
.Larm9_dcache_wbinv_all:
ldr ip, .Larm9_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc} /* loop bounds computed in set_cpufuncs() */
.Lnext_set_inv:
orr ip, s_max, i_max /* ip = current set | highest index */
.Lnext_index_inv:
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
sub ip, ip, i_inc
tst ip, i_max /* Index 0 is last one */
bne .Lnext_index_inv /* Next index */
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
subs s_max, s_max, s_inc
bpl .Lnext_set_inv /* Next set */
mov pc, lr
.Larm9_cache_data:
.word _C_LABEL(arm9_dcache_sets_max)
/*
* Context switch.
@ -134,3 +233,24 @@ ENTRY(arm9_context_switch)
nop
nop
mov pc, lr
.bss
/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
/*
* Parameters for the cache cleaning code. Note that the order of these
* four variables is assumed in the code above. Hence the reason for
* declaring them in the assembler file.
*/
/*
 * Four 4-byte cells, loaded together by the ldmia in the set/index
 * loops above; the values are filled in from C at CPU-probe time
 * (see the arm9_dcache_* assignments in set_cpufuncs()).
 */
.align 0
C_OBJECT(arm9_dcache_sets_max)
.space 4
C_OBJECT(arm9_dcache_index_max)
.space 4
C_OBJECT(arm9_dcache_sets_inc)
.space 4
C_OBJECT(arm9_dcache_index_inc)
.space 4

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */
/* $NetBSD: cpufunc.h,v 1.30 2004/01/26 15:54:16 rearnsha Exp $ */
/*
* Copyright (c) 1997 Mark Brinicombe.
@ -313,23 +313,25 @@ void arm9_setttb __P((u_int));
void arm9_tlb_flushID_SE __P((u_int va));
void arm9_cache_flushID __P((void));
void arm9_cache_flushID_SE __P((u_int));
void arm9_cache_flushI __P((void));
void arm9_cache_flushI_SE __P((u_int));
void arm9_cache_flushD __P((void));
void arm9_cache_flushD_SE __P((u_int));
void arm9_icache_sync_all __P((void));
void arm9_icache_sync_range __P((vaddr_t, vsize_t));
void arm9_cache_cleanID __P((void));
void arm9_dcache_wbinv_all __P((void));
void arm9_dcache_wbinv_range __P((vaddr_t, vsize_t));
void arm9_dcache_inv_range __P((vaddr_t, vsize_t));
void arm9_dcache_wb_range __P((vaddr_t, vsize_t));
void arm9_cache_syncI __P((void));
void arm9_cache_flushID_rng __P((vaddr_t, vsize_t));
void arm9_cache_flushD_rng __P((vaddr_t, vsize_t));
void arm9_cache_syncI_rng __P((vaddr_t, vsize_t));
void arm9_idcache_wbinv_all __P((void));
void arm9_idcache_wbinv_range __P((vaddr_t, vsize_t));
void arm9_context_switch __P((void));
void arm9_setup __P((char *string));
extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif
#ifdef CPU_ARM10