Very basic support for the ARM1136.

This code takes no advantage of any 'new' features provided by
architecture 6 devices (such as physically tagged caches or new
MMU features), and basically runs the chip in a 'legacy v5' mode.
rearnsha 2005-06-03 15:55:55 +00:00
parent a53d9d9c2a
commit 80a3b6d023
9 changed files with 568 additions and 241 deletions

bus_space_asm_generic.S

@ -1,4 +1,4 @@
/* $NetBSD: bus_space_asm_generic.S,v 1.3 2003/03/27 19:46:14 mycroft Exp $ */
/* $NetBSD: bus_space_asm_generic.S,v 1.4 2005/06/03 15:55:55 rearnsha Exp $ */
/*
* Copyright (c) 1997 Causality Limited.
@ -49,7 +49,7 @@ ENTRY(generic_bs_r_1)
ldrb r0, [r1, r2]
mov pc, lr
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
ENTRY(generic_armv4_bs_r_2)
ldrh r0, [r1, r2]
mov pc, lr
@ -67,7 +67,7 @@ ENTRY(generic_bs_w_1)
strb r3, [r1, r2]
mov pc, lr
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
ENTRY(generic_armv4_bs_w_2)
strh r3, [r1, r2]
mov pc, lr
@ -95,7 +95,7 @@ ENTRY(generic_bs_rm_1)
mov pc, lr
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
ENTRY(generic_armv4_bs_rm_2)
add r0, r1, r2
mov r1, r3
@ -143,7 +143,7 @@ ENTRY(generic_bs_wm_1)
mov pc, lr
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
ENTRY(generic_armv4_bs_wm_2)
add r0, r1, r2
mov r1, r3
@ -191,7 +191,7 @@ ENTRY(generic_bs_rr_1)
mov pc, lr
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
ENTRY(generic_armv4_bs_rr_2)
add r0, r1, r2
mov r1, r3
@ -239,7 +239,7 @@ ENTRY(generic_bs_wr_1)
mov pc, lr
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
ENTRY(generic_armv4_bs_wr_2)
add r0, r1, r2
mov r1, r3
@ -286,7 +286,7 @@ ENTRY(generic_bs_sr_1)
mov pc, lr
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
ENTRY(generic_armv4_bs_sr_2)
add r0, r1, r2
mov r1, r3
@ -318,7 +318,7 @@ ENTRY(generic_bs_sr_4)
* copy region
*/
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
ENTRY(generic_armv4_bs_c_2)
add r0, r1, r2
ldr r2, [sp, #0]
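The only change in this file is the guard macro. cpuconf.h (further
down in this commit) defines each ARM_ARCH_n as 0 or 1, so the sum is
nonzero exactly when some configured CPU has the v4 halfword load/store
instructions these accessors use. A minimal sketch of the pattern
(illustrative, not the literal header text):

/* Sketch: each ARM_ARCH_n is 0 or 1, so the sum is nonzero iff at
 * least one configured architecture level has ldrh/strh. */
#if (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6) > 0
	/* the 16-bit generic bus-space accessors are assembled */
#endif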

cpufunc.c

@ -1,4 +1,4 @@
/* $NetBSD: cpufunc.c,v 1.70 2005/06/02 17:45:59 he Exp $ */
/* $NetBSD: cpufunc.c,v 1.71 2005/06/03 15:55:55 rearnsha Exp $ */
/*
* arm7tdmi support code Copyright (c) 2001 John Fremlin
@ -46,7 +46,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.70 2005/06/02 17:45:59 he Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.71 2005/06/03 15:55:55 rearnsha Exp $");
#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
@ -483,16 +483,16 @@ struct cpu_functions arm10_cpufuncs = {
/* Cache operations */
arm10_icache_sync_all, /* icache_sync_all */
arm10_icache_sync_range, /* icache_sync_range */
armv5_icache_sync_all, /* icache_sync_all */
armv5_icache_sync_range, /* icache_sync_range */
arm10_dcache_wbinv_all, /* dcache_wbinv_all */
arm10_dcache_wbinv_range, /* dcache_wbinv_range */
/*XXX*/ arm10_dcache_wbinv_range, /* dcache_inv_range */
arm10_dcache_wb_range, /* dcache_wb_range */
armv5_dcache_wbinv_all, /* dcache_wbinv_all */
armv5_dcache_wbinv_range, /* dcache_wbinv_range */
/*XXX*/ armv5_dcache_wbinv_range, /* dcache_inv_range */
armv5_dcache_wb_range, /* dcache_wb_range */
arm10_idcache_wbinv_all, /* idcache_wbinv_all */
arm10_idcache_wbinv_range, /* idcache_wbinv_range */
armv5_idcache_wbinv_all, /* idcache_wbinv_all */
armv5_idcache_wbinv_range, /* idcache_wbinv_range */
/* Other functions */
@ -515,6 +515,64 @@ struct cpu_functions arm10_cpufuncs = {
};
#endif /* CPU_ARM10 */
#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
cpufunc_domains, /* Domain */
arm11_setttb, /* Setttb */
cpufunc_faultstatus, /* Faultstatus */
cpufunc_faultaddress, /* Faultaddress */
/* TLB functions */
arm11_tlb_flushID, /* tlb_flushID */
arm11_tlb_flushID_SE, /* tlb_flushID_SE */
arm11_tlb_flushI, /* tlb_flushI */
arm11_tlb_flushI_SE, /* tlb_flushI_SE */
arm11_tlb_flushD, /* tlb_flushD */
arm11_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
armv5_icache_sync_all, /* icache_sync_all */
armv5_icache_sync_range, /* icache_sync_range */
armv5_dcache_wbinv_all, /* dcache_wbinv_all */
armv5_dcache_wbinv_range, /* dcache_wbinv_range */
/*XXX*/ armv5_dcache_wbinv_range, /* dcache_inv_range */
armv5_dcache_wb_range, /* dcache_wb_range */
armv5_idcache_wbinv_all, /* idcache_wbinv_all */
armv5_idcache_wbinv_range, /* idcache_wbinv_range */
/* Other functions */
cpufunc_nullop, /* flush_prefetchbuf */
arm11_drain_writebuf, /* drain_writebuf */
cpufunc_nullop, /* flush_brnchtgt_C */
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
(void *)cpufunc_nullop, /* sleep */
/* Soft functions */
cpufunc_null_fixup, /* dataabt_fixup */
cpufunc_null_fixup, /* prefetchabt_fixup */
arm11_context_switch, /* context_switch */
arm11_setup /* cpu setup */
};
#endif /* CPU_ARM11 */
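Nothing changes for callers: cpu_setttb(), cpu_idcache_wbinv_all() and
friends are assumed to be macros that indirect through the global
cpufuncs table, so installing arm11_cpufuncs at boot routes every MMU,
TLB and cache operation to the routines above. A rough model of the
dispatch (member names are assumptions, following the cf_ naming
convention in cpufunc.h):

/* Rough model of the cpufuncs dispatch; names are assumptions. */
extern struct cpu_functions cpufuncs;
#define cpu_setttb(t)            cpufuncs.cf_setttb(t)
#define cpu_idcache_wbinv_all()  cpufuncs.cf_idcache_wbinv_all()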
#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
/* CPU functions */
@ -754,7 +812,7 @@ u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
defined (CPU_ARM10) || \
defined (CPU_ARM10) || defined (CPU_ARM11) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
static void get_cachetype_cp15 __P((void));
@ -996,16 +1054,32 @@ set_cpufuncs()
cpufuncs = arm10_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
get_cachetype_cp15();
arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
arm10_dcache_sets_max =
armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
armv5_dcache_sets_max =
(1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
arm10_dcache_sets_inc;
arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
armv5_dcache_sets_inc;
armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
pmap_pte_init_generic();
return 0;
}
#endif /* CPU_ARM10 */
#ifdef CPU_ARM11
if (cputype == CPU_ID_ARM1136JS ||
cputype == CPU_ID_ARM1136JSR1) {
cpufuncs = arm11_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
get_cachetype_cp15();
armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
armv5_dcache_sets_max =
(1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
armv5_dcache_sets_inc;
armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
pmap_pte_init_generic();
return 0;
}
#endif /* CPU_ARM11 */
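The cputype value tested here is assumed to come from cpufunc_id(),
which reads the CP15 main ID register; the two IDs accepted above
distinguish the original ARM1136J-S from its r1 revision. A sketch of
that read (hypothetical helper, shown for orientation only):

/* Assumed source of cputype: the CP15 main ID register. */
static inline u_int
read_main_id(void)
{
	u_int id;

	__asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (id));
	return id;
}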
#ifdef CPU_SA110
if (cputype == CPU_ID_SA110) {
cpufuncs = sa110_cpufuncs;
@ -1543,7 +1617,8 @@ late_abort_fixup(arg)
defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
defined(CPU_SA1100) || defined(CPU_SA1110) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_ARM10) || defined(CPU_ARM11)
#define IGN 0
#define OR 1
@ -1938,6 +2013,56 @@ arm10_setup(args)
}
#endif /* CPU_ARM10 */
#ifdef CPU_ARM11
struct cpu_option arm11_options[] = {
{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
{ "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
{ "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
{ "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
{ NULL, IGN, IGN, 0 }
};
void
arm11_setup(args)
char *args;
{
int cpuctrl, cpuctrlmask;
cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
/* | CPU_CONTROL_BPRD_ENABLE */;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
| CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
| CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
#ifdef __ARMEB__
cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
/* Clear out the cache */
cpu_idcache_wbinv_all();
/* Now really make sure they are clean. */
asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
/* Set the control register */
curcpu()->ci_ctrl = cpuctrl;
cpu_control(0xffffffff, cpuctrl);
/* And again. */
cpu_idcache_wbinv_all();
}
#endif /* CPU_ARM11 */
#ifdef CPU_SA110
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12

cpufunc_asm_arm10.S

@ -1,4 +1,4 @@
/* $NetBSD: cpufunc_asm_arm10.S,v 1.3 2005/06/02 14:40:05 rearnsha Exp $ */
/* $NetBSD: cpufunc_asm_arm10.S,v 1.4 2005/06/03 15:55:55 rearnsha Exp $ */
/*
* Copyright (c) 2002 ARM Limited
@ -42,7 +42,7 @@
*/
ENTRY(arm10_setttb)
stmfd sp!, {r0, lr}
bl _C_LABEL(arm10_idcache_wbinv_all)
bl _C_LABEL(armv5_idcache_wbinv_all)
ldmfd sp!, {r0, lr}
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
@ -63,170 +63,6 @@ ENTRY(arm10_tlb_flushI_SE)
RET
/*
* Cache operations. For the entire cache we use the set/index
* operations.
*/
s_max .req r0
i_max .req r1
s_inc .req r2
i_inc .req r3
ENTRY_NP(arm10_icache_sync_range)
ldr ip, .Larm10_line_size
cmp r1, #0x4000
bcs .Larm10_icache_sync_all
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
sub r1, r1, #1
bic r0, r0, r3
.Larm10_sync_next:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm10_sync_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY_NP(arm10_icache_sync_all)
.Larm10_icache_sync_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache cleaning code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to clean Dcache. */
.Larm10_dcache_wb:
ldr ip, .Larm10_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set:
orr ip, s_max, i_max
.Lnext_index:
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
sub ip, ip, i_inc
tst ip, i_max /* Index 0 is last one */
bne .Lnext_index /* Next index */
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
subs s_max, s_max, s_inc
bpl .Lnext_set /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
.Larm10_line_size:
.word _C_LABEL(arm_pdcache_line_size)
ENTRY(arm10_dcache_wb_range)
ldr ip, .Larm10_line_size
cmp r1, #0x4000
bcs .Larm10_dcache_wb
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
sub r1, r1, #1
bic r0, r0, r3
.Larm10_wb_next:
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm10_wb_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(arm10_dcache_wbinv_range)
ldr ip, .Larm10_line_size
cmp r1, #0x4000
bcs .Larm10_dcache_wbinv_all
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
sub r1, r1, #1
bic r0, r0, r3
.Larm10_wbinv_next:
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm10_wbinv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
ENTRY(arm10_dcache_inv_range)
ldr ip, .Larm10_line_size
cmp r1, #0x4000
bcs .Larm10_dcache_wbinv_all
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
sub r1, r1, #1
bic r0, r0, r3
.Larm10_inv_next:
mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm10_inv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(arm10_idcache_wbinv_range)
ldr ip, .Larm10_line_size
cmp r1, #0x4000
bcs .Larm10_idcache_wbinv_all
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
sub r1, r1, #1
bic r0, r0, r3
.Larm10_id_wbinv_next:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl .Larm10_id_wbinv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY_NP(arm10_idcache_wbinv_all)
.Larm10_idcache_wbinv_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache purging code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to purge Dcache. */
ENTRY(arm10_dcache_wbinv_all)
.Larm10_dcache_wbinv_all:
ldr ip, .Larm10_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
orr ip, s_max, i_max
.Lnext_index_inv:
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
sub ip, ip, i_inc
tst ip, i_max /* Index 0 is last one */
bne .Lnext_index_inv /* Next index */
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
subs s_max, s_max, s_inc
bpl .Lnext_set_inv /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
.Larm10_cache_data:
.word _C_LABEL(arm10_dcache_sets_max)
/*
* Context switch.
*
@ -250,24 +86,3 @@ ENTRY(arm10_context_switch)
nop
nop
RET
.bss
/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
/*
* Parameters for the cache cleaning code. Note that the order of these
* four variables is assumed in the code above. Hence the reason for
* declaring them in the assembler file.
*/
.align 0
C_OBJECT(arm10_dcache_sets_max)
.space 4
C_OBJECT(arm10_dcache_index_max)
.space 4
C_OBJECT(arm10_dcache_sets_inc)
.space 4
C_OBJECT(arm10_dcache_index_inc)
.space 4

cpufunc_asm_arm11.S

@ -0,0 +1,124 @@
/* $NetBSD: cpufunc_asm_arm11.S,v 1.1 2005/06/03 15:55:55 rearnsha Exp $ */
/*
* Copyright (c) 2002, 2005 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARM11 assembly functions for CPU / MMU / TLB specific operations
*
* XXX We make no attempt at present to take advantage of the v6 memory
* architecture or physically tagged cache.
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
ENTRY(arm11_setttb)
stmfd sp!, {r0, lr}
bl _C_LABEL(armv5_idcache_wbinv_all)
ldmfd sp!, {r0, lr}
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
/*
* TLB functions
*/
ENTRY(arm11_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
ENTRY(arm11_tlb_flushI_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
/*
* Context switch.
*
* These are the CPU-specific parts of the context switcher cpu_switch()
* These functions actually perform the TTB reload.
*
* NOTE: Special calling convention
* r1, r4-r13 must be preserved
*/
ENTRY(arm11_context_switch)
/*
* We can assume that the caches will only contain kernel addresses
* at this point. So no need to flush them again.
*/
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
/* Paranoia -- make sure the pipeline is empty. */
nop
nop
nop
RET
/*
* TLB functions
*/
ENTRY(arm11_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
ENTRY(arm11_tlb_flushI)
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
ENTRY(arm11_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
ENTRY(arm11_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
/*
* Other functions
*/
ENTRY(arm11_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
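Since this legacy mode treats the caches as virtually indexed and
tagged, arm11_setttb has to clean and invalidate everything before the
table base changes. Read as C, the sequence is roughly as follows
(illustrative model; the cp15_* helpers are hypothetical stand-ins for
the mcr instructions above):

/* Illustrative C model of arm11_setttb. */
void
arm11_setttb_model(u_int ttb)
{
	armv5_idcache_wbinv_all();	/* clean + invalidate I/D caches */
	cp15_write_ttb(ttb);		/* mcr p15, 0, ttb, c2, c0, 0 */
	cp15_flush_idtlb();		/* mcr p15, 0, 0, c8, c7, 0 */
	cp15_drain_writebuf();		/* mcr p15, 0, 0, c7, c10, 4 */
}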

cpufunc_asm_armv5.S

@ -0,0 +1,222 @@
/* $NetBSD: cpufunc_asm_armv5.S,v 1.1 2005/06/03 15:55:55 rearnsha Exp $ */
/*
* Copyright (c) 2002, 2005 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARMv5 assembly functions for manipulating caches.
* These routines can be used by any core that supports the set/index
* operations.
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
* Cache operations. For the entire cache we use the set/index
* operations.
*/
s_max .req r0
i_max .req r1
s_inc .req r2
i_inc .req r3
ENTRY_NP(armv5_icache_sync_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_icache_sync_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY_NP(armv5_icache_sync_all)
.Larmv5_icache_sync_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache cleaning code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to clean Dcache. */
.Larmv5_dcache_wb:
ldr ip, .Larmv5_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc}
1:
orr ip, s_max, i_max
2:
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
sub ip, ip, i_inc
tst ip, i_max /* Index 0 is last one */
bne 2b /* Next index */
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
subs s_max, s_max, s_inc
bpl 1b /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
.Larmv5_line_size:
.word _C_LABEL(arm_pdcache_line_size)
ENTRY(armv5_dcache_wb_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_dcache_wb
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(armv5_dcache_wbinv_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_dcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
ENTRY(armv5_dcache_inv_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_dcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(armv5_idcache_wbinv_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_idcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY_NP(armv5_idcache_wbinv_all)
.Larmv5_idcache_wbinv_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache purging code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to purge Dcache. */
ENTRY(armv5_dcache_wbinv_all)
.Larmv5_dcache_wbinv_all:
ldr ip, .Larmv5_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc}
1:
orr ip, s_max, i_max
2:
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
sub ip, ip, i_inc
tst ip, i_max /* Index 0 is last one */
bne 2b /* Next index */
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
subs s_max, s_max, s_inc
bpl 1b /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
.Larmv5_cache_data:
.word _C_LABEL(armv5_dcache_sets_max)
.bss
/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
/*
* Parameters for the cache cleaning code. Note that the order of these
* four variables is assumed in the code above. Hence the reason for
* declaring them in the assembler file.
*/
.align 0
C_OBJECT(armv5_dcache_sets_max)
.space 4
C_OBJECT(armv5_dcache_index_max)
.space 4
C_OBJECT(armv5_dcache_sets_inc)
.space 4
C_OBJECT(armv5_dcache_index_inc)
.space 4
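The whole-cache operations walk every line by set and index; the four
.bss variables above parameterize that walk (the index field sits in
the top bits of the mcr operand, the set field in the low bits). As a
C model (illustrative; clean_dcache_line() is a hypothetical stand-in
for the c7, c10, 2 mcr):

/* Illustrative C model of the set/index clean loop. */
unsigned int set, idx;

for (set = armv5_dcache_sets_max; (int)set >= 0;
    set -= armv5_dcache_sets_inc) {
	for (idx = armv5_dcache_index_max; ; idx -= armv5_dcache_index_inc) {
		clean_dcache_line(set | idx);
		if (idx == 0)
			break;
	}
}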

cpu.c

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.57 2005/05/10 13:02:55 rearnsha Exp $ */
/* $NetBSD: cpu.c,v 1.58 2005/06/03 15:55:55 rearnsha Exp $ */
/*
* Copyright (c) 1995 Mark Brinicombe.
@ -46,7 +46,7 @@
#include <sys/param.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.57 2005/05/10 13:02:55 rearnsha Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.58 2005/06/03 15:55:55 rearnsha Exp $");
#include <sys/systm.h>
#include <sys/malloc.h>
@ -172,7 +172,8 @@ enum cpu_class {
CPU_CLASS_ARM10E,
CPU_CLASS_ARM10EJ,
CPU_CLASS_SA1,
CPU_CLASS_XSCALE
CPU_CLASS_XSCALE,
CPU_CLASS_ARM11J
};
static const char * const generic_steppings[16] = {
@ -360,6 +361,11 @@ const struct cpuidtab cpuids[] = {
{ CPU_ID_IXP425_266, CPU_CLASS_XSCALE, "IXP425 266MHz",
ixp425_steppings },
{ CPU_ID_ARM1136JS, CPU_CLASS_ARM11J, "ARM1136J-S",
generic_steppings },
{ CPU_ID_ARM1136JSR1, CPU_CLASS_ARM11J, "ARM1136J-S R1",
generic_steppings },
{ 0, CPU_CLASS_NONE, NULL, NULL }
};
@ -383,6 +389,7 @@ const struct cpu_classtab cpu_classes[] = {
{ "ARM10EJ", "CPU_ARM10" }, /* CPU_CLASS_ARM10EJ */
{ "SA-1", "CPU_SA110" }, /* CPU_CLASS_SA1 */
{ "XScale", "CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */
{ "ARM11J", "CPU_ARM11" }, /* CPU_CLASS_ARM11J */
};
/*
@ -458,6 +465,7 @@ identify_arm_cpu(struct device *dv, struct cpu_info *ci)
case CPU_CLASS_ARM10EJ:
case CPU_CLASS_SA1:
case CPU_CLASS_XSCALE:
case CPU_CLASS_ARM11J:
if ((ci->ci_ctrl & CPU_CONTROL_DC_ENABLE) == 0)
aprint_normal(" DC disabled");
else
@ -542,6 +550,9 @@ identify_arm_cpu(struct device *dv, struct cpu_info *ci)
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
case CPU_CLASS_XSCALE:
#endif
#ifdef CPU_ARM11
case CPU_CLASS_ARM11J:
#endif
break;
default:

files.arm

@ -1,4 +1,4 @@
# $NetBSD: files.arm,v 1.77 2004/04/03 04:34:40 bsh Exp $
# $NetBSD: files.arm,v 1.78 2005/06/03 15:55:56 rearnsha Exp $
# temporary define to allow easy moving to ../arch/arm/arm32
defflag ARM32
@ -6,10 +6,10 @@ defflag ARM32
# CPU types. Make sure to update <arm/cpuconf.h> if you change this list.
defflag opt_cputypes.h CPU_ARM2 CPU_ARM250 CPU_ARM3
defflag opt_cputypes.h CPU_ARM6 CPU_ARM7 CPU_ARM7TDMI CPU_ARM8
CPU_ARM9 CPU_ARM10 CPU_SA110 CPU_SA1100
CPU_SA1110 CPU_IXP12X0 CPU_XSCALE_80200
CPU_XSCALE_80321 CPU_XSCALE_PXA2X0
CPU_XSCALE_IXP425
CPU_ARM9 CPU_ARM10 CPU_ARM11 CPU_SA110
CPU_SA1100 CPU_SA1110 CPU_IXP12X0
CPU_XSCALE_80200 CPU_XSCALE_80321
CPU_XSCALE_PXA2X0 CPU_XSCALE_IXP425
defparam opt_cpuoptions.h XSCALE_CCLKCFG
defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_THROUGH
@ -89,6 +89,7 @@ file arch/arm/arm/cpufunc_asm_arm7tdmi.S cpu_arm7tdmi
file arch/arm/arm/cpufunc_asm_arm8.S cpu_arm8
file arch/arm/arm/cpufunc_asm_arm9.S cpu_arm9
file arch/arm/arm/cpufunc_asm_arm10.S cpu_arm10
file arch/arm/arm/cpufunc_asm_arm11.S cpu_arm11
file arch/arm/arm/cpufunc_asm_armv4.S cpu_arm9 | cpu_arm10 |
cpu_sa110 |
cpu_sa1100 |
@ -98,6 +99,7 @@ file arch/arm/arm/cpufunc_asm_armv4.S cpu_arm9 | cpu_arm10 |
cpu_xscale_80321 |
cpu_xscale_ixp425 |
cpu_xscale_pxa2x0
file arch/arm/arm/cpufunc_asm_armv5.S cpu_arm10 | cpu_arm11
file arch/arm/arm/cpufunc_asm_sa1.S cpu_sa110 | cpu_sa1100 |
cpu_sa1110 |
cpu_ixp12x0
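With these dependencies in place, a port kernel picks up the new code
just by selecting the CPU type; a hypothetical config fragment:

# Hypothetical kernel config fragment: selecting the CPU type pulls in
# cpufunc_asm_arm11.S and cpufunc_asm_armv5.S via the rules above.
options 	CPU_ARM11	# ARM1136, run in legacy-v5 mode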

cpuconf.h

@ -1,4 +1,4 @@
/* $NetBSD: cpuconf.h,v 1.9 2004/08/21 11:08:20 rearnsha Exp $ */
/* $NetBSD: cpuconf.h,v 1.10 2005/06/03 15:55:56 rearnsha Exp $ */
/*
* Copyright (c) 2002 Wasabi Systems, Inc.
@ -58,6 +58,7 @@
defined(CPU_ARM7TDMI) + \
defined(CPU_ARM8) + defined(CPU_ARM9) + \
defined(CPU_ARM10) + \
defined(CPU_ARM11) + \
defined(CPU_SA110) + defined(CPU_SA1100) + \
defined(CPU_SA1110) + \
defined(CPU_IXP12X0) + \
@ -103,12 +104,19 @@
#define ARM_ARCH_5 0
#endif
#define ARM_NARCH (ARM_ARCH_2 + ARM_ARCH_3 + ARM_ARCH_4 + ARM_ARCH_5)
#if defined(CPU_ARM11)
#define ARM_ARCH_6 1
#else
#define ARM_ARCH_6 0
#endif
#define ARM_NARCH (ARM_ARCH_2 + ARM_ARCH_3 + ARM_ARCH_4 + \
ARM_ARCH_5 + ARM_ARCH_6)
#if ARM_NARCH == 0
#error ARM_NARCH is 0
#endif
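A worked example of the arithmetic (assumed flag assignments): a kernel
built with both CPU_ARM10 and CPU_ARM11 gets ARM_ARCH_5 = 1 and
ARM_ARCH_6 = 1, so ARM_NARCH = 2 and multi-architecture handling is
compiled in; with no CPU_* option defined at all, every term is 0 and
the #error above fires at build time.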
#if ARM_ARCH_5
#if ARM_ARCH_5 || ARM_ARCH_6
/*
* We could support Thumb code on v4T, but the lack of clean interworking
* makes that hard.
@ -140,7 +148,8 @@
#if !defined(_KERNEL_OPT) || \
(defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM10))
defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM10) || \
defined(CPU_ARM11))
#define ARM_MMU_GENERIC 1
#else
#define ARM_MMU_GENERIC 0

cpufunc.h

@ -1,4 +1,4 @@
/* $NetBSD: cpufunc.h,v 1.32 2005/06/02 19:32:03 uwe Exp $ */
/* $NetBSD: cpufunc.h,v 1.33 2005/06/03 15:55:56 rearnsha Exp $ */
/*
* Copyright (c) 1997 Mark Brinicombe.
@ -340,25 +340,44 @@ void arm10_setttb __P((u_int));
void arm10_tlb_flushID_SE __P((u_int));
void arm10_tlb_flushI_SE __P((u_int));
void arm10_icache_sync_all __P((void));
void arm10_icache_sync_range __P((vaddr_t, vsize_t));
void arm10_dcache_wbinv_all __P((void));
void arm10_dcache_wbinv_range __P((vaddr_t, vsize_t));
void arm10_dcache_inv_range __P((vaddr_t, vsize_t));
void arm10_dcache_wb_range __P((vaddr_t, vsize_t));
void arm10_idcache_wbinv_all __P((void));
void arm10_idcache_wbinv_range __P((vaddr_t, vsize_t));
void arm10_context_switch __P((void));
void arm10_setup __P((char *));
#endif
extern unsigned arm10_dcache_sets_max;
extern unsigned arm10_dcache_sets_inc;
extern unsigned arm10_dcache_index_max;
extern unsigned arm10_dcache_index_inc;
#ifdef CPU_ARM11
void arm11_setttb __P((u_int));
void arm11_tlb_flushID_SE __P((u_int));
void arm11_tlb_flushI_SE __P((u_int));
void arm11_context_switch __P((void));
void arm11_setup __P((char *string));
void arm11_tlb_flushID __P((void));
void arm11_tlb_flushI __P((void));
void arm11_tlb_flushD __P((void));
void arm11_tlb_flushD_SE __P((u_int va));
void arm11_drain_writebuf __P((void));
#endif
#if defined (CPU_ARM10) || defined (CPU_ARM11)
void armv5_icache_sync_all __P((void));
void armv5_icache_sync_range __P((vaddr_t, vsize_t));
void armv5_dcache_wbinv_all __P((void));
void armv5_dcache_wbinv_range __P((vaddr_t, vsize_t));
void armv5_dcache_inv_range __P((vaddr_t, vsize_t));
void armv5_dcache_wb_range __P((vaddr_t, vsize_t));
void armv5_idcache_wbinv_all __P((void));
void armv5_idcache_wbinv_range __P((vaddr_t, vsize_t));
extern unsigned armv5_dcache_sets_max;
extern unsigned armv5_dcache_sets_inc;
extern unsigned armv5_dcache_index_max;
extern unsigned armv5_dcache_index_inc;
#endif
#if defined(CPU_ARM9) || defined(CPU_ARM10) || defined(CPU_SA110) || \