From 9e1b7856268ceba53f634e7d599c3a3047729c23 Mon Sep 17 00:00:00 2001 From: thorpej Date: Sat, 10 Nov 2001 23:14:08 +0000 Subject: [PATCH] Split all the model/version-specific cpufuncs into separate files corresponding to the model/version. --- sys/arch/arm/arm/cpufunc_asm.S | 1154 +---------------------- sys/arch/arm/arm/cpufunc_asm_arm3.S | 60 ++ sys/arch/arm/arm/cpufunc_asm_arm67.S | 110 +++ sys/arch/arm/arm/cpufunc_asm_arm7tdmi.S | 99 ++ sys/arch/arm/arm/cpufunc_asm_arm8.S | 285 ++++++ sys/arch/arm/arm/cpufunc_asm_arm9.S | 139 +++ sys/arch/arm/arm/cpufunc_asm_armv4.S | 66 ++ sys/arch/arm/arm/cpufunc_asm_sa1.S | 383 ++++++++ sys/arch/arm/arm/cpufunc_asm_xscale.S | 321 +++++++ sys/arch/arm/conf/files.arm | 11 +- 10 files changed, 1478 insertions(+), 1150 deletions(-) create mode 100644 sys/arch/arm/arm/cpufunc_asm_arm3.S create mode 100644 sys/arch/arm/arm/cpufunc_asm_arm67.S create mode 100644 sys/arch/arm/arm/cpufunc_asm_arm7tdmi.S create mode 100644 sys/arch/arm/arm/cpufunc_asm_arm8.S create mode 100644 sys/arch/arm/arm/cpufunc_asm_arm9.S create mode 100644 sys/arch/arm/arm/cpufunc_asm_armv4.S create mode 100644 sys/arch/arm/arm/cpufunc_asm_sa1.S create mode 100644 sys/arch/arm/arm/cpufunc_asm_xscale.S diff --git a/sys/arch/arm/arm/cpufunc_asm.S b/sys/arch/arm/arm/cpufunc_asm.S index 9d535f23b878..c649496c4dd3 100644 --- a/sys/arch/arm/arm/cpufunc_asm.S +++ b/sys/arch/arm/arm/cpufunc_asm.S @@ -1,11 +1,6 @@ -/* $NetBSD: cpufunc_asm.S,v 1.10 2001/10/18 14:10:07 rearnsha Exp $ */ +/* $NetBSD: cpufunc_asm.S,v 1.11 2001/11/10 23:14:08 thorpej Exp $ */ /* - * xscale support code Copyright (c) 2001 Matt Thomas - * arm7tdmi support code Copyright (c) 2001 John Fremlin - * arm8 support code Copyright (c) 1997 ARM Limited - * arm8 support code Copyright (c) 1997 Causality Limited - * arm9 support code Copyright (C) 2001 ARM Limited * Copyright (c) 1997,1998 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. @@ -49,10 +44,6 @@ #include #include -sp .req r13 -lr .req r14 -pc .req r15 - .text .align 0 @@ -97,9 +88,11 @@ ENTRY(cpufunc_faultaddress) * All other registers are CPU architecture specific */ -/*ENTRY(cpufunc_control) +#if 0 /* See below. */ +ENTRY(cpufunc_control) mcr p15, 0, r0, c1, c0, 0 - mov pc, lr*/ + mov pc, lr +#endif ENTRY(cpufunc_domains) mcr p15, 0, r0, c3, c0, 0 @@ -125,1143 +118,6 @@ ENTRY(cpufunc_control) mov r0, r3 /* Return old value */ mov pc, lr -#ifdef CPU_ARM3 - /* The ARM3 has its control register in a different place. 
*/ -ENTRY(arm3_control) - mrc p15, 0, r3, c2, c0, 0 /* Read the control register */ - bic r2, r3, r0 /* Clear bits */ - eor r2, r2, r1 /* XOR bits */ - - teq r2, r3 /* Only write if there is a change */ - mcrne p15, 0, r2, c2, c0, 0 /* Write new control register */ - mov r0, r3 /* Return old value */ - mov pc, lr -#endif - -#ifdef CPU_ARM8 -ENTRY(arm8_clock_config) - mrc p15, 0, r3, c15, c0, 0 /* Read the clock register */ - bic r2, r3, #0x11 /* turn off dynamic clocking - and clear L bit */ - mcr p15, 0, r2, c15, c0, 0 /* Write clock register */ - - bic r2, r3, r0 /* Clear bits */ - eor r2, r2, r1 /* XOR bits */ - bic r2, r2, #0x10 /* clear the L bit */ - - bic r1, r2, #0x01 /* still keep dynamic clocking off */ - mcr p15, 0, r1, c15, c0, 0 /* Write clock register */ - mov r0, r0 /* NOP */ - mov r0, r0 /* NOP */ - mov r0, r0 /* NOP */ - mov r0, r0 /* NOP */ - mcr p15, 0, r2, c15, c0, 0 /* Write clock register */ - mov r0, r3 /* Return old value */ - mov pc, lr -#endif /* CPU_ARM8 */ - -/* - * Functions to set the MMU Translation Table Base register - */ - -#if defined(CPU_ARM6) || defined(CPU_ARM7) -ENTRY(arm67_setttb) - - /* - * We need to flush the cache as it uses virtual addresses that - * are about to change - */ - mcr p15, 0, r0, c7, c0, 0 - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c5, c0, 0 - - /* For good measure we will flush the IDC as well */ - mcr p15, 0, r0, c7, c0, 0 - - /* Make sure that pipeline is emptied */ - mov r0, r0 - mov r0, r0 - - mov pc, lr -#endif /* CPU_ARM6 || CPU_ARM7 */ - -#ifdef CPU_ARM7TDMI - -ENTRY(arm7tdmi_setttb) - mov r1,r0 /* store the ttb in a safe place */ - mov r2,lr /* ditto with lr */ - - bl _C_LABEL(arm7tdmi_cache_flushID) - - /* Write the TTB */ - mcr p15, 0, r1, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - bl _C_LABEL(arm7tdmi_tlb_flushID) - /* For good measure we will flush the IDC as well */ - bl _C_LABEL(arm7tdmi_cache_flushID) - - mov pc, r2 -#endif /* CPU_7TDMI */ - -#ifdef CPU_ARM8 -ENTRY(arm8_setttb) - /* We need to clean and flush the cache as it uses virtual - * addresses that are about to change - */ - mrs r3, cpsr_all - orr r1, r3, #(I32_bit | F32_bit) - msr cpsr_all , r1 - - stmfd sp!, {r0-r3, lr} - bl _C_LABEL(arm8_cache_cleanID) - ldmfd sp!, {r0-r3, lr} - mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 - - /* For good measure we will flush the IDC as well */ - mcr p15, 0, r0, c7, c7, 0 - - /* Make sure that pipeline is emptied */ - mov r0, r0 - mov r0, r0 - msr cpsr_all , r3 - - mov pc, lr -#endif /* CPU_ARM8 */ - -#ifdef CPU_ARM9 -ENTRY(arm9_setttb) - /* - * Since we use the caches in write-through mode, we only have to - * drain the write buffers and flush the caches. 
- */ - mcr p15, 0, r0, c7, c7, 0 /* Flush I+D Caches */ - mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */ - - mcr p15, 0, r0, c2, c0, 0 /* Load new ttb */ - - mcr p15, 0, r0, c8, c7, 0 /* Invalidate I+D TLBs */ - mov pc, lr -#endif /* CPU_ARM9 */ - -#if defined(CPU_SA110) || defined(CPU_XSCALE) -Lblock_userspace_access: - .word _C_LABEL(block_userspace_access) -#endif - -#if defined(CPU_SA110) -ENTRY(sa110_setttb) - /* We need to flush the cache as it uses virtual addresses that are about to change */ -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr_all - orr r1, r3, #(I32_bit | F32_bit) - msr cpsr_all , r1 -#else - ldr r3, Lblock_userspace_access - ldr r2, [r3] - orr r1, r2, #1 - str r1, [r3] -#endif - stmfd sp!, {r0-r3, lr} - bl _C_LABEL(sa110_cache_cleanID) - ldmfd sp!, {r0-r3, lr} - mcr p15, 0, r0, c7, c5, 0 /* invalidate icache & BTB */ - mcr p15, 0, r0, c7, c10, 4 /* drain write (& fill) buffer */ - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 /* set translation table base */ - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* invalidate I&D TLB */ - - /* The cleanID above means we only need to flush the I cache here */ - mcr p15, 0, r0, c7, c5, 0 /* invalidate icache & BTB */ - - /* Make sure that pipeline is emptied */ - mov r0, r0 - mov r0, r0 -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_all, r3 -#else - str r2, [r3] -#endif - mov pc, lr -#endif /* CPU_SA110 */ - -#if defined(CPU_XSCALE) -ENTRY(xscale_setttb) - /* We need to flush the cache as it uses virtual addresses that are about to change */ -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr_all - orr r1, r3, #(I32_bit | F32_bit) - msr cpsr_all , r1 -#else - ldr r3, Lblock_userspace_access - ldr r2, [r3] - orr r1, r2, #1 - str r1, [r3] -#endif - stmfd sp!, {r0-r3, lr} - bl _C_LABEL(xscale_cache_cleanID) - ldmfd sp!, {r0-r3, lr} - mcr p15, 0, r0, c7, c5, 0 /* invalidate icache & BTB */ - mcr p15, 0, r0, c7, c10, 4 /* drain write (& fill) buffer */ - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 /* set translation table base */ - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* invalidate I&D TLB */ - - /* The cleanID above means we only need to flush the I cache here */ - mcr p15, 0, r0, c7, c5, 0 /* invalidate icache & BTB */ - - /* Make sure that pipeline is emptied */ - mrc p15, 0, r0, c2, c0, 0 /* read some register in CP15 */ - mov r0, r0 /* for the read to complete */ - sub pc, pc, #4 /* branch to next instruction */ - /* (flush the instruction pipeline) */ -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_all, r3 -#else - str r2, [r3] -#endif - mov pc, lr -#endif /* CPU_XSCALE */ - -/* - * TLB functions - */ - -#if defined(CPU_ARM6) || defined(CPU_ARM7) -ENTRY(arm67_tlb_flush) - mcr p15, 0, r0, c5, c0, 0 - mov pc, lr - -ENTRY(arm67_tlb_purge) - mcr p15, 0, r0, c6, c0, 0 - mov pc, lr -#endif /* CPU_ARM6 || CPU_ARM7 */ - -#ifdef CPU_ARM7TDMI -ENTRY(arm7tdmi_tlb_flushID) - mov r0,#0 - mcr p15, 0, r0, c8, c7, 0 - mov pc,lr - -ENTRY(arm7tdmi_tlb_flushID_SE) - mcr p15, 0, r0, c8, c7, 1 - mov pc,lr -#endif -#ifdef CPU_ARM8 -ENTRY(arm8_tlb_flushID) - mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */ - mov pc, lr - -ENTRY(arm8_tlb_flushID_SE) - mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */ - mov pc, lr -#endif /* CPU_ARM8 */ - -#if defined (CPU_ARM9) || defined(CPU_SA110) || defined(CPU_XSCALE) -ENTRY(armv4_tlb_flushID) - mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */ - mov pc, lr - -#if defined(CPU_SA110) -ENTRY(sa110_tlb_flushID_SE) - mcr p15, 0, 
r0, c8, c6, 1 /* flush D tlb single entry */ - mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */ - mov pc, lr -#endif /* CPU_SA110 */ - -#if defined(CPU_ARM9) -ENTRY(arm9_tlb_flushID_SE) - mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ - mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ - mov pc, lr -#endif - -#if defined(CPU_XSCALE) -ENTRY(xscale_tlb_flushID_SE) - mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ - mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ - mcr p15, 0, r0, c7, c5, 6 /* inv. branch target buffer */ - mov pc, lr -#endif /* CPU_XSCALE */ - -ENTRY(armv4_tlb_flushI) - mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */ - mov pc, lr - -ENTRY(armv4_tlb_flushD) - mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */ - mov pc, lr - -ENTRY(armv4_tlb_flushD_SE) - mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ - mov pc, lr -#endif /* CPU_ARM9 || CPU_SA110 || CPU_XSCALE */ - -/* - * Cache functions - */ - -#if defined(CPU_ARM3) -ENTRY(arm3_cache_flush) - mcr p15, 0, r0, c1, c0, 0 - mov pc, lr -#endif /* CPU_ARM3 */ - -#if defined(CPU_ARM6) || defined(CPU_ARM7) -ENTRY(arm67_cache_flush) - mcr p15, 0, r0, c7, c0, 0 - mov pc, lr -#endif /* CPU_ARM6 || CPU_ARM7 */ - -#ifdef CPU_ARM7TDMI -ENTRY(arm7tdmi_cache_flushID) - mov r0, #0 - - mcr p15, 0, r0, c7, c7, 0 - /* Make sure that the pipeline is emptied */ - mov r0, r0 - mov r0, r0 - - mov pc,lr -#endif - -#ifdef CPU_ARM8 -ENTRY(arm8_cache_flushID) - mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ - mov pc, lr - -ENTRY(arm8_cache_flushID_E) - mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */ - mov pc, lr - -ENTRY(arm8_cache_cleanID) - mov r0, #0x00000000 - -Larm8_cache_cleanID_loop: - mov r2, r0 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - - adds r0, r0, #0x04000000 - bne Larm8_cache_cleanID_loop - - mov pc, lr - -ENTRY(arm8_cache_cleanID_E) - mcr p15, 0, r0, c7, c11, 1 /* clean ID single entry */ - mov pc, lr - -ENTRY(arm8_cache_purgeID) - /* - * ARM810 bug 3 - * - * Clean and invalidate entry will not invalidate the entry - * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1) - * - * Instead of using the clean and invalidate entry operation - * use a separate clean and invalidate entry operations. - * i.e. 
- * mcr p15, 0, rd, c7, c11, 1 - * mcr p15, 0, rd, c7, c7, 1 - */ - - mov r0, #0x00000000 - - mrs r3, cpsr_all - orr r2, r3, #(I32_bit | F32_bit) - msr cpsr_all , r2 - -Larm8_cache_purgeID_loop: - mov r2, r0 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - add r2, r2, #0x10 - mcr p15, 0, r2, c7, c11, 1 - mcr p15, 0, r2, c7, c7, 1 - - adds r0, r0, #0x04000000 - bne Larm8_cache_purgeID_loop - - msr cpsr_all, r3 - mov pc, lr - -ENTRY(arm8_cache_purgeID_E) - /* - * ARM810 bug 3 - * - * Clean and invalidate entry will not invalidate the entry - * if the line was already clean. (mcr p15, 0, rd, c7, c15, 1) - * - * Instead of using the clean and invalidate entry operation - * use a separate clean and invalidate entry operations. - * i.e. - * mcr p15, 0, rd, c7, c11, 1 - * mcr p15, 0, rd, c7, c7, 1 - */ - mrs r3, cpsr_all - orr r2, r3, #(I32_bit | F32_bit) - msr cpsr_all , r2 - mcr p15, 0, r0, c7, c11, 1 /* clean ID single entry */ - mcr p15, 0, r0, c7, c7, 1 /* flush ID single entry */ - msr cpsr_all , r3 - mov pc, lr -#endif /* CPU_ARM8 */ - -#ifdef CPU_ARM9 -ENTRY(arm9_cache_flushID) - mcr p15, 0, r0, c7, c7, 0 /* Flush I+D cache */ - mov pc, lr - -ENTRY(arm9_cache_flushID_SE) - mcr p15, 0, r0, c7, c5, 1 /* Flush one entry from I cache */ - mcr p15, 0, r0, c7, c6, 1 /* Flush one entry from D cache */ - mov pc, lr - -ENTRY(arm9_cache_flushI) - mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */ - mov pc, lr - -ENTRY(arm9_cache_flushI_SE) - mcr p15, 0, r0, c7, c5, 1 /* Flush one entry from I cache */ - mov pc, lr - -ENTRY(arm9_cache_flushD) - mcr p15, 0, r0, c7, c6, 0 /* Flush D cache */ - mov pc, lr - -ENTRY(arm9_cache_flushD_SE) - mcr p15, 0, r0, c7, c6, 1 /* Flush one entry from D cache */ - mov pc, lr - -ENTRY(arm9_cache_cleanID) - mcr p15, 0, r0, c7, c10, 4 - mov pc, lr - -#endif /* CPU_ARM9 */ - -#if defined(CPU_SA110) || defined(CPU_XSCALE) -ENTRY_NP(xscale_cache_flushID) -ENTRY(sa110_cache_flushID) - mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ - mov pc, lr - -ENTRY_NP(xscale_cache_flushI) -ENTRY(sa110_cache_flushI) - mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ - mov pc, lr - -ENTRY_NP(xscale_cache_flushD) -ENTRY(sa110_cache_flushD) - mcr p15, 0, r0, c7, c6, 0 /* flush D cache */ - mov pc, lr - -#if defined(CPU_XSCALE) -ENTRY(xscale_cache_flushI_SE) - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - mcr p15, 0, r0, c7, c5, 6 /* inv. 
branch target buffer */ - mov pc, lr -#endif - -ENTRY_NP(xscale_cache_flushD_SE) -ENTRY(sa110_cache_flushD_SE) - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - mov pc, lr - -ENTRY_NP(xscale_cache_cleanD_E) -ENTRY(sa110_cache_cleanD_E) - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mov pc, lr -#endif /* CPU_SA110 || CPU_XSCALE */ - -#ifdef CPU_SA110 -/* - * Information for SA110 cache clean/purge functions - * - * The address of the blocks of memory to use - * The size of the block of memory to use - */ - - .data - .global _C_LABEL(sa110_cache_clean_addr) -_C_LABEL(sa110_cache_clean_addr): - .word 0xf0000000 - .global _C_LABEL(sa110_cache_clean_size) -_C_LABEL(sa110_cache_clean_size): - .word 0x00008000 - - .text -Lsa110_cache_clean_addr: - .word _C_LABEL(sa110_cache_clean_addr) -Lsa110_cache_clean_size: - .word _C_LABEL(sa110_cache_clean_size) - -ENTRY(sa110_cache_cleanID) -ENTRY(sa110_cache_cleanD) -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr_all - orr r0, r3, #(I32_bit | F32_bit) - msr cpsr_all , r0 -#else - ldr r3, Lblock_userspace_access - ldr ip, [r3] - orr r0, ip, #1 - str r0, [r3] -#endif - ldr r2, Lsa110_cache_clean_addr - ldmia r2, {r0, r1} -#ifdef DOUBLE_CACHE_CLEAN_BANK - eor r0, r0, r1 - str r0, [r2] -#endif - -Lsa110_cache_cleanD_loop: - ldr r2, [r0], #32 - subs r1, r1, #32 - bne Lsa110_cache_cleanD_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_all , r3 -#else - str ip, [r3] -#endif - mov pc, lr - -ENTRY(sa110_cache_purgeID) -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr_all - orr r0, r3, #(I32_bit | F32_bit) - msr cpsr_all , r0 -#else - ldr r3, Lblock_userspace_access - ldr ip, [r3] - orr r0, ip, #1 - str r0, [r3] -#endif - ldr r2, Lsa110_cache_clean_addr - ldmia r2, {r0, r1} -#ifdef DOUBLE_CACHE_CLEAN_BANK - eor r0, r0, r1 - str r0, [r2] -#endif - -Lsa110_cache_purgeID_loop: - ldr r2, [r0], #32 - subs r1, r1, #32 - bne Lsa110_cache_purgeID_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D flushed above) */ -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_all , r3 -#else - str ip, [r3] -#endif - mov pc, lr - -ENTRY(sa110_cache_purgeD) -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr_all - orr r0, r3, #(I32_bit | F32_bit) - msr cpsr_all , r0 -#else - ldr r3, Lblock_userspace_access - ldr ip, [r3] - orr r0, ip, #1 - str r0, [r3] -#endif - ldr r2, Lsa110_cache_clean_addr - ldmia r2, {r0, r1} -#ifdef DOUBLE_CACHE_CLEAN_BANK - eor r0, r0, r1 - str r0, [r2] -#endif - -Lsa110_cache_purgeD_loop: - ldr r2, [r0], #32 - subs r1, r1, #32 - bne Lsa110_cache_purgeD_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_all , r3 -#else - str ip, [r3] -#endif - mov pc, lr - -#endif /* CPU_SA110 */ - -#ifdef CPU_XSCALE -/* - * Information for XScale cache clean/purge functions - * - * The address of the blocks of memory to use - * The size of the block of memory to use - */ - - .data - .global _C_LABEL(xscale_cache_clean_addr) -_C_LABEL(xscale_cache_clean_addr): - .word 0xf0000000 - .global _C_LABEL(xscale_cache_clean_size) -_C_LABEL(xscale_cache_clean_size): - .word 0x00008000 - - .text -Lxscale_cache_clean_addr: - .word _C_LABEL(xscale_cache_clean_addr) -Lxscale_cache_clean_size: - .word _C_LABEL(xscale_cache_clean_size) - -ENTRY_NP(xscale_cache_syncI) -ENTRY_NP(xscale_cache_purgeID) - mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */ -ENTRY_NP(xscale_cache_cleanID) -ENTRY_NP(xscale_cache_purgeD) 
-ENTRY(xscale_cache_cleanD) -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr_all - orr r0, r3, #(I32_bit | F32_bit) - msr cpsr_all , r0 -#else - ldr r3, Lblock_userspace_access - ldr ip, [r3] - orr r0, ip, #1 - str r0, [r3] -#endif - ldr r2, Lxscale_cache_clean_addr - ldmia r2, {r0, r1} - add r0, r0, r1 - -Lxscale_cache_cleanD_loop: - subs r0, r0, #32 - mcr p15, 0, r0, c7, c2, 5 /* allocate cache line */ - subs r1, r1, #32 - bne Lxscale_cache_cleanD_loop - -/* - * It's expected that we only use the minidata cache for kernel - * addresses, so there is no need to purge it on context switch - */ -#ifdef CACHE_CLEAN_MINIDATA -/* - * Clean mini-data-cache - */ - mov r1, #64 -Lxscale_cache_cleanD_loop2: - ldr r3, [r0], #32 - subs r1, r1, #1 - bne Lxscale_cache_cleanD_loop2 -#endif - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_all , r3 -#else - str ip, [r3] -#endif - mov pc, lr - -#endif /* CPU_XSCALE */ - -#if defined(CPU_SA110) -ENTRY(sa110_cache_purgeID_E) - mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */ - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - mov pc, lr -#endif /* CPU_SA110 */ - -#if defined(CPU_XSCALE) -ENTRY(xscale_cache_purgeID_E) - mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */ - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - mcr p15, 0, r0, c7, c5, 6 /* inv. branch target buffer */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - mov pc, lr -#endif /* CPU_XSCALE */ - -#if defined(CPU_SA110) || defined(CPU_XSCALE) -ENTRY_NP(xscale_cache_purgeD_E) -ENTRY(sa110_cache_purgeD_E) - mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */ - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - mov pc, lr -#endif /* CPU_SA110 || CPU_XSCALE */ - -/* - * Other functions - */ - -#if defined(CPU_ARM9) || defined(CPU_SA110) || defined(CPU_XSCALE) -ENTRY(armv4_drain_writebuf) - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mov pc, lr -#endif /* CPU_ARM9 || CPU_SA110 || CPU_XSCALE */ - -/* - * Soft functions - */ - -#ifdef CPU_ARM9 -ENTRY(arm9_cache_syncI) - mcr p15, 0, r0, c7, c7, 0 /* Flush I+D caches */ - mov pc, lr - -ENTRY_NP(arm9_cache_flushID_rng) - b _C_LABEL(arm9_cache_flushID) - -ENTRY_NP(arm9_cache_flushD_rng) - /* Same as above, but D cache only */ - b _C_LABEL(arm9_cache_flushD) - -ENTRY_NP(arm9_cache_syncI_rng) - /* Similarly, for I cache sync */ - b _C_LABEL(arm9_cache_syncI) - -#endif /* CPU_ARM1020 */ - -#ifdef CPU_SA110 -ENTRY(sa110_cache_syncI) -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr_all - orr r0, r3, #(I32_bit | F32_bit) - msr cpsr_all , r0 -#else - ldr r3, Lblock_userspace_access - ldr ip, [r3] - orr r0, ip, #1 - str r0, [r3] -#endif - ldr r2, Lsa110_cache_clean_addr - ldmia r2, {r0, r1} -#ifdef DOUBLE_CACHE_CLEAN_BANK - eor r0, r0, r1 - str r0, [r2] -#endif - -Lsa110_cache_syncI_loop: - ldr r2, [r0], #32 - subs r1, r1, #32 - bne Lsa110_cache_syncI_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_all , r3 -#else - str ip, [r3] -#endif - mov pc, lr - -ENTRY(sa110_cache_cleanID_rng) -ENTRY(sa110_cache_cleanD_rng) - cmp r1, #0x4000 - bcs _C_LABEL(sa110_cache_cleanID) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - 
-sa110_cache_cleanD_rng_loop: - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - add r0, r0, #32 - subs r1, r1, #32 - bpl sa110_cache_cleanD_rng_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mov pc, lr - -ENTRY(sa110_cache_purgeID_rng) - cmp r1, #0x4000 - bcs _C_LABEL(sa110_cache_purgeID) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -sa110_cache_purgeID_rng_loop: - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bpl sa110_cache_purgeID_rng_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ - - mov pc, lr - -ENTRY(sa110_cache_purgeD_rng) - cmp r1, #0x4000 - bcs _C_LABEL(sa110_cache_purgeD) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -sa110_cache_purgeD_rng_loop: - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bpl sa110_cache_purgeD_rng_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mov pc, lr - -ENTRY(sa110_cache_syncI_rng) - cmp r1, #0x4000 - bcs _C_LABEL(sa110_cache_syncI) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -sa110_cache_syncI_rng_loop: - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - add r0, r0, #32 - subs r1, r1, #32 - bpl sa110_cache_syncI_rng_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ - - mov pc, lr -#endif /* CPU_SA110 */ - -#ifdef CPU_XSCALE -/* - * xscale_cache_syncI is identical to xscale_cache_purgeID - */ -#if 0 -ENTRY(xscale_cache_syncI) -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr_all - orr r0, r3, #(I32_bit | F32_bit) - msr cpsr_all , r0 -#else - ldr r3, Lblock_userspace_access - ldr ip, [r3] - orr r0, ip, #1 - str r0, [r3] -#endif - ldr r2, Lxscale_cache_clean_addr - ldmia r2, {r0, r1} - eor r0, r0, r1 - str r0, [r2] - -Lxscale_cache_syncI_loop: - ldr r2, [r0], #32 - subs r1, r1, #32 - bne Lxscale_cache_syncI_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_all , r3 -#else - str ip, [r3] -#endif - mov pc, lr -#endif - -ENTRY(xscale_cache_cleanID_rng) -ENTRY(xscale_cache_cleanD_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscale_cache_cleanID) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -xscale_cache_cleanD_rng_loop: - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - add r0, r0, #32 - subs r1, r1, #32 - bpl xscale_cache_cleanD_rng_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mov pc, lr - -ENTRY(xscale_cache_purgeID_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscale_cache_purgeID) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -xscale_cache_purgeID_rng_loop: - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - mcr p15, 0, r0, c7, c5, 6 /* inv. 
branch target buffer */ - add r0, r0, #32 - subs r1, r1, #32 - bpl xscale_cache_purgeID_rng_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - mov pc, lr - -ENTRY(xscale_cache_purgeD_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscale_cache_purgeD) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -xscale_cache_purgeD_rng_loop: - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bpl xscale_cache_purgeD_rng_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mov pc, lr - -ENTRY(xscale_cache_syncI_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscale_cache_syncI) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -xscale_cache_syncI_rng_loop: - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - mcr p15, 0, r0, c7, c5, 6 /* inv. branch target buffer */ - add r0, r0, #32 - subs r1, r1, #32 - bpl xscale_cache_syncI_rng_loop - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - mov pc, lr -#endif /* CPU_SA110 */ - -/* - * *_context_switch() - * - * These are CPU specific parts of the context switcher cpu_switch() - * These functions actually perform the TTB reload. - * - * NOTE: Special calling convention - * r1, r4-r13 must be preserved - */ - -#if defined(CPU_ARM6) || defined(CPU_ARM7) -ENTRY(arm67_context_switch) - /* Switch the memory to the new process */ - - /* For good measure we will flush the IDC as well */ - mcr p15, 0, r0, c7, c0, 0 /* flush cache */ - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c5, c0, 0 - - /* For good measure we will flush the IDC as well */ -/* mcr p15, 0, r0, c7, c0, 0*/ - - /* Make sure that pipeline is emptied */ - mov r0, r0 - mov r0, r0 - mov pc, lr -#endif - -#ifdef CPU_ARM7TDMI -ENTRY(arm7tdmi_context_switch) - b arm7tdmi_setttb -#endif -#ifdef CPU_ARM8 -ENTRY(arm8_context_switch) - /* Switch the memory to the new process */ - - /* For good measure we will flush the IDC as well */ - mcr p15, 0, r0, c7, c7, 0 /* flush i+d cache */ - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* flush the i+d tlb */ - - /* For good measure we will flush the IDC as well */ -/* mcr p15, 0, r0, c7, c7, 0*/ /* flush the i+d cache */ - - /* Make sure that pipeline is emptied */ - mov r0, r0 - mov r0, r0 - mov pc, lr -#endif /* CPU_ARM8 */ - -#ifdef CPU_ARM9 -ENTRY(arm9_context_switch) - /* Switch the memory to the new process */ - - /* - * We can assume that the caches will only contain kernel addresses - * at this point. So no need to flush them again. - */ - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c2, c0, 0 /* Set the new ttb */ - mcr p15, 0, r0, c8, c7, 0 /* And flush the I+D tlbs */ - - /* Paranoia -- make sure pipeline is empty. */ - nop - nop - nop - mov pc, lr -#endif /* CPU_ARM9 */ - -#if defined(CPU_SA110) -ENTRY(sa110_context_switch) - /* Switch the memory to the new process */ - - /* - * CF_CACHE_PURGE_ID will ALWAYS be called prior to this - * Thus the data cache will contain only kernel data - * and the instruction cache will contain only kernel code - * and all the kernel mappings shared by all processes. 
- */ - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* flush the i+d tlb */ - - /* Make sure that pipeline is emptied */ - mov r0, r0 - mov r0, r0 - mov pc, lr -#endif /* CPU_SA110 */ - -#if defined(CPU_XSCALE) -ENTRY(xscale_context_switch) - /* Switch the memory to the new process */ - - /* - * CF_CACHE_PURGE_ID will ALWAYS be called prior to this - * Thus the data cache will contain only kernel data - * and the instruction cache will contain only kernel code - * and all the kernel mappings shared by all processes. - */ - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* flush the i+d tlb */ - - /* Make sure that pipeline is emptied */ - mrc p15, 0, r0, c2, c0, 0 /* read some register in CP15 */ - mov r0, r0 /* for the read to complete */ - sub pc, pc, #4 /* branch to next instruction */ - - /* return */ - mov pc, lr -#endif /* CPU_XSCALE */ - /* * other potentially useful software functions are: * clean D cache entry and flush I cache entry diff --git a/sys/arch/arm/arm/cpufunc_asm_arm3.S b/sys/arch/arm/arm/cpufunc_asm_arm3.S new file mode 100644 index 000000000000..27b26b19ffc7 --- /dev/null +++ b/sys/arch/arm/arm/cpufunc_asm_arm3.S @@ -0,0 +1,60 @@ +/* $NetBSD: cpufunc_asm_arm3.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */ + +/* + * Copyright (c) 1997,1998 Mark Brinicombe. + * Copyright (c) 1997 Causality Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Causality Limited. + * 4. The name of Causality Limited may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * ARM3 assembly functions for CPU / MMU / TLB specific operations + */ + +#include +#include + +/* + * The ARM3 has its own control register in a different place. 
+ */ +ENTRY(arm3_control) + mrc p15, 0, r3, c2, c0, 0 /* Read the control register */ + bic r2, r3, r0 /* Clear bits */ + eor r2, r2, r1 /* XOR bits */ + + teq r2, r3 /* Only write if there is a change */ + mcrne p15, 0, r2, c2, c0, 0 /* Write new control register */ + mov r0, r3 /* Return old value */ + mov pc, lr + +/* + * Cache functions. + */ + +ENTRY(arm3_cache_flush) + mcr p15, 0, r0, c1, c0, 0 + mov pc, lr diff --git a/sys/arch/arm/arm/cpufunc_asm_arm67.S b/sys/arch/arm/arm/cpufunc_asm_arm67.S new file mode 100644 index 000000000000..570967e5e5d9 --- /dev/null +++ b/sys/arch/arm/arm/cpufunc_asm_arm67.S @@ -0,0 +1,110 @@ +/* $NetBSD: cpufunc_asm_arm67.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */ + +/* + * Copyright (c) 1997,1998 Mark Brinicombe. + * Copyright (c) 1997 Causality Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Causality Limited. + * 4. The name of Causality Limited may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * ARM6/ARM7 assembly functions for CPU / MMU / TLB specific operations + */ + +#include +#include + +/* + * Functions to set the MMU Translation Table Base register + * + * We need to clean and flush the cache as it uses virtual + * addresses that are about to change. + */ +ENTRY(arm67_setttb) + mcr p15, 0, r0, c7, c0, 0 + + /* Write the TTB */ + mcr p15, 0, r0, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + mcr p15, 0, r0, c5, c0, 0 + + /* For good measure we will flush the IDC as well */ + mcr p15, 0, r0, c7, c0, 0 + + /* Make sure that pipeline is emptied */ + mov r0, r0 + mov r0, r0 + + mov pc, lr + +/* + * TLB functions + */ +ENTRY(arm67_tlb_flush) + mcr p15, 0, r0, c5, c0, 0 + mov pc, lr + +ENTRY(arm67_tlb_purge) + mcr p15, 0, r0, c6, c0, 0 + mov pc, lr + +/* + * Cache functions + */ +ENTRY(arm67_cache_flush) + mcr p15, 0, r0, c7, c0, 0 + mov pc, lr + +/* + * Context switch. + * + * These is the CPU-specific parts of the context switcher cpu_switch() + * These functions actually perform the TTB reload. 
+ * + * NOTE: Special calling convention + * r1, r4-r13 must be preserved + */ +ENTRY(arm67_context_switch) + /* For good measure we will flush the IDC as well */ + mcr p15, 0, r0, c7, c0, 0 /* flush cache */ + + /* Write the TTB */ + mcr p15, 0, r0, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + mcr p15, 0, r0, c5, c0, 0 + +#if 0 + /* For good measure we will flush the IDC as well */ + mcr p15, 0, r0, c7, c0, 0 /* flush cache */ +#endif + + /* Make sure that pipeline is emptied */ + mov r0, r0 + mov r0, r0 + mov pc, lr diff --git a/sys/arch/arm/arm/cpufunc_asm_arm7tdmi.S b/sys/arch/arm/arm/cpufunc_asm_arm7tdmi.S new file mode 100644 index 000000000000..f29661296d6d --- /dev/null +++ b/sys/arch/arm/arm/cpufunc_asm_arm7tdmi.S @@ -0,0 +1,99 @@ +/* $NetBSD: cpufunc_asm_arm7tdmi.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */ + +/* + * Copyright (c) 2001 John Fremlin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Causality Limited. + * 4. The name of Causality Limited may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * ARM7TDMI assembly functions for CPU / MMU / TLB specific operations + */ + +#include +#include + +/* + * Functions to set the MMU Translation Table Base register + * + * We need to clean and flush the cache as it uses virtual + * addresses that are about to change. 
+ */ +ENTRY(arm7tdmi_setttb) + mov r1, r0 /* store the TTB in a safe place */ + mov r2, lr /* ditto with lr */ + + bl _C_LABEL(arm7tdmi_cache_flushID) + + /* Write the TTB */ + mcr p15, 0, r1, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + bl _C_LABEL(arm7tdmi_tlb_flushID) + + /* For good measure we will flush the IDC as well */ + bl _C_LABEL(arm7tdmi_cache_flushID) + + mov pc, r2 + +/* + * TLB functions + */ +ENTRY(arm7tdmi_tlb_flushID) + mov r0, #0 + mcr p15, 0, r0, c8, c7, 0 + mov pc, lr + +ENTRY(arm7tdmi_tlb_flushID_SE) + mcr p15, 0, r0, c8, c7, 1 + mov pc, lr + +/* + * Cache functions + */ +ENTRY(arm7tdmi_cache_flushID) + mov r0, #0 + + mcr p15, 0, r0, c7, c7, 0 + + /* Make sure that the pipeline is emptied */ + mov r0, r0 + mov r0, r0 + + mov pc, lr + +/* + * Context switch. + * + * These is the CPU-specific parts of the context switcher cpu_switch() + * These functions actually perform the TTB reload. + * + * NOTE: Special calling convention + * r1, r4-r13 must be preserved + */ +ENTRY(arm7tdmi_context_switch) + b _C_LABEL(arm7tdmi_setttb) diff --git a/sys/arch/arm/arm/cpufunc_asm_arm8.S b/sys/arch/arm/arm/cpufunc_asm_arm8.S new file mode 100644 index 000000000000..395a72c4bf2c --- /dev/null +++ b/sys/arch/arm/arm/cpufunc_asm_arm8.S @@ -0,0 +1,285 @@ +/* $NetBSD: cpufunc_asm_arm8.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */ + +/* + * Copyright (c) 1997 ARM Limited + * Copyright (c) 1997 Causality Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Causality Limited. + * 4. The name of Causality Limited may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * ARM8 assembly functions for CPU / MMU / TLB specific operations + */ + +#include +#include + +ENTRY(arm8_clock_config) + mrc p15, 0, r3, c15, c0, 0 /* Read the clock register */ + bic r2, r3, #0x11 /* turn off dynamic clocking + and clear L bit */ + mcr p15, 0, r2, c15, c0, 0 /* Write clock register */ + + bic r2, r3, r0 /* Clear bits */ + eor r2, r2, r1 /* XOR bits */ + bic r2, r2, #0x10 /* clear the L bit */ + + bic r1, r2, #0x01 /* still keep dynamic clocking off */ + mcr p15, 0, r1, c15, c0, 0 /* Write clock register */ + mov r0, r0 /* NOP */ + mov r0, r0 /* NOP */ + mov r0, r0 /* NOP */ + mov r0, r0 /* NOP */ + mcr p15, 0, r2, c15, c0, 0 /* Write clock register */ + mov r0, r3 /* Return old value */ + mov pc, lr + +/* + * Functions to set the MMU Translation Table Base register + * + * We need to clean and flush the cache as it uses virtual + * addresses that are about to change. + */ +ENTRY(arm8_setttb) + mrs r3, cpsr_all + orr r1, r3, #(I32_bit | F32_bit) + msr cpsr_all, r1 + + stmfd sp!, {r0-r3, lr} + bl _C_LABEL(arm8_cache_cleanID) + ldmfd sp!, {r0-r3, lr} + mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ + + /* Write the TTB */ + mcr p15, 0, r0, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + mcr p15, 0, r0, c8, c7, 0 + + /* For good measure we will flush the IDC as well */ + mcr p15, 0, r0, c7, c7, 0 + + /* Make sure that pipeline is emptied */ + mov r0, r0 + mov r0, r0 + msr cpsr_all, r3 + + mov pc, lr + +/* + * TLB functions + */ +ENTRY(arm8_tlb_flushID) + mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */ + mov pc, lr + +ENTRY(arm8_tlb_flushID_SE) + mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */ + mov pc, lr + +/* + * Cache functions + */ +ENTRY(arm8_cache_flushID) + mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ + mov pc, lr + +ENTRY(arm8_cache_flushID_E) + mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */ + mov pc, lr + +ENTRY(arm8_cache_cleanID) + mov r0, #0x00000000 + +Larm8_cache_cleanID_loop: + mov r2, r0 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + + adds r0, r0, #0x04000000 + bne Larm8_cache_cleanID_loop + + mov pc, lr + +ENTRY(arm8_cache_cleanID_E) + mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */ + mov pc, lr + +ENTRY(arm8_cache_purgeID) + /* + * ARM810 bug 3 + * + * Clean and invalidate entry will not invalidate the entry + * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1) + * + * Instead of using the clean and invalidate entry operation + * use a separate clean and invalidate entry operations. + * i.e. 
+ * mcr p15, 0, rd, c7, c11, 1 + * mcr p15, 0, rd, c7, c7, 1 + */ + + mov r0, #0x00000000 + + mrs r3, cpsr_all + orr r2, r3, #(I32_bit | F32_bit) + msr cpsr_all, r2 + +Larm8_cache_purgeID_loop: + mov r2, r0 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + add r2, r2, #0x10 + mcr p15, 0, r2, c7, c11, 1 + mcr p15, 0, r2, c7, c7, 1 + + adds r0, r0, #0x04000000 + bne Larm8_cache_purgeID_loop + + msr cpsr_all, r3 + mov pc, lr + +ENTRY(arm8_cache_purgeID_E) + /* + * ARM810 bug 3 + * + * Clean and invalidate entry will not invalidate the entry + * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1) + * + * Instead of using the clean and invalidate entry operation + * use a separate clean and invalidate entry operations. + * i.e. + * mcr p15, 0, rd, c7, c11, 1 + * mcr p15, 0, rd, c7, c7, 1 + */ + mrs r3, cpsr_all + orr r2, r3, #(I32_bit | F32_bit) + msr cpsr_all, r2 + mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */ + mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */ + msr cpsr_all, r3 + mov pc, lr + +/* + * Context switch. + * + * These is the CPU-specific parts of the context switcher cpu_switch() + * These functions actually perform the TTB reload. + * + * NOTE: Special calling convention + * r1, r4-r13 must be preserved + */ +ENTRY(arm8_context_switch) + /* For good measure we will flush the IDC as well */ + mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ + + /* Write the TTB */ + mcr p15, 0, r0, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ + +#if 0 + /* For good measure we will flush the IDC as well */ + mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ +#endif + + /* Make sure that pipeline is emptied */ + mov r0, r0 + mov r0, r0 + mov pc, lr diff --git a/sys/arch/arm/arm/cpufunc_asm_arm9.S b/sys/arch/arm/arm/cpufunc_asm_arm9.S new file mode 100644 index 000000000000..abcac313a5f6 --- /dev/null +++ b/sys/arch/arm/arm/cpufunc_asm_arm9.S @@ -0,0 +1,139 @@ +/* $NetBSD: cpufunc_asm_arm9.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */ + +/* + * Copyright (c) 2001 ARM Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Causality Limited. + * 4. The name of Causality Limited may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * ARM9 assembly functions for CPU / MMU / TLB specific operations + */ + +#include +#include + +/* + * Functions to set the MMU Translation Table Base register + * + * We need to clean and flush the cache as it uses virtual + * addresses that are about to change. + */ +ENTRY(arm9_setttb) + /* + * Since we use the caches in write-through mode, we only have to + * drain the write buffers and flush the caches. + */ + mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */ + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + + mcr p15, 0, r0, c2, c0, 0 /* load new TTB */ + + mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */ + mov pc, lr + +/* + * TLB functions + */ +ENTRY(arm9_tlb_flushID_SE) + mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ + mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ + mov pc, lr + +/* + * Cache functions + */ +ENTRY(arm9_cache_flushID) + mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ + mov pc, lr + +ENTRY(arm9_cache_flushID_SE) + mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */ + mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */ + mov pc, lr + +ENTRY(arm9_cache_flushI) + mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ + mov pc, lr + +ENTRY(arm9_cache_flushI_SE) + mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */ + mov pc, lr + +ENTRY(arm9_cache_flushD) + mcr p15, 0, r0, c7, c6, 0 /* flush D cache */ + mov pc, lr + +ENTRY(arm9_cache_flushD_SE) + mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */ + mov pc, lr + +ENTRY(arm9_cache_cleanID) + mcr p15, 0, r0, c7, c10, 4 + mov pc, lr + +/* + * Soft functions + */ +ENTRY(arm9_cache_syncI) + mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */ + mov pc, lr + +ENTRY_NP(arm9_cache_flushID_rng) + b _C_LABEL(arm9_cache_flushID) + +ENTRY_NP(arm9_cache_flushD_rng) + /* Same as above, but D cache only */ + b _C_LABEL(arm9_cache_flushD) + +ENTRY_NP(arm9_cache_syncI_rng) + /* Similarly, for I cache sync */ + b _C_LABEL(arm9_cache_syncI) + +/* + * Context switch. + * + * These is the CPU-specific parts of the context switcher cpu_switch() + * These functions actually perform the TTB reload. 
+ * + * NOTE: Special calling convention + * r1, r4-r13 must be preserved + */ +ENTRY(arm9_context_switch) + /* + * We can assume that the caches will only contain kernel addresses + * at this point. So no need to flush them again. + */ + mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ + mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */ + mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */ + + /* Paranoia -- make sure the pipeline is empty. */ + nop + nop + nop + mov pc, lr diff --git a/sys/arch/arm/arm/cpufunc_asm_armv4.S b/sys/arch/arm/arm/cpufunc_asm_armv4.S new file mode 100644 index 000000000000..e8ca04d17612 --- /dev/null +++ b/sys/arch/arm/arm/cpufunc_asm_armv4.S @@ -0,0 +1,66 @@ +/* $NetBSD: cpufunc_asm_armv4.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */ + +/* + * Copyright (c) 2001 ARM Limited + * Copyright (c) 1997,1998 Mark Brinicombe. + * Copyright (c) 1997 Causality Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Causality Limited. + * 4. The name of Causality Limited may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * ARM9 assembly functions for CPU / MMU / TLB specific operations + */ + +#include +#include + +/* + * TLB functions + */ +ENTRY(armv4_tlb_flushID) + mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */ + mov pc, lr + +ENTRY(armv4_tlb_flushI) + mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */ + mov pc, lr + +ENTRY(armv4_tlb_flushD) + mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */ + mov pc, lr + +ENTRY(armv4_tlb_flushD_SE) + mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ + mov pc, lr + +/* + * Other functions + */ +ENTRY(armv4_drain_writebuf) + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mov pc, lr diff --git a/sys/arch/arm/arm/cpufunc_asm_sa1.S b/sys/arch/arm/arm/cpufunc_asm_sa1.S new file mode 100644 index 000000000000..8f6004b8f896 --- /dev/null +++ b/sys/arch/arm/arm/cpufunc_asm_sa1.S @@ -0,0 +1,383 @@ +/* $NetBSD: cpufunc_asm_sa1.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */ + +/* + * Copyright (c) 1997,1998 Mark Brinicombe. 
+ * Copyright (c) 1997 Causality Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Causality Limited. + * 4. The name of Causality Limited may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * SA-1 assembly functions for CPU / MMU / TLB specific operations + */ + +#include +#include + +Lblock_userspace_access: + .word _C_LABEL(block_userspace_access) + +/* + * Functions to set the MMU Translation Table Base register + * + * We need to clean and flush the cache as it uses virtual + * addresses that are about to change. 
+ */ +ENTRY(sa110_setttb) +#ifdef CACHE_CLEAN_BLOCK_INTR + mrs r3, cpsr_all + orr r1, r3, #(I32_bit | F32_bit) + msr cpsr_all, r1 +#else + ldr r3, Lblock_userspace_access + ldr r2, [r3] + orr r1, r2, #1 + str r1, [r3] +#endif + stmfd sp!, {r0-r3, lr} + bl _C_LABEL(sa110_cache_cleanID) + ldmfd sp!, {r0-r3, lr} + mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ + mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */ + + /* Write the TTB */ + mcr p15, 0, r0, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */ + + /* The cleanID above means we only need to flush the I cache here */ + mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ + + /* Make sure that pipeline is emptied */ + mov r0, r0 + mov r0, r0 +#ifdef CACHE_CLEAN_BLOCK_INTR + msr cpsr_all, r3 +#else + str r2, [r3] +#endif + mov pc, lr + +/* + * TLB functions + */ +ENTRY(sa110_tlb_flushID_SE) + mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ + mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */ + mov pc, lr + +/* + * Cache functions + */ +ENTRY(sa110_cache_flushID) + mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ + mov pc, lr + +ENTRY(sa110_cache_flushI) + mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ + mov pc, lr + +ENTRY(sa110_cache_flushD) + mcr p15, 0, r0, c7, c6, 0 /* flush D cache */ + mov pc, lr + +ENTRY(sa110_cache_flushD_SE) + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + mov pc, lr + +ENTRY(sa110_cache_cleanD_E) + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mov pc, lr + +/* + * Information for the SA-1 cache clean/purge functions: + * + * * Virtual address of the memory region to use + * * Size of memory region + */ + .data + + .global _C_LABEL(sa110_cache_clean_addr) +_C_LABEL(sa110_cache_clean_addr): + .word 0xf0000000 + + .global _C_LABEL(sa110_cache_clean_size) +_C_LABEL(sa110_cache_clean_size): + .word 0x00008000 + + .text + +Lsa110_cache_clean_addr: + .word _C_LABEL(sa110_cache_clean_addr) +Lsa110_cache_clean_size: + .word _C_LABEL(sa110_cache_clean_size) + +ENTRY(sa110_cache_cleanID) +ENTRY(sa110_cache_cleanD) +#ifdef CACHE_CLEAN_BLOCK_INTR + mrs r3, cpsr_all + orr r0, r3, #(I32_bit | F32_bit) + msr cpsr_all, r0 +#else + ldr r3, Lblock_userspace_access + ldr ip, [r3] + orr r0, ip, #1 + str r0, [r3] +#endif + ldr r2, Lsa110_cache_clean_addr + ldmia r2, {r0, r1} +#ifdef DOUBLE_CACHE_CLEAN_BANK + eor r0, r0, r1 + str r0, [r2] +#endif + +Lsa110_cache_cleanD_loop: + ldr r2, [r0], #32 + subs r1, r1, #32 + bne Lsa110_cache_cleanD_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ +#ifdef CACHE_CLEAN_BLOCK_INTR + msr cpsr_all, r3 +#else + str ip, [r3] +#endif + mov pc, lr + +ENTRY(sa110_cache_purgeID) +#ifdef CACHE_CLEAN_BLOCK_INTR + mrs r3, cpsr_all + orr r0, r3, #(I32_bit | F32_bit) + msr cpsr_all, r0 +#else + ldr r3, Lblock_userspace_access + ldr ip, [r3] + orr r0, ip, #1 + str r0, [r3] +#endif + ldr r2, Lsa110_cache_clean_addr + ldmia r2, {r0, r1} +#ifdef DOUBLE_CACHE_CLEAN_BANK + eor r0, r0, r1 + str r0, [r2] +#endif + +Lsa110_cache_purgeID_loop: + ldr r2, [r0], #32 + subs r1, r1, #32 + bne Lsa110_cache_purgeID_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D flushed above) */ +#ifdef CACHE_CLEAN_BLOCK_INTR + msr cpsr_all, r3 +#else + str ip, [r3] +#endif + mov pc, lr + +ENTRY(sa110_cache_purgeD) +#ifdef CACHE_CLEAN_BLOCK_INTR + mrs r3, cpsr_all + orr r0, r3, #(I32_bit | F32_bit) + msr cpsr_all, r0 +#else + ldr r3, Lblock_userspace_access + 
ldr ip, [r3] + orr r0, ip, #1 + str r0, [r3] +#endif + ldr r2, Lsa110_cache_clean_addr + ldmia r2, {r0, r1} +#ifdef DOUBLE_CACHE_CLEAN_BANK + eor r0, r0, r1 + str r0, [r2] +#endif + +Lsa110_cache_purgeD_loop: + ldr r2, [r0], #32 + subs r1, r1, #32 + bne Lsa110_cache_purgeD_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ +#ifdef CACHE_CLEAN_BLOCK_INTR + msr cpsr_all, r3 +#else + str ip, [r3] +#endif + mov pc, lr + +ENTRY(sa110_cache_purgeID_E) + mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */ + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + mov pc, lr + +ENTRY(sa110_cache_purgeD_E) + mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */ + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + mov pc, lr + +/* + * Soft functions + */ +ENTRY(sa110_cache_syncI) +#ifdef CACHE_CLEAN_BLOCK_INTR + mrs r3, cpsr_all + orr r0, r3, #(I32_bit | F32_bit) + msr cpsr_all, r0 +#else + ldr r3, Lblock_userspace_access + ldr ip, [r3] + orr r0, ip, #1 + str r0, [r3] +#endif + ldr r2, Lsa110_cache_clean_addr + ldmia r2, {r0, r1} +#ifdef DOUBLE_CACHE_CLEAN_BANK + eor r0, r0, r1 + str r0, [r2] +#endif + +Lsa110_cache_syncI_loop: + ldr r2, [r0], #32 + subs r1, r1, #32 + bne Lsa110_cache_syncI_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ +#ifdef CACHE_CLEAN_BLOCK_INTR + msr cpsr_all, r3 +#else + str ip, [r3] +#endif + mov pc, lr + +ENTRY(sa110_cache_cleanID_rng) +ENTRY(sa110_cache_cleanD_rng) + cmp r1, #0x4000 + bcs _C_LABEL(sa110_cache_cleanID) + + and r2, r0, #0x1f + add r1, r1, r2 + bic r0, r0, #0x1f + +sa110_cache_cleanD_rng_loop: + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + add r0, r0, #32 + subs r1, r1, #32 + bpl sa110_cache_cleanD_rng_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mov pc, lr + +ENTRY(sa110_cache_purgeID_rng) + cmp r1, #0x4000 + bcs _C_LABEL(sa110_cache_purgeID) + + and r2, r0, #0x1f + add r1, r1, r2 + bic r0, r0, #0x1f + +sa110_cache_purgeID_rng_loop: + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + add r0, r0, #32 + subs r1, r1, #32 + bpl sa110_cache_purgeID_rng_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ + mov pc, lr + +ENTRY(sa110_cache_purgeD_rng) + cmp r1, #0x4000 + bcs _C_LABEL(sa110_cache_purgeD) + + and r2, r0, #0x1f + add r1, r1, r2 + bic r0, r0, #0x1f + +sa110_cache_purgeD_rng_loop: + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + add r0, r0, #32 + subs r1, r1, #32 + bpl sa110_cache_purgeD_rng_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mov pc, lr + +ENTRY(sa110_cache_syncI_rng) + cmp r1, #0x4000 + bcs _C_LABEL(sa110_cache_syncI) + + and r2, r0, #0x1f + add r1, r1, r2 + bic r0, r0, #0x1f + +sa110_cache_syncI_rng_loop: + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + add r0, r0, #32 + subs r1, r1, #32 + bpl sa110_cache_syncI_rng_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ + + mov pc, lr + +/* + * Context switch. + * + * These is the CPU-specific parts of the context switcher cpu_switch() + * These functions actually perform the TTB reload. 
+ * + * NOTE: Special calling convention + * r1, r4-r13 must be preserved + */ +ENTRY(sa110_context_switch) + /* + * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. + * Thus the data cache will contain only kernel data and the + * instruction cache will contain only kernel code, and all + * kernel mappings are shared by all processes. + */ + + /* Write the TTB */ + mcr p15, 0, r0, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ + + /* Make sure that pipeline is emptied */ + mov r0, r0 + mov r0, r0 + mov pc, lr diff --git a/sys/arch/arm/arm/cpufunc_asm_xscale.S b/sys/arch/arm/arm/cpufunc_asm_xscale.S new file mode 100644 index 000000000000..1e3045340134 --- /dev/null +++ b/sys/arch/arm/arm/cpufunc_asm_xscale.S @@ -0,0 +1,321 @@ +/* $NetBSD: cpufunc_asm_xscale.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */ + +/* + * Copyright (c) 2001 Matt Thomas + * Copyright (c) 1997,1998 Mark Brinicombe. + * Copyright (c) 1997 Causality Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Causality Limited. + * 4. The name of Causality Limited may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * XScale assembly functions for CPU / MMU / TLB specific operations + */ + +#include +#include + +Lblock_userspace_access: + .word _C_LABEL(block_userspace_access) + +/* + * Functions to set the MMU Translation Table Base register + * + * We need to clean and flush the cache as it uses virtual + * addresses that are about to change. 
+ */ +ENTRY(xscale_setttb) +#ifdef CACHE_CLEAN_BLOCK_INTR + mrs r3, cpsr_all + orr r1, r3, #(I32_bit | F32_bit) + msr cpsr_all, r1 +#else + ldr r3, Lblock_userspace_access + ldr r2, [r3] + orr r1, r2, #1 + str r1, [r3] +#endif + stmfd sp!, {r0-r3, lr} + bl _C_LABEL(xscale_cache_cleanID) + ldmfd sp!, {r0-r3, lr} + mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ + mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */ + + /* Write the TTB */ + mcr p15, 0, r0, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */ + + /* The cleanID above means we only need to flush the I cache here */ + mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ + + /* Make sure that pipeline is emptied */ + mrc p15, 0, r0, c2, c0, 0 /* read some register in CP15 */ + mov r0, r0 /* force read to complete */ + sub pc, pc, #4 /* branch to next instruction */ + +#ifdef CACHE_CLEAN_BLOCK_INTR + msr cpsr_all, r3 +#else + str r2, [r3] +#endif + mov pc, lr + +/* + * TLB functions + */ +ENTRY(xscale_tlb_flushID_SE) + mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ + mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ + mcr p15, 0, r0, c7, c5, 6 /* inv. BTB */ + mov pc, lr + +/* + * Cache functions + */ +ENTRY(xscale_cache_flushID) + mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ + mov pc, lr + +ENTRY(xscale_cache_flushI) + mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ + mov pc, lr + +ENTRY(xscale_cache_flushD) + mcr p15, 0, r0, c7, c6, 0 /* flush D cache */ + mov pc, lr + +ENTRY(xscale_cache_flushI_SE) + mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ + mcr p15, 0, r0, c7, c5, 6 /* inv. BTB */ + mov pc, lr + +ENTRY(xscale_cache_flushD_SE) + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + mov pc, lr + +ENTRY(xscale_cache_cleanD_E) + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mov pc, lr + +/* + * Information for the XScale cache clean/purge functions: + * + * * Virtual address of the memory region to use + * * Size of memory region + */ + .data + + .global _C_LABEL(xscale_cache_clean_addr) +_C_LABEL(xscale_cache_clean_addr): + .word 0xf0000000 + + .global _C_LABEL(xscale_cache_clean_size) +_C_LABEL(xscale_cache_clean_size): + .word 0x00008000 + + .text + +Lxscale_cache_clean_addr: + .word _C_LABEL(xscale_cache_clean_addr) +Lxscale_cache_clean_size: + .word _C_LABEL(xscale_cache_clean_size) + +ENTRY_NP(xscale_cache_syncI) +ENTRY_NP(xscale_cache_purgeID) + mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */ +ENTRY_NP(xscale_cache_cleanID) +ENTRY_NP(xscale_cache_purgeD) +ENTRY(xscale_cache_cleanD) +#ifdef CACHE_CLEAN_BLOCK_INTR + mrs r3, cpsr_all + orr r0, r3, #(I32_bit | F32_bit) + msr cpsr_all, r0 +#else + ldr r3, Lblock_userspace_access + ldr ip, [r3] + orr r0, ip, #1 + str r0, [r3] +#endif + ldr r2, Lxscale_cache_clean_addr + ldmia r2, {r0, r1} + add r0, r0, r1 + +Lxscale_cache_cleanD_loop: + subs r0, r0, #32 + mcr p15, 0, r0, c7, c2, 5 /* allocate cache line */ + subs r1, r1, #32 + bne Lxscale_cache_cleanD_loop + +#ifdef CACHE_CLEAN_MINIDATA + /* + * Clean the mini-data cache. + * + * It's expected that we only use the mini-data cache for + * kernel addresses, so there is no need to purge it on + * context switch. 
+ */ + mov r1, #64 +Lxscale_cache_cleanD_loop2: + ldr r3, [r0], #32 + subs r1, r1, #1 + bne Lxscale_cache_cleanD_loop2 +#endif + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + +#ifdef CACHE_CLEAN_BLOCK_INTR + msr cpsr_all, r3 +#else + str ip, [r3] +#endif + mov pc, lr + +ENTRY(xscale_cache_purgeID_E) + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ + mcr p15, 0, r0, c7, c5, 6 /* inv. BTB */ + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + mov pc, lr + +ENTRY(xscale_cache_purgeD_E) + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + mov pc, lr + +/* + * Soft functions + */ +/* xscale_cache_syncI is identical to xscale_cache_purgeID */ + +ENTRY(xscale_cache_cleanID_rng) +ENTRY(xscale_cache_cleanD_rng) + cmp r1, #0x4000 + bcs _C_LABEL(xscale_cache_cleanID) + + and r2, r0, #0x1f + add r1, r1, r2 + bic r0, r0, #0x1f + +xscale_cache_cleanD_rng_loop: + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + add r0, r0, #32 + subs r1, r1, #32 + bpl xscale_cache_cleanD_rng_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mov pc, lr + +ENTRY(xscale_cache_purgeID_rng) + cmp r1, #0x4000 + bcs _C_LABEL(xscale_cache_purgeID) + + and r2, r0, #0x1f + add r1, r1, r2 + bic r0, r0, #0x1f + +xscale_cache_purgeID_rng_loop: + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ + mcr p15, 0, r0, c7, c5, 6 /* inv. BTB */ + add r0, r0, #32 + subs r1, r1, #32 + bpl xscale_cache_purgeID_rng_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mov pc, lr + +ENTRY(xscale_cache_purgeD_rng) + cmp r1, #0x4000 + bcs _C_LABEL(xscale_cache_purgeD) + + and r2, r0, #0x1f + add r1, r1, r2 + bic r0, r0, #0x1f + +xscale_cache_purgeD_rng_loop: + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ + add r0, r0, #32 + subs r1, r1, #32 + bpl xscale_cache_purgeD_rng_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mov pc, lr + +ENTRY(xscale_cache_syncI_rng) + cmp r1, #0x4000 + bcs _C_LABEL(xscale_cache_syncI) + + and r2, r0, #0x1f + add r1, r1, r2 + bic r0, r0, #0x1f + +xscale_cache_syncI_rng_loop: + mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ + mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ + mcr p15, 0, r0, c7, c5, 6 /* inv. BTB */ + add r0, r0, #32 + subs r1, r1, #32 + bpl xscale_cache_syncI_rng_loop + + mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ + mov pc, lr + +/* + * Context switch. + * + * These is the CPU-specific parts of the context switcher cpu_switch() + * These functions actually perform the TTB reload. + * + * NOTE: Special calling convention + * r1, r4-r13 must be preserved + */ +ENTRY(xscale_context_switch) + /* + * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. + * Thus the data cache will contain only kernel data and the + * instruction cache will contain only kernel code, and all + * kernel mappings are shared by all processes. 
+ */ + + /* Write the TTB */ + mcr p15, 0, r0, c2, c0, 0 + + /* If we have updated the TTB we must flush the TLB */ + mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ + + /* Make sure that pipeline is emptied */ + mrc p15, 0, r0, c2, c0, 0 /* read some register in CP15 */ + mov r0, r0 /* force the read to complete */ + sub pc, pc, #4 /* branch to next instruction */ + + mov pc, lr diff --git a/sys/arch/arm/conf/files.arm b/sys/arch/arm/conf/files.arm index 00cfe50cc044..6221171f9bbc 100644 --- a/sys/arch/arm/conf/files.arm +++ b/sys/arch/arm/conf/files.arm @@ -1,4 +1,4 @@ -# $NetBSD: files.arm,v 1.39 2001/10/18 14:03:43 rearnsha Exp $ +# $NetBSD: files.arm,v 1.40 2001/11/10 23:14:08 thorpej Exp $ # temporary define to allow easy moving to ../arch/arm/arm32 defopt ARM32 @@ -52,6 +52,15 @@ file arch/arm/arm/bus_space_notimpl.S arm32 file arch/arm/arm/compat_13_machdep.c compat_13 file arch/arm/arm/cpufunc.c file arch/arm/arm/cpufunc_asm.S +file arch/arm/arm/cpufunc_asm_arm3.S cpu_arm3 +file arch/arm/arm/cpufunc_asm_arm67.S cpu_arm6 | cpu_arm7 +file arch/arm/arm/cpufunc_asm_arm7tdmi.S cpu_arm7tdmi +file arch/arm/arm/cpufunc_asm_arm8.S cpu_arm8 +file arch/arm/arm/cpufunc_asm_arm9.S cpu_arm9 +file arch/arm/arm/cpufunc_asm_armv4.S cpu_arm9 | cpu_sa110 | + cpu_xscale +file arch/arm/arm/cpufunc_asm_sa1.S cpu_sa110 +file arch/arm/arm/cpufunc_asm_xscale.S cpu_xscale file arch/arm/arm/process_machdep.c file arch/arm/arm/procfs_machdep.c procfs file arch/arm/arm/sig_machdep.c
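The clean routines in cpufunc_asm_sa1.S and cpufunc_asm_xscale.S use the usual StrongARM trick for cleaning a write-back data cache that has no "clean entire cache" operation: walk a dedicated, otherwise-unused virtual region as large as the cache (sa110_cache_clean_addr / sa110_cache_clean_size above: 0xf0000000, 32 KiB), touching one 32-byte line at a time so that every dirty line gets evicted and written back. The SA-110 does this with loads; the XScale uses the line-allocate operation (c7, c2, 5) instead. What follows is only a rough C sketch of that loop, assuming the 32-byte line and 32 KiB region used above; the authoritative code is the assembly in the patch.

	#include <stdint.h>
	#include <stddef.h>

	#define DCACHE_SIZE	0x00008000	/* 32 KiB, matches sa110_cache_clean_size */
	#define CACHE_LINE	32		/* SA-110 / XScale cache line size */

	static void
	clean_dcache_by_read(volatile uint8_t *clean_region)
	{
		size_t off;

		/*
		 * Reading one word from every line of a region as large as
		 * the cache refills the whole cache from this region, which
		 * forces any dirty line belonging to other addresses to be
		 * written back first.
		 */
		for (off = 0; off < DCACHE_SIZE; off += CACHE_LINE)
			(void)*(volatile const uint32_t *)(clean_region + off);

		/* The assembly then drains the write buffer (c7, c10, 4). */
	}

With DOUBLE_CACHE_CLEAN_BANK defined, the routines above also alternate between two adjacent 32 KiB banks (the eor/str on sa110_cache_clean_addr), the intent being that each pass misses in the cache rather than hitting lines left over from the previous clean.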
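The *_rng (ranged) variants take a virtual address in r0 and a length in r1. If the length is 0x4000 bytes or more they branch to the corresponding whole-cache routine, on the grounds that walking the clean region is cheaper than operating on that many lines individually; otherwise they round the start address down to a 32-byte line boundary, grow the length by the amount rounded off, and clean/flush one line at a time. Below is a minimal C sketch of that bookkeeping; clean_dcache_line(), clean_dcache_all() and drain_writebuf() are hypothetical stand-ins for the coprocessor operations, not real kernel interfaces.

	#include <stdint.h>
	#include <stddef.h>

	#define CACHE_LINE		32
	#define WHOLE_CACHE_THRESHOLD	0x4000	/* the "cmp r1, #0x4000" above */

	/* Hypothetical stand-ins for the coprocessor operations used above. */
	static void clean_dcache_line(uintptr_t va) { (void)va; /* mcr p15, 0, va, c7, c10, 1 */ }
	static void clean_dcache_all(void) { /* e.g. sa110_cache_cleanD */ }
	static void drain_writebuf(void) { /* mcr p15, 0, r0, c7, c10, 4 */ }

	static void
	clean_dcache_range(uintptr_t va, size_t len)
	{
		long remaining;

		if (len >= WHOLE_CACHE_THRESHOLD) {
			/* Large range: walking the whole cache is cheaper. */
			clean_dcache_all();
			return;
		}

		/*
		 * "and r2, r0, #0x1f / add r1, r1, r2 / bic r0, r0, #0x1f":
		 * round the start down to a line boundary and grow the
		 * length by the same amount, so the end of the range is
		 * unchanged.
		 */
		len += va & (CACHE_LINE - 1);
		va &= ~(uintptr_t)(CACHE_LINE - 1);

		/*
		 * The assembly loop ends with "subs ... / bpl", i.e. it
		 * keeps going while the count is still >= 0 after the
		 * subtraction.
		 */
		remaining = (long)len;
		do {
			clean_dcache_line(va);
			va += CACHE_LINE;
			remaining -= CACHE_LINE;
		} while (remaining >= 0);

		drain_writebuf();
	}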
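Finally, the files.arm hunk ties each new file to the kernel configuration attribute(s) that need it, so a kernel configured for one CPU model assembles only that model's routines; cpufunc_asm_armv4.S is shared because the ARMv4 TLB-flush and write-buffer operations are the same on ARM9, SA-1 and XScale. At run time the rest of the kernel reaches these routines indirectly through a per-CPU table of function pointers set up by cpufunc.c; the sketch below only illustrates that shape, with a made-up structure and made-up member names rather than the kernel's real table.

	/*
	 * Illustration only: a simplified per-CPU operations table.  The
	 * struct and its member names are invented for this sketch; the
	 * entry points it references are ones added by this patch.
	 */
	#include <stdint.h>

	struct cpu_ops {
		void	(*co_setttb)(uint32_t ttb);	/* load new translation table base */
		void	(*co_tlb_flushID)(void);	/* flush I+D TLBs */
		void	(*co_cache_purgeID)(void);	/* clean + invalidate I+D caches */
	};

	/* Provided by the .S files selected in files.arm for an SA-110 kernel. */
	extern void sa110_setttb(uint32_t);
	extern void armv4_tlb_flushID(void);
	extern void sa110_cache_purgeID(void);

	static const struct cpu_ops sa110_ops = {
		.co_setttb        = sa110_setttb,
		.co_tlb_flushID   = armv4_tlb_flushID,	/* shared ARMv4 routine */
		.co_cache_purgeID = sa110_cache_purgeID,
	};

	/* The rest of the kernel then calls through the pointers, e.g.: */
	static void
	tlb_flush_all(const struct cpu_ops *ops)
	{
		ops->co_tlb_flushID();
	}

An XScale kernel would fill the same slots with xscale_setttb, armv4_tlb_flushID and xscale_cache_purgeID instead, which is exactly the kind of per-model selection the split into separate source files is meant to keep straight.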