Split all the model/version-specific cpufuncs into separate files

corresponding to the model/version.
This commit is contained in:
thorpej 2001-11-10 23:14:08 +00:00
parent a27687fb2f
commit 9e1b785626
10 changed files with 1478 additions and 1150 deletions

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,60 @@
/* $NetBSD: cpufunc_asm_arm3.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*
* Copyright (c) 1997,1998 Mark Brinicombe.
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARM3 assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
 * The ARM3 has its own control register in a different place:
 * it is accessed via CP15 register 2 here, rather than the usual
 * control register location.
 *
 * arm3_control(clear, toggle):
 *	in:	r0 = mask of control bits to clear
 *		r1 = mask of control bits to toggle (XORed in after clearing)
 *	out:	r0 = previous value of the control register
 *	clobbers: r2, r3, flags
 *
 * The register is written back only if the computed value differs
 * from the current one.
 */
ENTRY(arm3_control)
mrc p15, 0, r3, c2, c0, 0 /* Read the control register */
bic r2, r3, r0 /* Clear bits */
eor r2, r2, r1 /* XOR bits */
teq r2, r3 /* Only write if there is a change */
mcrne p15, 0, r2, c2, c0, 0 /* Write new control register */
mov r0, r3 /* Return old value */
mov pc, lr
/*
 * Cache functions.
 *
 * arm3_cache_flush: flush the cache by writing CP15 register 1.
 * The write itself is the flush trigger; r0 is passed through as the
 * (presumably ignored) data value -- confirm against the ARM3
 * documentation.
 */
ENTRY(arm3_cache_flush)
mcr p15, 0, r0, c1, c0, 0
mov pc, lr

View File

@ -0,0 +1,110 @@
/* $NetBSD: cpufunc_asm_arm67.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*
* Copyright (c) 1997,1998 Mark Brinicombe.
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARM6/ARM7 assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 *
 * arm67_setttb(ttb):
 *	in:	r0 = new translation table base (written unmodified to CP15)
 *
 * NOTE(review): only a flush (c7 write) is performed, with no
 * separate clean step -- this assumes no dirty data needs writing
 * back on ARM6/7; confirm against the cache configuration in use.
 */
ENTRY(arm67_setttb)
/* Flush the IDC before the virtual mappings change */
mcr p15, 0, r0, c7, c0, 0
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c5, c0, 0
/* For good measure we will flush the IDC as well */
mcr p15, 0, r0, c7, c0, 0
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
/*
 * TLB functions
 *
 * arm67_tlb_flush: invalidate the TLB (CP15 c5 write).
 * arm67_tlb_purge: purge a TLB entry (CP15 c6 write); r0 is passed
 *	as the data -- presumably the virtual address of the entry to
 *	purge; confirm against the ARM6/7 documentation.
 */
ENTRY(arm67_tlb_flush)
mcr p15, 0, r0, c5, c0, 0
mov pc, lr
ENTRY(arm67_tlb_purge)
mcr p15, 0, r0, c6, c0, 0
mov pc, lr
/*
 * Cache functions
 *
 * arm67_cache_flush: flush (invalidate) the IDC via a CP15 c7 write.
 */
ENTRY(arm67_cache_flush)
mcr p15, 0, r0, c7, c0, 0
mov pc, lr
/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 * r1, r4-r13 must be preserved
 *
 * arm67_context_switch(ttb):
 *	in:	r0 = new translation table base
 * Only r0 and CP15 are touched, so the special convention holds.
 */
ENTRY(arm67_context_switch)
/* For good measure we will flush the IDC as well */
mcr p15, 0, r0, c7, c0, 0 /* flush cache */
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c5, c0, 0
#if 0
/* For good measure we will flush the IDC as well */
mcr p15, 0, r0, c7, c0, 0 /* flush cache */
#endif
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr

View File

@ -0,0 +1,99 @@
/* $NetBSD: cpufunc_asm_arm7tdmi.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*
* Copyright (c) 2001 John Fremlin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARM7TDMI assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 *
 * arm7tdmi_setttb(ttb):
 *	in:	r0 = new translation table base
 *	clobbers: r0, r1, r2
 * r1 (TTB) and r2 (return address) survive the helper calls because
 * arm7tdmi_cache_flushID and arm7tdmi_tlb_flushID (below) only
 * clobber r0.  Returns via "mov pc, r2" since lr is overwritten by
 * the bl instructions.
 */
ENTRY(arm7tdmi_setttb)
mov r1, r0 /* store the TTB in a safe place */
mov r2, lr /* ditto with lr */
bl _C_LABEL(arm7tdmi_cache_flushID)
/* Write the TTB */
mcr p15, 0, r1, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
bl _C_LABEL(arm7tdmi_tlb_flushID)
/* For good measure we will flush the IDC as well */
bl _C_LABEL(arm7tdmi_cache_flushID)
mov pc, r2
/*
 * TLB functions
 *
 * arm7tdmi_tlb_flushID:    invalidate the entire I+D TLB
 *	(CP15 c8, c7; clobbers r0 -- callers in this file rely on
 *	r1/r2 surviving).
 * arm7tdmi_tlb_flushID_SE: invalidate a single I+D TLB entry;
 *	r0 = virtual address of the entry.
 */
ENTRY(arm7tdmi_tlb_flushID)
mov r0, #0
mcr p15, 0, r0, c8, c7, 0
mov pc, lr
ENTRY(arm7tdmi_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1
mov pc, lr
/*
 * Cache functions
 *
 * arm7tdmi_cache_flushID: invalidate the entire I+D cache
 *	(CP15 c7, c7 with a data value of 0).
 *	clobbers: r0
 */
ENTRY(arm7tdmi_cache_flushID)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0
/* Make sure that the pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 * r1, r4-r13 must be preserved
 *
 * NOTE(review): the tail-called arm7tdmi_setttb clobbers r1 and r2,
 * which appears to conflict with the "r1 preserved" requirement
 * above -- verify against cpu_switch()'s actual expectations.
 */
ENTRY(arm7tdmi_context_switch)
b _C_LABEL(arm7tdmi_setttb)

View File

@ -0,0 +1,285 @@
/* $NetBSD: cpufunc_asm_arm8.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*
* Copyright (c) 1997 ARM Limited
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARM8 assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
 * arm8_clock_config(clear, toggle):
 *	in:	r0 = mask of clock-register bits to clear
 *		r1 = mask of clock-register bits to toggle (XOR)
 *	out:	r0 = previous value of the clock register
 *	clobbers: r1, r2, r3
 *
 * Dynamic clocking (bit 0) and the L bit (bit 4) are forced off
 * while the register is changed; the final write installs the
 * requested value (with the L bit kept clear).
 */
ENTRY(arm8_clock_config)
mrc p15, 0, r3, c15, c0, 0 /* Read the clock register */
bic r2, r3, #0x11 /* turn off dynamic clocking
and clear L bit */
mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
bic r2, r3, r0 /* Clear bits */
eor r2, r2, r1 /* XOR bits */
bic r2, r2, #0x10 /* clear the L bit */
bic r1, r2, #0x01 /* still keep dynamic clocking off */
mcr p15, 0, r1, c15, c0, 0 /* Write clock register */
mov r0, r0 /* NOP */
mov r0, r0 /* NOP */
mov r0, r0 /* NOP */
mov r0, r0 /* NOP */
mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
mov r0, r3 /* Return old value */
mov pc, lr
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 *
 * arm8_setttb(ttb):
 *	in:	r0 = new translation table base
 * Runs with IRQ and FIQ masked so the clean/flush/TTB-write sequence
 * is not interrupted; the caller's CPSR is restored on exit.
 */
ENTRY(arm8_setttb)
mrs r3, cpsr_all /* mask IRQ+FIQ for the duration */
orr r1, r3, #(I32_bit | F32_bit)
msr cpsr_all, r1
stmfd sp!, {r0-r3, lr} /* cleanID clobbers these -- save */
bl _C_LABEL(arm8_cache_cleanID)
ldmfd sp!, {r0-r3, lr}
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c8, c7, 0
/* For good measure we will flush the IDC as well */
mcr p15, 0, r0, c7, c7, 0
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
msr cpsr_all, r3 /* restore caller's interrupt state */
mov pc, lr
/*
 * TLB functions
 *
 * arm8_tlb_flushID:    invalidate the entire I+D TLB.
 * arm8_tlb_flushID_SE: invalidate a single I+D TLB entry;
 *	r0 = virtual address of the entry.
 */
ENTRY(arm8_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mov pc, lr
ENTRY(arm8_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */
mov pc, lr
/*
 * Cache functions
 *
 * arm8_cache_flushID:   invalidate the entire I+D cache.
 * arm8_cache_flushID_E: invalidate a single I+D cache entry;
 *	r0 = entry designator passed to CP15.
 */
ENTRY(arm8_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
mov pc, lr
ENTRY(arm8_cache_flushID_E)
mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
mov pc, lr
/*
 * arm8_cache_cleanID: write back (clean) every I+D cache entry by
 * walking all index values with the "clean entry" operation
 * (CP15 c7, c11, 1).
 *	clobbers: r0, r2 (r0 is zero on return)
 *
 * The body is unrolled 16 times with an index step of 0x10; the
 * outer counter advances by 0x04000000 per pass until it wraps back
 * to zero.  NOTE(review): the exact index encoding (entry in the low
 * bits, segment in the high bits) is implied by these constants --
 * confirm against the ARM810 cache geometry.
 */
ENTRY(arm8_cache_cleanID)
mov r0, #0x00000000
Larm8_cache_cleanID_loop:
mov r2, r0
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
adds r0, r0, #0x04000000 /* next segment; loop until wrap to 0 */
bne Larm8_cache_cleanID_loop
mov pc, lr
/*
 * arm8_cache_cleanID_E: clean a single I+D cache entry;
 *	r0 = entry designator passed to CP15.
 */
ENTRY(arm8_cache_cleanID_E)
mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
mov pc, lr
/*
 * arm8_cache_purgeID: clean and then invalidate (purge) every I+D
 * cache entry, with IRQ and FIQ masked for the duration.
 *	clobbers: r0, r2, r3 (r0 is zero on return)
 * Same index walk as arm8_cache_cleanID, but each entry is cleaned
 * and flushed with two separate operations because of ARM810 bug 3
 * (described below).
 */
ENTRY(arm8_cache_purgeID)
/*
 * ARM810 bug 3
 *
 * Clean and invalidate entry will not invalidate the entry
 * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1)
 *
 * Instead of using the clean and invalidate entry operation
 * use a separate clean and invalidate entry operations.
 * i.e.
 * mcr p15, 0, rd, c7, c11, 1
 * mcr p15, 0, rd, c7, c7, 1
 */
mov r0, #0x00000000
mrs r3, cpsr_all /* mask IRQ+FIQ for the duration */
orr r2, r3, #(I32_bit | F32_bit)
msr cpsr_all, r2
Larm8_cache_purgeID_loop:
mov r2, r0
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
add r2, r2, #0x10
mcr p15, 0, r2, c7, c11, 1
mcr p15, 0, r2, c7, c7, 1
adds r0, r0, #0x04000000 /* next segment; loop until wrap to 0 */
bne Larm8_cache_purgeID_loop
msr cpsr_all, r3 /* restore caller's interrupt state */
mov pc, lr
/*
 * arm8_cache_purgeID_E: clean then invalidate one I+D cache entry;
 *	r0 = entry designator.  IRQ/FIQ are masked around the pair of
 *	operations; the caller's CPSR is restored on exit.
 */
ENTRY(arm8_cache_purgeID_E)
/*
 * ARM810 bug 3
 *
 * Clean and invalidate entry will not invalidate the entry
 * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1)
 *
 * Instead of using the clean and invalidate entry operation
 * use a separate clean and invalidate entry operations.
 * i.e.
 * mcr p15, 0, rd, c7, c11, 1
 * mcr p15, 0, rd, c7, c7, 1
 */
mrs r3, cpsr_all
orr r2, r3, #(I32_bit | F32_bit)
msr cpsr_all, r2
mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
msr cpsr_all, r3
mov pc, lr
/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 * r1, r4-r13 must be preserved
 *
 * arm8_context_switch(ttb):
 *	in:	r0 = new translation table base
 * Only r0 and CP15 are touched, so the special convention holds.
 */
ENTRY(arm8_context_switch)
/* For good measure we will flush the IDC as well */
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
#if 0
/* For good measure we will flush the IDC as well */
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
#endif
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr

View File

@ -0,0 +1,139 @@
/* $NetBSD: cpufunc_asm_arm9.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*
* Copyright (c) 2001 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARM9 assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 *
 * arm9_setttb(ttb):
 *	in:	r0 = new translation table base (also passed as the
 *		CP15 data for the flush/drain operations)
 */
ENTRY(arm9_setttb)
/*
 * Since we use the caches in write-through mode, we only have to
 * drain the write buffers and flush the caches.
 */
mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mov pc, lr
/*
 * TLB functions
 *
 * arm9_tlb_flushID_SE: invalidate one entry from each of the
 *	separate D and I TLBs; r0 = virtual address of the entry.
 */
ENTRY(arm9_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mov pc, lr
/*
 * Cache functions
 *
 * Full-cache flushes ignore the value in r0 (it is just the CP15
 * data operand); the *_SE variants take r0 = entry address.
 */
ENTRY(arm9_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
mov pc, lr
ENTRY(arm9_cache_flushID_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
mov pc, lr
ENTRY(arm9_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
ENTRY(arm9_cache_flushI_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
mov pc, lr
ENTRY(arm9_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
mov pc, lr
ENTRY(arm9_cache_flushD_SE)
mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
mov pc, lr
/*
 * arm9_cache_cleanID: with the caches in write-through mode (see
 * arm9_setttb above), "clean" reduces to draining the write buffer.
 */
ENTRY(arm9_cache_cleanID)
mcr p15, 0, r0, c7, c10, 4
mov pc, lr
/*
 * Soft functions
 *
 * arm9_cache_syncI: synchronize the I cache by flushing both caches
 * (write-through mode -- nothing to clean first).
 * The *_rng variants below ignore the range arguments (r0 = start,
 * r1 = length, presumably) and simply fall through to the
 * corresponding whole-cache operation.
 */
ENTRY(arm9_cache_syncI)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
mov pc, lr
ENTRY_NP(arm9_cache_flushID_rng)
b _C_LABEL(arm9_cache_flushID)
ENTRY_NP(arm9_cache_flushD_rng)
/* Same as above, but D cache only */
b _C_LABEL(arm9_cache_flushD)
ENTRY_NP(arm9_cache_syncI_rng)
/* Similarly, for I cache sync */
b _C_LABEL(arm9_cache_syncI)
/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 * r1, r4-r13 must be preserved
 *
 * arm9_context_switch(ttb):
 *	in:	r0 = new translation table base
 * Only r0 and CP15 are touched, so the special convention holds.
 */
ENTRY(arm9_context_switch)
/*
 * We can assume that the caches will only contain kernel addresses
 * at this point. So no need to flush them again.
 */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
/* Paranoia -- make sure the pipeline is empty. */
nop
nop
nop
mov pc, lr

View File

@ -0,0 +1,66 @@
/* $NetBSD: cpufunc_asm_armv4.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*
* Copyright (c) 2001 ARM Limited
* Copyright (c) 1997,1998 Mark Brinicombe.
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARMv4 assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
 * TLB functions (generic ARMv4 CP15 c8 operations)
 *
 * armv4_tlb_flushID:   invalidate the entire I+D TLB
 * armv4_tlb_flushI:    invalidate the entire I TLB
 * armv4_tlb_flushD:    invalidate the entire D TLB
 * armv4_tlb_flushD_SE: invalidate one D TLB entry;
 *	r0 = virtual address of the entry
 * (For the full flushes, r0 is merely the CP15 data operand.)
 */
ENTRY(armv4_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mov pc, lr
ENTRY(armv4_tlb_flushI)
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
mov pc, lr
ENTRY(armv4_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
mov pc, lr
ENTRY(armv4_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mov pc, lr
/*
 * Other functions
 *
 * armv4_drain_writebuf: stall until the write buffer has emptied.
 */
ENTRY(armv4_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr

View File

@ -0,0 +1,383 @@
/* $NetBSD: cpufunc_asm_sa1.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*
* Copyright (c) 1997,1998 Mark Brinicombe.
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* SA-1 assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
 * Literal pool: address of the C variable block_userspace_access.
 * The cache clean/purge routines below set its low bit while they
 * run (as an alternative to masking interrupts) and restore the
 * previous value afterwards.
 */
Lblock_userspace_access:
.word _C_LABEL(block_userspace_access)
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 *
 * sa110_setttb(ttb):
 *	in:	r0 = new translation table base
 * The cache clean is protected from interference either by masking
 * IRQ+FIQ (CACHE_CLEAN_BLOCK_INTR) or by setting the
 * block_userspace_access flag; the previous state is restored
 * before returning.
 */
ENTRY(sa110_setttb)
#ifdef CACHE_CLEAN_BLOCK_INTR
mrs r3, cpsr_all /* mask IRQ+FIQ */
orr r1, r3, #(I32_bit | F32_bit)
msr cpsr_all, r1
#else
ldr r3, Lblock_userspace_access /* set the block flag */
ldr r2, [r3]
orr r1, r2, #1
str r1, [r3]
#endif
stmfd sp!, {r0-r3, lr} /* cleanID clobbers these -- save */
bl _C_LABEL(sa110_cache_cleanID)
ldmfd sp!, {r0-r3, lr}
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
/* The cleanID above means we only need to flush the I cache here */
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
#ifdef CACHE_CLEAN_BLOCK_INTR
msr cpsr_all, r3 /* restore interrupt state */
#else
str r2, [r3] /* restore previous flag value */
#endif
mov pc, lr
/*
 * TLB functions
 *
 * sa110_tlb_flushID_SE: invalidate a single D TLB entry
 *	(r0 = virtual address) and the *whole* I TLB -- note the
 *	asymmetry: only the D side supports single-entry flush here.
 */
ENTRY(sa110_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
mov pc, lr
/*
 * Cache functions
 *
 * Full flushes (invalidate without write-back) use r0 only as the
 * CP15 data operand; the *_SE / *_E variants take r0 = entry
 * (virtual address) to operate on.
 */
ENTRY(sa110_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
mov pc, lr
ENTRY(sa110_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
ENTRY(sa110_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
mov pc, lr
ENTRY(sa110_cache_flushD_SE)
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
mov pc, lr
ENTRY(sa110_cache_cleanD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
mov pc, lr
/*
 * Information for the SA-1 cache clean/purge functions:
 *
 * * Virtual address of the memory region to use
 * * Size of memory region
 *
 * The D cache is cleaned by *reading* this region line by line,
 * forcing dirty lines to be evicted (see the clean loops below).
 * Defaults: VA 0xf0000000, size 0x8000 (32KB).  The pair is read
 * with a single ldmia, so the two words must stay adjacent and in
 * this order.  Kept in .data, presumably so startup code can
 * override them -- TODO confirm.
 */
.data
.global _C_LABEL(sa110_cache_clean_addr)
_C_LABEL(sa110_cache_clean_addr):
.word 0xf0000000
.global _C_LABEL(sa110_cache_clean_size)
_C_LABEL(sa110_cache_clean_size):
.word 0x00008000
.text
/* Literal pool: addresses of the two variables above */
Lsa110_cache_clean_addr:
.word _C_LABEL(sa110_cache_clean_addr)
Lsa110_cache_clean_size:
.word _C_LABEL(sa110_cache_clean_size)
/*
 * sa110_cache_cleanID / sa110_cache_cleanD (shared implementation):
 * clean the D cache by reading the dedicated clean region one
 * 32-byte line at a time, then drain the write buffer.
 *	clobbers: r0, r1, r2, r3, ip
 * With DOUBLE_CACHE_CLEAN_BANK the region base is toggled (eor with
 * the size) on each call so successive cleans alternate between two
 * banks.  Interference is blocked via IRQ/FIQ masking or the
 * block_userspace_access flag, as in sa110_setttb.
 */
ENTRY(sa110_cache_cleanID)
ENTRY(sa110_cache_cleanD)
#ifdef CACHE_CLEAN_BLOCK_INTR
mrs r3, cpsr_all
orr r0, r3, #(I32_bit | F32_bit)
msr cpsr_all, r0
#else
ldr r3, Lblock_userspace_access
ldr ip, [r3]
orr r0, ip, #1
str r0, [r3]
#endif
ldr r2, Lsa110_cache_clean_addr
ldmia r2, {r0, r1} /* r0 = clean region VA, r1 = size */
#ifdef DOUBLE_CACHE_CLEAN_BANK
eor r0, r0, r1 /* switch banks for the next call */
str r0, [r2]
#endif
Lsa110_cache_cleanD_loop:
ldr r2, [r0], #32 /* read a line to force eviction */
subs r1, r1, #32
bne Lsa110_cache_cleanD_loop
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
#ifdef CACHE_CLEAN_BLOCK_INTR
msr cpsr_all, r3
#else
str ip, [r3]
#endif
mov pc, lr
/*
 * sa110_cache_purgeID: purge the I+D caches.  The D cache is
 * cleaned by the same read-the-clean-region walk as
 * sa110_cache_cleanD; the I cache is then flushed explicitly.
 *	clobbers: r0, r1, r2, r3, ip
 * Blocking and bank toggling behave as in sa110_cache_cleanD.
 */
ENTRY(sa110_cache_purgeID)
#ifdef CACHE_CLEAN_BLOCK_INTR
mrs r3, cpsr_all
orr r0, r3, #(I32_bit | F32_bit)
msr cpsr_all, r0
#else
ldr r3, Lblock_userspace_access
ldr ip, [r3]
orr r0, ip, #1
str r0, [r3]
#endif
ldr r2, Lsa110_cache_clean_addr
ldmia r2, {r0, r1} /* r0 = clean region VA, r1 = size */
#ifdef DOUBLE_CACHE_CLEAN_BANK
eor r0, r0, r1
str r0, [r2]
#endif
Lsa110_cache_purgeID_loop:
ldr r2, [r0], #32 /* read a line to force eviction */
subs r1, r1, #32
bne Lsa110_cache_purgeID_loop
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D flushed above) */
#ifdef CACHE_CLEAN_BLOCK_INTR
msr cpsr_all, r3
#else
str ip, [r3]
#endif
mov pc, lr
/*
 * sa110_cache_purgeD: purge the D cache only -- identical to
 * sa110_cache_purgeID except the I cache is left alone.
 *	clobbers: r0, r1, r2, r3, ip
 */
ENTRY(sa110_cache_purgeD)
#ifdef CACHE_CLEAN_BLOCK_INTR
mrs r3, cpsr_all
orr r0, r3, #(I32_bit | F32_bit)
msr cpsr_all, r0
#else
ldr r3, Lblock_userspace_access
ldr ip, [r3]
orr r0, ip, #1
str r0, [r3]
#endif
ldr r2, Lsa110_cache_clean_addr
ldmia r2, {r0, r1} /* r0 = clean region VA, r1 = size */
#ifdef DOUBLE_CACHE_CLEAN_BANK
eor r0, r0, r1
str r0, [r2]
#endif
Lsa110_cache_purgeD_loop:
ldr r2, [r0], #32 /* read a line to force eviction */
subs r1, r1, #32
bne Lsa110_cache_purgeD_loop
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
#ifdef CACHE_CLEAN_BLOCK_INTR
msr cpsr_all, r3
#else
str ip, [r3]
#endif
mov pc, lr
/*
 * sa110_cache_purgeID_E / sa110_cache_purgeD_E:
 *	in:	r0 = virtual address of the cache line
 * Clean the D cache entry, drain the write buffer, then flush the
 * entry.  The ID variant additionally flushes the whole I cache
 * (no single-entry I flush is used here).
 */
ENTRY(sa110_cache_purgeID_E)
mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
mov pc, lr
ENTRY(sa110_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
mov pc, lr
/*
 * Soft functions
 *
 * sa110_cache_syncI: synchronize the I cache with the D cache --
 * clean the D cache via the clean-region walk, drain the write
 * buffer, then flush the I cache.
 *	clobbers: r0, r1, r2, r3, ip
 * Blocking and bank toggling behave as in sa110_cache_cleanD.
 */
ENTRY(sa110_cache_syncI)
#ifdef CACHE_CLEAN_BLOCK_INTR
mrs r3, cpsr_all
orr r0, r3, #(I32_bit | F32_bit)
msr cpsr_all, r0
#else
ldr r3, Lblock_userspace_access
ldr ip, [r3]
orr r0, ip, #1
str r0, [r3]
#endif
ldr r2, Lsa110_cache_clean_addr
ldmia r2, {r0, r1} /* r0 = clean region VA, r1 = size */
#ifdef DOUBLE_CACHE_CLEAN_BANK
eor r0, r0, r1
str r0, [r2]
#endif
Lsa110_cache_syncI_loop:
ldr r2, [r0], #32 /* read a line to force eviction */
subs r1, r1, #32
bne Lsa110_cache_syncI_loop
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
#ifdef CACHE_CLEAN_BLOCK_INTR
msr cpsr_all, r3
#else
str ip, [r3]
#endif
mov pc, lr
/*
 * sa110_cache_cleanID_rng / sa110_cache_cleanD_rng (shared body):
 *	in:	r0 = start virtual address, r1 = length in bytes
 * Clean the D cache over the given range, one 32-byte line at a
 * time, then drain the write buffer.  The range is aligned down to
 * a line boundary (and the length extended to compensate); ranges
 * of 16KB (0x4000) or more fall back to a full cache clean.
 *
 * Fix: the loop label now carries the "L" local-label prefix used
 * by every other loop label in this file (cf.
 * Lsa110_cache_cleanD_loop) so it is not exported as a symbol.
 */
ENTRY(sa110_cache_cleanID_rng)
ENTRY(sa110_cache_cleanD_rng)
cmp r1, #0x4000
bcs _C_LABEL(sa110_cache_cleanID)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
Lsa110_cache_cleanD_rng_loop:
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
add r0, r0, #32
subs r1, r1, #32
bpl Lsa110_cache_cleanD_rng_loop
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
/*
 * sa110_cache_purgeID_rng(addr, len):
 *	in:	r0 = start virtual address, r1 = length in bytes
 * Clean and flush (purge) each 32-byte D cache line in the range,
 * drain the write buffer, then flush the whole I cache.  Range is
 * line-aligned as in sa110_cache_cleanD_rng; ranges >= 16KB fall
 * back to a full purge.
 *
 * Fix: loop label given the "L" local-label prefix used by all
 * other loop labels in this file.
 */
ENTRY(sa110_cache_purgeID_rng)
cmp r1, #0x4000
bcs _C_LABEL(sa110_cache_purgeID)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
Lsa110_cache_purgeID_rng_loop:
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
add r0, r0, #32
subs r1, r1, #32
bpl Lsa110_cache_purgeID_rng_loop
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
/*
 * sa110_cache_purgeD_rng(addr, len):
 *	in:	r0 = start virtual address, r1 = length in bytes
 * Same as sa110_cache_purgeID_rng but leaves the I cache alone.
 * Ranges >= 16KB fall back to a full D cache purge.
 *
 * Fix: loop label given the "L" local-label prefix used by all
 * other loop labels in this file.
 */
ENTRY(sa110_cache_purgeD_rng)
cmp r1, #0x4000
bcs _C_LABEL(sa110_cache_purgeD)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
Lsa110_cache_purgeD_rng_loop:
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
add r0, r0, #32
subs r1, r1, #32
bpl Lsa110_cache_purgeD_rng_loop
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
/*
 * sa110_cache_syncI_rng(addr, len):
 *	in:	r0 = start virtual address, r1 = length in bytes
 * Synchronize the I cache for a range: clean each D cache line in
 * the (line-aligned) range, drain the write buffer, then flush the
 * whole I cache.  Ranges >= 16KB fall back to a full sync.
 *
 * Fix: loop label given the "L" local-label prefix used by all
 * other loop labels in this file.
 */
ENTRY(sa110_cache_syncI_rng)
cmp r1, #0x4000
bcs _C_LABEL(sa110_cache_syncI)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
Lsa110_cache_syncI_rng_loop:
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
add r0, r0, #32
subs r1, r1, #32
bpl Lsa110_cache_syncI_rng_loop
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 * r1, r4-r13 must be preserved
 *
 * sa110_context_switch(ttb):
 *	in:	r0 = new translation table base
 * Only r0 and CP15 are touched, so the special convention holds.
 */
ENTRY(sa110_context_switch)
/*
 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
 * Thus the data cache will contain only kernel data and the
 * instruction cache will contain only kernel code, and all
 * kernel mappings are shared by all processes.
 */
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr

View File

@ -0,0 +1,321 @@
/* $NetBSD: cpufunc_asm_xscale.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*
* Copyright (c) 2001 Matt Thomas
* Copyright (c) 1997,1998 Mark Brinicombe.
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* XScale assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/cpu.h>
#include <machine/asm.h>
/*
 * Literal: address of the C variable block_userspace_access.  The
 * cache-clean code below sets its low bit while the clean is in
 * progress (presumably to block userspace access during the clean --
 * confirm against the C side).
 */
Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
/*
 * xscale_setttb(ttb)
 *
 * Set the MMU Translation Table Base register.
 * In:  r0 = new TTB value (written straight into CP15 c2).
 *
 * The caches use virtual addresses that are about to change, so the
 * whole I+D cache state must be cleaned/invalidated around the write.
 * While that happens, either interrupts are disabled
 * (CACHE_CLEAN_BLOCK_INTR) or block_userspace_access is set.
 */
ENTRY(xscale_setttb)
#ifdef CACHE_CLEAN_BLOCK_INTR
	mrs	r3, cpsr_all		/* r3 = saved CPSR, restored below */
	orr	r1, r3, #(I32_bit | F32_bit)
	msr	cpsr_all, r1		/* disable IRQ + FIQ */
#else
	ldr	r3, Lblock_userspace_access
	ldr	r2, [r3]		/* r2 = old flag value, restored below */
	orr	r1, r2, #1
	str	r1, [r3]		/* mark userspace access blocked */
#endif
	stmfd	sp!, {r0-r3, lr}	/* xscale_cache_cleanID may clobber these */
	bl	_C_LABEL(xscale_cache_cleanID)
	ldmfd	sp!, {r0-r3, lr}
	mcr	p15, 0, r0, c7, c5, 0	/* invalidate I$ and BTB */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write and fill buffer */
	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0
	/* If we have updated the TTB we must flush the TLB */
	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLB */
	/* The cleanID above means we only need to flush the I cache here */
	mcr	p15, 0, r0, c7, c5, 0	/* invalidate I$ and BTB */
	/* Make sure that pipeline is emptied */
	mrc	p15, 0, r0, c2, c0, 0	/* read some register in CP15 */
	mov	r0, r0			/* force read to complete */
	sub	pc, pc, #4		/* branch to next instruction (flush prefetch) */
	/* Restore the interrupt mask / userspace-access flag saved above */
#ifdef CACHE_CLEAN_BLOCK_INTR
	msr	cpsr_all, r3
#else
	str	r2, [r3]
#endif
	mov	pc, lr
/*
* TLB functions
*/
/*
 * xscale_tlb_flushID_SE(va)
 *
 * Flush a single I+D TLB entry for the virtual address in r0, and
 * invalidate the branch target buffer (stale branch predictions may
 * reference the unmapped address).
 */
ENTRY(xscale_tlb_flushID_SE)
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
	mcr	p15, 0, r0, c7, c5, 6	/* inv. BTB */
	mov	pc, lr
/*
* Cache functions
*/
/*
 * Simple cache flush/clean primitives.  Whole-cache operations take no
 * arguments; single-entry (_SE / _E) operations take the virtual
 * address in r0.  I-cache invalidations also invalidate the BTB where
 * stale branch predictions could survive.
 */
/* Invalidate the entire I+D cache. */
ENTRY(xscale_cache_flushID)
	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D cache */
	mov	pc, lr
/* Invalidate the entire I cache. */
ENTRY(xscale_cache_flushI)
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */
	mov	pc, lr
/* Invalidate the entire D cache (no writeback). */
ENTRY(xscale_cache_flushD)
	mcr	p15, 0, r0, c7, c6, 0	/* flush D cache */
	mov	pc, lr
/* Invalidate a single I cache line (r0 = VA) and the BTB. */
ENTRY(xscale_cache_flushI_SE)
	mcr	p15, 0, r0, c7, c5, 1	/* flush I cache single entry */
	mcr	p15, 0, r0, c7, c5, 6	/* inv. BTB */
	mov	pc, lr
/* Invalidate a single D cache line (r0 = VA, no writeback). */
ENTRY(xscale_cache_flushD_SE)
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	mov	pc, lr
/* Clean (write back) a single D cache line (r0 = VA). */
ENTRY(xscale_cache_cleanD_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mov	pc, lr
/*
* Information for the XScale cache clean/purge functions:
*
* * Virtual address of the memory region to use
* * Size of memory region
*/
.data
	/*
	 * Virtual address and size of the memory region the cache-clean
	 * routine walks (via line-allocate) to force dirty lines out.
	 * Exported so C code can relocate/resize it; defaults below are
	 * VA 0xf0000000, 32KB (0x8000).
	 */
	.global	_C_LABEL(xscale_cache_clean_addr)
_C_LABEL(xscale_cache_clean_addr):
	.word	0xf0000000
	.global	_C_LABEL(xscale_cache_clean_size)
_C_LABEL(xscale_cache_clean_size):
	.word	0x00008000
	.text
	/* Literal-pool pointers to the two variables above. */
Lxscale_cache_clean_addr:
	.word	_C_LABEL(xscale_cache_clean_addr)
Lxscale_cache_clean_size:
	.word	_C_LABEL(xscale_cache_clean_size)
/*
 * Whole-cache clean/purge.  Several entry points share one body:
 *
 *   xscale_cache_syncI / xscale_cache_purgeID: additionally invalidate
 *	the I cache first (D cache is cleaned by the shared body).
 *   xscale_cache_cleanID / xscale_cache_purgeD / xscale_cache_cleanD:
 *	clean the D cache only.
 *
 * The D cache is cleaned by line-allocating a dedicated memory region
 * (xscale_cache_clean_addr/_size), which evicts all dirty lines.
 * During the clean, either interrupts are blocked
 * (CACHE_CLEAN_BLOCK_INTR; r3 = saved CPSR) or block_userspace_access
 * is set (r3 = its address, ip = its old value).
 *
 * BUG FIX: the CACHE_CLEAN_MINIDATA loop previously loaded into r3
 * ("ldr r3, [r0], #32"), clobbering the saved CPSR / flag pointer held
 * in r3, so the epilogue restored a corrupt CPSR (INTR config) or
 * stored through a wild pointer (non-INTR config).  r2 is dead after
 * the ldmia above, so it is used as the scratch register instead.
 */
ENTRY_NP(xscale_cache_syncI)
ENTRY_NP(xscale_cache_purgeID)
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
ENTRY_NP(xscale_cache_cleanID)
ENTRY_NP(xscale_cache_purgeD)
ENTRY(xscale_cache_cleanD)
#ifdef CACHE_CLEAN_BLOCK_INTR
	mrs	r3, cpsr_all		/* r3 = saved CPSR, restored below */
	orr	r0, r3, #(I32_bit | F32_bit)
	msr	cpsr_all, r0		/* disable IRQ + FIQ */
#else
	ldr	r3, Lblock_userspace_access
	ldr	ip, [r3]		/* ip = old flag value, restored below */
	orr	r0, ip, #1
	str	r0, [r3]		/* mark userspace access blocked */
#endif
	ldr	r2, Lxscale_cache_clean_addr
	ldmia	r2, {r0, r1}		/* r0 = region base, r1 = region size */
	add	r0, r0, r1		/* start at the end, walk downwards */
Lxscale_cache_cleanD_loop:
	subs	r0, r0, #32
	mcr	p15, 0, r0, c7, c2, 5	/* allocate cache line (evicts a dirty one) */
	subs	r1, r1, #32
	bne	Lxscale_cache_cleanD_loop
#ifdef CACHE_CLEAN_MINIDATA
	/*
	 * Clean the mini-data cache.
	 *
	 * It's expected that we only use the mini-data cache for
	 * kernel addresses, so there is no need to purge it on
	 * context switch.
	 */
	mov	r1, #64			/* 64 lines x 32 bytes = 2KB read */
Lxscale_cache_cleanD_loop2:
	ldr	r2, [r0], #32		/* r2 is scratch; r3/ip must survive */
	subs	r1, r1, #1
	bne	Lxscale_cache_cleanD_loop2
#endif
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	/* Restore the interrupt mask / userspace-access flag saved above */
#ifdef CACHE_CLEAN_BLOCK_INTR
	msr	cpsr_all, r3
#else
	str	ip, [r3]
#endif
	mov	pc, lr
/*
 * xscale_cache_purgeID_E(va)
 *
 * Purge (clean + invalidate) a single I+D cache line for the virtual
 * address in r0: clean the D line, drain the write buffer so the data
 * reaches memory, invalidate the I line and BTB, then invalidate the
 * D line.
 */
ENTRY(xscale_cache_purgeID_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c5, 1	/* flush I cache single entry */
	mcr	p15, 0, r0, c7, c5, 6	/* inv. BTB */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	mov	pc, lr
/*
 * xscale_cache_purgeD_E(va)
 *
 * Purge (clean + invalidate) a single D cache line for the virtual
 * address in r0, draining the write buffer between the clean and the
 * invalidate.
 */
ENTRY(xscale_cache_purgeD_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	mov	pc, lr
/*
* Soft functions
*/
/* xscale_cache_syncI is identical to xscale_cache_purgeID */
/*
 * xscale_cache_cleanID_rng(va, len) / xscale_cache_cleanD_rng(va, len)
 *
 * Clean (write back) cache lines over a virtual address range.  Both
 * entry points share one body: only D lines can be dirty, so cleaning
 * "ID" and cleaning "D" are the same operation.
 * In:  r0 = start VA, r1 = length in bytes (presumed from the range
 *      check below -- confirm against the cpufunc callers).
 * Ranges of 16KB (0x4000) or more fall back to the whole-cache clean.
 */
ENTRY(xscale_cache_cleanID_rng)
ENTRY(xscale_cache_cleanD_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(xscale_cache_cleanID)	/* large range: whole-cache clean */
	and	r2, r0, #0x1f			/* r2 = offset into 32-byte cache line */
	add	r1, r1, r2			/* extend length to cover that offset */
	bic	r0, r0, #0x1f			/* align start down to a line boundary */
xscale_cache_cleanD_rng_loop:
	mcr	p15, 0, r0, c7, c10, 1		/* clean D cache entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bpl	xscale_cache_cleanD_rng_loop	/* loop until count < 0 */
	mcr	p15, 0, r0, c7, c10, 4		/* drain write buffer */
	mov	pc, lr
/*
 * xscale_cache_purgeID_rng(va, len)
 *
 * Purge (clean + invalidate) the I+D cache lines over a virtual
 * address range, invalidating the BTB along the way.
 * In:  r0 = start VA, r1 = length in bytes (presumed from the range
 *      check below -- confirm against the cpufunc callers).
 * Ranges of 16KB (0x4000) or more fall back to the whole-cache purge.
 */
ENTRY(xscale_cache_purgeID_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(xscale_cache_purgeID)	/* large range: whole-cache purge */
	and	r2, r0, #0x1f			/* r2 = offset into 32-byte cache line */
	add	r1, r1, r2			/* extend length to cover that offset */
	bic	r0, r0, #0x1f			/* align start down to a line boundary */
xscale_cache_purgeID_rng_loop:
	mcr	p15, 0, r0, c7, c10, 1		/* clean D cache entry */
	mcr	p15, 0, r0, c7, c6, 1		/* flush D cache single entry */
	mcr	p15, 0, r0, c7, c5, 1		/* flush I cache single entry */
	mcr	p15, 0, r0, c7, c5, 6		/* inv. BTB */
	add	r0, r0, #32
	subs	r1, r1, #32
	bpl	xscale_cache_purgeID_rng_loop	/* loop until count < 0 */
	mcr	p15, 0, r0, c7, c10, 4		/* drain write buffer */
	mov	pc, lr
/*
 * xscale_cache_purgeD_rng(va, len)
 *
 * Purge (clean + invalidate) the D cache lines over a virtual address
 * range.
 * In:  r0 = start VA, r1 = length in bytes (presumed from the range
 *      check below -- confirm against the cpufunc callers).
 * Ranges of 16KB (0x4000) or more fall back to the whole-cache purge.
 */
ENTRY(xscale_cache_purgeD_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(xscale_cache_purgeD)	/* large range: whole-cache purge */
	and	r2, r0, #0x1f			/* r2 = offset into 32-byte cache line */
	add	r1, r1, r2			/* extend length to cover that offset */
	bic	r0, r0, #0x1f			/* align start down to a line boundary */
xscale_cache_purgeD_rng_loop:
	mcr	p15, 0, r0, c7, c10, 1		/* clean D cache entry */
	mcr	p15, 0, r0, c7, c6, 1		/* flush D cache single entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bpl	xscale_cache_purgeD_rng_loop	/* loop until count < 0 */
	mcr	p15, 0, r0, c7, c10, 4		/* drain write buffer */
	mov	pc, lr
/*
 * xscale_cache_syncI_rng(va, len)
 *
 * Synchronize the I-cache with the D-cache over a virtual address
 * range: clean each D line, invalidate the corresponding I line and
 * the BTB, then drain the write buffer.
 * In:  r0 = start VA, r1 = length in bytes (presumed from the range
 *      check below -- confirm against the cpufunc callers).
 * Ranges of 16KB (0x4000) or more fall back to the whole-cache sync.
 */
ENTRY(xscale_cache_syncI_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(xscale_cache_syncI)	/* large range: whole-cache sync */
	and	r2, r0, #0x1f			/* r2 = offset into 32-byte cache line */
	add	r1, r1, r2			/* extend length to cover that offset */
	bic	r0, r0, #0x1f			/* align start down to a line boundary */
xscale_cache_syncI_rng_loop:
	mcr	p15, 0, r0, c7, c10, 1		/* clean D cache entry */
	mcr	p15, 0, r0, c7, c5, 1		/* flush I cache single entry */
	mcr	p15, 0, r0, c7, c5, 6		/* inv. BTB */
	add	r0, r0, #32
	subs	r1, r1, #32
	bpl	xscale_cache_syncI_rng_loop	/* loop until count < 0 */
	mcr	p15, 0, r0, c7, c10, 4		/* drain write buffer */
	mov	pc, lr
/*
 * Context switch.
 *
 * These are the CPU-specific parts of the context switcher, cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 *	r1, r4-r13 must be preserved
 */
/*
 * xscale_context_switch(ttb)
 *
 * Load a new translation table base and flush the TLBs.
 * In:  r0 = physical address of the new L1 translation table
 *      (presumed -- it is written straight into CP15 c2; confirm
 *      against cpu_switch()).
 * Special calling convention: r1, r4-r13 must be preserved, and this
 * function only touches r0.
 */
ENTRY(xscale_context_switch)
	/*
	 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
	 * Thus the data cache will contain only kernel data and the
	 * instruction cache will contain only kernel code, and all
	 * kernel mappings are shared by all processes.
	 */
	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0
	/* If we have updated the TTB we must flush the TLB */
	mcr	p15, 0, r0, c8, c7, 0	/* flush the I+D tlb */
	/* Make sure that pipeline is emptied */
	mrc	p15, 0, r0, c2, c0, 0	/* read some register in CP15 */
	mov	r0, r0			/* force the read to complete */
	sub	pc, pc, #4		/* branch to next instruction (flush prefetch) */
	mov	pc, lr

View File

@ -1,4 +1,4 @@
# $NetBSD: files.arm,v 1.39 2001/10/18 14:03:43 rearnsha Exp $
# $NetBSD: files.arm,v 1.40 2001/11/10 23:14:08 thorpej Exp $
# temporary define to allow easy moving to ../arch/arm/arm32
defopt ARM32
@ -52,6 +52,15 @@ file arch/arm/arm/bus_space_notimpl.S arm32
file arch/arm/arm/compat_13_machdep.c compat_13
file arch/arm/arm/cpufunc.c
file arch/arm/arm/cpufunc_asm.S
file arch/arm/arm/cpufunc_asm_arm3.S cpu_arm3
file arch/arm/arm/cpufunc_asm_arm67.S cpu_arm6 | cpu_arm7
file arch/arm/arm/cpufunc_asm_arm7tdmi.S cpu_arm7tdmi
file arch/arm/arm/cpufunc_asm_arm8.S cpu_arm8
file arch/arm/arm/cpufunc_asm_arm9.S cpu_arm9
file	arch/arm/arm/cpufunc_asm_armv4.S	cpu_arm9 | cpu_sa110 | cpu_xscale
file arch/arm/arm/cpufunc_asm_sa1.S cpu_sa110
file arch/arm/arm/cpufunc_asm_xscale.S cpu_xscale
file arch/arm/arm/process_machdep.c
file arch/arm/arm/procfs_machdep.c procfs
file arch/arm/arm/sig_machdep.c