ARM: kernel: Make 32/64-bit atomics work for ARMv5/6

Support for 64-bit atomic operations for ARMv7+ is currently stubbed
out in libroot, but our current targets do not use it anyway.

We now select atomics-as-syscalls automatically based on the ARM
architecture we're building for. The intent is to do away with
most of the board specifics (at the very least on the kernel side)
and just specify the lowest ARMvX version you want to build for.

This gives us the flexibility to distribute a single image for a wide
range of devices, or to build a system tuned for one specific core type.
Ithamar R. Adema 2013-09-18 05:03:18 +02:00
parent cc65466f0d
commit 501b24c63b
5 changed files with 389 additions and 90 deletions
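For context: "atomics as syscalls" means that, with ATOMIC_FUNCS_ARE_SYSCALLS defined, the userland atomic functions in libroot trap into the kernel, which services them with the _user_atomic_*() implementations added below. A minimal sketch of the userland side, assuming the conventional _kern_atomic_add() syscall stub name (that stub is not part of this diff):

#ifdef ATOMIC_FUNCS_ARE_SYSCALLS

int32
atomic_add(vint32 *value, int32 addValue)
{
	// forward to the kernel; it ends up in _user_atomic_add() below
	return _kern_atomic_add(value, addValue);
}

#endif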

arch_config.h (kernel ARM configuration header)

@@ -10,8 +10,14 @@
#define STACK_GROWS_DOWNWARDS
//#define ATOMIC_FUNCS_ARE_SYSCALLS
//#define ATOMIC64_FUNCS_ARE_SYSCALLS
// If we're building on ARMv5 or older, all our atomics need to be syscalls... :(
#if _M_ARM <= 5
#define ATOMIC_FUNCS_ARE_SYSCALLS
#endif
// If we're building on ARMv6 or older, 64-bit atomics need to be syscalls...
#if _M_ARM < 7
#define ATOMIC64_FUNCS_ARE_SYSCALLS
#endif
#endif /* _KERNEL_ARCH_ARM_CONFIG_H */
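Taken together, and assuming _M_ARM carries the targeted ARM architecture level, the header now selects one of three configurations. A summarizing sketch (illustration only, not part of the diff):

#if _M_ARM <= 5
	// ARMv5 and older: no ldrex/strex at all, so both the 32-bit and the
	// 64-bit atomics have to be handled by the kernel
#	define ATOMIC_FUNCS_ARE_SYSCALLS
#	define ATOMIC64_FUNCS_ARE_SYSCALLS
#elif _M_ARM < 7
	// ARMv6: ldrex/strex exist, but plain ARMv6 has no doubleword
	// exclusives, so only the 64-bit atomics become syscalls
#	define ATOMIC64_FUNCS_ARE_SYSCALLS
#else
	// ARMv7 and newer: all atomics can be implemented inline
#endif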

Jamfile (kernel ARM objects)

@@ -10,10 +10,8 @@ SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 32bit ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
KernelMergeObject kernel_arch_arm.o :
# arch_atomic.c
arch_commpage.cpp
arch_cpu.cpp
# arch_cpu_asm.S
arch_debug_console.cpp
arch_debug.cpp
arch_elf.cpp
@@ -33,6 +31,9 @@ KernelMergeObject kernel_arch_arm.o :
arch_uart_8250.cpp
arch_uart_pl011.cpp
arch_atomic64.cpp
arch_atomic32.cpp
# paging
arm_physical_page_mapper.cpp
arm_physical_page_mapper_large_memory.cpp

arch_atomic32.cpp (new file)

@@ -0,0 +1,177 @@
/*
* Copyright 2013 Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Ithamar R. Adema, ithamar@upgrade-android.com
*/
#include <KernelExport.h>
#include <kernel.h>
#include <user_atomic.h>
#include <util/AutoLock.h>
#ifdef ATOMIC_FUNCS_ARE_SYSCALLS
/*
* NOTE: These functions are _intentionally_ not using spinlocks, unlike
* the 64 bit versions. The reason for this is that they are used by the
* spinlock code itself, and therefore would deadlock.
*
* Since these are only really needed for ARMv5, which is not SMP anyway,
* this is an acceptable compromise.
*/
int32
atomic_set(vint32 *value, int32 newValue)
{
InterruptsLocker locker;
int32 oldValue = *value;
*value = newValue;
return oldValue;
}
int32
atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst)
{
InterruptsLocker locker;
int32 oldValue = *value;
if (oldValue == testAgainst)
*value = newValue;
return oldValue;
}
int32
atomic_add(vint32 *value, int32 addValue)
{
InterruptsLocker locker;
int32 oldValue = *value;
*value += addValue;
return oldValue;
}
int32
atomic_and(vint32 *value, int32 andValue)
{
InterruptsLocker locker;
int32 oldValue = *value;
*value &= andValue;
return oldValue;
}
int32
atomic_or(vint32 *value, int32 orValue)
{
InterruptsLocker locker;
int32 oldValue = *value;
*value |= orValue;
return oldValue;
}
int32
atomic_get(vint32 *value)
{
InterruptsLocker locker;
int32 oldValue = *value;
return oldValue;
}
int32
_user_atomic_set(vint32 *value, int32 newValue)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int32), B_READ_DEVICE) == B_OK) {
int32 oldValue = atomic_set(value, newValue);
unlock_memory((void *)value, sizeof(int32), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int32
_user_atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int32), B_READ_DEVICE) == B_OK) {
int32 oldValue = atomic_test_and_set(value, newValue, testAgainst);
unlock_memory((void *)value, sizeof(int32), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int32
_user_atomic_add(vint32 *value, int32 addValue)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int32), B_READ_DEVICE) == B_OK) {
int32 oldValue = atomic_add(value, addValue);
unlock_memory((void *)value, sizeof(int32), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int32
_user_atomic_and(vint32 *value, int32 andValue)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int32), B_READ_DEVICE) == B_OK) {
int32 oldValue = atomic_and(value, andValue);
unlock_memory((void *)value, sizeof(int32), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int32
_user_atomic_or(vint32 *value, int32 orValue)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int32), B_READ_DEVICE) == B_OK) {
int32 oldValue = atomic_or(value, orValue);
unlock_memory((void *)value, sizeof(int32), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int32
_user_atomic_get(vint32 *value)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int32), B_READ_DEVICE) == B_OK) {
int32 oldValue = atomic_get(value);
unlock_memory((void *)value, sizeof(int32), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
#endif /* ATOMIC_FUNCS_ARE_SYSCALLS */
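The atomicity of the functions above rests entirely on InterruptsLocker: with interrupts masked on a uniprocessor ARMv5 machine, nothing can preempt the read-modify-write sequence. Roughly, the helper from util/AutoLock.h behaves like this sketch (illustration only):

// rough equivalent of InterruptsLocker; the real class lives in util/AutoLock.h
class InterruptsLocker {
public:
	InterruptsLocker()
		:
		fState(disable_interrupts())	// mask interrupts on this CPU
	{
	}

	~InterruptsLocker()
	{
		restore_interrupts(fState);		// restore the previous interrupt state
	}

private:
	cpu_status	fState;
};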

arch_atomic64.cpp (new file)

@@ -0,0 +1,192 @@
/*
* Copyright 2013 Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Ithamar R. Adema, ithamar@upgrade-android.com
*/
#include <KernelExport.h>
#include <kernel.h>
#include <user_atomic.h>
#include <util/AutoLock.h>
#ifdef ATOMIC64_FUNCS_ARE_SYSCALLS
/*
* NOTE: Unlike their 32-bit counterparts, these functions can use
* spinlocks safely currently, as no atomic 64-bit operations are
* done in the spinlock code. If this ever changes, this code will
* have to change.
*
* This code is here for ARMv6, which cannot do proper 64-bit atomic
* operations. Anything newer is capable, and therefore does not
* depend on this code.
*/
static spinlock atomic_lock = B_SPINLOCK_INITIALIZER;
int64
atomic_set64(vint64 *value, int64 newValue)
{
SpinLocker locker(&atomic_lock);
int64 oldValue = *value;
*value = newValue;
return oldValue;
}
int64
atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
{
SpinLocker locker(&atomic_lock);
int64 oldValue = *value;
if (oldValue == testAgainst)
*value = newValue;
return oldValue;
}
int64
atomic_add64(vint64 *value, int64 addValue)
{
SpinLocker locker(&atomic_lock);
int64 oldValue = *value;
*value += addValue;
return oldValue;
}
int64
atomic_and64(vint64 *value, int64 andValue)
{
SpinLocker locker(&atomic_lock);
int64 oldValue = *value;
*value &= andValue;
return oldValue;
}
int64
atomic_or64(vint64 *value, int64 orValue)
{
SpinLocker locker(&atomic_lock);
int64 oldValue = *value;
*value |= orValue;
return oldValue;
}
int64
atomic_get64(vint64 *value)
{
SpinLocker locker(&atomic_lock);
return *value;
}
int64
_user_atomic_set64(vint64 *value, int64 newValue)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int64), B_READ_DEVICE) == B_OK) {
int64 oldValue = atomic_set64(value, newValue);
unlock_memory((void *)value, sizeof(int64), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int64), B_READ_DEVICE) == B_OK) {
int64 oldValue = atomic_test_and_set64(value, newValue, testAgainst);
unlock_memory((void *)value, sizeof(int64), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_add64(vint64 *value, int64 addValue)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int64), B_READ_DEVICE) == B_OK) {
int64 oldValue = atomic_add64(value, addValue);
unlock_memory((void *)value, sizeof(int64), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_and64(vint64 *value, int64 andValue)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int64), B_READ_DEVICE) == B_OK) {
int64 oldValue = atomic_and64(value, andValue);
unlock_memory((void *)value, sizeof(int64), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_or64(vint64 *value, int64 orValue)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int64), B_READ_DEVICE) == B_OK) {
int64 oldValue = atomic_or64(value, orValue);
unlock_memory((void *)value, sizeof(int64), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_get64(vint64 *value)
{
if (IS_USER_ADDRESS(value)
&& lock_memory((void *)value, sizeof(int64), B_READ_DEVICE) == B_OK) {
int64 oldValue = atomic_get64(value);
unlock_memory((void *)value, sizeof(int64), B_READ_DEVICE);
return oldValue;
}
access_violation:
// XXX kill application
return -1;
}
#endif /* ATOMIC64_FUNCS_ARE_SYSCALLS */
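As a usage note, atomic_test_and_set64() is a 64-bit compare-and-swap: the store only happens when the current value equals testAgainst, and the previous value is always returned. A hypothetical caller (not part of this change) could use it like this:

static vint64 sInitToken = 0;

bool
try_claim_token(int64 newOwner)
{
	// publish newOwner only if the token is still unclaimed (0); the
	// return value is what the token held before the attempt
	return atomic_test_and_set64(&sInitToken, newOwner, 0) == 0;
}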

atomic.S (libroot ARM atomics)

@@ -4,14 +4,15 @@
*/
#include <asm_defs.h>
#include <arch_config.h>
.text
#ifndef ATOMIC_FUNCS_ARE_SYSCALLS
/* int atomic_add(int *value, int increment)
*/
FUNCTION(atomic_add):
#if __ARM_ARCH__ >= 6
miss1: ldrex r12, [r0]
add r2, r12, r1
strex r3, r2, [r0]
@@ -19,25 +20,6 @@ miss1: ldrex r12, [r0]
bne miss1
mov r0, r12
bx lr
#else
/* disable interrupts, do the add, and reenable */
mrs r2, cpsr
mov r12, r2
orr r2, r2, #(3<<6)
msr cpsr_c, r2
/* ints disabled, old cpsr state in r12 */
/* do the add, leave the previous value in r0 */
mov r3, r0
ldr r0, [r3]
add r2, r0, r1
str r2, [r3]
/* restore interrupts and exit */
msr cpsr_c, r12
bx lr
#endif
FUNCTION_END(atomic_add)
@@ -46,7 +28,6 @@ FUNCTION_END(atomic_add)
/* int atomic_and(int *value, int andValue)
*/
FUNCTION(atomic_and):
#if __ARM_ARCH__ >= 6
miss2: ldrex r12, [r0]
and r2, r12, r1
strex r3, r2, [r0]
@@ -54,33 +35,11 @@ miss2: ldrex r12, [r0]
bne miss2
mov r0, r12
bx lr
#else
/* disable interrupts, do the and, and reenable */
mrs r2, cpsr
mov r12, r2
orr r2, r2, #(3<<6)
msr cpsr_c, r2
/* ints disabled, old cpsr state in r12 */
/* do the and, leave the previous value in r0 */
mov r3, r0
ldr r0, [r3]
and r2, r0, r1
str r2, [r3]
/* restore interrupts and exit */
msr cpsr_c, r12
bx lr
#endif
FUNCTION_END(atomic_and)
/* int atomic_or(int *value, int orValue)
*/
FUNCTION(atomic_or):
#if __ARM_ARCH__ >= 6
miss3: ldrex r12, [r0]
eor r2, r12, r1
strex r3, r2, [r0]
@@ -88,48 +47,21 @@ miss3: ldrex r12, [r0]
bne miss3
mov r0, r12
bx lr
#else
/* disable interrupts, do the or, and reenable */
mrs r2, cpsr
mov r12, r2
orr r2, r2, #(3<<6)
msr cpsr_c, r2
/* ints disabled, old cpsr state in r12 */
/* do the or, leave the previous value in r0 */
mov r3, r0
ldr r0, [r3]
orr r2, r0, r1
str r2, [r3]
/* restore interrupts and exit */
msr cpsr_c, r12
bx lr
#endif
FUNCTION_END(atomic_or)
/* int atomic_set(int *value, int setTo)
*/
FUNCTION(atomic_set):
#if __ARM_ARCH__ >= 6
miss4: ldrex r12, [r0]
strex r3, r1, [r0]
teq r3, #0
bne miss4
bx lr
#else
mov r3, r0
swp r0, r1, [r3]
bx lr
#endif
FUNCTION_END(atomic_set)
/* int atomic_test_and_set(int *value, int setTo, int testValue)
*/
FUNCTION(atomic_test_and_set):
#if __ARM_ARCH__ >= 6
miss5: ldrex r12, [r0] @ load from the address and mark it exclusive
cmp r12, r2 @ compare the value with the comparand (r2)
strexeq r3, r1, [r0] @ if they were equal, attempt to store the new value (r1)
@@ -139,21 +71,6 @@ miss5: ldrex r12, [r0] @ load from the address and
bne same @ if it succeeded, jump to (same) and return. there is no need to clrex if strex succeeded
differ: clrex @ clrex
same: mov r0, r12
#else
/* disable interrupts, and save state */
mrs r3, cpsr
mov r12, r3
orr r3, r3, #(3<<6)
msr cpsr_c, r3
mov r3, r0
ldr r0, [r3]
cmp r0, r2
streq r1, [r3]
/* restore interrupts and exit */
msr cpsr_c, r12
#endif
bx lr
FUNCTION_END(atomic_test_and_set)
@@ -168,6 +85,10 @@ FUNCTION(__sync_fetch_and_add_4):
bx lr
FUNCTION_END(__sync_fetch_and_add_4)
#endif
#ifndef ATOMIC64_FUNCS_ARE_SYSCALLS
/* int64 atomic_add64(vint64 *value, int64 addValue) */
FUNCTION(atomic_add64):
bx lr
@@ -197,3 +118,5 @@ FUNCTION_END(atomic_test_and_set64)
FUNCTION(atomic_get64):
bx lr
FUNCTION_END(atomic_get64)
#endif /* ATOMIC64_FUNCS_ARE_SYSCALLS */
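The ldrex/strex sequences kept for the ARMv6+ paths above are classic load-linked/store-conditional retry loops. Expressed at the C level, and with a GCC builtin standing in for the exclusive store (illustration only, not the shipped code), atomic_add amounts to:

int
atomic_add_sketch(volatile int *value, int increment)
{
	int oldValue;
	do {
		oldValue = *value;	// ldrex r12, [r0]
		// try to publish oldValue + increment; if the location was touched
		// in the meantime this fails, like a failing strex, and we retry
	} while (!__sync_bool_compare_and_swap(value, oldValue, oldValue + increment));
	return oldValue;	// mov r0, r12; bx lr
}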