renamed atomic_read() to atomic_get() (since we already have atomic_set(), but no atomic_write())

renamed the user_??? functions to the new _user_??? naming style.
changed the implementation of the PPC 64-bit atomic functions to use (un)lock_memory()


git-svn-id: file:///srv/svn/repos/haiku/trunk/current@4417 a95241bf-73f2-0310-859d-f6bbb57e9c96
beveloper 2003-08-31 01:21:56 +00:00
parent a54e42d79a
commit 188b5de0bf
11 changed files with 138 additions and 159 deletions
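
For illustration, this is how the renamed 32-bit primitives read at a call site (a sketch against the declarations in OS.h below; sCounter and example() are invented for this example):

#include <OS.h>

/* Sketch only: atomic_get() is now the natural counterpart of
 * atomic_set(); both return the value the variable held before the call. */
static vint32 sCounter = 0;

void
example(void)
{
	int32 current, previous;

	atomic_set(&sCounter, 5);             /* store 5 */
	current = atomic_get(&sCounter);      /* atomic read, was atomic_read() */
	previous = atomic_add(&sCounter, 1);  /* returns the value before the add */
}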

View File

@@ -138,14 +138,14 @@ extern _IMPEXP_ROOT int32 atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst);
extern _IMPEXP_ROOT int32 atomic_add(vint32 *value, int32 addValue);
extern _IMPEXP_ROOT int32 atomic_and(vint32 *value, int32 andValue);
extern _IMPEXP_ROOT int32 atomic_or(vint32 *value, int32 orValue);
extern _IMPEXP_ROOT int32 atomic_read(vint32 *value);
extern _IMPEXP_ROOT int32 atomic_get(vint32 *value);
extern _IMPEXP_ROOT int64 atomic_set64(vint64 *value, int64 newValue);
extern _IMPEXP_ROOT int64 atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst);
extern _IMPEXP_ROOT int64 atomic_add64(vint64 *value, int64 addValue);
extern _IMPEXP_ROOT int64 atomic_and64(vint64 *value, int64 andValue);
extern _IMPEXP_ROOT int64 atomic_or64(vint64 *value, int64 orValue);
extern _IMPEXP_ROOT int64 atomic_read64(vint64 *value);
extern _IMPEXP_ROOT int64 atomic_get64(vint64 *value);
// Other stuff -----------------------------------------------------------------
extern _IMPEXP_ROOT void * get_stack_frame(void);
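
The test-and-set primitive above is the building block for lock-free updates; a hedged sketch (increment_if_positive() is invented for this example):

#include <OS.h>

/* Sketch: retry loop built on atomic_get() and atomic_test_and_set(),
 * the compare-and-swap primitive declared above. Increments *value only
 * while it is positive; returns the previous value. */
static int32
increment_if_positive(vint32 *value)
{
	int32 old;

	do {
		old = atomic_get(value);
		if (old <= 0)
			return old;
	} while (atomic_test_and_set(value, old + 1, old) != old);

	return old;
}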

View File

@@ -140,13 +140,13 @@ enum {
SYSCALL_ATOMIC_ADD,
SYSCALL_ATOMIC_AND,
SYSCALL_ATOMIC_OR, /* 130 */
SYSCALL_ATOMIC_READ,
SYSCALL_ATOMIC_GET,
SYSCALL_ATOMIC_SET64,
SYSCALL_ATOMIC_TEST_AND_SET64,
SYSCALL_ATOMIC_ADD64,
SYSCALL_ATOMIC_AND64, /* 135 */
SYSCALL_ATOMIC_OR64,
SYSCALL_ATOMIC_READ64,
SYSCALL_ATOMIC_GET64,
};
int syscall_dispatcher(unsigned long call_num, void *arg_buffer, uint64 *call_ret);

View File

@@ -114,13 +114,13 @@ int32 _kern_atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst);
int32 _kern_atomic_add(vint32 *value, int32 addValue);
int32 _kern_atomic_and(vint32 *value, int32 andValue);
int32 _kern_atomic_or(vint32 *value, int32 orValue);
int32 _kern_atomic_read(vint32 *value);
int32 _kern_atomic_get(vint32 *value);
int64 _kern_atomic_set64(vint64 *value, int64 newValue);
int64 _kern_atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst);
int64 _kern_atomic_add64(vint64 *value, int64 addValue);
int64 _kern_atomic_and64(vint64 *value, int64 andValue);
int64 _kern_atomic_or64(vint64 *value, int64 orValue);
int64 _kern_atomic_read64(vint64 *value);
int64 _kern_atomic_get64(vint64 *value);
int sys_sysctl(int *, uint, void *, size_t *, void *, size_t);
int sys_socket(int, int, int);

View File

@@ -9,18 +9,18 @@
* in userspace, they are implemented as these syscalls.
*/
int32 user_atomic_set(vint32 *value, int32 newValue);
int32 user_atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst);
int32 user_atomic_add(vint32 *value, int32 addValue);
int32 user_atomic_and(vint32 *value, int32 andValue);
int32 user_atomic_or(vint32 *value, int32 orValue);
int32 user_atomic_read(vint32 *value);
int32 _user_atomic_set(vint32 *value, int32 newValue);
int32 _user_atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst);
int32 _user_atomic_add(vint32 *value, int32 addValue);
int32 _user_atomic_and(vint32 *value, int32 andValue);
int32 _user_atomic_or(vint32 *value, int32 orValue);
int32 _user_atomic_get(vint32 *value);
int64 user_atomic_set64(vint64 *value, int64 newValue);
int64 user_atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst);
int64 user_atomic_add64(vint64 *value, int64 addValue);
int64 user_atomic_and64(vint64 *value, int64 andValue);
int64 user_atomic_or64(vint64 *value, int64 orValue);
int64 user_atomic_read64(vint64 *value);
int64 _user_atomic_set64(vint64 *value, int64 newValue);
int64 _user_atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst);
int64 _user_atomic_add64(vint64 *value, int64 addValue);
int64 _user_atomic_and64(vint64 *value, int64 andValue);
int64 _user_atomic_or64(vint64 *value, int64 orValue);
int64 _user_atomic_get64(vint64 *value);
#endif /* _KERNEL_USER_ATOMIC_H */

View File

@@ -13,8 +13,7 @@
* Slow, using spinlocks...
*/
static spinlock kernel_lock = 0;
static spinlock user_lock = 0;
static spinlock atomic_lock = 0;
int64
atomic_set64(vint64 *value, int64 newValue)
@@ -22,10 +21,10 @@ atomic_set64(vint64 *value, int64 newValue)
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&kernel_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value = newValue;
release_spinlock(&kernel_lock);
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
@@ -36,11 +35,11 @@ atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&kernel_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
if (*value == testAgainst)
if (oldValue == testAgainst)
*value = newValue;
release_spinlock(&kernel_lock);
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
@@ -51,10 +50,10 @@ atomic_add64(vint64 *value, int64 addValue)
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&kernel_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value += addValue;
release_spinlock(&kernel_lock);
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
@@ -65,10 +64,10 @@ atomic_and64(vint64 *value, int64 andValue)
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&kernel_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value &= andValue;
release_spinlock(&kernel_lock);
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
@@ -79,169 +78,155 @@ atomic_or64(vint64 *value, int64 orValue)
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&kernel_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value |= orValue;
release_spinlock(&kernel_lock);
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
int64
atomic_read64(vint64 *value)
atomic_get64(vint64 *value)
{
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&kernel_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
release_spinlock(&kernel_lock);
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
int64
user_atomic_set64(vint64 *value, int64 newValue)
_user_atomic_set64(vint64 *value, int64 newValue)
{
cpu_status status;
int64 oldValue;
if (!CHECK_USER_ADDRESS(value))
goto access_violation;
if (B_OK != lock_memory(value, 8, B_READ_DEVICE))
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&user_lock);
if ((addr)value >= KERNEL_BASE && (addr)value <= KERNEL_TOP)
goto error;
if (user_memcpy(&oldValue, value, 8) < 0)
goto error;
if (user_memcpy(value, &newValue, 8) < 0)
goto error;
release_spinlock(&user_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value = newValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory(value, 8, B_READ_DEVICE);
return oldValue;
error:
release_spinlock(&user_lock);
restore_interrupts(status);
access_violation:
// XXX kill application
return -1;
}
int64
user_atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
_user_atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
{
cpu_status status;
int64 oldValue;
if (!CHECK_USER_ADDRESS(value))
goto access_violation;
if (B_OK != lock_memory(value, 8, B_READ_DEVICE))
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&user_lock);
if ((addr)value >= KERNEL_BASE && (addr)value <= KERNEL_TOP)
goto error;
if (user_memcpy(&oldValue, value, 8) < 0)
goto error;
acquire_spinlock(&atomic_lock);
oldValue = *value;
if (oldValue == testAgainst)
if (user_memcpy(value, &newValue, 8) < 0)
goto error;
release_spinlock(&user_lock);
*value = newValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory(value, 8, B_READ_DEVICE);
return oldValue;
error:
release_spinlock(&user_lock);
restore_interrupts(status);
access_violation:
// XXX kill application
return -1;
}
int64
user_atomic_add64(vint64 *value, int64 addValue)
_user_atomic_add64(vint64 *value, int64 addValue)
{
cpu_status status;
int64 oldValue;
if (!CHECK_USER_ADDRESS(value))
goto access_violation;
if (B_OK != lock_memory(value, 8, B_READ_DEVICE))
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&user_lock);
if ((addr)value >= KERNEL_BASE && (addr)value <= KERNEL_TOP)
goto error;
if (user_memcpy(&oldValue, value, 8) < 0)
goto error;
addValue += oldValue;
if (user_memcpy(value, &addValue, 8) < 0)
goto error;
release_spinlock(&user_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value += addValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory(value, 8, B_READ_DEVICE);
return oldValue;
error:
release_spinlock(&user_lock);
restore_interrupts(status);
access_violation:
// XXX kill application
return -1;
}
int64
user_atomic_and64(vint64 *value, int64 andValue)
_user_atomic_and64(vint64 *value, int64 andValue)
{
cpu_status status;
int64 oldValue;
if (!CHECK_USER_ADDRESS(value))
goto access_violation;
if (B_OK != lock_memory(value, 8, B_READ_DEVICE))
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&user_lock);
if ((addr)value >= KERNEL_BASE && (addr)value <= KERNEL_TOP)
goto error;
if (user_memcpy(&oldValue, value, 8) < 0)
goto error;
andValue &= oldValue;
if (user_memcpy(value, &andValue, 8) < 0)
goto error;
release_spinlock(&user_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value &= andValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory(value, 8, B_READ_DEVICE);
return oldValue;
error:
release_spinlock(&user_lock);
restore_interrupts(status);
access_violation:
// XXX kill application
return -1;
}
int64
user_atomic_or64(vint64 *value, int64 orValue)
_user_atomic_or64(vint64 *value, int64 orValue)
{
cpu_status status;
int64 oldValue;
if (!CHECK_USER_ADDRESS(value))
goto access_violation;
if (B_OK != lock_memory(value, 8, B_READ_DEVICE))
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&user_lock);
if ((addr)value >= KERNEL_BASE && (addr)value <= KERNEL_TOP)
goto error;
if (user_memcpy(&oldValue, value, 8) < 0)
goto error;
orValue |= oldValue;
if (user_memcpy(value, &orValue, 8) < 0)
goto error;
release_spinlock(&user_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value |= orValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory(value, 8, B_READ_DEVICE);
return oldValue;
error:
release_spinlock(&user_lock);
restore_interrupts(status);
access_violation:
// XXX kill application
return -1;
}
int64
user_atomic_read64(vint64 *value)
_user_atomic_get64(vint64 *value)
{
cpu_status status;
int64 oldValue;
if (!CHECK_USER_ADDRESS(value))
goto access_violation;
if (B_OK != lock_memory(value, 8, B_READ_DEVICE))
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&user_lock);
if ((addr)value >= KERNEL_BASE && (addr)value <= KERNEL_TOP)
goto error;
if (user_memcpy(&oldValue, value, 8) < 0)
goto error;
release_spinlock(&user_lock);
acquire_spinlock(&atomic_lock);
oldValue = *value;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory(value, 8, B_READ_DEVICE);
return oldValue;
error:
release_spinlock(&user_lock);
restore_interrupts(status);
access_violation:
// XXX kill application
return -1;
}
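
Every _user_*64() function above now follows the same shape; condensed into one hedged sketch (the modify step stands in for the set/add/and/or/test-and-set bodies):

/* Sketch of the new pattern: lock_memory() wires the user page so it can
 * neither be unmapped nor paged out, after which the kernel may
 * dereference the pointer directly under one spinlock with interrupts
 * off -- no more user_memcpy() juggling under the old user_lock. */
int64
_user_atomic_op64_sketch(vint64 *value)
{
	cpu_status status;
	int64 oldValue;

	if (!CHECK_USER_ADDRESS(value))                   /* reject kernel addresses */
		goto access_violation;
	if (B_OK != lock_memory(value, 8, B_READ_DEVICE)) /* wire the page */
		goto access_violation;

	status = disable_interrupts();
	acquire_spinlock(&atomic_lock);
	oldValue = *value;
	/* ... modify *value here ... */
	release_spinlock(&atomic_lock);
	restore_interrupts(status);

	unlock_memory(value, 8, B_READ_DEVICE);
	return oldValue;

access_violation:
	// XXX kill application
	return -1;
}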

View File

@@ -5,9 +5,14 @@
#include <SupportDefs.h>
#include <ktypes.h>
#include <user_atomic.h>
// The code below only works on single-CPU SH4 systems.
// Interrupts must be disabled during execution, too.
int32
user_atomic_add(vint32 *uval, int32 incr)
_user_atomic_add(vint32 *uval, int32 incr)
{
int32 val;
int32 ret;
@@ -18,8 +23,6 @@ user_atomic_add(vint32 *uval, int32 incr)
if (user_memcpy(&val, (int32 *)uval, sizeof(val)) < 0)
goto error;
// XXX broken on non SH4-systems, or when interrupts are enabled
// XXX x86 must use the assembly functions directly in userspace and not this ones
ret = val;
val += incr;
@@ -35,7 +38,7 @@ error:
int32
user_atomic_and(vint32 *uval, int32 incr)
_user_atomic_and(vint32 *uval, int32 incr)
{
int val;
int ret;
@@ -46,8 +49,6 @@ user_atomic_and(vint32 *uval, int32 incr)
if (user_memcpy(&val, (int32 *)uval, sizeof(val)) < 0)
goto error;
// XXX broken on non SH4-systems, or when interrupts are enabled
// XXX x86 must use the assembly functions directly in userspace and not this ones
ret = val;
val &= incr;
@@ -63,7 +64,7 @@ error:
int32
user_atomic_or(vint32 *uval, int32 incr)
_user_atomic_or(vint32 *uval, int32 incr)
{
int val;
int ret;
@@ -74,8 +75,6 @@ user_atomic_or(vint32 *uval, int32 incr)
if (user_memcpy(&val, (int32 *)uval, sizeof(val)) < 0)
goto error;
// XXX broken on non SH4-systems, or when interrupts are enabled
// XXX x86 must use the assembly functions directly in userspace and not this ones
ret = val;
val |= incr;
@@ -91,7 +90,7 @@ error:
int32
user_atomic_set(vint32 *uval, int32 set_to)
_user_atomic_set(vint32 *uval, int32 set_to)
{
int val;
int ret;
@@ -102,8 +101,6 @@ user_atomic_set(vint32 *uval, int32 set_to)
if (user_memcpy(&val, (int32 *)uval, sizeof(val)) < 0)
goto error;
// XXX broken on non SH4-systems, or when interrupts are enabled
// XXX x86 must use the assembly functions directly in userspace and not this ones
ret = val;
val = set_to;
@@ -119,7 +116,7 @@ error:
int32
user_atomic_test_and_set(vint32 *uval, int32 set_to, int32 test_val)
_user_atomic_test_and_set(vint32 *uval, int32 set_to, int32 test_val)
{
int val;
int ret;
@@ -130,8 +127,6 @@ user_atomic_test_and_set(vint32 *uval, int32 set_to, int32 test_val)
if (user_memcpy(&val, (int32 *)uval, sizeof(val)) < 0)
goto error;
// XXX broken on non SH4-systems, or when interrupts are enabled
// XXX x86 must use the assembly functions directly in userspace and not this ones
ret = val;
if (val == test_val) {
val = set_to;
@@ -145,4 +140,3 @@ error:
// XXX kill the app
return -1;
}

View File

@@ -441,44 +441,44 @@ int syscall_dispatcher(unsigned long call_num, void *arg_buffer, uint64 *call_ret)
// 32 bit atomic functions
#ifdef ATOMIC_FUNCS_ARE_SYSCALLS
case SYSCALL_ATOMIC_SET:
*call_ret = user_atomic_set((int32 *)arg0, (int32)arg1);
*call_ret = _user_atomic_set((vint32 *)arg0, (int32)arg1);
break;
case SYSCALL_ATOMIC_TEST_AND_SET:
*call_ret = user_atomic_test_and_set((int32 *)arg0, (int32)arg1, (int32)arg2);
*call_ret = _user_atomic_test_and_set((vint32 *)arg0, (int32)arg1, (int32)arg2);
break;
case SYSCALL_ATOMIC_ADD:
*call_ret = user_atomic_add((int32 *)arg0, (int32)arg1);
*call_ret = _user_atomic_add((vint32 *)arg0, (int32)arg1);
break;
case SYSCALL_ATOMIC_AND:
*call_ret = user_atomic_and((int32 *)arg0, (int32)arg1);
*call_ret = _user_atomic_and((vint32 *)arg0, (int32)arg1);
break;
case SYSCALL_ATOMIC_OR:
*call_ret = user_atomic_or((int32 *)arg0, (int32)arg1);
*call_ret = _user_atomic_or((vint32 *)arg0, (int32)arg1);
break;
case SYSCALL_ATOMIC_READ:
*call_ret = user_atomic_read((int32 *)arg0);
case SYSCALL_ATOMIC_GET:
*call_ret = _user_atomic_get((vint32 *)arg0);
break;
#endif
// 64 bit atomic functions
#ifdef ATOMIC64_FUNCS_ARE_SYSCALLS
case SYSCALL_ATOMIC_SET64:
*call_ret = user_atomic_set64((int64 *)arg0, INT32TOINT64(arg1, arg2));
*call_ret = _user_atomic_set64((vint64 *)arg0, INT32TOINT64(arg1, arg2));
break;
case SYSCALL_ATOMIC_TEST_AND_SET64:
*call_ret = user_atomic_test_and_set64((int64 *)arg0, INT32TOINT64(arg1, arg2), INT32TOINT64(arg3, arg4));
*call_ret = _user_atomic_test_and_set64((vint64 *)arg0, INT32TOINT64(arg1, arg2), INT32TOINT64(arg3, arg4));
break;
case SYSCALL_ATOMIC_ADD64:
*call_ret = user_atomic_add64((int64 *)arg0, INT32TOINT64(arg1, arg2));
*call_ret = _user_atomic_add64((vint64 *)arg0, INT32TOINT64(arg1, arg2));
break;
case SYSCALL_ATOMIC_AND64:
*call_ret = user_atomic_and64((int64 *)arg0, INT32TOINT64(arg1, arg2));
*call_ret = _user_atomic_and64((vint64 *)arg0, INT32TOINT64(arg1, arg2));
break;
case SYSCALL_ATOMIC_OR64:
*call_ret = user_atomic_or64((int64 *)arg0, INT32TOINT64(arg1, arg2));
*call_ret = _user_atomic_or64((vint64 *)arg0, INT32TOINT64(arg1, arg2));
break;
case SYSCALL_ATOMIC_READ64:
*call_ret = user_atomic_read64((int64 *)arg0);
case SYSCALL_ATOMIC_GET64:
*call_ret = _user_atomic_get64((vint64 *)arg0);
break;
#endif
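
The 64-bit cases reassemble each argument from two 32-bit syscall slots via INT32TOINT64. Its definition is not part of this diff; a plausible form, with the word order being an assumption:

/* Hypothetical definition for illustration only -- the real macro lives
 * elsewhere in the tree and its word order may differ: */
#define INT32TOINT64(a, b) \
	((int64)(((uint64)(uint32)(b) << 32) | (uint32)(a)))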

View File

@@ -61,10 +61,10 @@ lost5: lwarx %r6, 0, %r3
out5: mr %r3, %r6
blr
/* int atomic_read(int *value)
/* int atomic_get(int *value)
* r3
*/
FUNCTION(atomic_read):
FUNCTION(atomic_get):
lost6: lwarx %r5, 0, %r3
stwcx. %r5, 0, %r3
bne- lost6
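
Restated in C, the lwarx/stwcx. pair above makes the plain read atomic (store_conditional() is a hypothetical stand-in for stwcx. plus the bne- retry):

/* lwarx loads the word and takes a reservation on it; stwcx. stores the
 * same value back only if the reservation survived, so a successful pair
 * proves no other processor wrote the word in between. */
int32
atomic_get_equivalent(vint32 *value)
{
	int32 v;

	do {
		v = *value;                          /* lwarx: load and reserve */
	} while (!store_conditional(value, v));  /* stwcx./bne-: retry on loss */

	return v;
}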

View File

@@ -59,15 +59,15 @@ _atomic_or1:
jnz _atomic_or1
ret
/* int32 atomic_read(vint32 *value) */
FUNCTION(atomic_read):
/* int32 atomic_get(vint32 *value) */
FUNCTION(atomic_get):
movl 4(%esp), %edx
_atomic_read1:
_atomic_get1:
movl (%edx), %eax
movl %eax, %ecx
lock
cmpxchgl %ecx, (%edx)
jnz _atomic_read1
jnz _atomic_get1
ret
/* int64 atomic_set64(vint64 *value, int64 newValue) */
@@ -159,19 +159,19 @@ _atomic_or64_1:
pop %ebp
ret
/* int64 atomic_read64(vint64 *value) */
FUNCTION(atomic_read64):
/* int64 atomic_get64(vint64 *value) */
FUNCTION(atomic_get64):
push %ebp
push %ebx
movl 12(%esp), %ebp
_atomic_read64_1:
_atomic_get64_1:
movl (%ebp), %eax
movl 4(%ebp), %edx
movl %eax, %ebx
movl %edx, %ecx
lock
cmpxchg8b (%ebp)
jnz _atomic_read64_1
jnz _atomic_get64_1
pop %ebx
pop %ebp
ret
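
The 64-bit read works the same way at C level: IA-32 has no ordinary 64-bit load that is guaranteed atomic, so atomic_get64() compare-exchanges the value with itself (compare_and_swap64() is a hypothetical stand-in for lock cmpxchg8b):

/* cmpxchg8b with expected == replacement yields an atomic 64-bit read;
 * the plain load below may tear, but the CAS only succeeds -- and the
 * loop only exits -- when v matched the full 64-bit value in memory. */
int64
atomic_get64_equivalent(vint64 *value)
{
	int64 v;

	do {
		v = *value;                              /* may tear; checked below */
	} while (!compare_and_swap64(value, v, v));  /* lock cmpxchg8b + jnz */

	return v;
}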

View File

@@ -40,9 +40,9 @@ atomic_or(vint32 *value, int32 orValue)
}
int32
atomic_read(vint32 *value)
atomic_get(vint32 *value)
{
return _kern_atomic_read(value);
return _kern_atomic_get(value);
}
#endif
@@ -78,8 +78,8 @@ atomic_or64(vint64 *value, int64 orValue)
}
int64
atomic_read64(vint64 *value)
atomic_get64(vint64 *value)
{
return _kern_atomic_read64(value);
return _kern_atomic_get64(value);
}
#endif

View File

@@ -168,13 +168,13 @@ SYSCALL3(_kern_atomic_test_and_set, 127)
SYSCALL2(_kern_atomic_add, 128)
SYSCALL2(_kern_atomic_and, 129)
SYSCALL2(_kern_atomic_or, 130)
SYSCALL1(_kern_atomic_read, 131)
SYSCALL1(_kern_atomic_get, 131)
SYSCALL3(_kern_atomic_set64, 132)
SYSCALL5(_kern_atomic_test_and_set64, 133)
SYSCALL3(_kern_atomic_add64, 134)
SYSCALL3(_kern_atomic_and64, 135)
SYSCALL3(_kern_atomic_or64, 136)
SYSCALL1(_kern_atomic_read64, 137)
SYSCALL1(_kern_atomic_get64, 137)
/* Signal handling calls */
SYSCALL0(sys_return_from_signal, 103)