kernel/smp: Add a comment for some obscure knowledge

* I was ready to rip this out until PulkoMandy set me straight.
* Add a comment so others understand the impact here.
This commit is contained in:
Alexander von Gluck IV 2017-07-03 09:41:20 -05:00
parent 3eee68eb9a
commit 9c8119e02c
2 changed files with 3 additions and 110 deletions

View File

@ -1,110 +0,0 @@
/*
* Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
* Distributed under the terms of the MIT License.
*
* Authors:
* Alexander von Gluck IV <kallisti5@unixzen.com>
*/
#ifndef _KERNEL_ARCH_PPC_ATOMIC_H
#define _KERNEL_ARCH_PPC_ATOMIC_H
// Read barrier: orders prior loads before subsequent loads.
static inline void
memory_read_barrier_inline(void)
{
#ifdef __powerpc64__
	// 64-bit PowerPC: lwsync (lightweight sync) is sufficient here and
	// cheaper than a full sync.
	asm volatile("lwsync" : : : "memory");
#else
	// 32-bit PowerPC: fall back to the full sync instruction.
	// NOTE(review): lwsync is presumably not available on all supported
	// 32-bit cores — confirm before "optimizing" this.
	asm volatile("sync" : : : "memory");
#endif
}
// Write barrier: orders prior stores before subsequent stores.
static inline void
memory_write_barrier_inline(void)
{
#ifdef __powerpc64__
	// 64-bit PowerPC: lwsync orders store-store and is cheaper than sync.
	asm volatile("lwsync" : : : "memory");
#else
	// 32-bit PowerPC: eieio (Enforce In-order Execution of I/O) orders
	// stores on this path.
	asm volatile("eieio" : : : "memory");
#endif
}
// Full barrier: orders all prior memory accesses before all subsequent
// ones. sync is used on both 32- and 64-bit PowerPC.
static inline void
memory_full_barrier_inline(void)
{
	asm volatile("sync" : : : "memory");
}
// Route the kernel's generic barrier names to the inline versions above.
#define memory_read_barrier memory_read_barrier_inline
#define memory_write_barrier memory_write_barrier_inline
#define memory_full_barrier memory_full_barrier_inline
// Store newValue into *value with release-style ordering: the write
// barrier guarantees all earlier stores are visible before the new
// value is published. The volatile-qualified store keeps the compiler
// from caching or reordering the access itself.
static inline void
atomic_set_inline(int32* value, int32 newValue)
{
	volatile int32* target = (volatile int32*)value;
	memory_write_barrier();
	*target = newValue;
}
// Atomically exchange *value with newValue, returning the previous
// contents of *value.
//
// The original stub (marked "BIG TODO") performed no exchange at all:
// it never touched *value and simply returned newValue, which silently
// breaks every caller relying on atomic swap semantics. Use the
// compiler's atomic builtin (GCC >= 4.7 / Clang), which emits the
// appropriate lwarx/stwcx. retry loop on PowerPC.
static inline int32
atomic_get_and_set_inline(int32* value, int32 newValue)
{
	return __atomic_exchange_n(value, newValue, __ATOMIC_SEQ_CST);
}
// Atomic compare-and-swap: if *value == testAgainst, store newValue.
// Returns the previous contents of *value either way (matching the
// cmpxchg semantics sketched in the original's commented-out x86 asm,
// which returned the old value in eax).
//
// The original stub (marked "BIG TODO") did no comparison and no store;
// it just returned newValue. Use the compiler's atomic builtin
// (GCC >= 4.7 / Clang) instead.
static inline int32
atomic_test_and_set_inline(int32* value, int32 newValue, int32 testAgainst)
{
	int32 oldValue = testAgainst;
	// On failure the builtin writes the current *value into oldValue,
	// so oldValue holds the previous contents in both outcomes.
	__atomic_compare_exchange_n(value, &oldValue, newValue,
		0 /* strong CAS */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return oldValue;
}
// Atomically add newValue to *value, returning the previous contents of
// *value (matching the xadd semantics sketched in the original's
// commented-out x86 asm, which returns the old value in the register).
//
// The original stub (marked "BIG TODO") never modified *value and just
// returned its argument. Use the compiler's atomic builtin
// (GCC >= 4.7 / Clang), which emits a lwarx/stwcx. loop on PowerPC.
static inline int32
atomic_add_inline(int32* value, int32 newValue)
{
	return __atomic_fetch_add(value, newValue, __ATOMIC_SEQ_CST);
}
// Load *value with acquire-style ordering: the volatile load happens
// first, then the read barrier prevents later loads from being hoisted
// above it. Do not reorder these two statements.
static inline int32
atomic_get_inline(int32* value)
{
	int32 newValue = *(volatile int32*)value;
	memory_read_barrier();
	return newValue;
}
// Map the kernel's generic atomic names onto the inline versions above.
#define atomic_set atomic_set_inline
#define atomic_get_and_set atomic_get_and_set_inline
// test_and_set and add may already be provided elsewhere (e.g. by a
// platform override), hence the #ifndef guards on these two only.
#ifndef atomic_test_and_set
#	define atomic_test_and_set atomic_test_and_set_inline
#endif
#ifndef atomic_add
#	define atomic_add atomic_add_inline
#endif
#define atomic_get atomic_get_inline
#endif	// _KERNEL_ARCH_PPC_ATOMIC_H

View File

@ -1540,6 +1540,9 @@ call_all_cpus_sync(void (*func)(void*, int), void* cookie)
}
// Ensure the symbols for memory_barriers are still included
// in the kernel for binary compatibility. Calls are forwarded
// to the more efficient per-processor atomic implementations.
#undef memory_read_barrier
#undef memory_write_barrier