* Added an optional spinlock contention measurement feature. Enabled when
  B_DEBUG_SPINLOCK_CONTENTION is defined to 1. It typedefs spinlock to a
  structure (thus breaking BeOS binary compatibility) that contains a counter
  which is incremented whenever a thread has to wait for the spinlock.
* Added macros for spinlock initialization and access and changed code using
  spinlocks accordingly. This breaks compilation for BeOS -- the macros should
  be defined in the respective compatibility wrappers.
* Added a generic syscall to get the spinlock counters for the thread and the
  team spinlocks.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25752 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent e78b56aaf7, commit 1c8de8581b
@@ -13,7 +13,32 @@
 /* interrupts and spinlocks */
 
 typedef ulong cpu_status;
-typedef vint32 spinlock;
+
+// WARNING: For Haiku debugging only! This changes the spinlock type in a
+// binary incompatible way!
+//#define B_DEBUG_SPINLOCK_CONTENTION	1
+
+#if B_DEBUG_SPINLOCK_CONTENTION
+	typedef struct {
+		vint32	lock;
+		vint32	count_low;
+		vint32	count_high;
+	} spinlock;
+
+#	define B_SPINLOCK_INITIALIZER		{ 0, 0, 0 }
+#	define B_INITIALIZE_SPINLOCK(spinlock)	do {	\
+		(spinlock)->lock = 0;			\
+		(spinlock)->count_low = 0;		\
+		(spinlock)->count_high = 0;		\
+	} while (false)
+#	define B_SPINLOCK_IS_LOCKED(spinlock)	((spinlock)->lock > 0)
+#else
+	typedef vint32 spinlock;
+
+#	define B_SPINLOCK_INITIALIZER		0
+#	define B_INITIALIZE_SPINLOCK(lock)	do { *(lock) = 0; } while (false)
+#	define B_SPINLOCK_IS_LOCKED(lock)	(*(lock) > 0)
+#endif
 
 /* interrupt handling support for device drivers */
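In practice this means spinlocks may no longer be treated as plain int32 variables: file-scope locks get B_SPINLOCK_INITIALIZER, dynamically allocated ones go through B_INITIALIZE_SPINLOCK(), and ad-hoc "lock != 0" tests become B_SPINLOCK_IS_LOCKED(). A minimal usage sketch of the intended pattern follows; the my_device structure is hypothetical, everything else is the existing kernel API.

#include <KernelExport.h>

typedef struct my_device {
	spinlock	lock;		/* valid for both spinlock layouts */
	int32		open_count;
} my_device;

/* Static locks can use the initializer macro directly. */
static spinlock sDeviceListLock = B_SPINLOCK_INITIALIZER;

static void
my_device_init(my_device *device)
{
	/* Replaces the old "device->lock = 0;" assignment, which no longer
	 * compiles when spinlock is a struct. */
	B_INITIALIZE_SPINLOCK(&device->lock);
	device->open_count = 0;
}

static void
my_device_bump(my_device *device)
{
	cpu_status state = disable_interrupts();
	acquire_spinlock(&device->lock);

	device->open_count++;

	release_spinlock(&device->lock);
	restore_interrupts(state);
}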
@@ -40,6 +40,7 @@ extern "C" {
 
 status_t smp_init(struct kernel_args *args);
 status_t smp_per_cpu_init(struct kernel_args *args, int32 cpu);
+status_t smp_init_post_generic_syscalls(void);
 bool smp_trap_non_boot_cpus(int32 cpu);
 void smp_wake_up_non_boot_cpus(void);
 void smp_cpu_rendezvous(volatile uint32 *var, int current_cpu);
headers/private/system/spinlock_contention.h (new file, 21 lines)
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _SYSTEM_SPINLOCK_CONTENTION_H
+#define _SYSTEM_SPINLOCK_CONTENTION_H
+
+#include <OS.h>
+
+
+#define SPINLOCK_CONTENTION				"spinlock contention"
+#define GET_SPINLOCK_CONTENTION_INFO	0x01
+
+
+typedef struct spinlock_contention_info {
+	uint64	thread_spinlock_counter;
+	uint64	team_spinlock_counter;
+} spinlock_contention_info;
+
+
+#endif	/* _SYSTEM_SPINLOCK_CONTENTION_H */
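With this header in place, a privileged userland tool could poll the two counters through the generic syscall interface. A hedged sketch: it assumes the private _kern_generic_syscall() wrapper from <syscalls.h> (part of Haiku's private system headers, not shown in this commit) and a kernel built with B_DEBUG_SPINLOCK_CONTENTION enabled; otherwise the syscall is simply not registered and the call fails.

#include <stdio.h>
#include <string.h>

#include <OS.h>

#include <spinlock_contention.h>
#include <syscalls.h>	/* private: _kern_generic_syscall() -- assumption */

int
main(void)
{
	spinlock_contention_info info;
	status_t status = _kern_generic_syscall(SPINLOCK_CONTENTION,
		GET_SPINLOCK_CONTENTION_INFO, &info, sizeof(info));
	if (status != B_OK) {
		fprintf(stderr, "getting spinlock contention info failed: %s\n",
			strerror(status));
		return 1;
	}

	printf("thread spinlock waits: %llu\n",
		(unsigned long long)info.thread_spinlock_counter);
	printf("team spinlock waits:   %llu\n",
		(unsigned long long)info.team_spinlock_counter);
	return 0;
}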
@@ -521,7 +521,7 @@ ide_sim_init_bus(device_node *node, void **cookie)
 
 	memset(bus, 0, sizeof(*bus));
 	bus->node = node;
-	bus->lock = 0;
+	B_INITIALIZE_SPINLOCK(&bus->lock);
 	bus->num_running_reqs = 0;
 	bus->active_qrequest = NULL;
 	bus->disconnected = false;
@@ -32,7 +32,7 @@
 		restore_interrupts(cpu_status); \
 	}
 
-spinlock sConfigLock = 0;
+spinlock sConfigLock = B_SPINLOCK_INITIALIZER;
 
 static status_t
 pci_mech1_read_config(void *cookie, uint8 bus, uint8 device, uint8 function,
@@ -40,7 +40,7 @@ create_packet_buffer(size_t size)
 		free(buffer);
 		return NULL;
 	}
-	buffer->lock = 0;
+	B_INITIALIZE_SPINLOCK(&buffer->lock);
 
 	return buffer;
 }
@@ -29,7 +29,7 @@ typedef struct spinlock_irq {
 static inline void
 spinlock_irq_init(spinlock_irq *lock)
 {
-	lock->lock = 0;
+	B_INITIALIZE_SPINLOCK(&lock->lock);
 }
 
 static inline void
@@ -30,7 +30,6 @@ AHCIPort::AHCIPort(AHCIController *controller, int index)
 	, fIndex(index)
 	, fRegs(&controller->fRegs->port[index])
 	, fArea(-1)
-	, fSpinlock(0)
 	, fCommandsActive(0)
 	, fRequestSem(-1)
 	, fResponseSem(-1)
@@ -42,6 +41,7 @@ AHCIPort::AHCIPort(AHCIController *controller, int index)
 	, fResetPort(false)
 	, fError(false)
 {
+	B_INITIALIZE_SPINLOCK(&fSpinlock);
 	fRequestSem = create_sem(1, "ahci request");
 	fResponseSem = create_sem(0, "ahci response");
 }
@@ -782,7 +782,7 @@ EHCI::InterruptHandler(void *data)
 int32
 EHCI::Interrupt()
 {
-	static spinlock lock = 0;
+	static spinlock lock = B_SPINLOCK_INITIALIZER;
 	acquire_spinlock(&lock);
 
 	// check if any interrupt was generated
@@ -694,7 +694,7 @@ OHCI::_InterruptHandler(void *data)
 int32
 OHCI::_Interrupt()
 {
-	static spinlock lock = 0;
+	static spinlock lock = B_SPINLOCK_INITIALIZER;
 	acquire_spinlock(&lock);
 
 	uint32 status = 0;
@@ -1531,7 +1531,7 @@ UHCI::InterruptHandler(void *data)
 int32
 UHCI::Interrupt()
 {
-	static spinlock lock = 0;
+	static spinlock lock = B_SPINLOCK_INITIALIZER;
 	acquire_spinlock(&lock);
 
 	// Check if we really had an interrupt
@@ -34,7 +34,7 @@
 #include "debug.h"
 #include "util.h"
 
-spinlock slock = 0;
+spinlock slock = B_SPINLOCK_INITIALIZER;
 
 uint32 round_to_pagesize(uint32 size);
 
@@ -34,7 +34,7 @@
 #include "debug.h"
 #include "util.h"
 
-spinlock slock = 0;
+spinlock slock = B_SPINLOCK_INITIALIZER;
 
 uint32 round_to_pagesize(uint32 size);
 
@@ -34,7 +34,7 @@
 #include "debug.h"
 #include "util.h"
 
-spinlock slock = 0;
+spinlock slock = B_SPINLOCK_INITIALIZER;
 
 uint32 round_to_pagesize(uint32 size);
 
@@ -237,7 +237,7 @@ Radeon_SetupIRQ(device_info *di, char *buffer)
 		goto err3;
 	}
 
-	di->cap_spinlock = 0;
+	B_INITIALIZE_SPINLOCK(&di->cap_spinlock);
 
 	sprintf(buffer, "%04X_%04X_%02X%02X%02X DMA I",
 		di->pcii.vendor_id, di->pcii.device_id,
@@ -467,7 +467,7 @@ rtl8169_open(const char *name, uint32 flags, void** cookie)
 	read_settings(device);
 
 	device->rxBuf = (void **)malloc(sizeof(void *) * device->rxBufferCount);
-	device->rxSpinlock = 0;
+	B_INITIALIZE_SPINLOCK(&device->rxSpinlock);
 	device->rxNextIndex = 0;
 	device->rxIntIndex = 0;
 	device->rxFree = device->rxBufferCount;
@@ -475,7 +475,7 @@ rtl8169_open(const char *name, uint32 flags, void** cookie)
 	set_sem_owner(device->rxReadySem, B_SYSTEM_TEAM);
 
 	device->txBuf = (void **)malloc(sizeof(void *) * device->txBufferCount);
-	device->txSpinlock = 0;
+	B_INITIALIZE_SPINLOCK(&device->txSpinlock);
 	device->txNextIndex = 0;
 	device->txIntIndex = 0;
 	device->txUsed = 0;
@@ -187,7 +187,7 @@ initialize_timer(void)
 {
 	sTimerCount = 0;
 	sTimerNextId = 1;
-	sTimerSpinlock = 0;
+	B_INITIALIZE_SPINLOCK(&sTimerSpinlock);
 
 	sTimerThread = spawn_kernel_thread(timer_thread, "rtl8169 timer", 80, 0);
 	sTimerSem = create_sem(0, "rtl8169 timer");
@@ -91,7 +91,7 @@ new_dpc_queue(void **handle, const char *name, int32 priority)
 	queue->head = queue->tail = 0;
 	queue->size = DPC_QUEUE_SIZE;
 	queue->count = 0;
-	queue->lock = 0;	// Init the spinlock
+	B_INITIALIZE_SPINLOCK(&queue->lock);	// Init the spinlock
 
 #ifdef __HAIKU__
 	snprintf(str, sizeof(str), "%.*s_wakeup_sem",
@@ -480,7 +480,7 @@ _EXPORT generic_mpu401_module *modules[] =
 	NULL
 };
 
-spinlock locked = 0;
+spinlock locked = B_SPINLOCK_INITIALIZER;
 cpu_status
 lock(void)
 {
@@ -20,7 +20,7 @@ struct taskqueue {
 	taskqueue_enqueue_fn tq_enqueue;
 	void *tq_arg;
 	int tq_fast;
-	int32 tq_spinlock;
+	spinlock tq_spinlock;
 	sem_id tq_sem;
 	thread_id *tq_threads;
 	thread_id tq_thread_storage;
@@ -43,7 +43,7 @@ _taskqueue_create(const char *name, int mflags, int fast,
 	tq->tq_fast = fast;
 
 	if (fast) {
-		tq->tq_spinlock = 0;
+		B_INITIALIZE_SPINLOCK(&tq->tq_spinlock);
 	} else {
 		mutex_init_etc(&tq->tq_mutex, name, MUTEX_FLAG_CLONE_NAME);
 	}
@@ -1180,7 +1180,7 @@ arch_vm_translation_map_init(kernel_args *args)
 
 	sQueryDesc.type = DT_INVALID;
 
-	tmap_list_lock = 0;
+	B_INITIALIZE_SPINLOCK(&tmap_list_lock);
 	tmap_list = NULL;
 
 	// allocate some space to hold physical page mapping info
@@ -99,7 +99,7 @@ static bool sKeyboardHandlerInstalled = false;
 static bool sBochsOutput = false;
 #endif
 
-static spinlock sSerialOutputSpinlock = 0;
+static spinlock sSerialOutputSpinlock = B_SPINLOCK_INITIALIZER;
 
 
 static void
@@ -852,7 +852,7 @@ arch_vm_translation_map_init(kernel_args *args)
 	sKernelPhysicalPageDirectory = (page_directory_entry *)args->arch_args.phys_pgdir;
 	sKernelVirtualPageDirectory = (page_directory_entry *)args->arch_args.vir_pgdir;
 
-	tmap_list_lock = 0;
+	B_INITIALIZE_SPINLOCK(&tmap_list_lock);
 	tmap_list = NULL;
 
 	// allocate some space to hold physical page mapping info
@@ -54,7 +54,7 @@ static bool sBlueScreenEnabled = false;
 	// must always be false on startup
 static bool sDebugScreenEnabled = false;
 static bool sBlueScreenOutput = true;
-static spinlock sSpinlock = 0;
+static spinlock sSpinlock = B_SPINLOCK_INITIALIZER;
 static int32 sDebuggerOnCPU = -1;
 
 static sem_id sSyslogNotify = -1;
@@ -30,7 +30,7 @@
 static const int32 kMaxInvokeCommandDepth = 5;
 static const int32 kOutputBufferSize = 1024;
 
-static spinlock sSpinlock = 0;
+static spinlock sSpinlock = B_SPINLOCK_INITIALIZER;
 
 static struct debugger_command *sCommands;
 
@@ -219,7 +219,7 @@ clear_team_debug_info(struct team_debug_info *info, bool initLock)
 		info->debugger_write_lock = -1;
 
 		if (initLock)
-			info->lock = 0;
+			B_INITIALIZE_SPINLOCK(&info->lock);
 	}
 }
 
@@ -62,7 +62,7 @@ dump_int_statistics(int argc, char **argv)
 	for (i = 0; i < NUM_IO_VECTORS; i++) {
 		struct io_handler *io;
 
-		if (sVectors[i].vector_lock == 0
+		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
 			&& sVectors[i].enable_count == 0
 			&& sVectors[i].handled_count == 0
 			&& sVectors[i].unhandled_count == 0
@@ -72,7 +72,7 @@ dump_int_statistics(int argc, char **argv)
 		kprintf("int %3d, enabled %ld, handled %8lld, unhandled %8lld%s%s\n",
 			i, sVectors[i].enable_count, sVectors[i].handled_count,
 			sVectors[i].unhandled_count,
-			sVectors[i].vector_lock != 0 ? ", ACTIVE" : "",
+			B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) ? ", ACTIVE" : "",
 			sVectors[i].handler_list.next == &sVectors[i].handler_list
 				? ", no handler" : "");
 
@@ -114,7 +114,7 @@ int_init_post_vm(kernel_args *args)
 
 	/* initialize the vector list */
 	for (i = 0; i < NUM_IO_VECTORS; i++) {
-		sVectors[i].vector_lock = 0;	/* initialize spinlock */
+		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
 		sVectors[i].enable_count = 0;
 		sVectors[i].no_lock_vector = false;
 #ifdef DEBUG_INT
@@ -160,6 +160,7 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
 	driver_settings_init_post_sem(&sKernelArgs);
 	TRACE("init generic syscall\n");
 	generic_syscall_init();
+	smp_init_post_generic_syscalls();
 	TRACE("init cbuf\n");
 	cbuf_init();
 	TRACE("init teams\n");
@@ -72,7 +72,7 @@ static bool sPortsActive = false;
 static port_id sNextPort = 1;
 static int32 sFirstFreeSlot = 1;
 
-static spinlock sPortSpinlock = 0;
+static spinlock sPortSpinlock = B_SPINLOCK_INITIALIZER;
 
 #define GRAB_PORT_LIST_LOCK() acquire_spinlock(&sPortSpinlock)
 #define RELEASE_PORT_LIST_LOCK() release_spinlock(&sPortSpinlock)
@@ -108,7 +108,7 @@ static bool sSemsActive = false;
 static struct sem_entry *sFreeSemsHead = NULL;
 static struct sem_entry *sFreeSemsTail = NULL;
 
-static spinlock sem_spinlock = 0;
+static spinlock sem_spinlock = B_SPINLOCK_INITIALIZER;
 #define GRAB_SEM_LIST_LOCK() acquire_spinlock(&sem_spinlock)
 #define RELEASE_SEM_LIST_LOCK() release_spinlock(&sem_spinlock)
 #define GRAB_SEM_LOCK(s) acquire_spinlock(&(s).lock)
@@ -8,18 +8,22 @@
 
 /* Functionality for symetrical multi-processors */
 
-#include <thread.h>
-#include <int.h>
-#include <smp.h>
-#include <cpu.h>
-#include <arch/cpu.h>
-#include <arch/smp.h>
-#include <arch/int.h>
-#include <arch/debug.h>
-
 #include <stdlib.h>
 #include <string.h>
 
+#include <arch/cpu.h>
+#include <arch/debug.h>
+#include <arch/int.h>
+#include <arch/smp.h>
+#include <cpu.h>
+#include <generic_syscall.h>
+#include <int.h>
+#include <spinlock_contention.h>
+#include <thread.h>
+
 
 #define DEBUG_SPINLOCKS 1
 //#define TRACE_SMP
 
@@ -53,17 +57,17 @@ struct smp_msg {
 #define MAILBOX_LOCAL 1
 #define MAILBOX_BCAST 2
 
-static spinlock boot_cpu_spin[SMP_MAX_CPUS] = { 0, };
+static spinlock boot_cpu_spin[SMP_MAX_CPUS] = { };
 
 static struct smp_msg *free_msgs = NULL;
 static volatile int free_msg_count = 0;
-static spinlock free_msg_spinlock = 0;
+static spinlock free_msg_spinlock = B_SPINLOCK_INITIALIZER;
 
 static struct smp_msg *smp_msgs[SMP_MAX_CPUS] = { NULL, };
-static spinlock cpu_msg_spinlock[SMP_MAX_CPUS] = { 0, };
+static spinlock cpu_msg_spinlock[SMP_MAX_CPUS];
 
 static struct smp_msg *smp_broadcast_msgs = NULL;
-static spinlock broadcast_msg_spinlock = 0;
+static spinlock broadcast_msg_spinlock = B_SPINLOCK_INITIALIZER;
 
 static bool sICIEnabled = false;
 static int32 sNumCPUs = 1;
@@ -115,6 +119,10 @@ acquire_spinlock(spinlock *lock)
 		int currentCPU = smp_get_current_cpu();
 		if (are_interrupts_enabled())
 			panic("acquire_spinlock: attempt to acquire lock %p with interrupts enabled\n", lock);
+#if B_DEBUG_SPINLOCK_CONTENTION
+		while (atomic_add(&lock->lock, 1) != 0)
+			process_pending_ici(currentCPU);
+#else
 		while (1) {
 			while (*lock != 0) {
 				process_pending_ici(currentCPU);
@@ -123,6 +131,7 @@ acquire_spinlock(spinlock *lock)
 			if (atomic_set((int32 *)lock, 1) == 0)
 				break;
 		}
+#endif
 	} else {
 #if DEBUG_SPINLOCKS
 		int32 oldValue;
@@ -148,12 +157,17 @@ acquire_spinlock_nocheck(spinlock *lock)
 		if (are_interrupts_enabled())
 			panic("acquire_spinlock_nocheck: attempt to acquire lock %p with interrupts enabled\n", lock);
 #endif
+#if B_DEBUG_SPINLOCK_CONTENTION
+		while (atomic_add(&lock->lock, 1) != 0) {
+		}
+#else
 		while (1) {
 			while(*lock != 0)
 				PAUSE();
 			if (atomic_set((int32 *)lock, 1) == 0)
 				break;
 		}
+#endif
 	} else {
 #if DEBUG_SPINLOCKS
 		if (are_interrupts_enabled())
@@ -171,8 +185,23 @@ release_spinlock(spinlock *lock)
 	if (sNumCPUs > 1) {
 		if (are_interrupts_enabled())
 			panic("release_spinlock: attempt to release lock %p with interrupts enabled\n", lock);
+#if B_DEBUG_SPINLOCK_CONTENTION
+		{
+			int32 count = atomic_set(&lock->lock, 0) - 1;
+			if (count < 0) {
+				panic("release_spinlock: lock %p was already released\n", lock);
+			} else {
+				// add to the total count -- deal with carry manually
+				if ((uint32)atomic_add(&lock->count_low, count) + count
+						< (uint32)count) {
+					atomic_add(&lock->count_high, 1);
+				}
+			}
+		}
+#else
 		if (atomic_set((int32 *)lock, 0) != 1)
 			panic("release_spinlock: lock %p was already released\n", lock);
+#endif
 	} else {
 #if DEBUG_SPINLOCKS
 		if (are_interrupts_enabled())
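In the contended acquire path (earlier hunk) every waiting CPU increments lock->lock with atomic_add(), so the value returned by atomic_set(&lock->lock, 0) here is 1 (the holder) plus the number of contended acquisition attempts made while the lock was held. That count is accumulated into a 64-bit total kept as two 32-bit words; since atomic_add() returns the value before the addition, a wrap of the low word shows up as the sum coming out smaller than the addend, at which point the high word is incremented. The same idiom in isolation (names are illustrative, not from the commit):

static void
add_contention_count(vint32 *countLow, vint32 *countHigh, int32 count)
{
	/* atomic_add() returns the previous value of *countLow; if adding
	 * count wrapped the 32-bit low word, old + count is smaller than
	 * count (unsigned), so carry into the high word. */
	if ((uint32)atomic_add(countLow, count) + count < (uint32)count)
		atomic_add(countHigh, 1);
}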
@@ -405,6 +434,48 @@ process_pending_ici(int32 currentCPU)
 }
 
 
+#if B_DEBUG_SPINLOCK_CONTENTION
+
+static uint64
+get_spinlock_counter(spinlock* lock)
+{
+	uint32 high;
+	uint32 low;
+	do {
+		high = (uint32)atomic_get(&lock->count_high);
+		low = (uint32)atomic_get(&lock->count_low);
+	} while (high != atomic_get(&lock->count_high));
+
+	return ((uint64)high << 32) | low;
+}
+
+
+static status_t
+spinlock_contention_syscall(const char* subsystem, uint32 function,
+	void* buffer, size_t bufferSize)
+{
+	spinlock_contention_info info;
+
+	if (function != GET_SPINLOCK_CONTENTION_INFO)
+		return B_BAD_VALUE;
+
+	if (bufferSize < sizeof(spinlock_contention_info))
+		return B_BAD_VALUE;
+
+	info.thread_spinlock_counter = get_spinlock_counter(&thread_spinlock);
+	info.team_spinlock_counter = get_spinlock_counter(&team_spinlock);
+
+	if (!IS_USER_ADDRESS(buffer)
+		|| user_memcpy(buffer, &info, sizeof(info)) != B_OK) {
+		return B_BAD_ADDRESS;
+	}
+
+	return B_OK;
+}
+
+#endif	// B_DEBUG_SPINLOCK_CONTENTION
+
+
 // #pragma mark -
 
 
@@ -553,7 +624,11 @@ bool
 smp_trap_non_boot_cpus(int32 cpu)
 {
 	if (cpu > 0) {
+#if B_DEBUG_SPINLOCK_CONTENTION
+		boot_cpu_spin[cpu].lock = 1;
+#else
 		boot_cpu_spin[cpu] = 1;
+#endif
 		acquire_spinlock_nocheck(&boot_cpu_spin[cpu]);
 		return false;
 	}
@@ -624,6 +699,18 @@ smp_per_cpu_init(kernel_args *args, int32 cpu)
 }
 
 
+status_t
+smp_init_post_generic_syscalls(void)
+{
+#if B_DEBUG_SPINLOCK_CONTENTION
+	return register_generic_syscall(SPINLOCK_CONTENTION,
+		&spinlock_contention_syscall, 0, 0);
+#else
+	return B_OK;
+#endif
+}
+
+
 void
 smp_set_num_cpus(int32 numCPUs)
 {
@@ -89,7 +89,7 @@ static struct team *sKernelTeam = NULL;
 static int32 sMaxTeams = 2048;
 static int32 sUsedTeams = 1;
 
-spinlock team_spinlock = 0;
+spinlock team_spinlock = B_SPINLOCK_INITIALIZER;
 
 
 // #pragma mark - Tracing
@@ -60,7 +60,7 @@ struct thread_key {
 };
 
 // global
-spinlock thread_spinlock = 0;
+spinlock thread_spinlock = B_SPINLOCK_INITIALIZER;
 
 // thread list
 static struct thread sIdleThreads[B_MAX_CPU_COUNT];