* Renamed thread_spinlock and team_spinlock to gThreadSpinlock and
  gTeamSpinlock.
* Renamed the static global variables in smp.c to match our style guide.
* Minor other cleanup.
* Removed superfluous white space.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26730 a95241bf-73f2-0310-859d-f6bbb57e9c96
@@ -19,15 +19,15 @@
 #include <arch/thread_types.h>


-extern spinlock thread_spinlock;
-#define GRAB_THREAD_LOCK() acquire_spinlock(&thread_spinlock)
-#define RELEASE_THREAD_LOCK() release_spinlock(&thread_spinlock)
+extern spinlock gThreadSpinlock;
+#define GRAB_THREAD_LOCK() acquire_spinlock(&gThreadSpinlock)
+#define RELEASE_THREAD_LOCK() release_spinlock(&gThreadSpinlock)

-extern spinlock team_spinlock;
+extern spinlock gTeamSpinlock;
 // NOTE: TEAM lock can be held over a THREAD lock acquisition,
 // but not the other way (to avoid deadlock)
-#define GRAB_TEAM_LOCK() acquire_spinlock(&team_spinlock)
-#define RELEASE_TEAM_LOCK() release_spinlock(&team_spinlock)
+#define GRAB_TEAM_LOCK() acquire_spinlock(&gTeamSpinlock)
+#define RELEASE_TEAM_LOCK() release_spinlock(&gTeamSpinlock)

 enum additional_thread_state {
 	THREAD_STATE_FREE_ON_RESCHED = 7,	// free the thread structure upon reschedule
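The NOTE in this hunk states the lock hierarchy the rest of the diff relies on: gTeamSpinlock may be held while acquiring gThreadSpinlock, never the reverse. A minimal userland sketch of that rule, with std::atomic_flag standing in for Haiku's real spinlock type:

#include <atomic>

struct spinlock_sketch {
	std::atomic_flag locked = ATOMIC_FLAG_INIT;
	void acquire() { while (locked.test_and_set(std::memory_order_acquire)) {} }
	void release() { locked.clear(std::memory_order_release); }
};

spinlock_sketch gTeamSpinlock;
spinlock_sketch gThreadSpinlock;

// Hypothetical caller demonstrating the permitted nesting order.
void
update_team_and_thread_sketch()
{
	gTeamSpinlock.acquire();		// TEAM lock first...
	gThreadSpinlock.acquire();		// ...THREAD lock nested inside: allowed
	// ... touch team and thread state ...
	gThreadSpinlock.release();
	gTeamSpinlock.release();
	// Taking gTeamSpinlock while already holding gThreadSpinlock would
	// deadlock against a CPU doing the order above.
}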
@@ -132,7 +132,7 @@ ConditionVariableEntry::Wait(uint32 flags, bigtime_t timeout)

 	conditionLocker.Unlock();

-	SpinLocker threadLocker(thread_spinlock);
+	SpinLocker threadLocker(gThreadSpinlock);

 	status_t error;
 	if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0)

@@ -215,7 +215,7 @@ ConditionVariable::Unpublish(bool threadsLocked)
 	ASSERT(fObject != NULL);

 	InterruptsLocker _;
-	SpinLocker threadLocker(threadsLocked ? NULL : &thread_spinlock);
+	SpinLocker threadLocker(threadsLocked ? NULL : &gThreadSpinlock);
 	SpinLocker locker(sConditionVariablesLock);

 #if KDEBUG

@@ -286,7 +286,7 @@ void
 ConditionVariable::_Notify(bool all, bool threadsLocked)
 {
 	InterruptsLocker _;
-	SpinLocker threadLocker(threadsLocked ? NULL : &thread_spinlock);
+	SpinLocker threadLocker(threadsLocked ? NULL : &gThreadSpinlock);
 	SpinLocker locker(sConditionVariablesLock);

 	if (!fEntries.IsEmpty())
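Unpublish() and _Notify() above use a conditional-locker idiom: when the caller already holds the thread lock, the locker is constructed with NULL and degenerates to a no-op. A self-contained sketch of that behavior; the type and the stubbed spinlock functions are stand-ins, not Haiku's real SpinLocker:

#include <cstddef>

struct spinlock { int value; };
static void acquire_spinlock(spinlock* lock) { (void)lock; /* stub */ }
static void release_spinlock(spinlock* lock) { (void)lock; /* stub */ }

class SpinLockerSketch {
public:
	explicit SpinLockerSketch(spinlock* lock)
		: fLock(lock)
	{
		if (fLock != NULL)
			acquire_spinlock(fLock);	// lock only if one was given
	}
	~SpinLockerSketch()
	{
		if (fLock != NULL)
			release_spinlock(fLock);	// matching no-op when fLock is NULL
	}
private:
	spinlock*	fLock;
};

Constructed as SpinLockerSketch(threadsLocked ? NULL : &gThreadSpinlock), the same code path works whether or not the caller already owns the lock.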
@@ -150,7 +150,7 @@ update_thread_breakpoints_flag()
 static void
 update_threads_breakpoints_flag()
 {
-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	struct team* team = thread_get_current_thread()->team;
 	struct thread* thread = team->thread_list;

@@ -538,7 +538,7 @@ thread_hit_debug_event_internal(debug_debugger_message event,
 				thread->id));
 			arch_set_debug_cpu_state(
 				&commandMessage.set_cpu_state.cpu_state);
-
+
 			break;
 		}

@@ -562,7 +562,7 @@ thread_hit_debug_event_internal(debug_debugger_message event,
 		case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
 		{
 			// Check, if the debugger really changed, i.e. is different
-			// than the one we know.
+			// than the one we know.
 			team_debug_info teamDebugInfo;
 			get_team_debug_info(teamDebugInfo);

@@ -850,7 +850,7 @@ user_debug_update_new_thread_flags(thread_id threadID)

 	InterruptsLocker interruptsLocker;

-	SpinLocker threadLocker(thread_spinlock);
+	SpinLocker threadLocker(gThreadSpinlock);

 	struct thread *thread = thread_get_thread_struct_locked(threadID);
 	if (!thread)

@@ -860,7 +860,7 @@ user_debug_update_new_thread_flags(thread_id threadID)

 	threadLocker.Unlock();

-	SpinLocker teamLocker(team_spinlock);
+	SpinLocker teamLocker(gTeamSpinlock);
 	update_thread_breakpoints_flag();
 	update_thread_debugger_installed_flag();
 }

@@ -1611,7 +1611,7 @@ debug_nub_thread(void *)
 			break;
 		}
-
+
 		case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
 		{
 			// get the parameters

@@ -1758,7 +1758,7 @@ debug_nub_thread(void *)
 			// get the masks
 			uint64 ignore = 0;
 			uint64 ignoreOnce = 0;
-
+
 			cpu_status state = disable_interrupts();
 			GRAB_THREAD_LOCK();

@@ -1888,7 +1888,7 @@ debug_nub_thread(void *)
 			} else {
 				// We probably got a SIGKILL. If so, we will terminate when
 				// reading the next message fails.
 			}
 		}
-
+
 		break;
 	}
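The @@ -1758 hunk shows the manual form, disable_interrupts() followed by GRAB_THREAD_LOCK(), that the InterruptsSpinLocker used everywhere else bundles into one RAII object. A sketch of that bundling; all functions here are illustrative stubs, and the exact constructor of the real class is not shown in this diff:

typedef int cpu_status;
struct spinlock { int value; };

static cpu_status disable_interrupts() { return 0; /* stub */ }
static void restore_interrupts(cpu_status state) { (void)state; }
static void acquire_spinlock(spinlock* lock) { (void)lock; }
static void release_spinlock(spinlock* lock) { (void)lock; }

class InterruptsSpinLockerSketch {
public:
	explicit InterruptsSpinLockerSketch(spinlock& lock)
		: fLock(&lock),
		fState(disable_interrupts())	// 1. interrupts off first
	{
		acquire_spinlock(fLock);		// 2. then take the spinlock
	}
	~InterruptsSpinLockerSketch()
	{
		release_spinlock(fLock);		// reverse order on destruction
		restore_interrupts(fState);
	}
private:
	spinlock*	fLock;
	cpu_status	fState;
};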
@@ -158,7 +158,7 @@ IOScheduler::OperationCompleted(IOOperation* operation, status_t status,
 	fFinishedOperationCondition.NotifyAll();

 	if (fWaiting) {
-		SpinLocker _2(thread_spinlock);
+		SpinLocker _2(gThreadSpinlock);
 		thread_interrupt(thread_get_thread_struct_locked(fSchedulerThread),
 			false);
 	}
@@ -1503,7 +1503,7 @@ disconnect_mount_or_vnode_fds(struct fs_mount *mount,
 	team_id lastTeamID;

 	cpu_status state = disable_interrupts();
-	SpinLocker teamsLock(team_spinlock);
+	SpinLocker teamsLock(gTeamSpinlock);

 	lastTeamID = peek_next_thread_id();
 	if (nextTeamID < lastTeamID) {
@@ -246,7 +246,7 @@ rw_lock_destroy(rw_lock* lock)
 		? (char*)lock->name : NULL;

 	// unblock all waiters
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 #ifdef KDEBUG
 	if (lock->waiters != NULL && thread_get_current_thread_id()

@@ -283,7 +283,7 @@ rw_lock_read_lock(rw_lock* lock)
 #if KDEBUG_RW_LOCK_DEBUG
 	return rw_lock_write_lock(lock);
 #else
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 	if (lock->writer_count == 0) {
 		lock->reader_count++;

@@ -305,7 +305,7 @@ rw_lock_read_unlock(rw_lock* lock)
 #if KDEBUG_RW_LOCK_DEBUG
 	return rw_lock_write_unlock(lock);
 #else
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 	if (lock->holder == thread_get_current_thread_id()) {
 		if (--lock->owner_count > 0)

@@ -335,7 +335,7 @@ rw_lock_read_unlock(rw_lock* lock)
 status_t
 rw_lock_write_lock(rw_lock* lock)
 {
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 	if (lock->reader_count == 0 && lock->writer_count == 0) {
 		lock->writer_count++;

@@ -362,7 +362,7 @@ rw_lock_write_lock(rw_lock* lock)
 status_t
 rw_lock_write_unlock(rw_lock* lock)
 {
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 	if (thread_get_current_thread_id() != lock->holder) {
 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",

@@ -454,7 +454,7 @@ mutex_destroy(mutex* lock)
 		? (char*)lock->name : NULL;

 	// unblock all waiters
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 #ifdef KDEBUG
 	if (lock->waiters != NULL && thread_get_current_thread_id()

@@ -485,7 +485,7 @@ mutex_destroy(mutex* lock)
 status_t
 mutex_switch_lock(mutex* from, mutex* to)
 {
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 #if !defined(KDEBUG)
 	if (atomic_add(&from->count, 1) < -1)

@@ -507,7 +507,7 @@ _mutex_lock(mutex* lock, bool threadsLocked)
 #endif

 	// lock only, if !threadsLocked
-	InterruptsSpinLocker locker(thread_spinlock, false, !threadsLocked);
+	InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);

 	// Might have been released after we decremented the count, but before
 	// we acquired the spinlock.

@@ -556,7 +556,7 @@ void
 _mutex_unlock(mutex* lock, bool threadsLocked)
 {
 	// lock only, if !threadsLocked
-	InterruptsSpinLocker locker(thread_spinlock, false, !threadsLocked);
+	InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);

 #ifdef KDEBUG
 	if (thread_get_current_thread_id() != lock->holder) {

@@ -600,7 +600,7 @@ status_t
 _mutex_trylock(mutex* lock)
 {
 #ifdef KDEBUG
-	InterruptsSpinLocker _(thread_spinlock);
+	InterruptsSpinLocker _(gThreadSpinlock);

 	if (lock->holder <= 0) {
 		lock->holder = thread_get_current_thread_id();
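_mutex_lock() and _mutex_unlock() above pass a three-argument locker, (gThreadSpinlock, false, !threadsLocked). Judging from the "lock only, if !threadsLocked" comment, the second argument says whether the lock is already held and the third whether to acquire it now; those parameter names are assumptions, and the spinlock functions below are stubs:

struct spinlock { int value; };
static void acquire_spinlock(spinlock* lock) { (void)lock; /* stub */ }
static void release_spinlock(spinlock* lock) { (void)lock; /* stub */ }

class ConditionalSpinLocker {
public:
	ConditionalSpinLocker(spinlock& lock, bool alreadyLocked, bool doLock)
		: fLock(&lock),
		fOwnsLock(alreadyLocked || doLock)
	{
		if (doLock)
			acquire_spinlock(fLock);
	}
	~ConditionalSpinLocker()
	{
		if (fOwnsLock)
			release_spinlock(fLock);
	}
private:
	spinlock*	fLock;
	bool		fOwnsLock;
};

With ConditionalSpinLocker locker(gThreadSpinlock, false, !threadsLocked), a caller that already holds the thread lock is neither double-locked on entry nor unlocked behind its back on exit.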
@@ -84,7 +84,7 @@ public:
 	{
 		// For some reason the semaphore is getting destroyed.
 		// Wake up any remaing awaiting threads
-		InterruptsSpinLocker _(thread_spinlock);
+		InterruptsSpinLocker _(gThreadSpinlock);
 		while (queued_thread *entry = fWaitingToIncreaseQueue.RemoveHead()) {
 			entry->queued = false;
 			thread_unblock_locked(entry->thread, EIDRM);

@@ -184,7 +184,7 @@ public:
 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
 			THREAD_BLOCK_TYPE_OTHER, (void*)"xsi semaphore");

-		InterruptsSpinLocker _(thread_spinlock);
+		InterruptsSpinLocker _(gThreadSpinlock);
 		status_t result = thread_block_locked(thread);

 		if (queueEntry.queued) {

@@ -204,7 +204,7 @@ public:

 	void WakeUpThread(bool waitingForZero)
 	{
-		InterruptsSpinLocker _(thread_spinlock);
+		InterruptsSpinLocker _(gThreadSpinlock);
 		if (waitingForZero) {
 			// Wake up all threads waiting on zero
 			while (queued_thread *entry = fWaitingToBeZeroQueue.RemoveHead()) {

@@ -240,7 +240,7 @@ public:
 	XsiSemaphoreSet(int numberOfSemaphores, int flags)
 		: fInitOK(false),
 		fLastSemctlTime((time_t)real_time_clock()),
-		fLastSemopTime(0),
+		fLastSemopTime(0),
 		fNumberOfSemaphores(numberOfSemaphores),
 		fSemaphores(0)
 	{

@@ -495,7 +495,7 @@ XsiSemaphore::ClearUndos(int semaphoreSetID, short semaphoreNumber)
 	// Clear all undo_value (Posix semadj equivalent),
 	// which result in removing the sem_undo record from
 	// the global undo list, plus decrementing the related
-	// team xsi_sem_undo_requests field.
+	// team xsi_sem_undo_requests field.
 	// This happens only on semctl SETVAL and SETALL.
 	TRACE(("XsiSemaphore::ClearUndos: semaphoreSetID = %d, "
 		"semaphoreNumber = %d\n", semaphoreSetID, semaphoreNumber));

@@ -505,7 +505,7 @@ XsiSemaphore::ClearUndos(int semaphoreSetID, short semaphoreNumber)
 		struct sem_undo *current = iterator.Next();
 		if (current->semaphore_set_id == semaphoreSetID
 			&& current->semaphore_number == semaphoreNumber) {
-			InterruptsSpinLocker lock(team_spinlock);
+			InterruptsSpinLocker lock(gTeamSpinlock);
 			if (current->team)
 				current->team->xsi_sem_undo_requests--;
 			iterator.Remove();

@@ -560,7 +560,7 @@ XsiSemaphore::RecordUndo(int semaphoreSetID, short semaphoreNumber, short value)
 		request->semaphore_number = semaphoreNumber;
 		request->undo_value = value;
 		// Add the request to the global sem_undo list
-		InterruptsSpinLocker _(team_spinlock);
+		InterruptsSpinLocker _(gTeamSpinlock);
 		if ((int)(team->xsi_sem_undo_requests + 1) < USHRT_MAX)
 			team->xsi_sem_undo_requests++;
 		else

@@ -592,7 +592,7 @@ XsiSemaphore::RemoveUndo(int semaphoreSetID, short semaphoreNumber, short value)
 			// sem_undo request made previously by the same
 			// process
 			if (current->undo_value == 0) {
-				InterruptsSpinLocker _(team_spinlock);
+				InterruptsSpinLocker _(gTeamSpinlock);
 				if (current->team)
 					current->team->xsi_sem_undo_requests--;
 				iterator.Remove();

@@ -674,7 +674,7 @@ xsi_sem_undo(team_id teamID, int32 numberOfUndos)
 				"SemaphoreNumber = %d, undo value = %d\n", (int)teamID,
 				semaphoreSetID, current->semaphore_number,
 				current->undo_value));
-			semaphore->Revert(current->undo_value);
+			semaphore->Revert(current->undo_value);
 		} else
 			TRACE(("xsi_do_undo: semaphore set %d does not exist "
 				"anymore. Ignore record.\n", semaphoreSetID));

@@ -756,7 +756,7 @@ _user_xsi_semget(key_t key, int numberOfSemaphores, int flags)

 	if (create) {
 		// Create a new sempahore set for this key
-		if (numberOfSemaphores < 0
+		if (numberOfSemaphores < 0
 			|| numberOfSemaphores >= MAX_XSI_SEMS_PER_TEAM) {
 			TRACE_ERROR(("xsi_semget: numberOfSemaphores out of range\n"));
 			return EINVAL;

@@ -1058,7 +1058,7 @@ _user_xsi_semop(int semaphoreID, struct sembuf *ops, size_t numOps)
 				break;
 			}
 		} else if (operation == 0) {
-			if (value == 0)
+			if (value == 0)
 				continue;
 			else if (operations[i].sem_flg & IPC_NOWAIT) {
 				result = EAGAIN;
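The @@ -184 hunk shows the two-step blocking pattern used here: the thread first announces what it will block on, then blocks under gThreadSpinlock so a wake-up arriving in between is not lost. A stand-alone sketch of that shape; every type, function, and constant below is an illustrative stub for the kernel's real one:

typedef long status_t;
struct thread;
struct spinlock { int value; };
spinlock gThreadSpinlock;

static const int kCanInterrupt = 1;		// placeholder for B_CAN_INTERRUPT
static const int kBlockTypeOther = 2;	// placeholder for THREAD_BLOCK_TYPE_OTHER

static thread* thread_get_current_thread() { return nullptr; /* stub */ }
static void thread_prepare_to_block(thread* t, int flags, int type,
	const void* object) { (void)t; (void)flags; (void)type; (void)object; }
static status_t thread_block_locked(thread* t) { (void)t; return 0; }

static status_t
wait_on_xsi_semaphore_sketch()
{
	thread* current = thread_get_current_thread();

	// 1. Register the block while no lock is held; a WakeUpThread() that
	//    runs after this point will still find and unblock us.
	thread_prepare_to_block(current, kCanInterrupt, kBlockTypeOther,
		"xsi semaphore");

	// 2. Actually block with the thread lock held (the real code wraps
	//    this call in InterruptsSpinLocker _(gThreadSpinlock)).
	return thread_block_locked(current);
}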
@@ -918,7 +918,7 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)

 		bool unblockedAny = false;

-		SpinLocker threadLocker(thread_spinlock);
+		SpinLocker threadLocker(gThreadSpinlock);

 		while (count > 0) {
 			queued_thread* entry = sSems[slot].queue.Head();

@@ -1058,7 +1058,7 @@ _get_next_sem_info(team_id team, int32 *_cookie, struct sem_info *info,
 		team = team_get_current_team_id();
 	/* prevents sSems[].owner == -1 >= means owned by a port */
 	if (team < 0 || !team_is_valid(team))
-		return B_BAD_TEAM_ID;
+		return B_BAD_TEAM_ID;

 	slot = *_cookie;
 	if (slot >= sMaxSems)
@@ -229,7 +229,7 @@ update_thread_signals_flag(struct thread* thread)
 void
 update_current_thread_signals_flag()
 {
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 	update_thread_signals_flag(thread_get_current_thread());
 }

@@ -343,7 +343,7 @@ handle_signals(struct thread *thread)

 				// notify threads waiting for team state changes
 				if (thread == thread->team->main_thread) {
-					InterruptsSpinLocker locker(team_spinlock);
+					InterruptsSpinLocker locker(gTeamSpinlock);
 					team_set_job_control_state(thread->team,
 						JOB_CONTROL_STATE_CONTINUED, signal, false);

@@ -366,13 +366,13 @@ handle_signals(struct thread *thread)

 				// notify threads waiting for team state changes
 				if (thread == thread->team->main_thread) {
-					InterruptsSpinLocker locker(team_spinlock);
+					InterruptsSpinLocker locker(gTeamSpinlock);
 					team_set_job_control_state(thread->team,
 						JOB_CONTROL_STATE_STOPPED, signal, false);

 					// send a SIGCHLD to the parent (if it does have
 					// SA_NOCLDSTOP defined)
-					SpinLocker _(thread_spinlock);
+					SpinLocker _(gThreadSpinlock);
 					struct thread* parentThread
 						= thread->team->parent->main_thread;
 					struct sigaction& parentHandler
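Note the locker named "_" in the hunk above and throughout the diff: the object is never referenced again; it exists purely so its constructor takes the lock and its destructor releases it when the scope ends. A minimal stand-alone illustration of the idiom:

#include <cstdio>

class ScopeLocker {
public:
	ScopeLocker()  { std::puts("lock");   }
	~ScopeLocker() { std::puts("unlock"); }
};

static void
critical_section()
{
	ScopeLocker _;				// held for the rest of the scope
	std::puts("critical work");	// prints between "lock" and "unlock"
}

The @@ -366 hunk is also a live instance of the header's ordering rule: gTeamSpinlock is taken first (with interrupts disabled), and gThreadSpinlock is then nested inside it as a plain SpinLocker, since interrupts are already off.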
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.

@@ -59,15 +59,15 @@ struct smp_msg {

 static spinlock boot_cpu_spin[SMP_MAX_CPUS] = { };

-static struct smp_msg *free_msgs = NULL;
-static volatile int free_msg_count = 0;
-static spinlock free_msg_spinlock = B_SPINLOCK_INITIALIZER;
+static struct smp_msg *sFreeMessages = NULL;
+static volatile int sFreeMessageCount = 0;
+static spinlock sFreeMessageSpinlock = B_SPINLOCK_INITIALIZER;

-static struct smp_msg *smp_msgs[SMP_MAX_CPUS] = { NULL, };
-static spinlock cpu_msg_spinlock[SMP_MAX_CPUS];
+static struct smp_msg *sCPUMessages[SMP_MAX_CPUS] = { NULL, };
+static spinlock sCPUMessageSpinlock[SMP_MAX_CPUS];

-static struct smp_msg *smp_broadcast_msgs = NULL;
-static spinlock broadcast_msg_spinlock = B_SPINLOCK_INITIALIZER;
+static struct smp_msg *sBroadcastMessages = NULL;
+static spinlock sBroadcastMessageSpinlock = B_SPINLOCK_INITIALIZER;

 static bool sICIEnabled = false;
 static int32 sNumCPUs = 1;
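These renames spell out the naming pattern the commit message calls "our style guide", as it is visible in the diff itself: "g" for globals shared across files, "s" for file-static globals, "f" for class member fields, with CamelCase replacing under_scores. A compact illustration; the identifiers are made up for the example:

struct spinlock { int value; };

spinlock gExampleLock;			// extern-visible global: g prefix

static int sExampleCounter = 0;	// file-local static: s prefix

class ExampleQueue {
private:
	int	fLength;				// member variable: f prefix
};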
@@ -89,7 +89,7 @@ static void
 push_lock_caller(void *caller, spinlock *lock)
 {
 	sLastCaller[sLastIndex].caller = caller;
-	sLastCaller[sLastIndex].lock = lock;
+	sLastCaller[sLastIndex].lock = lock;

 	if (++sLastIndex >= NUM_LAST_CALLERS)
 		sLastIndex = 0;

@@ -139,7 +139,7 @@ acquire_spinlock(spinlock *lock)
 			panic("acquire_spinlock: attempt to acquire lock %p with interrupts enabled\n", lock);
 		oldValue = atomic_set((int32 *)lock, 1);
 		if (oldValue != 0) {
-			panic("acquire_spinlock: attempt to acquire lock %p twice on non-SMP system (last caller: %p, value %ld)\n",
+			panic("acquire_spinlock: attempt to acquire lock %p twice on non-SMP system (last caller: %p, value %ld)\n",
 				lock, find_lock_caller(lock), oldValue);
 		}

@@ -147,7 +147,7 @@ acquire_spinlock(spinlock *lock)
 #endif
 	}
-}
+}


 static void
 acquire_spinlock_nocheck(spinlock *lock)

@@ -226,24 +226,24 @@ find_free_message(struct smp_msg **msg)
 	TRACE(("find_free_message: entry\n"));

 retry:
-	while (free_msg_count <= 0)
+	while (sFreeMessageCount <= 0)
 		PAUSE();
 	state = disable_interrupts();
-	acquire_spinlock(&free_msg_spinlock);
+	acquire_spinlock(&sFreeMessageSpinlock);

-	if (free_msg_count <= 0) {
+	if (sFreeMessageCount <= 0) {
 		// someone grabbed one while we were getting the lock,
 		// go back to waiting for it
-		release_spinlock(&free_msg_spinlock);
+		release_spinlock(&sFreeMessageSpinlock);
 		restore_interrupts(state);
 		goto retry;
 	}

-	*msg = free_msgs;
-	free_msgs = (*msg)->next;
-	free_msg_count--;
+	*msg = sFreeMessages;
+	sFreeMessages = (*msg)->next;
+	sFreeMessageCount--;

-	release_spinlock(&free_msg_spinlock);
+	release_spinlock(&sFreeMessageSpinlock);

 	TRACE(("find_free_message: returning msg %p\n", *msg));
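find_free_message() above manages a singly linked free list under one spinlock, spinning on the count outside the lock and re-checking it after acquiring the lock. A compact userland sketch of the same pattern, with std::mutex standing in for the spinlock so it stays runnable outside the kernel:

#include <mutex>

struct smp_msg_sketch {
	smp_msg_sketch* next = nullptr;
};

static smp_msg_sketch* sFreeList = nullptr;
static int sFreeCount = 0;
static std::mutex sFreeListLock;

static smp_msg_sketch*
take_free_message()
{
	for (;;) {		// the kernel spins on sFreeMessageCount with PAUSE()
		std::lock_guard<std::mutex> _(sFreeListLock);
		// Re-check under the lock: another CPU may have taken the last
		// message between our unlocked peek and acquiring the lock.
		if (sFreeCount > 0) {
			smp_msg_sketch* msg = sFreeList;
			sFreeList = msg->next;
			sFreeCount--;
			return msg;
		}
	}
}

static void
put_free_message(smp_msg_sketch* msg)
{
	std::lock_guard<std::mutex> _(sFreeListLock);
	msg->next = sFreeList;		// push back onto the head of the list
	sFreeList = msg;
	sFreeCount++;
}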
@@ -256,11 +256,11 @@ return_free_message(struct smp_msg *msg)
 {
 	TRACE(("return_free_message: returning msg %p\n", msg));

-	acquire_spinlock_nocheck(&free_msg_spinlock);
-	msg->next = free_msgs;
-	free_msgs = msg;
-	free_msg_count++;
-	release_spinlock(&free_msg_spinlock);
+	acquire_spinlock_nocheck(&sFreeMessageSpinlock);
+	msg->next = sFreeMessages;
+	sFreeMessages = msg;
+	sFreeMessageCount++;
+	release_spinlock(&sFreeMessageSpinlock);
 }

@@ -272,20 +272,20 @@ check_for_message(int currentCPU, int *source_mailbox)
 	if (!sICIEnabled)
 		return NULL;

-	acquire_spinlock_nocheck(&cpu_msg_spinlock[currentCPU]);
-	msg = smp_msgs[currentCPU];
+	acquire_spinlock_nocheck(&sCPUMessageSpinlock[currentCPU]);
+	msg = sCPUMessages[currentCPU];
 	if (msg != NULL) {
-		smp_msgs[currentCPU] = msg->next;
-		release_spinlock(&cpu_msg_spinlock[currentCPU]);
+		sCPUMessages[currentCPU] = msg->next;
+		release_spinlock(&sCPUMessageSpinlock[currentCPU]);
 		TRACE((" cpu %d: found msg %p in cpu mailbox\n", currentCPU, msg));
 		*source_mailbox = MAILBOX_LOCAL;
 	} else {
 		// try getting one from the broadcast mailbox

-		release_spinlock(&cpu_msg_spinlock[currentCPU]);
-		acquire_spinlock_nocheck(&broadcast_msg_spinlock);
+		release_spinlock(&sCPUMessageSpinlock[currentCPU]);
+		acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);

-		msg = smp_broadcast_msgs;
+		msg = sBroadcastMessages;
 		while (msg != NULL) {
 			if (CHECK_BIT(msg->proc_bitmap, currentCPU) != 0) {
 				// we have handled this one already

@@ -298,7 +298,7 @@ check_for_message(int currentCPU, int *source_mailbox)
 			*source_mailbox = MAILBOX_BCAST;
 			break;
 		}
-		release_spinlock(&broadcast_msg_spinlock);
+		release_spinlock(&sBroadcastMessageSpinlock);
 		TRACE((" cpu %d: found msg %p in broadcast mailbox\n", currentCPU, msg));
 	}
 	return msg;

@@ -320,12 +320,12 @@ finish_message_processing(int currentCPU, struct smp_msg *msg, int source_mailbo
 	// clean up the message from one of the mailboxes
 	switch (source_mailbox) {
 		case MAILBOX_BCAST:
-			mbox = &smp_broadcast_msgs;
-			spinlock = &broadcast_msg_spinlock;
+			mbox = &sBroadcastMessages;
+			spinlock = &sBroadcastMessageSpinlock;
 			break;
 		case MAILBOX_LOCAL:
-			mbox = &smp_msgs[currentCPU];
-			spinlock = &cpu_msg_spinlock[currentCPU];
+			mbox = &sCPUMessages[currentCPU];
+			spinlock = &sCPUMessageSpinlock[currentCPU];
 			break;
 	}

@@ -462,8 +462,8 @@ spinlock_contention_syscall(const char* subsystem, uint32 function,
 	if (bufferSize < sizeof(spinlock_contention_info))
 		return B_BAD_VALUE;

-	info.thread_spinlock_counter = get_spinlock_counter(&thread_spinlock);
-	info.team_spinlock_counter = get_spinlock_counter(&team_spinlock);
+	info.thread_spinlock_counter = get_spinlock_counter(&gThreadSpinlock);
+	info.team_spinlock_counter = get_spinlock_counter(&gTeamSpinlock);

 	if (!IS_USER_ADDRESS(buffer)
 		|| user_memcpy(buffer, &info, sizeof(info)) != B_OK) {

@@ -529,10 +529,10 @@ smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2, uint32 d
 		msg->done = false;

 		// stick it in the appropriate cpu's mailbox
-		acquire_spinlock_nocheck(&cpu_msg_spinlock[targetCPU]);
-		msg->next = smp_msgs[targetCPU];
-		smp_msgs[targetCPU] = msg;
-		release_spinlock(&cpu_msg_spinlock[targetCPU]);
+		acquire_spinlock_nocheck(&sCPUMessageSpinlock[targetCPU]);
+		msg->next = sCPUMessages[targetCPU];
+		sCPUMessages[targetCPU] = msg;
+		release_spinlock(&sCPUMessageSpinlock[targetCPU]);

 		arch_smp_send_ici(targetCPU);

@@ -586,10 +586,10 @@ smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
 			currentCPU, msg));

 		// stick it in the appropriate cpu's mailbox
-		acquire_spinlock_nocheck(&broadcast_msg_spinlock);
-		msg->next = smp_broadcast_msgs;
-		smp_broadcast_msgs = msg;
-		release_spinlock(&broadcast_msg_spinlock);
+		acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
+		msg->next = sBroadcastMessages;
+		sBroadcastMessages = msg;
+		release_spinlock(&sBroadcastMessageSpinlock);

 		arch_smp_send_broadcast_ici();

@@ -671,8 +671,8 @@ smp_init(kernel_args *args)
 	TRACE(("smp_init: entry\n"));

 	if (args->num_cpus > 1) {
-		free_msgs = NULL;
-		free_msg_count = 0;
+		sFreeMessages = NULL;
+		sFreeMessageCount = 0;
 		for (i = 0; i < MSG_POOL_SIZE; i++) {
 			msg = (struct smp_msg *)malloc(sizeof(struct smp_msg));
 			if (msg == NULL) {

@@ -680,9 +680,9 @@ smp_init(kernel_args *args)
 				return B_ERROR;
 			}
 			memset(msg, 0, sizeof(struct smp_msg));
-			msg->next = free_msgs;
-			free_msgs = msg;
-			free_msg_count++;
+			msg->next = sFreeMessages;
+			sFreeMessages = msg;
+			sFreeMessageCount++;
 		}
 		sNumCPUs = args->num_cpus;
 	}
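In check_for_message() above, a broadcast message stays in the shared mailbox while each CPU records itself in msg->proc_bitmap; the "we have handled this one already" check is what CHECK_BIT tests. A tiny sketch of that bookkeeping; only the already-handled test is shown in the diff, so the marking and completion helpers are extrapolations, and it assumes fewer than 32 CPUs:

#include <cstdint>

static inline bool
cpu_already_handled(uint32_t procBitmap, int cpu)
{
	return (procBitmap & (1u << cpu)) != 0;	// what CHECK_BIT tests
}

static inline void
mark_cpu_handled(uint32_t& procBitmap, int cpu)
{
	procBitmap |= 1u << cpu;
}

static inline bool
all_cpus_handled(uint32_t procBitmap, int cpuCount)
{
	// Once every CPU's bit is set, the message can be unlinked from the
	// broadcast mailbox and returned to the free pool.
	return procBitmap == (1u << cpuCount) - 1;
}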
@@ -91,7 +91,7 @@ static struct team *sKernelTeam = NULL;
 static int32 sMaxTeams = 2048;
 static int32 sUsedTeams = 1;

-spinlock team_spinlock = B_SPINLOCK_INITIALIZER;
+spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;


 // #pragma mark - Tracing
@@ -101,73 +101,73 @@ spinlock team_spinlock = B_SPINLOCK_INITIALIZER;
 namespace TeamTracing {

 class TeamForked : public AbstractTraceEntry {
-	public:
-		TeamForked(thread_id forkedThread)
-			:
-			fForkedThread(forkedThread)
-		{
-			Initialized();
-		}
+public:
+	TeamForked(thread_id forkedThread)
+		:
+		fForkedThread(forkedThread)
+	{
+		Initialized();
+	}

-		virtual void AddDump(TraceOutput& out)
-		{
-			out.Print("team forked, new thread %ld", fForkedThread);
-		}
+	virtual void AddDump(TraceOutput& out)
+	{
+		out.Print("team forked, new thread %ld", fForkedThread);
+	}

-	private:
-		thread_id			fForkedThread;
+private:
+	thread_id			fForkedThread;
 };


 class ExecTeam : public AbstractTraceEntry {
-	public:
-		ExecTeam(const char* path, int32 argCount, const char* const* args,
-			int32 envCount, const char* const* env)
-			:
-			fArgCount(argCount),
-			fArgs(NULL)
-		{
-			fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
-				false);
+public:
+	ExecTeam(const char* path, int32 argCount, const char* const* args,
+		int32 envCount, const char* const* env)
+		:
+		fArgCount(argCount),
+		fArgs(NULL)
+	{
+		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
+			false);

-			// determine the buffer size we need for the args
-			size_t argBufferSize = 0;
-			for (int32 i = 0; i < argCount; i++)
-				argBufferSize += strlen(args[i]) + 1;
+		// determine the buffer size we need for the args
+		size_t argBufferSize = 0;
+		for (int32 i = 0; i < argCount; i++)
+			argBufferSize += strlen(args[i]) + 1;

-			// allocate a buffer
-			fArgs = (char*)alloc_tracing_buffer(argBufferSize);
-			if (fArgs) {
-				char* buffer = fArgs;
-				for (int32 i = 0; i < argCount; i++) {
-					size_t argSize = strlen(args[i]) + 1;
-					memcpy(buffer, args[i], argSize);
-					buffer += argSize;
-				}
-			}
+		// allocate a buffer
+		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
+		if (fArgs) {
+			char* buffer = fArgs;
+			for (int32 i = 0; i < argCount; i++) {
+				size_t argSize = strlen(args[i]) + 1;
+				memcpy(buffer, args[i], argSize);
+				buffer += argSize;
+			}
+		}

-			// ignore env for the time being
-			(void)envCount;
-			(void)env;
+		// ignore env for the time being
+		(void)envCount;
+		(void)env;

-			Initialized();
-		}
+		Initialized();
+	}

-		virtual void AddDump(TraceOutput& out)
-		{
-			out.Print("team exec, \"%p\", args:", fPath);
-
-			char* args = fArgs;
-			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
-				out.Print(" \"%s\"", args);
-				args += strlen(args) + 1;
-			}
-		}
+	virtual void AddDump(TraceOutput& out)
+	{
+		out.Print("team exec, \"%p\", args:", fPath);
+
+		char* args = fArgs;
+		for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
+			out.Print(" \"%s\"", args);
+			args += strlen(args) + 1;
+		}
+	}

-	private:
-		char*				fPath;
-		int32				fArgCount;
-		char*				fArgs;
+private:
+	char*				fPath;
+	int32				fArgCount;
+	char*				fArgs;
 };

@@ -190,91 +190,91 @@ job_control_state_name(job_control_state state)


 class SetJobControlState : public AbstractTraceEntry {
-	public:
-		SetJobControlState(team_id team, job_control_state newState, int signal)
-			:
-			fTeam(team),
-			fNewState(newState),
-			fSignal(signal)
-		{
-			Initialized();
-		}
+public:
+	SetJobControlState(team_id team, job_control_state newState, int signal)
+		:
+		fTeam(team),
+		fNewState(newState),
+		fSignal(signal)
+	{
+		Initialized();
+	}

-		virtual void AddDump(TraceOutput& out)
-		{
-			out.Print("team set job control state, team %ld, "
-				"new state: %s, signal: %d",
-				fTeam, job_control_state_name(fNewState), fSignal);
-		}
+	virtual void AddDump(TraceOutput& out)
+	{
+		out.Print("team set job control state, team %ld, "
+			"new state: %s, signal: %d",
+			fTeam, job_control_state_name(fNewState), fSignal);
+	}

-	private:
-		team_id				fTeam;
-		job_control_state	fNewState;
-		int					fSignal;
+private:
+	team_id				fTeam;
+	job_control_state	fNewState;
+	int					fSignal;
 };


 class WaitForChild : public AbstractTraceEntry {
-	public:
-		WaitForChild(pid_t child, uint32 flags)
-			:
-			fChild(child),
-			fFlags(flags)
-		{
-			Initialized();
-		}
+public:
+	WaitForChild(pid_t child, uint32 flags)
+		:
+		fChild(child),
+		fFlags(flags)
+	{
+		Initialized();
+	}

-		virtual void AddDump(TraceOutput& out)
-		{
-			out.Print("team wait for child, child: %ld, "
-				"flags: 0x%lx", fChild, fFlags);
-		}
+	virtual void AddDump(TraceOutput& out)
+	{
+		out.Print("team wait for child, child: %ld, "
+			"flags: 0x%lx", fChild, fFlags);
+	}

-	private:
-		pid_t				fChild;
-		uint32				fFlags;
+private:
+	pid_t				fChild;
+	uint32				fFlags;
 };


 class WaitForChildDone : public AbstractTraceEntry {
-	public:
-		WaitForChildDone(const job_control_entry& entry)
-			:
-			fState(entry.state),
-			fTeam(entry.thread),
-			fStatus(entry.status),
-			fReason(entry.reason),
-			fSignal(entry.signal)
-		{
-			Initialized();
-		}
+public:
+	WaitForChildDone(const job_control_entry& entry)
+		:
+		fState(entry.state),
+		fTeam(entry.thread),
+		fStatus(entry.status),
+		fReason(entry.reason),
+		fSignal(entry.signal)
+	{
+		Initialized();
+	}

-		WaitForChildDone(status_t error)
-			:
-			fTeam(error)
-		{
-			Initialized();
-		}
+	WaitForChildDone(status_t error)
+		:
+		fTeam(error)
+	{
+		Initialized();
+	}

-		virtual void AddDump(TraceOutput& out)
-		{
-			if (fTeam >= 0) {
-				out.Print("team wait for child done, team: %ld, "
-					"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
-					fTeam, job_control_state_name(fState), fStatus, fReason,
-					fSignal);
-			} else {
-				out.Print("team wait for child failed, error: "
-					"0x%lx, ", fTeam);
-			}
-		}
+	virtual void AddDump(TraceOutput& out)
+	{
+		if (fTeam >= 0) {
+			out.Print("team wait for child done, team: %ld, "
+				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
+				fTeam, job_control_state_name(fState), fStatus, fReason,
+				fSignal);
+		} else {
+			out.Print("team wait for child failed, error: "
+				"0x%lx, ", fTeam);
+		}
+	}

-	private:
-		job_control_state	fState;
-		team_id				fTeam;
-		status_t			fStatus;
-		uint16				fReason;
-		uint16				fSignal;
+private:
+	job_control_state	fState;
+	team_id				fTeam;
+	status_t			fStatus;
+	uint16				fReason;
+	uint16				fSignal;
 };

 }	// namespace TeamTracing
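The re-indented tracing classes above all share one shape: capture the interesting values in the constructor, call Initialized(), and render text lazily in AddDump() only when an entry is actually dumped. A minimal stand-alone sketch of that pattern; the base class and output type are simplified stand-ins for Haiku's AbstractTraceEntry and TraceOutput:

#include <cstdio>

class TraceOutputSketch {
public:
	void Print(const char* format, long value)
	{
		std::printf(format, value);
	}
};

class TraceEntrySketch {
public:
	virtual ~TraceEntrySketch() {}
	virtual void AddDump(TraceOutputSketch& out) = 0;
protected:
	void Initialized() { /* the real base records the entry here */ }
};

class TeamForkedSketch : public TraceEntrySketch {
public:
	TeamForkedSketch(long forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();			// capture only; no formatting yet
	}

	virtual void AddDump(TraceOutputSketch& out)
	{
		// Formatting cost is paid only when the entry is dumped.
		out.Print("team forked, new thread %ld\n", fForkedThread);
	}

private:
	long	fForkedThread;
};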
@@ -1639,7 +1639,7 @@ job_control_entry::job_control_entry()
 job_control_entry::~job_control_entry()
 {
 	if (has_group_ref) {
-		InterruptsSpinLocker locker(team_spinlock);
+		InterruptsSpinLocker locker(gTeamSpinlock);
 		release_process_group_ref(group_id);
 	}
 }

@@ -1706,7 +1706,7 @@ wait_for_child(pid_t child, uint32 flags, int32 *_reason,
 	bool ignoreFoundEntriesChecked = false;

 	while (true) {
-		InterruptsSpinLocker locker(team_spinlock);
+		InterruptsSpinLocker locker(gTeamSpinlock);

 		// check whether any condition holds
 		job_control_entry* entry = get_job_control_entry(team, child, flags);

@@ -1820,7 +1820,7 @@ wait_for_child(pid_t child, uint32 flags, int32 *_reason,
 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
 	// status is available.
 	if (is_signal_blocked(SIGCHLD)) {
-		InterruptsSpinLocker locker(team_spinlock);
+		InterruptsSpinLocker locker(gTeamSpinlock);

 		if (get_job_control_entry(team, child, flags) == NULL)
 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));

@@ -1905,7 +1905,7 @@ update_orphaned_process_group(process_group* group, pid_t dyingProcess)
 static bool
 process_group_has_stopped_processes(process_group* group)
 {
-	SpinLocker _(thread_spinlock);
+	SpinLocker _(gThreadSpinlock);

 	struct team* team = group->teams;
 	while (team != NULL) {

@@ -2101,7 +2101,7 @@ team_set_controlling_tty(int32 ttyIndex)
 {
 	struct team* team = thread_get_current_thread()->team;

-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	team->group->session->controlling_tty = ttyIndex;
 	team->group->session->foreground_group = -1;

@@ -2113,7 +2113,7 @@ team_get_controlling_tty()
 {
 	struct team* team = thread_get_current_thread()->team;

-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	return team->group->session->controlling_tty;
 }

@@ -2125,7 +2125,7 @@ team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
 	struct thread* thread = thread_get_current_thread();
 	struct team* team = thread->team;

-	InterruptsSpinLocker locker(team_spinlock);
+	InterruptsSpinLocker locker(gTeamSpinlock);

 	process_session* session = team->group->session;

@@ -2610,7 +2610,7 @@ team_free_user_thread(struct thread* thread)
 		return;
 	}

-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	entry->thread = userThread;
 	entry->next = thread->team->free_user_threads;

@@ -3069,7 +3069,7 @@ _user_setpgid(pid_t processID, pid_t groupID)
 			return B_NOT_ALLOWED;
 	} else {
 		// another team is the target of the call -- check it out
-		InterruptsSpinLocker _(team_spinlock);
+		InterruptsSpinLocker _(gTeamSpinlock);

 		team = team_get_team_struct_locked(processID);
 		if (team == NULL)

@@ -3106,7 +3106,7 @@ _user_setpgid(pid_t processID, pid_t groupID)
 	status_t status = B_OK;
 	struct process_group *freeGroup = NULL;

-	InterruptsSpinLocker locker(team_spinlock);
+	InterruptsSpinLocker locker(gTeamSpinlock);

 	team = team_get_team_struct_locked(processID);
 	if (team != NULL) {
@@ -60,7 +60,7 @@ struct thread_key {
 };

 // global
-spinlock thread_spinlock = B_SPINLOCK_INITIALIZER;
+spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;

 // thread list
 static struct thread sIdleThreads[B_MAX_CPU_COUNT];

@@ -537,7 +537,7 @@ undertaker(void* /*args*/)
 {
 	while (true) {
 		// wait for a thread to bury
-		InterruptsSpinLocker locker(thread_spinlock);
+		InterruptsSpinLocker locker(gThreadSpinlock);

 		while (sUndertakerEntries.IsEmpty()) {
 			ConditionVariableEntry conditionEntry;

@@ -1587,7 +1587,7 @@ thread_at_kernel_exit(void)
 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));

 	while (handle_signals(thread)) {
-		InterruptsSpinLocker _(thread_spinlock);
+		InterruptsSpinLocker _(gThreadSpinlock);
 		scheduler_reschedule();
 	}

@@ -1743,7 +1743,7 @@ thread_yield(bool force)
 			return;

 		// Don't force the thread off the CPU, just reschedule.
-		InterruptsSpinLocker _(thread_spinlock);
+		InterruptsSpinLocker _(gThreadSpinlock);
 		scheduler_reschedule();
 	}
 }

@@ -1894,7 +1894,7 @@ wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
 status_t
 select_thread(int32 id, struct select_info* info, bool kernel)
 {
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 	// get thread
 	struct thread* thread = thread_get_thread_struct_locked(id);

@@ -1920,7 +1920,7 @@ select_thread(int32 id, struct select_info* info, bool kernel)
 status_t
 deselect_thread(int32 id, struct select_info* info, bool kernel)
 {
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 	// get thread
 	struct thread* thread = thread_get_thread_struct_locked(id);

@@ -2125,7 +2125,7 @@ thread_block_timeout(timer* timer)
 status_t
 thread_block()
 {
-	InterruptsSpinLocker _(thread_spinlock);
+	InterruptsSpinLocker _(gThreadSpinlock);
 	return thread_block_locked(thread_get_current_thread());
 }

@@ -2133,7 +2133,7 @@ thread_block()
 bool
 thread_unblock(status_t threadID, status_t status)
 {
-	InterruptsSpinLocker _(thread_spinlock);
+	InterruptsSpinLocker _(gThreadSpinlock);

 	struct thread* thread = thread_get_thread_struct_locked(threadID);
 	if (thread == NULL)

@@ -2145,7 +2145,7 @@ thread_unblock(status_t threadID, status_t status)
 status_t
 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
 {
-	InterruptsSpinLocker _(thread_spinlock);
+	InterruptsSpinLocker _(gThreadSpinlock);
 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
 }

@@ -2468,7 +2468,7 @@ snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
 	if (timebase != B_SYSTEM_TIMEBASE)
 		return B_BAD_VALUE;

-	InterruptsSpinLocker _(thread_spinlock);
+	InterruptsSpinLocker _(gThreadSpinlock);
 	struct thread* thread = thread_get_current_thread();

 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);

@@ -2812,7 +2812,7 @@ _user_block_thread(uint32 flags, bigtime_t timeout)

 	struct thread* thread = thread_get_current_thread();

-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);

 	// check, if already done
 	if (thread->user_thread->wait_status <= 0)

@@ -2830,7 +2830,7 @@ _user_block_thread(uint32 flags, bigtime_t timeout)
 status_t
 _user_unblock_thread(thread_id threadID, status_t status)
 {
-	InterruptsSpinLocker locker(thread_spinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock);
 	return user_unblock_thread(threadID, status);
 }
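thread_block() and thread_block_with_timeout() above illustrate the suffix convention running through the whole diff: a "_locked" function expects the caller to already hold gThreadSpinlock, and the unsuffixed wrapper is just the locked core plus the locker. A stand-alone sketch of that layering, with std::mutex standing in for the spinlock:

#include <mutex>

static std::mutex gThreadLockSketch;	// stands in for gThreadSpinlock

static long
thread_block_locked_sketch()
{
	// Caller must already hold gThreadLockSketch; thread state may be
	// touched safely here.
	return 0;
}

static long
thread_block_sketch()
{
	// Wrapper: acquire the lock, then defer to the _locked core.
	std::lock_guard<std::mutex> _(gThreadLockSketch);
	return thread_block_locked_sketch();
}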
@@ -39,7 +39,7 @@ common_setregid(gid_t rgid, gid_t egid, bool setAllIfPrivileged, bool kernel)
 {
 	struct team* team = thread_get_current_thread()->team;

-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	bool privileged = kernel || is_privileged(team);

@@ -102,7 +102,7 @@ common_setreuid(uid_t ruid, uid_t euid, bool setAllIfPrivileged, bool kernel)
 {
 	struct team* team = thread_get_current_thread()->team;

-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	bool privileged = kernel || is_privileged(team);

@@ -165,7 +165,7 @@ common_getgroups(int groupCount, gid_t* groupList, bool kernel)
 {
 	struct team* team = thread_get_current_thread()->team;

-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	const gid_t* groups = team->supplementary_groups;
 	int actualCount = team->supplementary_group_count;

@@ -223,7 +223,7 @@ common_setgroups(int groupCount, const gid_t* groupList, bool kernel)
 		}
 	}

-	InterruptsSpinLocker locker(team_spinlock);
+	InterruptsSpinLocker locker(gTeamSpinlock);

 	struct team* team = thread_get_current_thread()->team;

@@ -245,7 +245,7 @@ common_setgroups(int groupCount, const gid_t* groupList, bool kernel)
 void
 inherit_parent_user_and_group(struct team* team, struct team* parent)
 {
-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	team->saved_set_uid = parent->saved_set_uid;
 	team->real_uid = parent->real_uid;

@@ -268,7 +268,7 @@ update_set_id_user_and_group(struct team* team, const char* file)
 	if (status != B_OK)
 		return status;

-	InterruptsSpinLocker _(team_spinlock);
+	InterruptsSpinLocker _(gTeamSpinlock);

 	if ((st.st_mode & S_ISUID) != 0) {
 		team->saved_set_uid = st.st_uid;