kernel: Remove gSchedulerLock

* Thread::scheduler_lock protects thread state, priority, etc.
* sThreadCreationLock protects thread creation and removal and list of
  threads in team.
* Team::signal_lock and Team::time_lock protect list of threads in team
  as well.
* Scheduler uses its own internal locking.
Pawel Dziepak, 2013-11-08 02:41:26 +01:00
parent 72addc62e0, commit 03fb2d8868
21 changed files with 182 additions and 178 deletions
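
The change in one sketch, lifted from the new thread_block()/thread_unblock()
in the thread.cpp hunks at the end of this diff (illustrative only, not part
of the change itself): blocking and unblocking now take the affected thread's
own spinlock rather than a global one.

    status_t
    thread_block()
    {
        InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
        return thread_block_locked(thread_get_current_thread());
    }

    void
    thread_unblock(Thread* thread, status_t status)
    {
        // per-thread lock instead of the former global gSchedulerLock
        InterruptsSpinLocker locker(thread->scheduler_lock);
        thread_unblock_locked(thread, status);
    }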


@ -56,19 +56,13 @@ public:
void Publish(const void* object,
const char* objectType);
-void Unpublish(bool schedulerLocked = false);
+void Unpublish();
-inline void NotifyOne(bool schedulerLocked = false,
-status_t result = B_OK);
-inline void NotifyAll(bool schedulerLocked = false,
-status_t result = B_OK);
+inline void NotifyOne(status_t result = B_OK);
+inline void NotifyAll(status_t result = B_OK);
-static void NotifyOne(const void* object,
-bool schedulerLocked = false,
-status_t result = B_OK);
-static void NotifyAll(const void* object,
-bool schedulerLocked = false,
-status_t result = B_OK);
+static void NotifyOne(const void* object, status_t result);
+static void NotifyAll(const void* object, status_t result);
// (both methods) caller must ensure that
// the variable is not unpublished
// concurrently
@ -86,8 +80,7 @@ public:
void Dump() const;
private:
-void _Notify(bool all, bool schedulerLocked,
-status_t result);
+void _Notify(bool all, status_t result);
void _NotifyLocked(bool all, status_t result);
protected:
@ -124,16 +117,16 @@ ConditionVariableEntry::~ConditionVariableEntry()
inline void
-ConditionVariable::NotifyOne(bool schedulerLocked, status_t result)
+ConditionVariable::NotifyOne(status_t result)
{
-_Notify(false, schedulerLocked, result);
+_Notify(false, result);
}
inline void
-ConditionVariable::NotifyAll(bool schedulerLocked, status_t result)
+ConditionVariable::NotifyAll(status_t result)
{
-_Notify(true, schedulerLocked, result);
+_Notify(true, result);
}
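
With the schedulerLocked flag gone, callers state the wake-up status
explicitly. For example, patterns from the port.cpp and FreeBSD-compat
hunks later in this diff:

    // member variant: the status is now the only argument
    port->read_condition.NotifyAll(B_BAD_PORT_ID);

    // static variant: object key plus an explicit status
    ConditionVariable::NotifyAll(waitChannel, B_OK);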


@ -24,15 +24,13 @@ typedef enum scheduler_mode {
SCHEDULER_MODE_COUNT
} scheduler_mode;
-extern spinlock gSchedulerLock;
#ifdef __cplusplus
extern "C" {
#endif
/*! Enqueues the thread in the ready-to-run queue.
-The caller must hold the scheduler lock (with disabled interrupts).
+The caller must hold the enqueued thread \c scheduler_lock.
*/
void scheduler_enqueue_in_run_queue(Thread* thread);
@ -42,15 +40,14 @@ void scheduler_enqueue_in_run_queue(Thread* thread);
If it's the same thread, the thread will just continue to run.
In either case, unless the thread is dead or is sleeping/waiting
indefinitely, the function will eventually return.
-The caller must hold the scheduler lock (with disabled interrupts).
+The caller must hold the current thread \c scheduler_lock.
*/
void scheduler_reschedule(void);
/*! Sets the given thread's priority.
The thread may be running or may be in the ready-to-run queue.
-The caller must hold the scheduler lock (with disabled interrupts).
*/
-void scheduler_set_thread_priority(Thread* thread, int32 priority);
+int32 scheduler_set_thread_priority(Thread* thread, int32 priority);
/*! Called when the Thread structure is first created.
Per-thread housekeeping resources can be allocated.
@ -61,7 +58,6 @@ status_t scheduler_on_thread_create(Thread* thread, bool idleThread);
/*! Called when a Thread structure is initialized and made ready for
use.
The per-thread housekeeping data structures are reset, if needed.
-The caller must hold the scheduler lock (with disabled interrupts).
*/
void scheduler_on_thread_init(Thread* thread);
@ -75,8 +71,6 @@ void scheduler_on_thread_destroy(Thread* thread);
/*! Called in the early boot process to start thread scheduling on the
current CPU.
The function is called once for each CPU.
-Interrupts must be disabled, but the caller must not hold the scheduler
-lock.
*/
void scheduler_start(void);
@ -122,11 +116,14 @@ scheduler_reschedule_if_necessary()
{
if (are_interrupts_enabled()) {
cpu_status state = disable_interrupts();
-acquire_spinlock(&gSchedulerLock);
+Thread* thread = get_cpu_struct()->running_thread;
+acquire_spinlock(&thread->scheduler_lock);
scheduler_reschedule_if_necessary_locked();
-release_spinlock(&gSchedulerLock);
+release_spinlock(&thread->scheduler_lock);
restore_interrupts(state);
}
}
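
Waking another thread under the new rule means taking that thread's own
scheduler_lock first (interrupts already disabled); a minimal sketch of the
pattern the signal.cpp hunks below repeat:

    SpinLocker locker(thread->scheduler_lock);
    if (thread->state == B_THREAD_SUSPENDED)
        scheduler_enqueue_in_run_queue(thread);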


@ -34,7 +34,7 @@ struct SchedulerListener : DoublyLinkedListLinkImpl<SchedulerListener> {
typedef DoublyLinkedList<SchedulerListener> SchedulerListenerList;
extern SchedulerListenerList gSchedulerListeners;
-// guarded by the thread spinlock
+extern spinlock gSchedulerListenersLock;
template<typename Parameter1>


@ -46,7 +46,7 @@ thread_id load_image_etc(int32 argCount, const char* const* args,
const char* const* env, int32 priority, team_id parentID, uint32 flags);
void team_set_job_control_state(Team* team, job_control_state newState,
-Signal* signal, bool threadsLocked);
+Signal* signal);
void team_set_controlling_tty(int32 index);
int32 team_get_controlling_tty();
status_t team_set_foreground_process_group(int32 ttyIndex, pid_t processGroup);


@ -69,6 +69,9 @@ public:
using BKernel::ThreadCreationAttributes;
+extern spinlock gThreadCreationLock;
#ifdef __cplusplus
extern "C" {
#endif


@ -243,10 +243,10 @@ struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
struct job_control_entry* job_control_entry;
VMAddressSpace *address_space;
-Thread *main_thread; // protected by fLock and the scheduler
-// lock (and the thread's lock), immutable
+Thread *main_thread; // protected by fLock, immutable
// after first set
-Thread *thread_list; // protected by fLock and the scheduler lock
+Thread *thread_list; // protected by fLock, signal_lock and
+// gThreadCreationLock
struct team_loading_info *loading_info; // protected by fLock
struct list image_list; // protected by sImageMutex
struct list watcher_list;
@ -270,8 +270,7 @@ struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
bigtime_t cpu_clock_offset;
spinlock time_lock;
-// user group information; protected by fLock, the *_uid/*_gid fields also
-// by the scheduler lock
+// user group information; protected by fLock
uid_t saved_set_uid;
uid_t real_uid;
uid_t effective_uid;
@ -430,6 +429,7 @@ struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable,
struct cpu_ent *previous_cpu; // protected by scheduler lock
int32 pinned_to_cpu; // only accessed by this thread or in the
// scheduler, when thread is not running
+spinlock scheduler_lock;
sigset_t sig_block_mask; // protected by team->signal_lock,
// only modified by the thread itself
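
Putting a thread on a team's thread_list accordingly takes all of the guards
named in the field comment above; condensed from the thread_create_thread()
hunk near the end of this diff (there, gThreadCreationLock is acquired
earlier in the function):

    InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
    {
        SpinLocker signalLocker(team->signal_lock);
        SpinLocker timeLocker(team->time_lock);
        insert_thread_into_team(team, thread);
    }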


@ -39,12 +39,6 @@ invalidate_loop(void *data)
static void
exit_debugger()
{
-// If someone holds the scheduler lock at this point, release_sem_etc()
-// will block forever. So avoid that.
-if (!try_acquire_spinlock(&gSchedulerLock))
-return;
-release_spinlock(&gSchedulerLock);
release_sem_etc(sRequestSem, 1, B_DO_NOT_RESCHEDULE);
}


@ -77,5 +77,5 @@ publishedConditionTimedWait(const void* waitChannel, const int timeout)
void
publishedConditionNotifyAll(const void* waitChannel)
{
-ConditionVariable::NotifyAll(waitChannel);
+ConditionVariable::NotifyAll(waitChannel, B_OK);
}


@ -231,7 +231,7 @@ x86_hardware_interrupt(struct iframe* frame)
cpu_status state = disable_interrupts();
if (thread->cpu->invoke_scheduler) {
-SpinLocker schedulerLocker(gSchedulerLock);
+SpinLocker schedulerLocker(thread->scheduler_lock);
scheduler_reschedule();
schedulerLocker.Unlock();
restore_interrupts(state);


@ -217,13 +217,11 @@ ConditionVariable::Publish(const void* object, const char* objectType)
void
-ConditionVariable::Unpublish(bool schedulerLocked)
+ConditionVariable::Unpublish()
{
ASSERT(fObject != NULL);
-InterruptsLocker _;
-SpinLocker schedulerLocker(schedulerLocked ? NULL : &gSchedulerLock);
-SpinLocker locker(sConditionVariablesLock);
+InterruptsSpinLocker locker(sConditionVariablesLock);
#if KDEBUG
ConditionVariable* variable = sConditionVariableHash.Lookup(fObject);
@ -259,8 +257,7 @@ ConditionVariable::Wait(uint32 flags, bigtime_t timeout)
/*static*/ void
-ConditionVariable::NotifyOne(const void* object, bool schedulerLocked,
-status_t result)
+ConditionVariable::NotifyOne(const void* object, status_t result)
{
InterruptsSpinLocker locker(sConditionVariablesLock);
ConditionVariable* variable = sConditionVariableHash.Lookup(object);
@ -268,13 +265,12 @@ ConditionVariable::NotifyOne(const void* object, bool schedulerLocked,
if (variable == NULL)
return;
-variable->NotifyOne(schedulerLocked, result);
+variable->NotifyOne(result);
}
/*static*/ void
-ConditionVariable::NotifyAll(const void* object, bool schedulerLocked,
-status_t result)
+ConditionVariable::NotifyAll(const void* object, status_t result)
{
InterruptsSpinLocker locker(sConditionVariablesLock);
ConditionVariable* variable = sConditionVariableHash.Lookup(object);
@ -282,7 +278,7 @@ ConditionVariable::NotifyAll(const void* object, bool schedulerLocked,
if (variable == NULL)
return;
-variable->NotifyAll(schedulerLocked, result);
+variable->NotifyAll(result);
}
@ -318,11 +314,9 @@ ConditionVariable::Dump() const
void
-ConditionVariable::_Notify(bool all, bool schedulerLocked, status_t result)
+ConditionVariable::_Notify(bool all, status_t result)
{
-InterruptsLocker _;
-SpinLocker schedulerLocker(schedulerLocked ? NULL : &gSchedulerLock);
-SpinLocker locker(sConditionVariablesLock);
+InterruptsSpinLocker locker(sConditionVariablesLock);
if (!fEntries.IsEmpty()) {
if (result > B_OK) {
@ -348,8 +342,10 @@ ConditionVariable::_NotifyLocked(bool all, status_t result)
if (entry->fWaitStatus <= 0)
continue;
-if (entry->fWaitStatus == STATUS_WAITING)
+if (entry->fWaitStatus == STATUS_WAITING) {
+SpinLocker _(entry->fThread->scheduler_lock);
thread_unblock_locked(entry->fThread, result);
+}
entry->fWaitStatus = result;


@ -220,6 +220,7 @@ SystemProfiler::_MaybeNotifyProfilerThreadLocked()
int cpu = smp_get_current_cpu();
fReentered[cpu] = true;
+InterruptsSpinLocker _(fWaitingProfilerThread->scheduler_lock);
thread_unblock_locked(fWaitingProfilerThread, B_OK);
fWaitingProfilerThread = NULL;
@ -234,7 +235,6 @@ SystemProfiler::_MaybeNotifyProfilerThread()
if (fWaitingProfilerThread == NULL)
return;
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
SpinLocker locker(fLock);
_MaybeNotifyProfilerThreadLocked();
@ -305,10 +305,8 @@ SystemProfiler::~SystemProfiler()
locker.Unlock();
// stop scheduler listening
-if (fSchedulerNotificationsRequested) {
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+if (fSchedulerNotificationsRequested)
scheduler_remove_listener(this);
-}
// stop wait object listening
if (fWaitObjectNotificationsRequested) {
@ -498,8 +496,6 @@ SystemProfiler::Init()
fThreadNotificationsEnabled = true;
}
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
fProfilingActive = true;
// start scheduler and wait object listening
@ -521,8 +517,6 @@ SystemProfiler::Init()
}
}
-schedulerLocker.Unlock();
// I/O scheduling
if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
IOSchedulerRoster* roster = IOSchedulerRoster::Default();
@ -571,12 +565,9 @@ SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
Thread* thread = thread_get_current_thread();
fWaitingProfilerThread = thread;
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
thread_prepare_to_block(thread, B_CAN_INTERRUPT,
THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");
-schedulerLocker.Unlock();
locker.Unlock();
status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);


@ -440,7 +440,7 @@ finish_debugger_change(Team* team)
ConditionVariable* condition = team->debug_info.debugger_changed_condition;
team->debug_info.debugger_changed_condition = NULL;
-condition->NotifyAll(false);
+condition->NotifyAll();
}
@ -2901,7 +2901,7 @@ _user_debug_thread(thread_id threadID)
// resume/interrupt the thread, if necessary
threadDebugInfoLocker.Unlock();
-SpinLocker schedulerLocker(gSchedulerLock);
+SpinLocker schedulerLocker(thread->scheduler_lock);
switch (thread->state) {
case B_THREAD_SUSPENDED:
@ -2916,6 +2916,10 @@ _user_debug_thread(thread_id threadID)
// about to acquire a semaphore (before
// thread_prepare_to_block()), we won't interrupt it.
// Maybe we should rather send a signal (SIGTRAP).
+schedulerLocker.Unlock();
+schedulerLocker.SetTo(thread_get_current_thread()->scheduler_lock,
+false);
+scheduler_reschedule_if_necessary_locked();
break;
}


@ -205,10 +205,10 @@ IOCache::OperationCompleted(IOOperation* operation, status_t status,
{
if (status == B_OK) {
// always fail in case of partial transfers
-((Operation*)operation)->finishedCondition.NotifyAll(false,
+((Operation*)operation)->finishedCondition.NotifyAll(
transferredBytes == operation->Length() ? B_OK : B_ERROR);
} else
-((Operation*)operation)->finishedCondition.NotifyAll(false, status);
+((Operation*)operation)->finishedCondition.NotifyAll(status);
}
@ -498,7 +498,7 @@ IOCache::_DoOperation(Operation& operation)
status_t error = fIOCallback(fIOCallbackData, &operation);
if (error != B_OK) {
-operation.finishedCondition.NotifyAll(false, error);
+operation.finishedCondition.NotifyAll(error);
// removes the entry from the variable
return error;
}


@ -370,15 +370,20 @@ notify_loading_app(status_t result, bool suspend)
// we're done with the team stuff, get the scheduler lock instead
teamLocker.Unlock();
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+Thread* thread = loadingInfo->thread;
+InterruptsSpinLocker schedulerLocker(thread->scheduler_lock);
// wake up the waiting thread
-if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
-scheduler_enqueue_in_run_queue(loadingInfo->thread);
+if (thread->state == B_THREAD_SUSPENDED)
+scheduler_enqueue_in_run_queue(thread);
+schedulerLocker.Unlock();
// suspend ourselves, if desired
if (suspend) {
-thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
+Thread* thread = thread_get_current_thread();
+InterruptsSpinLocker schedulerLocker(thread->scheduler_lock);
+thread->next_state = B_THREAD_SUSPENDED;
scheduler_reschedule();
}
}


@ -684,8 +684,8 @@ uninit_port_locked(Port* port)
// Release the threads that were blocking on this port.
// read_port() will see the B_BAD_PORT_ID return value, and act accordingly
-port->read_condition.NotifyAll(false, B_BAD_PORT_ID);
-port->write_condition.NotifyAll(false, B_BAD_PORT_ID);
+port->read_condition.NotifyAll(B_BAD_PORT_ID);
+port->write_condition.NotifyAll(B_BAD_PORT_ID);
sNotificationService.Notify(PORT_REMOVED, port->id);
}
@ -891,8 +891,8 @@ close_port(port_id id)
notify_port_select_events(port, B_EVENT_INVALID);
port->select_infos = NULL;
-port->read_condition.NotifyAll(false, B_BAD_PORT_ID);
-port->write_condition.NotifyAll(false, B_BAD_PORT_ID);
+port->read_condition.NotifyAll(B_BAD_PORT_ID);
+port->write_condition.NotifyAll(B_BAD_PORT_ID);
return B_OK;
}


@ -45,10 +45,11 @@
#endif
-spinlock gSchedulerLock = B_SPINLOCK_INITIALIZER;
SchedulerListenerList gSchedulerListeners;
+spinlock gSchedulerListenersLock = B_SPINLOCK_INITIALIZER;
-bool sSchedulerEnabled;
+static spinlock sSchedulerInternalLock;
+static bool sSchedulerEnabled;
const bigtime_t kThreadQuantum = 1000;
const bigtime_t kMinThreadQuantum = 3000;
@ -1129,6 +1130,8 @@ enqueue(Thread* thread, bool newOne)
void
scheduler_enqueue_in_run_queue(Thread *thread)
{
+InterruptsSpinLocker _(sSchedulerInternalLock);
TRACE("enqueueing new thread %ld with static priority %ld\n", thread->id,
thread->priority);
enqueue(thread, true);
@ -1156,17 +1159,19 @@ put_back(Thread* thread)
/*! Sets the priority of a thread.
Note: thread lock must be held when entering this function
*/
-void
+int32
scheduler_set_thread_priority(Thread *thread, int32 priority)
{
+InterruptsSpinLocker _(sSchedulerInternalLock);
if (priority == thread->priority)
-return;
+return thread->priority;
+int32 oldPriority = thread->priority;
TRACE("changing thread %ld priority to %ld (old: %ld, effective: %ld)\n",
-thread->id, priority, thread->priority,
-get_effective_priority(thread));
+thread->id, priority, oldPriority, get_effective_priority(thread));
if (thread->state != B_THREAD_READY) {
cancel_penalty(thread);
@ -1174,7 +1179,7 @@ scheduler_set_thread_priority(Thread *thread, int32 priority)
if (thread->state == B_THREAD_RUNNING)
update_priority_heaps(thread->cpu->cpu_num, priority);
-return;
+return oldPriority;
}
// The thread is in the run queue. We need to remove it and re-insert it at
@ -1194,8 +1199,9 @@ scheduler_set_thread_priority(Thread *thread, int32 priority)
// set priority and re-insert
cancel_penalty(thread);
thread->priority = priority;
-enqueue(thread, true);
+scheduler_enqueue_in_run_queue(thread);
+return oldPriority;
}
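
Returning the old priority lets callers drop their own locking around the
read-modify sequence; set_thread_priority() in the thread.cpp hunk at the
end of this diff now simply forwards:

    return scheduler_set_thread_priority(thread, priority);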
@ -1403,6 +1409,8 @@ update_cpu_performance(Thread* thread, int32 thisCore)
static void
_scheduler_reschedule(void)
{
+InterruptsSpinLocker internalLocker(sSchedulerInternalLock);
Thread* oldThread = thread_get_current_thread();
int32 thisCPU = smp_get_current_cpu();
@ -1473,6 +1481,8 @@ _scheduler_reschedule(void)
nextThread = dequeue_thread(thisCPU);
if (!nextThread)
panic("reschedule(): run queues are empty!\n");
+if (nextThread != oldThread)
+acquire_spinlock(&nextThread->scheduler_lock);
TRACE("reschedule(): cpu %ld, next thread = %ld\n", thisCPU,
nextThread->id);
@ -1513,6 +1523,7 @@ _scheduler_reschedule(void)
} else
nextThread->scheduler_data->quantum_start = system_time();
+internalLocker.Unlock();
if (nextThread != oldThread)
scheduler_switch_thread(oldThread, nextThread);
}
@ -1566,7 +1577,7 @@ scheduler_on_thread_destroy(Thread* thread)
void
scheduler_start(void)
{
-SpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
_scheduler_reschedule();
}
@ -1583,7 +1594,7 @@ scheduler_set_operation_mode(scheduler_mode mode)
const char* modeNames[] = { "performance", "power saving" };
dprintf("scheduler: switching to %s mode\n", modeNames[mode]);
-InterruptsSpinLocker _(gSchedulerLock);
+InterruptsSpinLocker _(sSchedulerInternalLock);
sSchedulerMode = mode;
switch (mode) {
@ -1877,6 +1888,7 @@ SchedulerListener::~SchedulerListener()
void
scheduler_add_listener(struct SchedulerListener* listener)
{
+InterruptsSpinLocker _(gSchedulerListenersLock);
gSchedulerListeners.Add(listener);
}
@ -1886,6 +1898,7 @@ scheduler_add_listener(struct SchedulerListener* listener)
void
scheduler_remove_listener(struct SchedulerListener* listener)
{
+InterruptsSpinLocker _(gSchedulerListenersLock);
gSchedulerListeners.Remove(listener);
}


@ -45,6 +45,8 @@ scheduler_switch_thread(Thread* fromThread, Thread* toThread)
arch_thread_set_current_thread(toThread);
arch_thread_context_switch(fromThread, toThread);
+release_spinlock(&fromThread->cpu->previous_thread->scheduler_lock);
// The use of fromThread below looks weird, but is correct. fromThread had
// been unscheduled earlier, but is back now. For a thread scheduled the
// first time the same is done in thread.cpp:common_thread_entry().
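
Pieced together from the hunks above and common_thread_entry() in thread.cpp
below, the lock handoff across a context switch looks like this (sketch;
surrounding logic elided):

    // in _scheduler_reschedule(), before switching:
    if (nextThread != oldThread)
        acquire_spinlock(&nextThread->scheduler_lock);

    // in scheduler_switch_thread(), once the switch returns on this CPU:
    arch_thread_context_switch(fromThread, toThread);
    release_spinlock(&fromThread->cpu->previous_thread->scheduler_lock);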


@ -393,7 +393,7 @@ delete_sem_internal(sem_id id, bool checkPermission)
char* name;
uninit_sem_locked(sSems[slot], &name);
-SpinLocker schedulerLocker(gSchedulerLock);
+SpinLocker schedulerLocker(thread_get_current_thread()->scheduler_lock);
scheduler_reschedule_if_necessary_locked();
schedulerLocker.Unlock();
@ -644,9 +644,8 @@ remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
// for that time, so the blocking state of threads won't change (due to
// interruption or timeout). We need that lock anyway when unblocking a
// thread.
-SpinLocker schedulerLocker(gSchedulerLock);
while ((entry = sem->queue.Head()) != NULL) {
+SpinLocker schedulerLocker(entry->thread->scheduler_lock);
if (thread_is_blocked(entry->thread)) {
// The thread is still waiting. If its count is satisfied, unblock
// it. Otherwise we can't unblock any other thread.
@ -665,8 +664,6 @@ remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
entry->queued = false;
}
-schedulerLocker.Unlock();
// select notification, if the semaphore is now acquirable
if (sem->u.used.count > 0)
notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
@ -823,7 +820,7 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
// do a quick check to see if the thread has any pending signals
// this should catch most of the cases where the thread had a signal
-SpinLocker schedulerLocker(gSchedulerLock);
+SpinLocker schedulerLocker(thread->scheduler_lock);
if (thread_is_interrupted(thread, flags)) {
schedulerLocker.Unlock();
sSems[slot].u.used.count += count;
@ -832,6 +829,8 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
goto err;
}
+schedulerLocker.Unlock();
if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
timeout = B_INFINITE_TIMEOUT;
@ -843,7 +842,6 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
(void*)(addr_t)id);
-schedulerLocker.Unlock();
RELEASE_SEM_LOCK(sSems[slot]);
// release the other semaphore, if any
@ -968,8 +966,6 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
// possible interruptions or timeouts, it wouldn't be otherwise).
-SpinLocker schedulerLocker(gSchedulerLock);
while (count > 0) {
queued_thread* entry = sSems[slot].queue.Head();
if (entry == NULL) {
@ -980,6 +976,7 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
break;
}
+SpinLocker schedulerLock(entry->thread->scheduler_lock);
if (thread_is_blocked(entry->thread)) {
// The thread is still waiting. If its count is satisfied,
// unblock it. Otherwise we can't unblock any other thread.
@ -1005,8 +1002,6 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
entry->queued = false;
}
-schedulerLocker.Unlock();
if (sSems[slot].u.used.count > 0)
notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);
@ -1014,7 +1009,8 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
// been told not to.
if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
semLocker.Unlock();
-schedulerLocker.Lock();
+SpinLocker _(thread_get_current_thread()->scheduler_lock);
scheduler_reschedule_if_necessary_locked();
}
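
The queue walks above now freeze one waiter at a time instead of the whole
scheduler; condensed from the release_sem_etc() hunk (count bookkeeping and
select notification elided):

    while ((entry = sSems[slot].queue.Head()) != NULL) {
        SpinLocker schedulerLock(entry->thread->scheduler_lock);
        if (thread_is_blocked(entry->thread))
            thread_unblock_locked(entry->thread, B_OK);
        sSems[slot].queue.Remove(entry);
        entry->queued = false;
    }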


@ -1064,7 +1064,7 @@ handle_signals(Thread* thread)
team->LockTeamAndParent(false);
team_set_job_control_state(team,
-JOB_CONTROL_STATE_CONTINUED, signal, false);
+JOB_CONTROL_STATE_CONTINUED, signal);
team->UnlockTeamAndParent();
@ -1099,7 +1099,7 @@ handle_signals(Thread* thread)
team->LockTeamAndParent(false);
team_set_job_control_state(team,
-JOB_CONTROL_STATE_STOPPED, signal, false);
+JOB_CONTROL_STATE_STOPPED, signal);
// send a SIGCHLD to the parent (if it does have
// SA_NOCLDSTOP defined)
@ -1128,7 +1128,7 @@ handle_signals(Thread* thread)
locker.Unlock();
if (!resume) {
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker _(thread->scheduler_lock);
thread->next_state = B_THREAD_SUSPENDED;
scheduler_reschedule();
}
@ -1377,7 +1377,7 @@ send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
if (thread->team == team_get_kernel_team()) {
// Signals to kernel threads will only wake them up
-SpinLocker _(gSchedulerLock);
+SpinLocker _(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
return B_OK;
@ -1401,7 +1401,7 @@ send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
mainThread->AddPendingSignal(SIGKILLTHR);
// wake up main thread
-SpinLocker locker(gSchedulerLock);
+SpinLocker locker(mainThread->scheduler_lock);
if (mainThread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(mainThread);
else
@ -1416,7 +1416,7 @@ send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
case SIGKILLTHR:
{
// Wake up suspended threads and interrupt waiting ones
-SpinLocker locker(gSchedulerLock);
+SpinLocker locker(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
else
@ -1427,7 +1427,7 @@ send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
case SIGNAL_CONTINUE_THREAD:
{
// wake up thread, and interrupt its current syscall
-SpinLocker locker(gSchedulerLock);
+SpinLocker locker(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
@ -1438,7 +1438,7 @@ send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
{
// Wake up thread if it was suspended, otherwise interrupt it, if
// the signal isn't blocked.
-SpinLocker locker(gSchedulerLock);
+SpinLocker locker(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
@ -1455,7 +1455,7 @@ send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
!= 0) {
// Interrupt thread if it was waiting
-SpinLocker locker(gSchedulerLock);
+SpinLocker locker(thread->scheduler_lock);
thread_interrupt(thread, false);
}
break;
@ -1605,7 +1605,7 @@ send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
mainThread->AddPendingSignal(SIGKILLTHR);
// wake up main thread
-SpinLocker _(gSchedulerLock);
+SpinLocker _(mainThread->scheduler_lock);
if (mainThread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(mainThread);
else
@ -1619,7 +1619,7 @@ send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
// don't block the signal.
for (Thread* thread = team->thread_list; thread != NULL;
thread = thread->team_next) {
-SpinLocker _(gSchedulerLock);
+SpinLocker _(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED) {
scheduler_enqueue_in_run_queue(thread);
} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
@ -1662,7 +1662,7 @@ send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
sigset_t nonBlocked = ~thread->sig_block_mask
| SIGNAL_TO_MASK(SIGCHLD);
if ((thread->AllPendingSignals() & nonBlocked) != 0) {
-SpinLocker _(gSchedulerLock);
+SpinLocker _(thread->scheduler_lock);
thread_interrupt(thread, false);
}
}


@ -1794,11 +1794,13 @@ load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
// wait for the loader of the new team to finish its work
if ((flags & B_WAIT_TILL_LOADED) != 0) {
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+if (mainThread != NULL) {
+InterruptsSpinLocker schedulerLocker(mainThread->scheduler_lock);
-// resume the team's main thread
-if (mainThread != NULL && mainThread->state == B_THREAD_SUSPENDED)
-scheduler_enqueue_in_run_queue(mainThread);
+// resume the team's main thread
+if (mainThread->state == B_THREAD_SUSPENDED)
+scheduler_enqueue_in_run_queue(mainThread);
+}
// Now suspend ourselves until loading is finished. We will be woken
// either by the thread, when it finished or aborted loading, or when
@ -1806,12 +1808,13 @@ load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
// setting `loadingInfo.done' is responsible for removing the info from
// the team structure.
while (!loadingInfo.done) {
-thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
+Thread* thread = thread_get_current_thread();
+InterruptsSpinLocker schedulerLocker(thread->scheduler_lock);
+thread->next_state = B_THREAD_SUSPENDED;
scheduler_reschedule();
}
-schedulerLocker.Unlock();
if (loadingInfo.result < B_OK)
return loadingInfo.result;
}
@ -2444,7 +2447,7 @@ wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
} else {
// The child is well. Reset its job control state.
team_set_job_control_state(entry->team,
-JOB_CONTROL_STATE_NONE, NULL, false);
+JOB_CONTROL_STATE_NONE, NULL);
}
}
}
@ -2531,14 +2534,16 @@ wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
// status is available.
TeamLocker teamLocker(team);
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker signalLocker(team->signal_lock);
+SpinLocker threadCreationLocker(gThreadCreationLock);
if (is_team_signal_blocked(team, SIGCHLD)) {
if (get_job_control_entry(team, child, flags) == NULL)
team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
}
-schedulerLocker.Unlock();
+threadCreationLocker.Unlock();
+signalLocker.Unlock();
teamLocker.Unlock();
// When the team is dead, the main thread continues to live in the kernel
@ -2925,12 +2930,12 @@ team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
if (session->foreground_group != -1
&& session->foreground_group != team->group_id
&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker signalLocker(team->signal_lock);
if (!is_team_signal_blocked(team, SIGTTOU)) {
pid_t groupID = team->group_id;
-schedulerLocker.Unlock();
+signalLocker.Unlock();
sessionLocker.Unlock();
teamLocker.Unlock();
@ -3170,7 +3175,7 @@ team_delete_team(Team* team, port_id debuggerPort)
loadingInfo->result = B_ERROR;
loadingInfo->done = true;
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker _(loadingInfo->thread->scheduler_lock);
// wake up the waiting thread
if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
@ -3256,8 +3261,7 @@ team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
/*! Sets the team's job control state.
The caller must hold the parent team's lock. Interrupts are allowed to be
-enabled or disabled. In the latter case the scheduler lock may be held as
-well.
+enabled or disabled.
\a team The team whose job control state shall be set.
\a newState The new state to be set.
\a signal The signal the new state was caused by. Can \c NULL, if none. Then
@ -3266,11 +3270,10 @@ team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
\c JOB_CONTROL_STATE_NONE:
- \c signal: The number of the signal causing the state change.
- \c signaling_user: The real UID of the user sending the signal.
-\a schedulerLocked indicates whether the scheduler lock is being held, too.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
-Signal* signal, bool schedulerLocked)
+Signal* signal)
{
if (team == NULL || team->job_control_entry == NULL)
return;
@ -3326,8 +3329,7 @@ team_set_job_control_state(Team* team, job_control_state newState,
if (childList != NULL) {
childList->entries.Add(entry);
-team->parent->dead_children.condition_variable.NotifyAll(
-schedulerLocked);
+team->parent->dead_children.condition_variable.NotifyAll();
}
}
@ -4087,7 +4089,7 @@ _user_setpgid(pid_t processID, pid_t groupID)
// Changing the process group might have changed the situation for a
// parent waiting in wait_for_child(). Hence we notify it.
-team->parent->dead_children.condition_variable.NotifyAll(false);
+team->parent->dead_children.condition_variable.NotifyAll();
return group->id;
}
@ -4129,7 +4131,7 @@ _user_setsid(void)
// Changing the process group might have changed the situation for a
// parent waiting in wait_for_child(). Hence we notify it.
-team->parent->dead_children.condition_variable.NotifyAll(false);
+team->parent->dead_children.condition_variable.NotifyAll();
return group->id;
}


@ -81,6 +81,8 @@ static thread_id sNextThreadID = 2;
static int32 sMaxThreads = 4096;
static int32 sUsedThreads = 0;
+spinlock gThreadCreationLock = B_SPINLOCK_INITIALIZER;
struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
Thread* thread;
@ -206,6 +208,7 @@ Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
B_INITIALIZE_SPINLOCK(&time_lock);
+B_INITIALIZE_SPINLOCK(&scheduler_lock);
B_INITIALIZE_RW_SPINLOCK(&team_lock);
// init name
@ -713,7 +716,8 @@ common_thread_entry(void* _args)
user_debug_thread_scheduled(thread);
// unlock the scheduler lock and enable interrupts
-release_spinlock(&gSchedulerLock);
+release_spinlock(&thread->cpu->previous_thread->scheduler_lock);
+release_spinlock(&thread->scheduler_lock);
enable_interrupts();
// call the kernel function, if any
@ -1020,7 +1024,8 @@ thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
// for our own use (and threadReference remains armed).
ThreadLocker threadLocker(thread);
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
SpinLocker threadHashLocker(sThreadHashLock);
// check the thread limit
@ -1028,7 +1033,7 @@ thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
// Clean up the user_thread structure. It's a bit unfortunate that the
// Thread destructor cannot do that, so we have to do that explicitly.
threadHashLocker.Unlock();
-schedulerLocker.Unlock();
+threadCreationLocker.Unlock();
user_thread* userThread = thread->user_thread;
thread->user_thread = NULL;
@ -1044,6 +1049,7 @@ thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
// make thread visible in global hash/list
thread->visible = true;
sUsedThreads++;
+scheduler_on_thread_init(thread);
thread->AcquireReference();
@ -1060,11 +1066,16 @@ thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
}
}
-// insert thread into team
-insert_thread_into_team(team, thread);
+{
+SpinLocker signalLocker(team->signal_lock);
+SpinLocker timeLocker(team->time_lock);
+// insert thread into team
+insert_thread_into_team(team, thread);
+}
threadHashLocker.Unlock();
-schedulerLocker.Unlock();
+threadCreationLocker.Unlock();
threadLocker.Unlock();
teamLocker.Unlock();
@ -1106,13 +1117,17 @@ undertaker(void* /*args*/)
Team* kernelTeam = team_get_kernel_team();
TeamLocker kernelTeamLocker(kernelTeam);
thread->Lock();
-InterruptsSpinLocker signalLocker(kernelTeam->signal_lock);
-SpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
+SpinLocker signalLocker(kernelTeam->signal_lock);
+SpinLocker timeLocker(kernelTeam->time_lock);
remove_thread_from_team(kernelTeam, thread);
-schedulerLocker.Unlock();
+timeLocker.Unlock();
signalLocker.Unlock();
+threadCreationLocker.Unlock();
kernelTeamLocker.Unlock();
// free the thread structure
@ -1921,10 +1936,7 @@ thread_exit(void)
panic("thread_exit() called with interrupts disabled!\n");
// boost our priority to get this over with
-{
-InterruptsSpinLocker _(gSchedulerLock);
-scheduler_set_thread_priority(thread, B_URGENT_DISPLAY_PRIORITY);
-}
+scheduler_set_thread_priority(thread, B_URGENT_DISPLAY_PRIORITY);
if (team != kernelTeam) {
// Delete all user timers associated with the thread.
@ -1989,14 +2001,17 @@ thread_exit(void)
vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
WriteSpinLocker teamLocker(thread->team_lock);
-SpinLocker schedulerLocker(gSchedulerLock);
+SpinLocker threadCreationLocker(gThreadCreationLock);
// removing the thread and putting its death entry to the parent
// team needs to be an atomic operation
// remember how long this thread lasted
bigtime_t now = system_time();
-InterruptsSpinLocker teamTimeLocker(team->time_lock);
+InterruptsSpinLocker signalLocker(kernelTeam->signal_lock);
+SpinLocker teamTimeLocker(kernelTeam->time_lock);
SpinLocker threadTimeLocker(thread->time_lock);
thread->kernel_time += now - thread->last_time;
thread->last_time = now;
@ -2014,17 +2029,19 @@ thread_exit(void)
thread->DeactivateCPUTimeUserTimers();
threadTimeLocker.Unlock();
-teamTimeLocker.Unlock();
// put the thread into the kernel team until it dies
remove_thread_from_team(team, thread);
insert_thread_into_team(kernelTeam, thread);
+teamTimeLocker.Unlock();
+signalLocker.Unlock();
teamLocker.Unlock();
if (team->death_entry != NULL) {
if (--team->death_entry->remaining_threads == 0)
-team->death_entry->condition.NotifyOne(true, B_OK);
+team->death_entry->condition.NotifyOne();
}
if (deleteTeam) {
@ -2032,8 +2049,7 @@ thread_exit(void)
// Set the team job control state to "dead" and detach the job
// control entry from our team struct.
-team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL,
-true);
+team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL);
death = team->job_control_entry;
team->job_control_entry = NULL;
@ -2050,7 +2066,7 @@ thread_exit(void)
death = NULL;
}
-schedulerLocker.Unlock();
+threadCreationLocker.Unlock();
restore_interrupts(state);
threadLocker.Unlock();
@ -2113,7 +2129,7 @@ thread_exit(void)
}
}
-schedulerLocker.Unlock();
+threadCreationLocker.Unlock();
restore_interrupts(state);
threadLocker.Unlock();
@ -2138,7 +2154,7 @@ thread_exit(void)
ThreadLocker threadLocker(thread);
state = disable_interrupts();
-SpinLocker schedulerLocker(gSchedulerLock);
+SpinLocker threadCreationLocker(gThreadCreationLock);
// mark invisible in global hash/list, so it's no longer accessible
SpinLocker threadHashLocker(sThreadHashLock);
@ -2156,7 +2172,7 @@ thread_exit(void)
select_info* selectInfos = thread->select_infos;
thread->select_infos = NULL;
-schedulerLocker.Unlock();
+threadCreationLocker.Unlock();
restore_interrupts(state);
threadLocker.Unlock();
@ -2243,7 +2259,7 @@ thread_exit(void)
sUndertakerCondition.NotifyOne();
undertakerLocker.Unlock();
-schedulerLocker.Lock();
+SpinLocker schedulerLocker(thread->scheduler_lock);
thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
scheduler_reschedule();
@ -2392,7 +2408,7 @@ thread_yield(void)
if (thread == NULL)
return;
-InterruptsSpinLocker _(gSchedulerLock);
+InterruptsSpinLocker _(thread->scheduler_lock);
thread->has_yielded = true;
scheduler_reschedule();
@ -2604,7 +2620,6 @@ thread_get_io_priority(thread_id id)
int32 priority = thread->io_priority;
if (priority < 0) {
// negative I/O priority means using the (CPU) priority
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
priority = thread->priority;
}
@ -2798,7 +2813,7 @@ thread_block_timeout(timer* timer)
status_t
thread_block()
{
-InterruptsSpinLocker _(gSchedulerLock);
+InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
return thread_block_locked(thread_get_current_thread());
}
@ -2835,7 +2850,7 @@ thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
{
Thread* thread = thread_get_current_thread();
-InterruptsSpinLocker locker(gSchedulerLock);
+InterruptsSpinLocker locker(thread->scheduler_lock);
if (thread->wait.status != 1)
return thread->wait.status;
@ -2881,7 +2896,7 @@ thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
void
thread_unblock(Thread* thread, status_t status)
{
-InterruptsSpinLocker _(gSchedulerLock);
+InterruptsSpinLocker locker(thread->scheduler_lock);
thread_unblock_locked(thread, status);
}
@ -2902,7 +2917,7 @@ user_unblock_thread(thread_id threadID, status_t status)
if (thread->user_thread == NULL)
return B_NOT_ALLOWED;
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker locker(thread->scheduler_lock);
if (thread->user_thread->wait_status > 0) {
thread->user_thread->wait_status = status;
@ -3005,7 +3020,7 @@ _get_thread_info(thread_id id, thread_info *info, size_t size)
ThreadLocker threadLocker(thread, true);
// fill the info -- also requires the scheduler lock to be held
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker locker(thread->scheduler_lock);
fill_thread_info(thread, info, size);
@ -3055,7 +3070,7 @@ _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
*_cookie = lastID;
ThreadLocker threadLocker(thread);
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+InterruptsSpinLocker locker(thread->scheduler_lock);
fill_thread_info(thread, info, size);
@ -3121,8 +3136,6 @@ rename_thread(thread_id id, const char* name)
status_t
set_thread_priority(thread_id id, int32 priority)
{
-int32 oldPriority;
// make sure the passed in priority is within bounds
if (priority > THREAD_MAX_SET_PRIORITY)
priority = THREAD_MAX_SET_PRIORITY;
@ -3140,12 +3153,7 @@ set_thread_priority(thread_id id, int32 priority)
if (thread_is_idle_thread(thread))
return B_NOT_ALLOWED;
-InterruptsSpinLocker schedulerLocker(gSchedulerLock);
-oldPriority = thread->priority;
-scheduler_set_thread_priority(thread, priority);
-return oldPriority;
+return scheduler_set_thread_priority(thread, priority);
}