* Strip down scheduler_simple. Anything related to multiple CPU handling has
  been removed. That includes CPU disabling and thread pinning, as both become
  pointless with only one CPU.
* Return a proper reschedule hint when enqueuing a thread, based on the
  priority of the current thread vs. the enqueued one.
* Enable dynamic scheduler selection. With one CPU the simple scheduler will
  be used, otherwise affine is selected.
* Removed the scheduler type define, as we now always auto-select it.
* Some cleanup.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@32573 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent d3ae6ef65b
commit 861b223cf2
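The most interesting behavioral change is the new reschedule hint: instead of always returning false, simple_enqueue_in_run_queue() now reports whether the enqueued thread outranks the currently running one, so the caller knows an immediate reschedule is worthwhile. A minimal sketch of the pattern, using stand-in types rather than the kernel's real ones:

#include <stdbool.h>

/* Stand-in for the kernel's thread structure (illustration only). */
struct thread {
	int priority;
};

static struct thread *sCurrentThread;

/* Returns true if the caller should reschedule: the enqueued thread has
 * a higher priority than the thread that is currently running. */
static bool
enqueue_in_run_queue(struct thread *thread)
{
	/* ... insert thread into the priority-ordered run queue ... */
	return thread->priority > sCurrentThread->priority;
}

A caller (for example code resuming a blocked thread) can then act on the hint right away instead of waiting for the running thread's quantum to expire.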
@@ -10,11 +10,6 @@
 #include "scheduler_affine.h"
 #include "scheduler_simple.h"
 
-// Defines which scheduler(s) to use. Possible values:
-// 0 - Auto-select scheduler based on detected core count
-// 1 - Always use the simple scheduler
-// 2 - Always use the affine scheduler
-#define SCHEDULER_TYPE 2
 
 struct scheduler_ops* gScheduler;
 SchedulerListenerList gSchedulerListeners;
@@ -46,21 +41,17 @@ scheduler_remove_listener(struct SchedulerListener* listener)
 void
 scheduler_init(void)
 {
-	int32 cpu_count = smp_get_num_cpus();
-	dprintf("scheduler_init: found %ld logical cpus\n", cpu_count);
-#if SCHEDULER_TYPE == 0
-	if (cpu_count > 1) {
+	int32 cpuCount = smp_get_num_cpus();
+	dprintf("scheduler_init: found %ld logical cpu%s\n", cpuCount,
+		cpuCount != 1 ? "s" : "");
+
+	if (cpuCount > 1) {
 		dprintf("scheduler_init: using affine scheduler\n");
 		scheduler_affine_init();
 	} else {
 		dprintf("scheduler_init: using simple scheduler\n");
 		scheduler_simple_init();
 	}
-#elif SCHEDULER_TYPE == 1
-	scheduler_simple_init();
-#elif SCHEDULER_TYPE == 2
-	scheduler_affine_init();
-#endif
 
 #if SCHEDULER_TRACING
 	add_debugger_command_etc("scheduler", &cmd_scheduler,
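gScheduler is a pointer to an ops table, so "dynamic scheduler selection" is simply a question of which init function fills that pointer in at boot. A rough sketch of the mechanism; the fields shown in scheduler_ops here are assumptions for illustration, not the kernel's actual layout:

#include <stdbool.h>
#include <stddef.h>

struct thread;

/* Hypothetical subset of the ops table. */
struct scheduler_ops {
	bool (*enqueue_in_run_queue)(struct thread *thread);
	void (*reschedule)(void);
};

static struct scheduler_ops sSimpleOps;	/* filled by the simple scheduler */
static struct scheduler_ops sAffineOps;	/* filled by the affine scheduler */

struct scheduler_ops *gScheduler = NULL;

static void scheduler_simple_init(void) { gScheduler = &sSimpleOps; }
static void scheduler_affine_init(void) { gScheduler = &sAffineOps; }

/* Mirrors the new scheduler_init(): one CPU gets the simple scheduler,
 * anything else the affine one. */
static void
scheduler_init(int cpuCount)
{
	if (cpuCount > 1)
		scheduler_affine_init();
	else
		scheduler_simple_init();
}

After init, generic kernel code calls through gScheduler without caring which implementation was picked.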
@@ -19,7 +19,6 @@
 #include <kscheduler.h>
 #include <listeners.h>
 #include <scheduler_defs.h>
-#include <smp.h>
 #include <thread.h>
 #include <timer.h>
 #include <user_debugger.h>
@@ -37,7 +36,6 @@
 
 // The run queue. Holds the threads ready to run ordered by priority.
 static struct thread *sRunQueue = NULL;
-static cpu_mask_t sIdleCPUs = 0;
 
 
 static int
@@ -80,14 +78,6 @@ dump_run_queue(int argc, char **argv)
 static bool
 simple_enqueue_in_run_queue(struct thread *thread)
 {
-	if (thread->state == B_THREAD_RUNNING) {
-		// The thread is currently running (on another CPU) and we cannot
-		// insert it into the run queue. Set the next state to ready so the
-		// thread is inserted into the run queue on the next reschedule.
-		thread->next_state = B_THREAD_READY;
-		return false;
-	}
-
 	thread->state = thread->next_state = B_THREAD_READY;
 
 	struct thread *curr, *prev;
@@ -110,47 +100,11 @@ simple_enqueue_in_run_queue(struct thread *thread)
 
 	thread->next_priority = thread->priority;
 
-	if (thread->priority != B_IDLE_PRIORITY) {
-		int32 currentCPU = smp_get_current_cpu();
-		if (sIdleCPUs != 0) {
-			if (thread->pinned_to_cpu > 0) {
-				// thread is pinned to a CPU -- notify it, if it is idle
-				int32 targetCPU = thread->previous_cpu->cpu_num;
-				if ((sIdleCPUs & (1 << targetCPU)) != 0) {
-					sIdleCPUs &= ~(1 << targetCPU);
-					smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE_IF_IDLE, 0, 0,
-						0, NULL, SMP_MSG_FLAG_ASYNC);
-				}
-			} else {
-				// Thread is not pinned to any CPU -- take it ourselves, if we
-				// are idle, otherwise notify the next idle CPU. In either case
-				// we clear the idle bit of the chosen CPU, so that the
-				// simple_enqueue_in_run_queue() won't try to bother the
-				// same CPU again, if invoked before it handled the interrupt.
-				cpu_mask_t idleCPUs = CLEAR_BIT(sIdleCPUs, currentCPU);
-				if ((sIdleCPUs & (1 << currentCPU)) != 0) {
-					sIdleCPUs = idleCPUs;
-				} else {
-					int32 targetCPU = 0;
-					for (; targetCPU < B_MAX_CPU_COUNT; targetCPU++) {
-						cpu_mask_t mask = 1 << targetCPU;
-						if ((idleCPUs & mask) != 0) {
-							sIdleCPUs &= ~mask;
-							break;
-						}
-					}
-
-					smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE_IF_IDLE, 0, 0,
-						0, NULL, SMP_MSG_FLAG_ASYNC);
-				}
-			}
-		}
-	}
-
 	// notify listeners
 	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
 		thread);
-	return false;
+
+	return thread->priority > thread_get_current_thread()->priority;
 }
 
 
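For reference, the insertion logic that this hunk leaves in place (the curr/prev walk following struct thread *curr, *prev;) keeps the run queue sorted by descending priority. A self-contained sketch of that kind of insertion, again with simplified types rather than the kernel's:

#include <stddef.h>

struct thread {
	int priority;
	struct thread *queue_next;
};

static struct thread *sRunQueue = NULL;

/* Insert so the queue stays sorted by descending priority; threads of
 * equal priority line up behind existing ones, giving round-robin order
 * within a priority level. */
static void
enqueue_sorted(struct thread *thread)
{
	struct thread *curr = sRunQueue, *prev = NULL;

	while (curr != NULL && curr->priority >= thread->priority) {
		prev = curr;
		curr = curr->queue_next;
	}

	thread->queue_next = curr;
	if (prev != NULL)
		prev->queue_next = thread;
	else
		sRunQueue = thread;
}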
@@ -243,7 +197,7 @@ simple_reschedule(void)
 	struct thread *oldThread = thread_get_current_thread();
 	struct thread *nextThread, *prevThread;
 
-	TRACE(("reschedule(): cpu %d, cur_thread = %ld\n", smp_get_current_cpu(), thread_get_current_thread()->id));
+	TRACE(("reschedule(): current thread = %ld\n", oldThread->id));
 
 	oldThread->cpu->invoke_scheduler = false;
@@ -251,7 +205,8 @@ simple_reschedule(void)
 	switch (oldThread->next_state) {
 		case B_THREAD_RUNNING:
 		case B_THREAD_READY:
-			TRACE(("enqueueing thread %ld into run q. pri = %ld\n", oldThread->id, oldThread->priority));
+			TRACE(("enqueueing thread %ld into run queue priority = %ld\n",
+				oldThread->id, oldThread->priority));
 			simple_enqueue_in_run_queue(oldThread);
 			break;
 		case B_THREAD_SUSPENDED:
@@ -260,24 +215,14 @@ simple_reschedule(void)
 		case THREAD_STATE_FREE_ON_RESCHED:
 			break;
 		default:
-			TRACE(("not enqueueing thread %ld into run q. next_state = %ld\n", oldThread->id, oldThread->next_state));
+			TRACE(("not enqueueing thread %ld into run queue next_state = %ld\n",
+				oldThread->id, oldThread->next_state));
 			break;
 	}
 
 	nextThread = sRunQueue;
 	prevThread = NULL;
 
-	if (oldThread->cpu->disabled) {
-		// CPU is disabled - service any threads we may have that are pinned,
-		// otherwise just select the idle thread
-		while (nextThread && nextThread->priority > B_IDLE_PRIORITY) {
-			if (nextThread->pinned_to_cpu > 0 &&
-				nextThread->previous_cpu == oldThread->cpu)
-				break;
-			prevThread = nextThread;
-			nextThread = nextThread->queue_next;
-		}
-	} else {
 	while (nextThread) {
 		// select next thread from the run queue
 		while (nextThread && nextThread->priority > B_IDLE_PRIORITY) {
@@ -290,20 +235,13 @@ simple_reschedule(void)
 			}
 #endif
 
-			// skip thread, if it doesn't want to run on this CPU
-			if (nextThread->pinned_to_cpu > 0
-				&& nextThread->previous_cpu != oldThread->cpu) {
-				prevThread = nextThread;
-				nextThread = nextThread->queue_next;
-				continue;
-			}
-
 			// always extract real time threads
 			if (nextThread->priority >= B_FIRST_REAL_TIME_PRIORITY)
 				break;
 
 			// never skip last non-idle normal thread
-			if (nextThread->queue_next && nextThread->queue_next->priority == B_IDLE_PRIORITY)
+			if (nextThread->queue_next
+				&& nextThread->queue_next->priority == B_IDLE_PRIORITY)
 				break;
 
 			// skip normal threads sometimes (roughly 20%)
@@ -320,19 +258,8 @@ simple_reschedule(void)
 				&& nextThread->queue_next->priority > B_IDLE_PRIORITY);
 		}
 
-		if (nextThread->cpu
-			&& nextThread->cpu->cpu_num != oldThread->cpu->cpu_num) {
-			panic("thread in run queue that's still running on another CPU!\n");
-			// ToDo: remove this check completely when we're sure that this
-			// cannot happen anymore.
-			prevThread = nextThread;
-			nextThread = nextThread->queue_next;
-			continue;
-		}
-
 		break;
 	}
-	}
 
 	if (!nextThread)
 		panic("reschedule(): run queue is empty!\n");
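The selection loop that survives these hunks implements the simple scheduler's fairness policy: real-time threads are always taken, the last non-idle thread is never skipped, and ordinary threads are occasionally passed over so lower-priority threads still get CPU time. A toy version of that policy; the "roughly 20%" is approximated with rand() here, whereas the kernel uses its own pseudo-random helper, and the priority constants are illustrative stand-ins:

#include <stdlib.h>

struct thread {
	int priority;
	struct thread *queue_next;
};

enum { B_IDLE_PRIORITY = 0, B_FIRST_REAL_TIME_PRIORITY = 100 };

/* Walk the priority-sorted queue and pick the next thread to run. */
static struct thread *
select_next(struct thread *runQueue)
{
	struct thread *next = runQueue;

	while (next != NULL && next->priority > B_IDLE_PRIORITY) {
		/* always extract real time threads */
		if (next->priority >= B_FIRST_REAL_TIME_PRIORITY)
			break;

		/* never skip the last non-idle thread */
		if (next->queue_next == NULL
			|| next->queue_next->priority == B_IDLE_PRIORITY)
			break;

		/* take this thread ~80% of the time, skip it ~20% */
		if (rand() % 5 != 0)
			break;

		next = next->queue_next;
	}

	return next;
}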
@@ -381,13 +308,6 @@ simple_reschedule(void)
 		add_timer(quantumTimer, &reschedule_event, quantum,
 			B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
 
-	// update the idle bit for this CPU in the CPU mask
-	int32 cpuNum = smp_get_current_cpu();
-	if (nextThread->priority == B_IDLE_PRIORITY)
-		sIdleCPUs = SET_BIT(sIdleCPUs, cpuNum);
-	else
-		sIdleCPUs = CLEAR_BIT(sIdleCPUs, cpuNum);
-
 	if (nextThread != oldThread)
 		context_switch(oldThread, nextThread);
 }
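The add_timer() call kept at the end arms a one-shot quantum timer whose hook requests a reschedule when the quantum expires. A plausible shape for such a hook, assuming the kernel's timer API from <timer.h> and <thread.h>; the body here is a sketch, not necessarily this file's exact code:

#include <thread.h>
#include <timer.h>

/* Sketch only: when the quantum is used up, ask for a reschedule on the
 * way out of the timer interrupt. */
static int32
reschedule_event(timer *unused)
{
	/* the scheduler picks the next thread when the interrupt returns */
	thread_get_current_thread()->cpu->invoke_scheduler = true;
	return B_INVOKE_SCHEDULER;
}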