* Added cpu_ent::running_thread, which is maintained by the schedulers.
* simple_smp scheduler: Rewrote the interesting part of
  enqueue_in_run_queue(). It now always selects a target CPU for the
  inserted thread. If no CPU is idle, the CPU running the thread with
  the lowest priority is chosen. If the thread running on the target CPU
  has a lower priority than the inserted one, it is asked to reschedule.
  If that is the current CPU, we now return the correct value
  (previously this wasn't done at all). These changes help reduce
  latencies. On my machine, in an idle system playing music,
  DebugAnalyzer shows maximum latencies of about 1 us. I still find that
  a bit much, but it's several orders of magnitude better than before.
  The -j8 Haiku image build time dropped about 10%.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34635 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent edc69377fa
commit b4be7c9021
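
To make the diff below easier to follow, here is a compact, self-contained sketch of the CPU-selection policy the message describes. Everything in it (struct fake_cpu, NUM_CPUS, select_target_cpu()) is a simplified stand-in for illustration, not kernel API, and CPU pinning is omitted.

/*
 * Illustrative sketch of the rewritten enqueue_in_run_queue() policy.
 * All names here are inventions for this example.
 */
#include <stdbool.h>

#define NUM_CPUS 8

struct fake_cpu {
	int		running_priority;	/* priority of the thread running there */
	bool	idle;
};

static struct fake_cpu sCPU[NUM_CPUS];

static int
select_target_cpu(int currentCPU)
{
	/* Prefer an idle CPU, the current one first (no ICI needed). */
	if (sCPU[currentCPU].idle)
		return currentCPU;

	for (int i = 0; i < NUM_CPUS; i++) {
		if (sCPU[i].idle)
			return i;
	}

	/* No idle CPU: take the one running the lowest-priority thread,
	   favoring the current CPU on ties. */
	int targetCPU = currentCPU;
	int targetPriority = sCPU[currentCPU].running_priority;
	for (int i = 0; i < NUM_CPUS; i++) {
		if (sCPU[i].running_priority < targetPriority) {
			targetPriority = sCPU[i].running_priority;
			targetCPU = i;
		}
	}
	return targetCPU;
}

With the target chosen, the real code only notifies it when the enqueued thread's priority beats the target's running thread (or the target was idle), and it returns true instead of sending an ICI when the target is the current CPU.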
headers/private/kernel/cpu.h

@@ -23,28 +23,32 @@
 #endif
 
+struct thread;
+
 
 /* CPU local data structure */
 
 typedef struct cpu_ent {
 	int				cpu_num;
 
 	// thread.c: used to force a reschedule at quantum expiration time
 	int				preempted;
 	timer			quantum_timer;
 
 	// keeping track of CPU activity
 	bigtime_t		active_time;
 	bigtime_t		last_kernel_time;
 	bigtime_t		last_user_time;
 
 	// used in the kernel debugger
 	addr_t			fault_handler;
 	addr_t			fault_handler_stack_pointer;
 	jmp_buf			fault_jump_buffer;
 
+	struct thread*	running_thread;
+
 	bool			invoke_scheduler;
 	bool			invoke_scheduler_if_idle;
 	bool			disabled;
 
 	// arch-specific stuff
 	arch_cpu_info	arch;
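
The new running_thread member is what makes the scheduler changes below possible: any CPU can now see what its peers are running without sending inter-CPU interrupts. A hypothetical accessor just to illustrate the read (the helper name is invented; in the actual commit the reads happen inline, under the scheduler lock):

// Hypothetical helper, for illustration only -- the commit itself reads
// gCPU[i].running_thread inline, with the scheduler lock held.
static inline int32
cpu_running_priority(int32 cpuIndex)
{
	return gCPU[cpuIndex].running_thread->priority;
}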
src/system/kernel/scheduler/scheduler_affine.cpp

@@ -330,8 +330,10 @@ context_switch(struct thread *fromThread, struct thread *toThread)
 	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
 		user_debug_thread_unscheduled(fromThread);
 
-	toThread->previous_cpu = toThread->cpu = fromThread->cpu;
+	cpu_ent* cpu = fromThread->cpu;
+	toThread->previous_cpu = toThread->cpu = cpu;
 	fromThread->cpu = NULL;
+	cpu->running_thread = toThread;
 
 	arch_thread_set_current_thread(toThread);
 	arch_thread_context_switch(fromThread, toThread);
src/system/kernel/scheduler/scheduler_simple.cpp

@@ -162,8 +162,10 @@ context_switch(struct thread *fromThread, struct thread *toThread)
 	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
 		user_debug_thread_unscheduled(fromThread);
 
-	toThread->previous_cpu = toThread->cpu = fromThread->cpu;
+	cpu_ent* cpu = fromThread->cpu;
+	toThread->previous_cpu = toThread->cpu = cpu;
 	fromThread->cpu = NULL;
+	cpu->running_thread = toThread;
 
 	arch_thread_set_current_thread(toThread);
 	arch_thread_context_switch(fromThread, toThread);
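
Both context_switch() hunks above make the same change. The new local matters because fromThread->cpu is cleared before the bookkeeping line runs; restated with explanatory comments (the comments are mine, not part of the commit):

cpu_ent* cpu = fromThread->cpu;
	// keep the pointer: fromThread->cpu is cleared two lines down
toThread->previous_cpu = toThread->cpu = cpu;
fromThread->cpu = NULL;
cpu->running_thread = toThread;
	// record the incoming thread; without the local, the cpu_ent would
	// no longer be reachable through fromThread at this point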
src/system/kernel/scheduler/scheduler_simple_smp.cpp

@@ -111,47 +111,71 @@ enqueue_in_run_queue(struct thread *thread)
 
 	thread->next_priority = thread->priority;
 
+	bool reschedule = false;
 	if (thread->priority != B_IDLE_PRIORITY) {
 		int32 currentCPU = smp_get_current_cpu();
-		if (sIdleCPUs != 0) {
-			if (thread->pinned_to_cpu > 0) {
-				// thread is pinned to a CPU -- notify it, if it is idle
-				int32 targetCPU = thread->previous_cpu->cpu_num;
-				if ((sIdleCPUs & (1 << targetCPU)) != 0) {
-					sIdleCPUs &= ~(1 << targetCPU);
-					smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE_IF_IDLE, 0, 0,
-						0, NULL, SMP_MSG_FLAG_ASYNC);
-				}
-			} else {
-				// Thread is not pinned to any CPU -- take it ourselves, if we
-				// are idle, otherwise notify the next idle CPU. In either case
-				// we clear the idle bit of the chosen CPU, so that the
-				// enqueue_in_run_queue() won't try to bother the
-				// same CPU again, if invoked before it handled the interrupt.
-				cpu_mask_t idleCPUs = CLEAR_BIT(sIdleCPUs, currentCPU);
-				if ((sIdleCPUs & (1 << currentCPU)) != 0) {
-					sIdleCPUs = idleCPUs;
-				} else {
-					int32 targetCPU = 0;
-					for (; targetCPU < B_MAX_CPU_COUNT; targetCPU++) {
-						cpu_mask_t mask = 1 << targetCPU;
-						if ((idleCPUs & mask) != 0) {
-							sIdleCPUs &= ~mask;
-							break;
-						}
-					}
-
-					smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE_IF_IDLE, 0, 0,
-						0, NULL, SMP_MSG_FLAG_ASYNC);
-				}
-			}
-		}
+
+		int32 targetCPU = currentCPU;
+		int32 targetPriority = B_IDLE_PRIORITY;
+
+		if (thread->pinned_to_cpu > 0) {
+			// the thread is pinned to a specific CPU
+			targetCPU = thread->previous_cpu->cpu_num;
+			targetPriority = gCPU[targetCPU].running_thread->priority;
+		} else if (sIdleCPUs != 0) {
+			// The thread is not pinned to any CPU and there are idle CPUs
+			// -- pick the first available one.
+			if (!CHECK_BIT(sIdleCPUs, currentCPU)) {
+				for (int32 i = 0; i < B_MAX_CPU_COUNT; i++) {
+					if (CHECK_BIT(sIdleCPUs, i)) {
+						targetCPU = i;
+						break;
+					}
+				}
+			}
+		} else {
+			// No idle CPUs -- choose the CPU running the lowest priority
+			// thread. Favor the current CPU as it doesn't require ICI to be
+			// notified.
+			targetPriority = gCPU[currentCPU].running_thread->priority;
+			int32 cpuCount = smp_get_num_cpus();
+			for (int32 i = 0; i < cpuCount; i++) {
+				struct thread* runningThread = gCPU[i].running_thread;
+				if (runningThread->priority < targetPriority) {
+					targetPriority = runningThread->priority;
+					targetCPU = i;
+				}
+			}
+		}
+
+		// Notify the (potential) target CPU, if appropriate.
+		cpu_mask_t idleBit = 1 << targetCPU;
+		if ((sIdleCPUs & idleBit) != 0) {
+			// The target CPU is idle. Clear its idle bit to avoid it from
+			// being picked by the next enqueue_in_run_queue(), before it
+			// has actually rescheduled.
+			sIdleCPUs ^= idleBit;
+			if (targetCPU == currentCPU) {
+				reschedule = true;
+			} else {
+				smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE_IF_IDLE, 0, 0,
+					0, NULL, SMP_MSG_FLAG_ASYNC);
+			}
+		} else if (thread->priority > targetPriority) {
+			// The target CPU is not idle, but runs a thread with a lower
+			// priority. Tell it to reschedule.
+			if (targetCPU == currentCPU) {
+				reschedule = true;
+			} else {
+				smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
+					SMP_MSG_FLAG_ASYNC);
+			}
+		}
 	}
 
 	// notify listeners
 	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
 		thread);
-	return false;
+
+	return reschedule;
 }
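
The idle-CPU bookkeeping above is a plain bitmask. Below is a small standalone demo of the operations involved; the CHECK_BIT/CLEAR_BIT definitions here are assumptions chosen to match how the scheduler code uses them (bit-index based), not copies of Haiku's headers.

#include <assert.h>
#include <stdint.h>

typedef uint32_t cpu_mask_t;

/* Assumed, index-based definitions matching the usage above. */
#define CHECK_BIT(mask, index)	(((mask) & (1 << (index))) != 0)
#define CLEAR_BIT(mask, index)	((mask) & ~(1 << (index)))

int
main(void)
{
	cpu_mask_t sIdleCPUs = 0;

	/* CPUs 1 and 3 go idle. */
	sIdleCPUs |= 1 << 1;
	sIdleCPUs |= 1 << 3;
	assert(CHECK_BIT(sIdleCPUs, 3));

	/* enqueue_in_run_queue() picks CPU 3 and clears its idle bit so a
	 * second enqueue won't target it before it has handled the ICI. */
	cpu_mask_t idleBit = 1 << 3;
	sIdleCPUs ^= idleBit;
	assert(!CHECK_BIT(sIdleCPUs, 3));
	assert(sIdleCPUs == CLEAR_BIT(sIdleCPUs | idleBit, 3));

	return 0;
}

Note that the XOR in "sIdleCPUs ^= idleBit" only acts as a clear because the enclosing branch has already established that the bit is set.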
src/system/kernel/scheduler/scheduler_simple_smp.cpp

@@ -208,8 +232,10 @@ context_switch(struct thread *fromThread, struct thread *toThread)
 	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
 		user_debug_thread_unscheduled(fromThread);
 
-	toThread->previous_cpu = toThread->cpu = fromThread->cpu;
+	cpu_ent* cpu = fromThread->cpu;
+	toThread->previous_cpu = toThread->cpu = cpu;
 	fromThread->cpu = NULL;
+	cpu->running_thread = toThread;
 
 	arch_thread_set_current_thread(toThread);
 	arch_thread_context_switch(fromThread, toThread);
src/system/kernel/thread.cpp

@@ -2141,6 +2141,8 @@ thread_init(kernel_args *args)
 			return B_NO_MEMORY;
 		}
 
+		gCPU[i].running_thread = thread;
+
 		thread->team = team_get_kernel_team();
 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
 		thread->state = B_THREAD_RUNNING;