From 5c7993bfab257986a80fe20104cea2d6b2e46089 Mon Sep 17 00:00:00 2001
From: Rene Gollent
Date: Sat, 12 Dec 2009 00:01:33 +0000
Subject: [PATCH] Small cleanup: As the cpu_ent struct now tracks the running
 thread directly, the affine scheduler can use that instead of keeping track
 internally.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34638 a95241bf-73f2-0310-859d-f6bbb57e9c96
---
 src/system/kernel/Jamfile                        | 2 +-
 src/system/kernel/scheduler/scheduler_affine.cpp | 8 ++------
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/src/system/kernel/Jamfile b/src/system/kernel/Jamfile
index 622286c868..b9a5164b41 100644
--- a/src/system/kernel/Jamfile
+++ b/src/system/kernel/Jamfile
@@ -55,7 +55,7 @@ KernelMergeObject kernel_core.o :
 
 	# scheduler
 	scheduler.cpp
-#	scheduler_affine.cpp
+	scheduler_affine.cpp
 	scheduler_simple.cpp
 	scheduler_simple_smp.cpp
 	scheduler_tracing.cpp
diff --git a/src/system/kernel/scheduler/scheduler_affine.cpp b/src/system/kernel/scheduler/scheduler_affine.cpp
index afe0685a9c..2a15db853d 100644
--- a/src/system/kernel/scheduler/scheduler_affine.cpp
+++ b/src/system/kernel/scheduler/scheduler_affine.cpp
@@ -42,7 +42,6 @@
 // TODO: consolidate this such that HT/SMT entities on the same physical core
 // share a queue, once we have the necessary API for retrieving the topology
 // information
-static struct thread* sRunningThreads[B_MAX_CPU_COUNT];
 static struct thread* sRunQueue[B_MAX_CPU_COUNT];
 static int32 sRunQueueSize[B_MAX_CPU_COUNT];
 static struct thread* sIdleThreads;
@@ -192,8 +191,7 @@ affine_enqueue_in_run_queue(struct thread *thread)
 	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
 		thread);
 
-	if (sRunningThreads[targetCPU] != NULL
-		&& thread->priority > sRunningThreads[targetCPU]->priority) {
+	if (thread->priority > gCPU[targetCPU].running_thread->priority) {
 		if (targetCPU == smp_get_current_cpu()) {
 			return true;
 		} else {
@@ -503,10 +501,8 @@ affine_reschedule(void)
 			add_timer(quantumTimer, &reschedule_event, quantum,
 				B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
 
-		if (nextThread != oldThread) {
-			sRunningThreads[currentCPU] = nextThread;
+		if (nextThread != oldThread)
 			context_switch(oldThread, nextThread);
-		}
 	}
 }
 
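
Note (not part of the patch): the cleanup works because cpu_ent now exposes the
running thread for each CPU, so the affine scheduler can read
gCPU[cpu].running_thread instead of mirroring that state in its own
sRunningThreads[] array. The standalone C++ sketch below only illustrates that
pattern with simplified stand-in types; cpu_ent, thread, and gCPU here are
loose models of the kernel structures, and should_preempt() is a hypothetical
helper rather than the real kernel API.

// Illustration of the pattern the patch adopts: the per-CPU table (gCPU) is
// the single source of truth for the running thread, so the scheduler keeps
// no private copy that has to be updated on every context switch.
// All types and helpers are simplified stand-ins, not the Haiku kernel API.
#include <cstdint>
#include <cstdio>

const int kMaxCPUCount = 8;			// stand-in for B_MAX_CPU_COUNT

struct thread {
	int32_t priority;
};

struct cpu_ent {
	thread* running_thread;			// field the real cpu_ent now tracks
};

static cpu_ent gCPU[kMaxCPUCount];	// shared per-CPU table

// Returns true when 'candidate' should preempt what is running on targetCPU.
// Like the patched check in affine_enqueue_in_run_queue(), it reads the
// running thread from the shared per-CPU table directly.
static bool
should_preempt(int targetCPU, const thread* candidate)
{
	const thread* running = gCPU[targetCPU].running_thread;
	return running != nullptr && candidate->priority > running->priority;
}

int
main()
{
	thread idle = { 0 };
	thread worker = { 10 };

	gCPU[0].running_thread = &idle;
	printf("preempt CPU 0: %s\n", should_preempt(0, &worker) ? "yes" : "no");
	return 0;
}

Because the preemption decision consults the shared per-CPU table, there is no
second copy of the state to keep in sync, which is why affine_reschedule() in
the patch no longer needs to record the new thread before context_switch().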