From c08ed2db65267bea18a3ba424f98fffde9da6c25 Mon Sep 17 00:00:00 2001
From: Pawel Dziepak
Date: Fri, 20 Dec 2013 03:36:01 +0100
Subject: [PATCH] scheduler: Try to keep thread on the same logical CPU

Some SMT implementations (e.g. recent AMD microarchitectures) have
separate L1d cache for each SMT thread (which AMD decides to call
"cores"). This means that we shouldn't move threads to another logical
processor too often even if it belongs to the same core. We aren't very
strict about this as it would complicate load balancing, but we try to
reduce unnecessary migrations.
---
 src/system/kernel/scheduler/scheduler.cpp     | 11 +++---
 src/system/kernel/scheduler/scheduler_cpu.cpp |  1 -
 src/system/kernel/scheduler/scheduler_cpu.h   |  1 -
 .../kernel/scheduler/scheduler_thread.cpp     | 39 +++++++++++++++++++
 .../kernel/scheduler/scheduler_thread.h       | 26 -------------
 5 files changed, 44 insertions(+), 34 deletions(-)

diff --git a/src/system/kernel/scheduler/scheduler.cpp b/src/system/kernel/scheduler/scheduler.cpp
index b6d303bb65..dcb0583d77 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -175,9 +175,8 @@ enqueue(Thread* thread, bool newOne)
 		thread);
 
 	int32 heapPriority = CPUPriorityHeap::GetKey(targetCPU);
-	if (threadPriority > atomic_get(&targetCPU->fPriority)
-		&& (threadPriority > heapPriority
-			|| (threadPriority == heapPriority && rescheduleNeeded))) {
+	if (threadPriority > heapPriority
+		|| (threadPriority == heapPriority && rescheduleNeeded)) {
 
 		if (targetCPU->fCPUNumber == smp_get_current_cpu())
 			gCPU[targetCPU->fCPUNumber].invoke_scheduler = true;
@@ -237,7 +236,6 @@ scheduler_set_thread_priority(Thread *thread, int32 priority)
 		CPUEntry* cpu = &gCPUEntries[thread->cpu->cpu_num];
 
 		SpinLocker coreLocker(threadData->GetCore()->fCPULock);
-		cpu->fPriority = priority;
 		cpu->UpdatePriority(priority);
 	}
 
@@ -475,7 +473,9 @@ reschedule(int32 nextState)
 	}
 
 	Thread* nextThread = nextThreadData->GetThread();
-	atomic_set(&cpu->fPriority, nextThreadData->GetEffectivePriority());
+	SpinLocker cpuLocker(core->fCPULock);
+	cpu->UpdatePriority(nextThreadData->GetEffectivePriority());
+	cpuLocker.Unlock();
 
 	if (nextThread != oldThread) {
 		if (enqueueOldThread) {
@@ -650,7 +650,6 @@ scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
 	if (enabled)
 		core->fCPUCount++;
 	else {
-		cpu->fPriority = B_IDLE_PRIORITY;
 		cpu->UpdatePriority(B_IDLE_PRIORITY);
 		core->fCPUCount--;
 	}
diff --git a/src/system/kernel/scheduler/scheduler_cpu.cpp b/src/system/kernel/scheduler/scheduler_cpu.cpp
index bd812677c2..d55d03d561 100644
--- a/src/system/kernel/scheduler/scheduler_cpu.cpp
+++ b/src/system/kernel/scheduler/scheduler_cpu.cpp
@@ -42,7 +42,6 @@ ThreadRunQueue::Dump() const
 
 CPUEntry::CPUEntry()
 	:
-	fPriority(B_IDLE_PRIORITY),
 	fLoad(0),
 	fMeasureActiveTime(0),
 	fMeasureTime(0)
diff --git a/src/system/kernel/scheduler/scheduler_cpu.h b/src/system/kernel/scheduler/scheduler_cpu.h
index b2f5727cff..4d02a646dc 100644
--- a/src/system/kernel/scheduler/scheduler_cpu.h
+++ b/src/system/kernel/scheduler/scheduler_cpu.h
@@ -54,7 +54,6 @@ struct CPUEntry : public MinMaxHeapLinkImpl {
 
 			rw_spinlock		fSchedulerModeLock;
 
-			int32			fPriority;
 			ThreadRunQueue	fRunQueue;
 			int32			fLoad;
 
diff --git a/src/system/kernel/scheduler/scheduler_thread.cpp b/src/system/kernel/scheduler/scheduler_thread.cpp
index cf53fcda9f..e1f280e1c7 100644
--- a/src/system/kernel/scheduler/scheduler_thread.cpp
+++ b/src/system/kernel/scheduler/scheduler_thread.cpp
@@ -121,6 +121,45 @@ ThreadData::ComputeQuantum()
 }
 
 
+inline CoreEntry*
+ThreadData::_ChooseCore() const
+{
+	ASSERT(!gSingleCore);
+	return gCurrentMode->choose_core(this);
+}
+
+
+inline CPUEntry*
+ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
+{
+	int32 threadPriority = GetEffectivePriority();
+
+	if (fThread->previous_cpu != NULL) {
+		CPUEntry* previousCPU = &gCPUEntries[fThread->previous_cpu->cpu_num];
+		if (previousCPU->fCore == core) {
+			SpinLocker cpuLocker(core->fCPULock);
+			if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
+				previousCPU->UpdatePriority(threadPriority);
+				rescheduleNeeded = true;
+				return previousCPU;
+			}
+		}
+	}
+
+	SpinLocker cpuLocker(core->fCPULock);
+	CPUEntry* cpu = core->fCPUHeap.PeekMinimum();
+	ASSERT(cpu != NULL);
+
+	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
+		cpu->UpdatePriority(threadPriority);
+		rescheduleNeeded = true;
+	} else
+		rescheduleNeeded = false;
+
+	return cpu;
+}
+
+
 inline bigtime_t
 ThreadData::_GetBaseQuantum() const
 {
diff --git a/src/system/kernel/scheduler/scheduler_thread.h b/src/system/kernel/scheduler/scheduler_thread.h
index 9c98b7e7d0..5f4ab2f12f 100644
--- a/src/system/kernel/scheduler/scheduler_thread.h
+++ b/src/system/kernel/scheduler/scheduler_thread.h
@@ -335,32 +335,6 @@ ThreadData::_GetMinimalPriority() const
 }
 
 
-inline CoreEntry*
-ThreadData::_ChooseCore() const
-{
-	ASSERT(!gSingleCore);
-	return gCurrentMode->choose_core(this);
-}
-
-
-inline CPUEntry*
-ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
-{
-	SpinLocker cpuLocker(core->fCPULock);
-	CPUEntry* cpu = core->fCPUHeap.PeekMinimum();
-	ASSERT(cpu != NULL);
-
-	int32 threadPriority = GetEffectivePriority();
-	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
-		cpu->UpdatePriority(threadPriority);
-		rescheduleNeeded = true;
-	} else
-		rescheduleNeeded = false;
-
-	return cpu;
-}
-
-
 }	// namespace Scheduler