scheduler: Try to keep thread on the same logical CPU

Some SMT implementations (e.g. recent AMD microarchitectures) have a
separate L1d cache for each SMT thread (which AMD decides to call a
"core"). This means that we shouldn't move a thread to another logical
processor too often, even if that processor belongs to the same core.
We aren't very strict about this, since that would complicate load
balancing, but we do try to reduce unnecessary migrations.
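
In essence, the new ThreadData::_ChooseCPU() first considers the logical
CPU the thread last ran on and only falls back to the least busy logical
CPU of the chosen core. The following is a minimal sketch of that
heuristic; Cpu, Core and pick_lowest_priority_cpu() are simplified
stand-ins for illustration only (the actual code uses CPUEntry, CoreEntry
and CPUPriorityHeap, and serializes everything with the core's fCPULock):

#include <cstdint>

// Illustrative stand-in types, not Haiku's CPUEntry/CoreEntry.
struct Core;

struct Cpu {
	Core*	core;		// core this logical CPU belongs to
	int32_t	priority;	// priority of the thread currently running here
};

struct Core {
	Cpu*	cpus;		// logical CPUs belonging to this core
	int		cpuCount;
};

// Stand-in for CPUPriorityHeap::PeekMinimum(): returns the logical CPU of
// the core that currently runs the lowest priority thread.
static Cpu*
pick_lowest_priority_cpu(Core* core)
{
	Cpu* best = &core->cpus[0];
	for (int i = 1; i < core->cpuCount; i++) {
		if (core->cpus[i].priority < best->priority)
			best = &core->cpus[i];
	}
	return best;
}

// Choose a logical CPU on "core" for a thread with priority
// "threadPriority" that last ran on "previousCpu" (may be nullptr).
Cpu*
choose_cpu(Core* core, Cpu* previousCpu, int32_t threadPriority,
	bool& rescheduleNeeded)
{
	// Prefer the previous logical CPU: its L1d may still hold the
	// thread's working set. The condition is the same one the fallback
	// path uses, so cache affinity never displaces a thread of higher
	// or equal priority.
	if (previousCpu != nullptr && previousCpu->core == core
		&& previousCpu->priority < threadPriority) {
		previousCpu->priority = threadPriority;
		rescheduleNeeded = true;
		return previousCpu;
	}

	// Otherwise pick the logical CPU running the lowest priority thread.
	Cpu* cpu = pick_lowest_priority_cpu(core);
	rescheduleNeeded = cpu->priority < threadPriority;
	if (rescheduleNeeded)
		cpu->priority = threadPriority;
	return cpu;
}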
Pawel Dziepak 2013-12-20 03:36:01 +01:00
parent ad6b9a1df8
commit c08ed2db65
5 changed files with 44 additions and 34 deletions
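
The other half of the change removes the separate CPUEntry::fPriority
field: a CPU's priority now lives solely as its key in the per-core
priority heap and is changed through UpdatePriority() while holding
fCPULock, instead of being mirrored with atomic_get()/atomic_set(). A
minimal sketch of that single-source-of-truth idea, using a plain binary
heap as a stand-in (PriorityHeap and its methods below are illustrative,
not Haiku's MinMaxHeap API):

#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

// Keeps (cpu, priority) pairs; the CPU running the lowest priority thread
// is always at the front. Stand-in for the per-core CPUPriorityHeap.
struct PriorityHeap {
	std::vector<std::pair<int, int>> entries;	// (cpu index, priority)

	// Inverted comparison so the std heap algorithms, which build a
	// max-heap, keep the minimum priority at the front.
	static bool Less(const std::pair<int, int>& a,
		const std::pair<int, int>& b)
	{
		return a.second > b.second;
	}

	void Insert(int cpu, int priority)
	{
		entries.push_back({cpu, priority});
		std::push_heap(entries.begin(), entries.end(), Less);
	}

	int PeekMinimum() const
	{
		assert(!entries.empty());
		return entries.front().first;
	}

	// The only place a CPU's priority is stored: update the key and
	// restore the heap property. CPUEntry::UpdatePriority() plays this
	// role in the scheduler, so there is no extra field to keep in sync.
	void ModifyKey(int cpu, int priority)
	{
		for (auto& entry : entries) {
			if (entry.first == cpu) {
				entry.second = priority;
				break;
			}
		}
		std::make_heap(entries.begin(), entries.end(), Less);
	}
};

Keeping the priority only in the heap removes the risk of the mirrored
field and the heap key disagreeing, which is what the old
atomic_get()/atomic_set() pair in enqueue() had to guard against.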

View File

@@ -175,9 +175,8 @@ enqueue(Thread* thread, bool newOne)
 		thread);
 	int32 heapPriority = CPUPriorityHeap::GetKey(targetCPU);
-	if (threadPriority > atomic_get(&targetCPU->fPriority)
-		&& (threadPriority > heapPriority
-			|| (threadPriority == heapPriority && rescheduleNeeded))) {
+	if (threadPriority > heapPriority
+		|| (threadPriority == heapPriority && rescheduleNeeded)) {
 		if (targetCPU->fCPUNumber == smp_get_current_cpu())
 			gCPU[targetCPU->fCPUNumber].invoke_scheduler = true;
@@ -237,7 +236,6 @@ scheduler_set_thread_priority(Thread *thread, int32 priority)
 		CPUEntry* cpu = &gCPUEntries[thread->cpu->cpu_num];
 		SpinLocker coreLocker(threadData->GetCore()->fCPULock);
-		cpu->fPriority = priority;
 		cpu->UpdatePriority(priority);
 	}
@@ -475,7 +473,9 @@ reschedule(int32 nextState)
 	}
 	Thread* nextThread = nextThreadData->GetThread();
-	atomic_set(&cpu->fPriority, nextThreadData->GetEffectivePriority());
+	SpinLocker cpuLocker(core->fCPULock);
+	cpu->UpdatePriority(nextThreadData->GetEffectivePriority());
+	cpuLocker.Unlock();
 	if (nextThread != oldThread) {
 		if (enqueueOldThread) {
@@ -650,7 +650,6 @@ scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
 	if (enabled)
 		core->fCPUCount++;
 	else {
-		cpu->fPriority = B_IDLE_PRIORITY;
 		cpu->UpdatePriority(B_IDLE_PRIORITY);
 		core->fCPUCount--;
 	}

View File

@@ -42,7 +42,6 @@ ThreadRunQueue::Dump() const
 CPUEntry::CPUEntry()
 	:
-	fPriority(B_IDLE_PRIORITY),
 	fLoad(0),
 	fMeasureActiveTime(0),
 	fMeasureTime(0)

View File

@@ -54,7 +54,6 @@ struct CPUEntry : public MinMaxHeapLinkImpl<CPUEntry, int32> {
 			rw_spinlock		fSchedulerModeLock;
-			int32			fPriority;
 			ThreadRunQueue	fRunQueue;
 			int32			fLoad;

View File

@@ -121,6 +121,45 @@ ThreadData::ComputeQuantum()
 }
+inline CoreEntry*
+ThreadData::_ChooseCore() const
+{
+	ASSERT(!gSingleCore);
+	return gCurrentMode->choose_core(this);
+}
+inline CPUEntry*
+ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
+{
+	int32 threadPriority = GetEffectivePriority();
+	if (fThread->previous_cpu != NULL) {
+		CPUEntry* previousCPU = &gCPUEntries[fThread->previous_cpu->cpu_num];
+		if (previousCPU->fCore == core) {
+			SpinLocker cpuLocker(core->fCPULock);
+			if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
+				previousCPU->UpdatePriority(threadPriority);
+				rescheduleNeeded = true;
+				return previousCPU;
+			}
+		}
+	}
+	SpinLocker cpuLocker(core->fCPULock);
+	CPUEntry* cpu = core->fCPUHeap.PeekMinimum();
+	ASSERT(cpu != NULL);
+	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
+		cpu->UpdatePriority(threadPriority);
+		rescheduleNeeded = true;
+	} else
+		rescheduleNeeded = false;
+	return cpu;
+}
 inline bigtime_t
 ThreadData::_GetBaseQuantum() const
 {

View File

@@ -335,32 +335,6 @@ ThreadData::_GetMinimalPriority() const
 }
-inline CoreEntry*
-ThreadData::_ChooseCore() const
-{
-	ASSERT(!gSingleCore);
-	return gCurrentMode->choose_core(this);
-}
-inline CPUEntry*
-ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
-{
-	SpinLocker cpuLocker(core->fCPULock);
-	CPUEntry* cpu = core->fCPUHeap.PeekMinimum();
-	ASSERT(cpu != NULL);
-	int32 threadPriority = GetEffectivePriority();
-	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
-		cpu->UpdatePriority(threadPriority);
-		rescheduleNeeded = true;
-	} else
-		rescheduleNeeded = false;
-	return cpu;
-}
 }	// namespace Scheduler