scheduler: Update load of idle cores

In order to keep the scheduler tickless, core load is computed and updated
only during various scheduler events (e.g. thread enqueue, reschedule). The
problem this creates is that when a core becomes idle its load may remain
outdated for an extended period of time, resulting in suboptimal thread
migration decisions.

The solution is to set a timer each time an idle thread is scheduled which,
after kLoadMeasureInterval, fires and forces a load update.
Author: Pawel Dziepak
Date:   2014-02-03 23:14:37 +01:00
parent 771ae065db
commit 230d1fcfea

3 changed files with 61 additions and 36 deletions
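In outline, the patch reuses the per-CPU quantum timer slot: on a busy CPU it
keeps arming the preemption event as before, while on an idle CPU it now arms
a one-shot load-update event instead. The following condensed sketch shows the
pattern; update_load_event and start_quantum_timer are simplified stand-ins
for the CPUEntry methods the commit actually adds, built only from calls that
appear in the diffs below:

	static int32
	update_load_event(timer* /* unused */)
	{
		// ChangeLoad(0) leaves the load value untouched but still reaches
		// CoreEntry::_UpdateLoad(), so the stale measurement is refreshed.
		CoreEntry::GetCore(smp_get_current_cpu())->ChangeLoad(0);
		return B_HANDLED_INTERRUPT;
	}

	static void
	start_quantum_timer(ThreadData* thread, cpu_ent* cpu)
	{
		if (!thread->IsIdle()) {
			// Busy CPU: one-shot preemption timer, as before.
			add_timer(&cpu->quantum_timer, &reschedule_event,
				thread->GetQuantumLeft(), B_ONE_SHOT_RELATIVE_TIMER);
		} else {
			// Idle CPU: the same timer slot forces a load update after
			// kLoadMeasureInterval instead of lying dormant.
			add_timer(&cpu->quantum_timer, &update_load_event,
				kLoadMeasureInterval, B_ONE_SHOT_RELATIVE_TIMER);
		}
	}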

src/system/kernel/scheduler/scheduler.cpp

@@ -218,28 +218,12 @@ scheduler_set_thread_priority(Thread *thread, int32 priority)
 }
 
 
-static inline void
-reschedule_needed()
-{
-	// This function is called as a result of either the timer event set by the
-	// scheduler or an incoming ICI. Make sure the reschedule() is invoked.
-	get_cpu_struct()->invoke_scheduler = true;
-}
-
-
 void
 scheduler_reschedule_ici()
 {
-	reschedule_needed();
-}
-
-
-static int32
-reschedule_event(timer* /* unused */)
-{
-	reschedule_needed();
-	get_cpu_struct()->preempted = true;
-	return B_HANDLED_INTERRUPT;
+	// This function is called as a result of an incoming ICI.
+	// Make sure the reschedule() is invoked.
+	get_cpu_struct()->invoke_scheduler = true;
 }
 
 
@@ -444,18 +428,12 @@ reschedule(int32 nextState)
 	cpu->TrackActivity(oldThreadData, nextThreadData);
 
 	if (nextThread != oldThread || oldThread->cpu->preempted) {
-		timer* quantumTimer = &oldThread->cpu->quantum_timer;
-		if (!oldThread->cpu->preempted)
-			cancel_timer(quantumTimer);
+		cpu->StartQuantumTimer(nextThreadData, oldThread->cpu->preempted);
 
 		oldThread->cpu->preempted = false;
-		if (!nextThreadData->IsIdle()) {
-			bigtime_t quantum = nextThreadData->GetQuantumLeft();
-			add_timer(quantumTimer, &reschedule_event, quantum,
-				B_ONE_SHOT_RELATIVE_TIMER);
-
+		if (!nextThreadData->IsIdle())
 			nextThreadData->Continues();
-		} else
+		else
 			gCurrentMode->rebalance_irqs(true);
 		nextThreadData->StartQuantum();
 

src/system/kernel/scheduler/scheduler_cpu.cpp

@@ -81,7 +81,8 @@ CPUEntry::CPUEntry()
 	:
 	fLoad(0),
 	fMeasureActiveTime(0),
-	fMeasureTime(0)
+	fMeasureTime(0),
+	fUpdateLoadEvent(false)
 {
 	B_INITIALIZE_RW_SPINLOCK(&fSchedulerModeLock);
 	B_INITIALIZE_SPINLOCK(&fQueueLock);
@@ -293,6 +294,27 @@ CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
 }
 
 
+void
+CPUEntry::StartQuantumTimer(ThreadData* thread, bool wasPreempted)
+{
+	cpu_ent* cpu = &gCPU[ID()];
+
+	if (!wasPreempted || fUpdateLoadEvent)
+		cancel_timer(&cpu->quantum_timer);
+	fUpdateLoadEvent = false;
+
+	if (!thread->IsIdle()) {
+		bigtime_t quantum = thread->GetQuantumLeft();
+		add_timer(&cpu->quantum_timer, &CPUEntry::_RescheduleEvent, quantum,
+			B_ONE_SHOT_RELATIVE_TIMER);
+	} else if (gTrackCoreLoad) {
+		add_timer(&cpu->quantum_timer, &CPUEntry::_UpdateLoadEvent,
+			kLoadMeasureInterval, B_ONE_SHOT_RELATIVE_TIMER);
+		fUpdateLoadEvent = true;
+	}
+}
+
+
 void
 CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
 {
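One subtlety in StartQuantumTimer() above, read from the code rather than
stated by the commit: cpu->quantum_timer can now carry either of two events,
so fUpdateLoadEvent records which one is armed. Annotated, the cancellation
amounts to:

	// Cancel only if the timer may still be pending: either the old
	// thread was not preempted (its reschedule event has not fired yet),
	// or the armed event is a load update, which never marks the CPU as
	// preempted and so survives a preemption-less thread switch.
	if (!wasPreempted || fUpdateLoadEvent)
		cancel_timer(&cpu->quantum_timer);
	fUpdateLoadEvent = false;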
@@ -323,6 +345,24 @@ CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
 }
 
 
+/* static */ int32
+CPUEntry::_RescheduleEvent(timer* /* unused */)
+{
+	get_cpu_struct()->invoke_scheduler = true;
+	get_cpu_struct()->preempted = true;
+	return B_HANDLED_INTERRUPT;
+}
+
+
+/* static */ int32
+CPUEntry::_UpdateLoadEvent(timer* /* unused */)
+{
+	CoreEntry::GetCore(smp_get_current_cpu())->ChangeLoad(0);
+	CPUEntry::GetCPU(smp_get_current_cpu())->fUpdateLoadEvent = false;
+	return B_HANDLED_INTERRUPT;
+}
+
+
 CPUPriorityHeap::CPUPriorityHeap(int32 cpuCount)
 	:
 	Heap<CPUEntry, int32>(cpuCount)
@@ -497,10 +537,8 @@ CoreEntry::_UpdateLoad()
 	bigtime_t now = system_time();
 	if (now < kLoadMeasureInterval + fLastLoadUpdate)
 		return;
-	if (!try_acquire_write_spinlock(&gCoreHeapsLock))
-		return;
 
-	WriteSpinLocker coreLocker(gCoreHeapsLock, true);
 	WriteSpinLocker locker(fLoadLock);
+	WriteSpinLocker coreLocker(gCoreHeapsLock);
 
 	int32 newKey = GetLoad();
 	int32 oldKey = CoreLoadHeap::GetKey(this);

src/system/kernel/scheduler/scheduler_cpu.h

@@ -82,12 +82,18 @@
 			void		TrackActivity(ThreadData* oldThreadData,
 							ThreadData* nextThreadData);
 
+			void		StartQuantumTimer(ThreadData* thread,
+							bool wasPreempted);
+
 	static inline	CPUEntry*	GetCPU(int32 cpu);
 
 private:
 			void		_RequestPerformanceLevel(
 							ThreadData* threadData);
 
+	static		int32		_RescheduleEvent(timer* /* unused */);
+	static		int32		_UpdateLoadEvent(timer* /* unused */);
+
 			int32		fCPUNumber;
 			CoreEntry*	fCore;
@@ -101,6 +107,8 @@ private:
 			bigtime_t	fMeasureActiveTime;
 			bigtime_t	fMeasureTime;
 
+			bool		fUpdateLoadEvent;
+
 	friend class DebugDumper;
 } CACHE_LINE_ALIGN;
@@ -440,10 +448,11 @@ CoreEntry::ChangeLoad(int32 delta)
 	ASSERT(gTrackCoreLoad);
 	ASSERT(delta >= -kMaxLoad && delta <= kMaxLoad);
 
-	ReadSpinLocker locker(fLoadLock);
-	atomic_add(&fCurrentLoad, delta);
-	atomic_add(&fLoad, delta);
-	locker.Unlock();
+	if (delta != 0) {
+		ReadSpinLocker locker(fLoadLock);
+		atomic_add(&fCurrentLoad, delta);
+		atomic_add(&fLoad, delta);
+		locker.Unlock();
+	}
 
 	_UpdateLoad();
 }
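A closing note on the last two hunks, inferred from the code rather than the
commit message: _UpdateLoadEvent() calls ChangeLoad(0) purely for its side
effect. With the new delta != 0 guard, a zero delta skips the lock and the
atomic updates entirely and falls through to _UpdateLoad(), which, now that
the try_acquire_write_spinlock() early return is gone, no longer silently
skips the refresh when gCoreHeapsLock happens to be contended:

	// Effective path taken by the idle-core timer hook:
	CoreEntry::GetCore(smp_get_current_cpu())->ChangeLoad(0);
	// delta == 0: no locking, no atomic_add; _UpdateLoad() runs and
	// brings the core's load heap key up to date.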