scheduler: Relax penalty cancellation requirements

Priority penalties were made more strict in order to prevent a situation
where two or more high priority threads use up all available CPU time
in such a manner that they do not receive a penalty but starve low
priority threads.

However, a significant change to thread priorities has been made since, and
now the priority of all non-real-time threads varies in a range from 1 to
static priority minus penalty. This means that the scheduler is able to
prevent thread starvation without any complex penalty policies.
This commit is contained in:
Pawel Dziepak 2014-01-31 02:37:10 +01:00
parent 6155ab7b25
commit f116370edd
5 changed files with 5 additions and 56 deletions

View File

@ -182,11 +182,11 @@ scheduler_set_thread_priority(Thread *thread, int32 priority)
TRACE("changing thread %ld priority to %ld (old: %ld, effective: %ld)\n",
thread->id, priority, oldPriority, threadData->GetEffectivePriority());
thread->priority = priority;
threadData->CancelPenalty();
if (priority == thread->priority)
return thread->priority;
thread->priority = priority;
if (thread->state != B_THREAD_READY) {
if (thread->state == B_THREAD_RUNNING) {

View File

@ -361,8 +361,6 @@ CoreEntry::CoreEntry()
:
fCPUCount(0),
fIdleCPUCount(0),
fStarvationCounter(0),
fStarvationCounterIdle(0),
fThreadCount(0),
fActiveTime(0),
fLoad(0),
@ -411,15 +409,11 @@ CoreEntry::Remove(ThreadData* thread)
{
SCHEDULER_ENTER_FUNCTION();
ASSERT(!thread->IsIdle());
ASSERT(thread->IsEnqueued());
thread->SetDequeued();
ASSERT(!thread_is_idle_thread(thread->GetThread()));
if (thread->GetEffectivePriority() == B_LOWEST_ACTIVE_PRIORITY
|| thread->IsCPUBound()) {
atomic_add(&fStarvationCounter, 1);
}
fRunQueue.Remove(thread);
atomic_add(&fThreadCount, -1);
}

View File

@ -154,9 +154,6 @@ public:
inline uint32 RemoveLoad(int32 load, bool force);
inline void ChangeLoad(int32 delta);
inline int32 StarvationCounter() const;
inline int32 StarvationCounterIdle() const;
inline void CPUGoesIdle(CPUEntry* cpu);
inline void CPUWakesUp(CPUEntry* cpu);
@ -181,9 +178,6 @@ private:
CPUPriorityHeap fCPUHeap;
spinlock fCPULock;
int32 fStarvationCounter;
int32 fStarvationCounterIdle;
int32 fThreadCount;
ThreadRunQueue fRunQueue;
spinlock fQueueLock;
@ -454,22 +448,6 @@ CoreEntry::ChangeLoad(int32 delta)
_UpdateLoad();
}
inline int32
CoreEntry::StarvationCounter() const
{
SCHEDULER_ENTER_FUNCTION();
return fStarvationCounter;
}
inline int32
CoreEntry::StarvationCounterIdle() const
{
SCHEDULER_ENTER_FUNCTION();
return fStarvationCounterIdle;
}
/* PackageEntry::CoreGoesIdle and PackageEntry::CoreWakesUp have to be defined
before CoreEntry::CPUGoesIdle and CoreEntry::CPUWakesUp. If they weren't
@ -521,9 +499,6 @@ PackageEntry::CoreWakesUp(CoreEntry* core)
inline void
CoreEntry::CPUGoesIdle(CPUEntry* /* cpu */)
{
atomic_add(&fStarvationCounter, 1);
atomic_add(&fStarvationCounterIdle, 1);
if (gSingleCore)
return;

View File

@ -22,7 +22,6 @@ ThreadData::_InitBase()
fAdditionalPenalty = 0;
fEffectivePriority = GetPriority();
fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
fCPUBound = false;
fTimeUsed = 0;
fStolenTime = 0;
@ -35,8 +34,6 @@ ThreadData::_InitBase()
fWentSleep = 0;
fWentSleepActive = 0;
fWentSleepCount = 0;
fWentSleepCountIdle = 0;
fEnqueued = false;
fReady = false;
@ -144,7 +141,6 @@ ThreadData::Dump() const
kprintf("\tneeded_load:\t\t%" B_PRId32 "%%\n", fNeededLoad / 10);
kprintf("\twent_sleep:\t\t%" B_PRId64 "\n", fWentSleep);
kprintf("\twent_sleep_active:\t%" B_PRId64 "\n", fWentSleepActive);
kprintf("\twent_sleep_count:\t%" B_PRId32 "\n", fWentSleepCount);
kprintf("\tcore:\t\t\t%" B_PRId32 "\n",
fCore != NULL ? fCore->ID() : -1);
if (fCore != NULL && HasCacheExpired())

View File

@ -48,8 +48,6 @@ public:
inline int32 GetEffectivePriority() const;
inline bool IsCPUBound() const { return fCPUBound; }
inline void StartCPUTime();
inline void StopCPUTime();
@ -109,15 +107,12 @@ private:
bigtime_t fWentSleep;
bigtime_t fWentSleepActive;
int32 fWentSleepCount;
int32 fWentSleepCountIdle;
bool fEnqueued;
bool fReady;
Thread* fThread;
bool fCPUBound;
int32 fPriorityPenalty;
int32 fAdditionalPenalty;
@ -211,10 +206,8 @@ ThreadData::_IncreasePenalty()
int32 oldPenalty = fPriorityPenalty++;
const int kMinimalPriority = _GetMinimalPriority();
if (GetPriority() - oldPenalty <= kMinimalPriority) {
if (GetPriority() - oldPenalty <= kMinimalPriority)
fPriorityPenalty = oldPenalty;
fCPUBound = true;
}
_ComputeEffectivePriority();
}
@ -256,7 +249,6 @@ ThreadData::CancelPenalty()
int32 oldPenalty = fPriorityPenalty;
fPriorityPenalty = 0;
fCPUBound = false;
if (oldPenalty != 0) {
TRACE("cancelling thread %ld penalty\n", fThread->id);
@ -272,13 +264,7 @@ ThreadData::ShouldCancelPenalty() const
if (fCore == NULL)
return false;
if (GetEffectivePriority() != B_LOWEST_ACTIVE_PRIORITY && !IsCPUBound()) {
if (fCore->StarvationCounter() != fWentSleepCount)
return true;
}
return fCore->StarvationCounterIdle() != fWentSleepCountIdle;
return system_time() - fWentSleep > gCurrentMode->base_quantum / 2;
}
@ -374,8 +360,6 @@ ThreadData::GoesAway()
fWentSleep = system_time();
fWentSleepActive = fCore->GetActiveTime();
fWentSleepCount = fCore->StarvationCounter();
fWentSleepCountIdle = fCore->StarvationCounterIdle();
if (gTrackCoreLoad)
fLoadMeasurementEpoch = fCore->RemoveLoad(fNeededLoad, false);