scheduler: Work around GCC2 limitations in function inlining

GCC2 won't inline a function if it is used before its definition.
This commit is contained in:
Pawel Dziepak 2014-01-05 05:55:13 +01:00
parent 9e99bf6085
commit cb66faef24
7 changed files with 341 additions and 293 deletions

View File

@ -41,7 +41,7 @@ typedef void (*smp_call_func)(addr_t data1, int32 currentCPU, addr_t data2, addr
class CPUSet {
public:
CPUSet() { ClearAll(); }
inline CPUSet();
inline void ClearAll();
inline void SetAll();
@ -92,6 +92,13 @@ int smp_intercpu_int_handler(int32 cpu);
#endif
// Out-of-line (but inline-declared) constructor: zeroes the CPU bitmap so a
// fresh set contains no CPUs. Defined after the class so GCC2 still inlines
// it at later call sites.
inline
CPUSet::CPUSet()
{
memset(fBitmap, 0, sizeof(fBitmap));
}
inline void
CPUSet::ClearAll()
{

View File

@ -31,6 +31,7 @@
#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"
@ -40,93 +41,6 @@
namespace Scheduler {
// AutoLocker policy that marks the current CPU as executing inside the
// scheduler. The int* "lockable" is a dummy — the real state lives in the
// current CPU's CPUEntry.
class SchedulerModeLocking {
public:
bool Lock(int* /* lockable */)
{
CPUEntry::GetCPU(smp_get_current_cpu())->EnterScheduler();
return true;
}
void Unlock(int* /* lockable */)
{
CPUEntry::GetCPU(smp_get_current_cpu())->ExitScheduler();
}
};
// RAII locker built on SchedulerModeLocking. Passes NULL as the lockable
// since the policy ignores it; by default acquires on construction.
class SchedulerModeLocker :
public AutoLocker<int, SchedulerModeLocking> {
public:
SchedulerModeLocker(bool alreadyLocked = false, bool lockIfNotLocked = true)
:
AutoLocker<int, SchedulerModeLocking>(NULL, alreadyLocked,
lockIfNotLocked)
{
}
};
// Like SchedulerModeLocking, but additionally disables interrupts. The
// previous interrupt state is saved in *lockable and restored on Unlock().
class InterruptsSchedulerModeLocking {
public:
bool Lock(int* lockable)
{
*lockable = disable_interrupts();
CPUEntry::GetCPU(smp_get_current_cpu())->EnterScheduler();
return true;
}
void Unlock(int* lockable)
{
// Leave the scheduler first, then restore the saved interrupt state.
CPUEntry::GetCPU(smp_get_current_cpu())->ExitScheduler();
restore_interrupts(*lockable);
}
};
// RAII locker for InterruptsSchedulerModeLocking; fState stores the
// interrupt state saved by Lock() so Unlock() can restore it.
class InterruptsSchedulerModeLocker :
public AutoLocker<int, InterruptsSchedulerModeLocking> {
public:
InterruptsSchedulerModeLocker(bool alreadyLocked = false,
bool lockIfNotLocked = true)
:
AutoLocker<int, InterruptsSchedulerModeLocking>(&fState, alreadyLocked,
lockIfNotLocked)
{
}
private:
int fState;
};
// "Big" scheduler lock: disables interrupts and takes the scheduler lock of
// every CPU, in ascending CPU order. Unlock() releases them all and then
// restores the saved interrupt state.
class InterruptsBigSchedulerLocking {
public:
bool Lock(int* lockable)
{
*lockable = disable_interrupts();
for (int32 i = 0; i < smp_get_num_cpus(); i++)
CPUEntry::GetCPU(i)->LockScheduler();
return true;
}
void Unlock(int* lockable)
{
for (int32 i = 0; i < smp_get_num_cpus(); i++)
CPUEntry::GetCPU(i)->UnlockScheduler();
restore_interrupts(*lockable);
}
};
// RAII wrapper for InterruptsBigSchedulerLocking: always acquires on
// construction (alreadyLocked = false, lockIfNotLocked = true); fState holds
// the saved interrupt state.
class InterruptsBigSchedulerLocker :
public AutoLocker<int, InterruptsBigSchedulerLocking> {
public:
InterruptsBigSchedulerLocker()
:
AutoLocker<int, InterruptsBigSchedulerLocking>(&fState, false, true)
{
}
private:
int fState;
};
class ThreadEnqueuer : public ThreadProcessing {
public:
void operator()(ThreadData* thread);
@ -624,6 +538,8 @@ scheduler_on_thread_create(Thread* thread, bool idleThread)
void
scheduler_on_thread_init(Thread* thread)
{
ASSERT(thread->scheduler_data != NULL);
if (thread_is_idle_thread(thread)) {
static int32 sIdleThreadsID;
int32 cpuID = atomic_add(&sIdleThreadsID, 1);

View File

@ -392,14 +392,6 @@ CoreEntry::Remove(ThreadData* thread)
}
// Returns the top-priority thread in this core's run queue without removing
// it (presumably NULL when the queue is empty — confirm PeekMaximum's
// contract).
inline ThreadData*
CoreEntry::PeekThread() const
{
SCHEDULER_ENTER_FUNCTION();
return fRunQueue.PeekMaximum();
}
void
CoreEntry::UpdateLoad(int32 delta)
{
@ -454,26 +446,6 @@ CoreEntry::UpdateLoad(int32 delta)
}
// One of this core's CPUs has gone idle. When the idle count reaches the
// core's CPU count — i.e. the whole core is now idle — notify the package.
inline void
CoreEntry::CPUGoesIdle(CPUEntry* /* cpu */)
{
ASSERT(fCPUIdleCount < fCPUCount);
if (++fCPUIdleCount == fCPUCount)
fPackage->CoreGoesIdle(this);
}
// One of this core's CPUs has woken up. The post-decrement compares the
// previous count, so the package is notified only if the core was fully
// idle before this wake-up.
inline void
CoreEntry::CPUWakesUp(CPUEntry* /* cpu */)
{
ASSERT(fCPUIdleCount > 0);
if (fCPUIdleCount-- == fCPUCount)
fPackage->CoreWakesUp(this);
}
void
CoreEntry::AddCPU(CPUEntry* cpu)
{
@ -599,48 +571,6 @@ PackageEntry::Init(int32 id)
}
// Records `core` as idle in this package (under fCoreLock). Once every core
// of the package is idle, the package is put on the global idle-package
// list, guarded by gIdlePackageLock.
inline void
PackageEntry::CoreGoesIdle(CoreEntry* core)
{
SCHEDULER_ENTER_FUNCTION();
WriteSpinLocker _(fCoreLock);
ASSERT(fIdleCoreCount >= 0);
ASSERT(fIdleCoreCount < fCoreCount);
fIdleCoreCount++;
fIdleCores.Add(core);
if (fIdleCoreCount == fCoreCount) {
// package goes idle
WriteSpinLocker _(gIdlePackageLock);
gIdlePackageList.Add(this);
}
}
// Removes `core` from this package's idle set (under fCoreLock). The
// "fIdleCoreCount + 1 == fCoreCount" test checks whether the package was
// fully idle before this wake-up; if so it leaves the global idle list.
inline void
PackageEntry::CoreWakesUp(CoreEntry* core)
{
SCHEDULER_ENTER_FUNCTION();
WriteSpinLocker _(fCoreLock);
ASSERT(fIdleCoreCount > 0);
ASSERT(fIdleCoreCount <= fCoreCount);
fIdleCoreCount--;
fIdleCores.Remove(core);
if (fIdleCoreCount + 1 == fCoreCount) {
// package wakes up
WriteSpinLocker _(gIdlePackageLock);
gIdlePackageList.Remove(this);
}
}
void
PackageEntry::AddIdleCore(CoreEntry* core)
{

View File

@ -85,7 +85,7 @@ public:
static inline CPUEntry* GetCPU(int32 cpu);
private:
inline void _RequestPerformanceLevel(
void _RequestPerformanceLevel(
ThreadData* threadData);
int32 fCPUNumber;
@ -112,21 +112,6 @@ public:
void Dump();
};
// AutoLocker policy guarding a CPUEntry's per-CPU run queue.
class CPURunQueueLocking {
public:
inline bool Lock(CPUEntry* cpu)
{
cpu->LockRunQueue();
return true;
}
inline void Unlock(CPUEntry* cpu)
{
cpu->UnlockRunQueue();
}
};
// RAII locker for a CPU's run queue.
typedef AutoLocker<CPUEntry, CPURunQueueLocking> CPURunQueueLocker;
class CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, int32>,
public DoublyLinkedListLinkImpl<CoreEntry> {
@ -205,37 +190,6 @@ private:
friend class DebugDumper;
} CACHE_LINE_ALIGN;
// AutoLocker policy guarding a CoreEntry's shared run queue.
class CoreRunQueueLocking {
public:
inline bool Lock(CoreEntry* core)
{
core->LockRunQueue();
return true;
}
inline void Unlock(CoreEntry* core)
{
core->UnlockRunQueue();
}
};
// RAII locker for a core's run queue.
typedef AutoLocker<CoreEntry, CoreRunQueueLocking> CoreRunQueueLocker;
// AutoLocker policy guarding a CoreEntry's CPU priority heap.
class CoreCPUHeapLocking {
public:
inline bool Lock(CoreEntry* core)
{
core->LockCPUHeap();
return true;
}
inline void Unlock(CoreEntry* core)
{
core->UnlockCPUHeap();
}
};
// RAII locker for a core's CPU heap.
typedef AutoLocker<CoreEntry, CoreCPUHeapLocking> CoreCPUHeapLocker;
class CoreLoadHeap : public MinMaxHeap<CoreEntry, int32> {
public:
@ -402,6 +356,14 @@ CoreEntry::IncreaseActiveTime(bigtime_t activeTime)
}
// Returns the top-priority thread in this core's run queue without removing
// it (presumably NULL when the queue is empty — confirm PeekMaximum's
// contract).
inline ThreadData*
CoreEntry::PeekThread() const
{
SCHEDULER_ENTER_FUNCTION();
return fRunQueue.PeekMaximum();
}
inline bigtime_t
CoreEntry::GetActiveTime() const
{
@ -435,6 +397,73 @@ CoreEntry::StarvationCounter() const
}
/* PackageEntry::CoreGoesIdle and PackageEntry::CoreWakesUp have to be defined
before CoreEntry::CPUGoesIdle and CoreEntry::CPUWakesUp. If they weren't,
GCC2 wouldn't inline them because, apparently, it doesn't do enough
optimization passes.
*/
// Records `core` as idle in this package (under fCoreLock). Once every core
// of the package is idle, the package is put on the global idle-package
// list, guarded by gIdlePackageLock.
inline void
PackageEntry::CoreGoesIdle(CoreEntry* core)
{
SCHEDULER_ENTER_FUNCTION();
WriteSpinLocker _(fCoreLock);
ASSERT(fIdleCoreCount >= 0);
ASSERT(fIdleCoreCount < fCoreCount);
fIdleCoreCount++;
fIdleCores.Add(core);
if (fIdleCoreCount == fCoreCount) {
// package goes idle
WriteSpinLocker _(gIdlePackageLock);
gIdlePackageList.Add(this);
}
}
// Removes `core` from this package's idle set (under fCoreLock). The
// "fIdleCoreCount + 1 == fCoreCount" test checks whether the package was
// fully idle before this wake-up; if so it leaves the global idle list.
inline void
PackageEntry::CoreWakesUp(CoreEntry* core)
{
SCHEDULER_ENTER_FUNCTION();
WriteSpinLocker _(fCoreLock);
ASSERT(fIdleCoreCount > 0);
ASSERT(fIdleCoreCount <= fCoreCount);
fIdleCoreCount--;
fIdleCores.Remove(core);
if (fIdleCoreCount + 1 == fCoreCount) {
// package wakes up
WriteSpinLocker _(gIdlePackageLock);
gIdlePackageList.Remove(this);
}
}
// One of this core's CPUs has gone idle. When the idle count reaches the
// core's CPU count — i.e. the whole core is now idle — notify the package.
inline void
CoreEntry::CPUGoesIdle(CPUEntry* /* cpu */)
{
ASSERT(fCPUIdleCount < fCPUCount);
if (++fCPUIdleCount == fCPUCount)
fPackage->CoreGoesIdle(this);
}
// One of this core's CPUs has woken up. The post-decrement compares the
// previous count, so the package is notified only if the core was fully
// idle before this wake-up.
inline void
CoreEntry::CPUWakesUp(CPUEntry* /* cpu */)
{
ASSERT(fCPUIdleCount > 0);
if (fCPUIdleCount-- == fCPUCount)
fPackage->CoreWakesUp(this);
}
/* static */ inline CoreEntry*
CoreEntry::GetCore(int32 cpu)
{

View File

@ -0,0 +1,158 @@
/*
* Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_SCHEDULER_LOCKING_H
#define KERNEL_SCHEDULER_LOCKING_H
#include <util/AutoLock.h>
#include "scheduler_cpu.h"
namespace Scheduler {
// AutoLocker policy guarding a CPUEntry's per-CPU run queue.
class CPURunQueueLocking {
public:
inline bool Lock(CPUEntry* cpu)
{
cpu->LockRunQueue();
return true;
}
inline void Unlock(CPUEntry* cpu)
{
cpu->UnlockRunQueue();
}
};
// RAII locker for a CPU's run queue.
typedef AutoLocker<CPUEntry, CPURunQueueLocking> CPURunQueueLocker;
// AutoLocker policy guarding a CoreEntry's shared run queue.
class CoreRunQueueLocking {
public:
inline bool Lock(CoreEntry* core)
{
core->LockRunQueue();
return true;
}
inline void Unlock(CoreEntry* core)
{
core->UnlockRunQueue();
}
};
// RAII locker for a core's run queue.
typedef AutoLocker<CoreEntry, CoreRunQueueLocking> CoreRunQueueLocker;
// AutoLocker policy guarding a CoreEntry's CPU priority heap.
class CoreCPUHeapLocking {
public:
inline bool Lock(CoreEntry* core)
{
core->LockCPUHeap();
return true;
}
inline void Unlock(CoreEntry* core)
{
core->UnlockCPUHeap();
}
};
// RAII locker for a core's CPU heap.
typedef AutoLocker<CoreEntry, CoreCPUHeapLocking> CoreCPUHeapLocker;
// AutoLocker policy that marks the current CPU as executing inside the
// scheduler. The int* "lockable" is a dummy — the real state lives in the
// current CPU's CPUEntry.
class SchedulerModeLocking {
public:
bool Lock(int* /* lockable */)
{
CPUEntry::GetCPU(smp_get_current_cpu())->EnterScheduler();
return true;
}
void Unlock(int* /* lockable */)
{
CPUEntry::GetCPU(smp_get_current_cpu())->ExitScheduler();
}
};
// RAII locker built on SchedulerModeLocking. Passes NULL as the lockable
// since the policy ignores it; by default acquires on construction.
class SchedulerModeLocker :
public AutoLocker<int, SchedulerModeLocking> {
public:
SchedulerModeLocker(bool alreadyLocked = false, bool lockIfNotLocked = true)
:
AutoLocker<int, SchedulerModeLocking>(NULL, alreadyLocked,
lockIfNotLocked)
{
}
};
// Like SchedulerModeLocking, but additionally disables interrupts. The
// previous interrupt state is saved in *lockable and restored on Unlock().
class InterruptsSchedulerModeLocking {
public:
bool Lock(int* lockable)
{
*lockable = disable_interrupts();
CPUEntry::GetCPU(smp_get_current_cpu())->EnterScheduler();
return true;
}
void Unlock(int* lockable)
{
// Leave the scheduler first, then restore the saved interrupt state.
CPUEntry::GetCPU(smp_get_current_cpu())->ExitScheduler();
restore_interrupts(*lockable);
}
};
// RAII locker for InterruptsSchedulerModeLocking; fState stores the
// interrupt state saved by Lock() so Unlock() can restore it.
class InterruptsSchedulerModeLocker :
public AutoLocker<int, InterruptsSchedulerModeLocking> {
public:
InterruptsSchedulerModeLocker(bool alreadyLocked = false,
bool lockIfNotLocked = true)
:
AutoLocker<int, InterruptsSchedulerModeLocking>(&fState, alreadyLocked,
lockIfNotLocked)
{
}
private:
int fState;
};
// "Big" scheduler lock: disables interrupts and takes the scheduler lock of
// every CPU, in ascending CPU order. Unlock() releases them all and then
// restores the saved interrupt state.
class InterruptsBigSchedulerLocking {
public:
bool Lock(int* lockable)
{
*lockable = disable_interrupts();
for (int32 i = 0; i < smp_get_num_cpus(); i++)
CPUEntry::GetCPU(i)->LockScheduler();
return true;
}
void Unlock(int* lockable)
{
for (int32 i = 0; i < smp_get_num_cpus(); i++)
CPUEntry::GetCPU(i)->UnlockScheduler();
restore_interrupts(*lockable);
}
};
// RAII wrapper for InterruptsBigSchedulerLocking: always acquires on
// construction (alreadyLocked = false, lockIfNotLocked = true); fState holds
// the saved interrupt state.
class InterruptsBigSchedulerLocker :
public AutoLocker<int, InterruptsBigSchedulerLocking> {
public:
InterruptsBigSchedulerLocker()
:
AutoLocker<int, InterruptsBigSchedulerLocking>(&fState, false, true)
{
}
private:
int fState;
};
} // namespace Scheduler
#endif // KERNEL_SCHEDULER_LOCKING_H

View File

@ -12,6 +12,58 @@ using namespace Scheduler;
static bigtime_t sQuantumLengths[THREAD_MAX_SET_PRIORITY + 1];
// Delegates core selection for this thread to the active scheduler mode.
// Only valid on multi-core systems (asserted).
inline CoreEntry*
ThreadData::_ChooseCore() const
{
SCHEDULER_ENTER_FUNCTION();
ASSERT(!gSingleCore);
return gCurrentMode->choose_core(this);
}
// Chooses the CPU of `core` that should run this thread. Sets
// rescheduleNeeded when the chosen CPU's heap key is below this thread's
// effective priority (its priority key is bumped in that case).
inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
SCHEDULER_ENTER_FUNCTION();
int32 threadPriority = GetEffectivePriority();
// Prefer the CPU the thread last ran on (cache affinity), provided it
// belongs to this core, is enabled, and its key is below threadPriority.
if (fThread->previous_cpu != NULL) {
CPUEntry* previousCPU = &gCPUEntries[fThread->previous_cpu->cpu_num];
if (previousCPU->Core() == core && !fThread->previous_cpu->disabled) {
CoreCPUHeapLocker _(core);
if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
previousCPU->UpdatePriority(threadPriority);
rescheduleNeeded = true;
return previousCPU;
}
}
}
// Otherwise take the CPU at the root of the core's priority heap
// (presumably the lowest-priority one — confirm heap ordering).
CoreCPUHeapLocker _(core);
CPUEntry* cpu = core->CPUHeap()->PeekRoot();
ASSERT(cpu != NULL);
if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
cpu->UpdatePriority(threadPriority);
rescheduleNeeded = true;
} else
rescheduleNeeded = false;
return cpu;
}
// Base time quantum for this thread, looked up by its effective priority.
inline bigtime_t
ThreadData::_GetBaseQuantum() const
{
SCHEDULER_ENTER_FUNCTION();
return sQuantumLengths[GetEffectivePriority()];
}
ThreadData::ThreadData(Thread* thread)
:
fThread(thread)
@ -190,6 +242,21 @@ ThreadData::ComputeQuantumLengths()
}
// Total priority penalty: the accumulated fPriorityPenalty plus
// fAdditionalPenalty wrapped modulo the thread's minimal priority (the
// modulo is skipped when that bound is 0, avoiding division by zero).
inline int32
ThreadData::_GetPenalty() const
{
SCHEDULER_ENTER_FUNCTION();
int32 penalty = fPriorityPenalty;
const int kMinimalPriority = _GetMinimalPriority();
if (kMinimalPriority > 0)
penalty += fAdditionalPenalty % kMinimalPriority;
return penalty;
}
void
ThreadData::_ComputeEffectivePriority() const
{
@ -209,58 +276,6 @@ ThreadData::_ComputeEffectivePriority() const
}
// Delegates core selection for this thread to the active scheduler mode.
// Only valid on multi-core systems (asserted).
inline CoreEntry*
ThreadData::_ChooseCore() const
{
SCHEDULER_ENTER_FUNCTION();
ASSERT(!gSingleCore);
return gCurrentMode->choose_core(this);
}
// Chooses the CPU of `core` that should run this thread. Sets
// rescheduleNeeded when the chosen CPU's heap key is below this thread's
// effective priority (its priority key is bumped in that case).
inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
SCHEDULER_ENTER_FUNCTION();
int32 threadPriority = GetEffectivePriority();
// Prefer the CPU the thread last ran on (cache affinity), provided it
// belongs to this core, is enabled, and its key is below threadPriority.
if (fThread->previous_cpu != NULL) {
CPUEntry* previousCPU = &gCPUEntries[fThread->previous_cpu->cpu_num];
if (previousCPU->Core() == core && !fThread->previous_cpu->disabled) {
CoreCPUHeapLocker _(core);
if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
previousCPU->UpdatePriority(threadPriority);
rescheduleNeeded = true;
return previousCPU;
}
}
}
// Otherwise take the CPU at the root of the core's priority heap
// (presumably the lowest-priority one — confirm heap ordering).
CoreCPUHeapLocker _(core);
CPUEntry* cpu = core->CPUHeap()->PeekRoot();
ASSERT(cpu != NULL);
if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
cpu->UpdatePriority(threadPriority);
rescheduleNeeded = true;
} else
rescheduleNeeded = false;
return cpu;
}
// Base time quantum for this thread, looked up by its effective priority.
inline bigtime_t
ThreadData::_GetBaseQuantum() const
{
SCHEDULER_ENTER_FUNCTION();
return sQuantumLengths[GetEffectivePriority()];
}
/* static */ bigtime_t
ThreadData::_ScaleQuantum(bigtime_t maxQuantum, bigtime_t minQuantum,
int32 maxPriority, int32 minPriority, int32 priority)

View File

@ -11,6 +11,7 @@
#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_profiler.h"
@ -19,6 +20,17 @@ namespace Scheduler {
struct ThreadData : public DoublyLinkedListLinkImpl<ThreadData>,
RunQueueLinkImpl<ThreadData> {
private:
inline void _InitBase();
inline int32 _GetMinimalPriority() const;
inline CoreEntry* _ChooseCore() const;
inline CPUEntry* _ChooseCPU(CoreEntry* core,
bool& rescheduleNeeded) const;
inline bigtime_t _GetBaseQuantum() const;
public:
ThreadData(Thread* thread);
@ -72,17 +84,12 @@ public:
inline void UnassignCore() { fCore = NULL; }
static void ComputeQuantumLengths();
private:
inline int32 _GetPenalty() const;
inline int32 _GetMinimalPriority() const;
void _ComputeEffectivePriority() const;
inline CoreEntry* _ChooseCore() const;
inline CPUEntry* _ChooseCPU(CoreEntry* core,
bool& rescheduleNeeded) const;
inline bigtime_t _GetBaseQuantum() const;
static bigtime_t _ScaleQuantum(bigtime_t maxQuantum,
bigtime_t minQuantum, int32 maxPriority,
int32 minPriority, int32 priority);
@ -121,6 +128,21 @@ public:
};
// Lowest effective priority this thread may be demoted to: one fifth of its
// base priority, clamped to [B_LOWEST_ACTIVE_PRIORITY, 25].
inline int32
ThreadData::_GetMinimalPriority() const
{
SCHEDULER_ENTER_FUNCTION();
const int32 kDivisor = 5;
const int32 kMaximalPriority = 25;
const int32 kMinimalPriority = B_LOWEST_ACTIVE_PRIORITY;
int32 priority = fThread->priority / kDivisor;
return std::max(std::min(priority, kMaximalPriority), kMinimalPriority);
}
inline bool
ThreadData::HasCacheExpired() const
{
@ -259,6 +281,7 @@ ThreadData::Enqueue()
SCHEDULER_ENTER_FUNCTION();
fThread->state = B_THREAD_READY;
ComputeLoad();
fWentSleepCount = 0;
@ -350,36 +373,6 @@ ThreadData::StartQuantum()
}
// Total priority penalty: the accumulated fPriorityPenalty plus
// fAdditionalPenalty wrapped modulo the thread's minimal priority (the
// modulo is skipped when that bound is 0, avoiding division by zero).
inline int32
ThreadData::_GetPenalty() const
{
SCHEDULER_ENTER_FUNCTION();
int32 penalty = fPriorityPenalty;
const int kMinimalPriority = _GetMinimalPriority();
if (kMinimalPriority > 0)
penalty += fAdditionalPenalty % kMinimalPriority;
return penalty;
}
// Lowest effective priority this thread may be demoted to: one fifth of its
// base priority, clamped to [B_LOWEST_ACTIVE_PRIORITY, 25].
inline int32
ThreadData::_GetMinimalPriority() const
{
SCHEDULER_ENTER_FUNCTION();
const int32 kDivisor = 5;
const int32 kMaximalPriority = 25;
const int32 kMinimalPriority = B_LOWEST_ACTIVE_PRIORITY;
int32 priority = fThread->priority / kDivisor;
return std::max(std::min(priority, kMaximalPriority), kMinimalPriority);
}
} // namespace Scheduler