scheduler: Encapsulate PackageEntry fields
Apart from the refactoring, this commit takes the opportunity to remove unnecessary read locks when choosing a package and a core from the idle lists. The data structures are accessed in a thread-safe way, and it does not really matter whether the obtained data becomes outdated just as we release the lock or during our search for the appropriate package/core.
parent cf21c40b50
commit 60e198f2cb
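For context, here is a minimal, self-contained sketch of the pattern the diff below applies: idle-core bookkeeping moves behind `PackageEntry`-style methods so the write-side locking discipline lives in one place. The `Package`/`Core` types and `std::shared_mutex` here are stand-ins for illustration, not the Haiku kernel API (the kernel uses `rw_spinlock` and intrusive lists). Note that the real `PackageEntry::GetIdleCore()` goes one step further than this sketch and takes no read lock at all, for the reason given in the commit message: a result that can go stale the instant the lock is released is no safer than one read without it.

```cpp
#include <algorithm>
#include <cstdio>
#include <mutex>
#include <shared_mutex>
#include <vector>

struct Core { int id; };

class Package {
public:
	// Write side: all idle-list bookkeeping is funneled through these two
	// methods, so the locking discipline is stated once instead of being
	// repeated at every call site (as CPUEntry::UpdatePriority did before
	// this commit).
	void CoreGoesIdle(Core* core)
	{
		std::unique_lock<std::shared_mutex> lock(fCoreLock);
		fIdleCores.push_back(core);
	}

	void CoreWakesUp(Core* core)
	{
		std::unique_lock<std::shared_mutex> lock(fCoreLock);
		fIdleCores.erase(std::find(fIdleCores.begin(), fIdleCores.end(), core));
	}

	// Read side: callers get a snapshot that may be outdated by the time it
	// is used -- which is equally true one instruction after a read lock is
	// released, the argument the commit message makes for dropping the read
	// lock entirely in the kernel version.
	Core* GetIdleCore() const
	{
		std::shared_lock<std::shared_mutex> lock(fCoreLock);
		return fIdleCores.empty() ? nullptr : fIdleCores.back();
	}

private:
	std::vector<Core*> fIdleCores;
	mutable std::shared_mutex fCoreLock;
};

int main()
{
	Core cores[2] = {{0}, {1}};
	Package package;
	package.CoreGoesIdle(&cores[0]);
	package.CoreGoesIdle(&cores[1]);

	if (Core* core = package.GetIdleCore())
		std::printf("would wake core %d\n", core->id);

	package.CoreWakesUp(&cores[1]);
	return 0;
}
```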
@@ -19,7 +19,7 @@ const bigtime_t kCacheExpire = 100000;
 
 
 static void
-switch_to_mode(void)
+switch_to_mode()
 {
 }
 
@@ -35,7 +35,7 @@ has_cache_expired(const ThreadData* threadData)
 {
     ASSERT(!gSingleCore);
 
-    CoreEntry* core = threadData->GetCore();
+    CoreEntry* core = threadData->Core();
 
     bigtime_t activeTime;
     uint32 count;
@@ -48,39 +48,19 @@ has_cache_expired(const ThreadData* threadData)
 }
 
 
-static inline PackageEntry*
-get_most_idle_package(void)
-{
-    PackageEntry* current = &gPackageEntries[0];
-    for (int32 i = 1; i < gPackageCount; i++) {
-        if (gPackageEntries[i].fIdleCoreCount > current->fIdleCoreCount)
-            current = &gPackageEntries[i];
-    }
-
-    if (current->fIdleCoreCount == 0)
-        return NULL;
-
-    return current;
-}
-
-
 static CoreEntry*
 choose_core(const ThreadData* /* threadData */)
 {
-    ReadSpinLocker locker(gIdlePackageLock);
     // wake new package
     PackageEntry* package = gIdlePackageList.Last();
     if (package == NULL) {
         // wake new core
-        package = get_most_idle_package();
+        package = PackageEntry::GetMostIdlePackage();
     }
-    locker.Unlock();
 
     CoreEntry* core = NULL;
-    if (package != NULL) {
-        ReadSpinLocker _(package->fCoreLock);
-        core = package->fIdleCores.Last();
-    }
+    if (package != NULL)
+        core = package->GetIdleCore();
 
     if (core == NULL) {
         ReadSpinLocker coreLocker(gCoreHeapsLock);
@@ -98,7 +78,7 @@ choose_core(const ThreadData* /* threadData */)
 static bool
 should_rebalance(const ThreadData* threadData)
 {
-    int32 coreLoad = threadData->GetCore()->GetLoad();
+    int32 coreLoad = threadData->Core()->GetLoad();
 
     // If the thread produces more than 50% of the load, leave it here. In
     // such situation it is better to move other threads away.
@@ -22,7 +22,7 @@ static CoreEntry* sSmallTaskCore;
 
 
 static void
-switch_to_mode(void)
+switch_to_mode()
 {
     sSmallTaskCore = NULL;
 }
@@ -46,7 +46,7 @@ has_cache_expired(const ThreadData* threadData)
 
 
 static CoreEntry*
-choose_small_task_core(void)
+choose_small_task_core()
 {
     ReadSpinLocker locker(gCoreHeapsLock);
     CoreEntry* core = gCoreLoadHeap.PeekMaximum();
@@ -64,27 +64,15 @@ choose_small_task_core(void)
 
 
 static CoreEntry*
-choose_idle_core(void)
+choose_idle_core()
 {
-    PackageEntry* package = NULL;
+    PackageEntry* package = PackageEntry::GetLeastIdlePackage();
 
-    for (int32 i = 0; i < gPackageCount; i++) {
-        PackageEntry* current = &gPackageEntries[i];
-        if (current->fIdleCoreCount != 0 && (package == NULL
-                || current->fIdleCoreCount < package->fIdleCoreCount)) {
-            package = current;
-        }
-    }
-
-    if (package == NULL) {
-        ReadSpinLocker _(gIdlePackageLock);
+    if (package == NULL)
         package = gIdlePackageList.Last();
-    }
 
-    if (package != NULL) {
-        ReadSpinLocker _(package->fCoreLock);
-        return package->fIdleCores.Last();
-    }
+    if (package != NULL)
+        return package->GetIdleCore();
 
     return NULL;
 }
@@ -125,7 +113,7 @@ should_rebalance(const ThreadData* threadData)
 {
     ASSERT(!gSingleCore);
 
-    CoreEntry* core = threadData->GetCore();
+    CoreEntry* core = threadData->Core();
 
     int32 coreLoad = core->GetLoad();
    if (coreLoad > kHighLoad) {
@@ -161,7 +149,7 @@ should_rebalance(const ThreadData* threadData)
 
 
 static inline void
-pack_irqs(void)
+pack_irqs()
 {
     CoreEntry* smallTaskCore = atomic_pointer_get(&sSmallTaskCore);
     if (smallTaskCore == NULL)
@@ -61,7 +61,7 @@ public:
     }
 };
 
-class BigSchedulerLocking {
+class InterruptsBigSchedulerLocking {
 public:
     bool Lock(int* lockable)
     {
@@ -80,11 +80,11 @@ public:
 };
 
 class InterruptsBigSchedulerLocker :
-    public AutoLocker<int, BigSchedulerLocking> {
+    public AutoLocker<int, InterruptsBigSchedulerLocking> {
 public:
     InterruptsBigSchedulerLocker()
         :
-        AutoLocker<int, BigSchedulerLocking>(&fState, false, true)
+        AutoLocker<int, InterruptsBigSchedulerLocking>(&fState, false, true)
     {
     }
 
@@ -157,10 +157,10 @@ enqueue(Thread* thread, bool newOne)
         targetCPU = &gCPUEntries[thread->previous_cpu->cpu_num];
     } else if (gSingleCore)
         targetCore = &gCoreEntries[0];
-    else if (threadData->GetCore() != NULL
+    else if (threadData->Core() != NULL
         && (!newOne || !threadData->HasCacheExpired())
         && !threadData->ShouldRebalance()) {
-        targetCore = threadData->GetCore();
+        targetCore = threadData->Core();
     }
 
     bool rescheduleNeeded = threadData->ChooseCoreAndCPU(targetCore, targetCPU);
@@ -194,7 +194,12 @@ enqueue(Thread* thread, bool newOne)
 void
 scheduler_enqueue_in_run_queue(Thread *thread)
 {
-    InterruptsSchedulerModeLocker _;
+#if KDEBUG
+    if (are_interrupts_enabled())
+        panic("scheduler_enqueue_in_run_queue: called with interrupts enabled");
+#endif
+
+    SchedulerModeLocker _;
 
     TRACE("enqueueing new thread %ld with static priority %ld\n", thread->id,
         thread->priority);
@@ -213,6 +218,11 @@ scheduler_enqueue_in_run_queue(Thread *thread)
 int32
 scheduler_set_thread_priority(Thread *thread, int32 priority)
 {
+#if KDEBUG
+    if (!are_interrupts_enabled())
+        panic("scheduler_set_thread_priority: called with interrupts disabled");
+#endif
+
     InterruptsSpinLocker _(thread->scheduler_lock);
     SchedulerModeLocker modeLocker;
 
@@ -230,12 +240,12 @@ scheduler_set_thread_priority(Thread *thread, int32 priority)
 
     if (thread->state != B_THREAD_READY) {
         if (thread->state == B_THREAD_RUNNING) {
-            ASSERT(threadData->GetCore() != NULL);
+            ASSERT(threadData->Core() != NULL);
 
             ASSERT(thread->cpu != NULL);
             CPUEntry* cpu = &gCPUEntries[thread->cpu->cpu_num];
 
-            SpinLocker coreLocker(threadData->GetCore()->fCPULock);
+            SpinLocker coreLocker(threadData->Core()->fCPULock);
             cpu->UpdatePriority(priority);
         }
 
@@ -497,7 +507,7 @@ reschedule(int32 nextState)
     NotifySchedulerListeners(&SchedulerListener::ThreadScheduled,
         oldThread, nextThread);
 
-    ASSERT(nextThreadData->GetCore() == core);
+    ASSERT(nextThreadData->Core() == core);
     nextThread->state = B_THREAD_RUNNING;
 
     // update CPU heap
@@ -540,6 +550,11 @@ reschedule(int32 nextState)
 void
 scheduler_reschedule(int32 nextState)
 {
+#if KDEBUG
+    if (are_interrupts_enabled())
+        panic("scheduler_reschedule: called with interrupts enabled");
+#endif
+
     if (!sSchedulerEnabled) {
         Thread* thread = thread_get_current_thread();
         if (thread != NULL && nextState != B_THREAD_READY)
@@ -588,7 +603,7 @@ scheduler_on_thread_destroy(Thread* thread)
     thread. Interrupts must be disabled and will be disabled when returning.
 */
 void
-scheduler_start(void)
+scheduler_start()
 {
     InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
 
@@ -621,7 +636,7 @@ unassign_thread(Thread* thread, void* data)
 {
     CoreEntry* core = static_cast<CoreEntry*>(data);
 
-    if (thread->scheduler_data->GetCore() == core
+    if (thread->scheduler_data->Core() == core
         && thread->pinned_to_cpu == 0) {
         thread->scheduler_data->UnassignCore();
     }
@@ -631,6 +646,11 @@ unassign_thread(Thread* thread, void* data)
 void
 scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
 {
+#if KDEBUG
+    if (are_interrupts_enabled())
+        panic("scheduler_set_cpu_enabled: called with interrupts enabled");
+#endif
+
     dprintf("scheduler: %s CPU %" B_PRId32 "\n",
         enabled ? "enabling" : "disabling", cpuID);
 
@@ -667,12 +687,7 @@ scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
             gCoreLoadHeap.RemoveMinimum();
         }
 
-        package->fIdleCores.Remove(core);
-        package->fIdleCoreCount--;
-        package->fCoreCount--;
-
-        if (package->fCoreCount == 0)
-            gIdlePackageList.Remove(package);
+        package->RemoveIdleCore(core);
 
         // get rid of threads
         thread_map(unassign_thread, core);
@@ -689,7 +704,7 @@ scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
             threadData->fWentSleepCount = -1;
         }
 
-        ASSERT(threadData->GetCore() == NULL);
+        ASSERT(threadData->Core() == NULL);
         enqueue(threadData->GetThread(), false);
     }
 } else if (oldCPUCount == 0) {
@@ -701,12 +716,7 @@ scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
         core->fHighLoad = false;
         gCoreLoadHeap.Insert(core, 0);
 
-        package->fCoreCount++;
-        package->fIdleCoreCount++;
-        package->fIdleCores.Add(core);
-
-        if (package->fCoreCount == 1)
-            gIdlePackageList.Add(package);
+        package->AddIdleCore(core);
     }
 
     if (enabled) {
@@ -846,11 +856,8 @@ init()
 
     new(&gIdlePackageList) IdlePackageList;
 
-    for (int32 i = 0; i < packageCount; i++) {
-        gPackageEntries[i].fPackageID = i;
-        gPackageEntries[i].fCoreCount = coreCount / packageCount;
-        gIdlePackageList.Insert(&gPackageEntries[i]);
-    }
+    for (int32 i = 0; i < packageCount; i++)
+        gPackageEntries[i].Init(i);
 
     for (int32 i = 0; i < coreCount; i++) {
         gCoreEntries[i].fCoreID = i;
@@ -869,10 +876,8 @@ init()
         gCPUEntries[i].fCore = core;
         core->fPackage = package;
 
-        if (core->fCPUHeap.PeekMaximum() == NULL) {
-            package->fIdleCoreCount++;
-            package->fIdleCores.Insert(core);
-        }
+        if (core->fCPUHeap.PeekMaximum() == NULL)
+            package->AddIdleCore(core);
 
         result = core->fCPUHeap.Insert(&gCPUEntries[i], B_IDLE_PRIORITY);
         if (result != B_OK)
@@ -888,7 +893,7 @@ init()
 
 
 void
-scheduler_init(void)
+scheduler_init()
 {
     int32 cpuCount = smp_get_num_cpus();
     dprintf("scheduler_init: found %" B_PRId32 " logical cpu%s and %" B_PRId32
@@ -914,7 +919,7 @@ scheduler_init(void)
 
 
 void
-scheduler_enable_scheduling(void)
+scheduler_enable_scheduling()
 {
     sSchedulerEnabled = true;
 }
@@ -972,7 +977,7 @@ _user_estimate_max_scheduling_latency(thread_id id)
     BReference<Thread> threadReference(thread, true);
 
     ThreadData* threadData = thread->scheduler_data;
-    CoreEntry* core = threadData->GetCore();
+    CoreEntry* core = threadData->Core();
     if (core == NULL)
         core = &gCoreEntries[get_random<int32>() % gCoreCount];
 
@@ -1003,7 +1008,7 @@ _user_set_scheduler_mode(int32 mode)
 
 
 int32
-_user_get_scheduler_mode(void)
+_user_get_scheduler_mode()
 {
     return gCurrentModeID;
 }
@@ -45,7 +45,7 @@ const int kLoadDifference = kMaxLoad * 20 / 100;
 extern bool gSingleCore;
 
 
-void init_debug_commands(void);
+void init_debug_commands();
 
 
 }    // namespace Scheduler
@@ -16,6 +16,13 @@
 using namespace Scheduler;
 
 
+class Scheduler::DebugDumper {
+public:
+    static void DumpIdleCoresInPackage(PackageEntry* package);
+
+};
+
+
 static CPUPriorityHeap sDebugCPUHeap;
 static CoreLoadHeap sDebugCoreHeap;
 
@@ -67,37 +74,10 @@ CPUEntry::UpdatePriority(int32 priority)
         return;
 
     PackageEntry* packageEntry = fCore->fPackage;
-    if (maxPriority == B_IDLE_PRIORITY) {
-        WriteSpinLocker _(packageEntry->fCoreLock);
-
-        // core goes idle
-        ASSERT(packageEntry->fIdleCoreCount >= 0);
-        ASSERT(packageEntry->fIdleCoreCount < packageEntry->fCoreCount);
-
-        packageEntry->fIdleCoreCount++;
-        packageEntry->fIdleCores.Add(fCore);
-
-        if (packageEntry->fIdleCoreCount == packageEntry->fCoreCount) {
-            // package goes idle
-            WriteSpinLocker _(gIdlePackageLock);
-            gIdlePackageList.Add(packageEntry);
-        }
-    } else if (corePriority == B_IDLE_PRIORITY) {
-        WriteSpinLocker _(packageEntry->fCoreLock);
-
-        // core wakes up
-        ASSERT(packageEntry->fIdleCoreCount > 0);
-        ASSERT(packageEntry->fIdleCoreCount <= packageEntry->fCoreCount);
-
-        packageEntry->fIdleCoreCount--;
-        packageEntry->fIdleCores.Remove(fCore);
-
-        if (packageEntry->fIdleCoreCount + 1 == packageEntry->fCoreCount) {
-            // package wakes up
-            WriteSpinLocker _(gIdlePackageLock);
-            gIdlePackageList.Remove(packageEntry);
-        }
-    }
+    if (maxPriority == B_IDLE_PRIORITY)
+        packageEntry->CoreGoesIdle(fCore);
+    else if (corePriority == B_IDLE_PRIORITY)
+        packageEntry->CoreWakesUp(fCore);
 }
 
 
@@ -371,6 +351,94 @@ PackageEntry::PackageEntry()
 }
 
 
+void
+PackageEntry::Init(int32 id)
+{
+    fPackageID = id;
+}
+
+
+inline void
+PackageEntry::CoreGoesIdle(CoreEntry* core)
+{
+    WriteSpinLocker _(fCoreLock);
+
+    ASSERT(fIdleCoreCount >= 0);
+    ASSERT(fIdleCoreCount < fCoreCount);
+
+    fIdleCoreCount++;
+    fIdleCores.Add(core);
+
+    if (fIdleCoreCount == fCoreCount) {
+        // package goes idle
+        WriteSpinLocker _(gIdlePackageLock);
+        gIdlePackageList.Add(this);
+    }
+}
+
+
+inline void
+PackageEntry::CoreWakesUp(CoreEntry* core)
+{
+    WriteSpinLocker _(fCoreLock);
+
+    ASSERT(fIdleCoreCount > 0);
+    ASSERT(fIdleCoreCount <= fCoreCount);
+
+    fIdleCoreCount--;
+    fIdleCores.Remove(core);
+
+    if (fIdleCoreCount + 1 == fCoreCount) {
+        // package wakes up
+        WriteSpinLocker _(gIdlePackageLock);
+        gIdlePackageList.Remove(this);
+    }
+}
+
+
+void
+PackageEntry::AddIdleCore(CoreEntry* core)
+{
+    fCoreCount++;
+    fIdleCoreCount++;
+    fIdleCores.Add(core);
+
+    if (fCoreCount == 1)
+        gIdlePackageList.Add(this);
+}
+
+
+void
+PackageEntry::RemoveIdleCore(CoreEntry* core)
+{
+    fIdleCores.Remove(core);
+    fIdleCoreCount--;
+    fCoreCount--;
+
+    if (fCoreCount == 0)
+        gIdlePackageList.Remove(this);
+}
+
+
+/* static */ void
+DebugDumper::DumpIdleCoresInPackage(PackageEntry* package)
+{
+    kprintf("%-7" B_PRId32 " ", package->fPackageID);
+
+    DoublyLinkedList<CoreEntry>::ReverseIterator iterator
+        = package->fIdleCores.GetReverseIterator();
+    if (iterator.HasNext()) {
+        while (iterator.HasNext()) {
+            CoreEntry* coreEntry = iterator.Next();
+            kprintf("%" B_PRId32 "%s", coreEntry->fCoreID,
+                iterator.HasNext() ? ", " : "");
+        }
+    } else
+        kprintf("-");
+    kprintf("\n");
+}
+
+
 static int
 dump_run_queue(int argc, char **argv)
 {
@@ -429,22 +497,8 @@ dump_idle_cores(int argc, char** argv)
     if (idleIterator.HasNext()) {
         kprintf("package cores\n");
 
-        while (idleIterator.HasNext()) {
-            PackageEntry* entry = idleIterator.Next();
-            kprintf("%-7" B_PRId32 " ", entry->fPackageID);
-
-            DoublyLinkedList<CoreEntry>::ReverseIterator iterator
-                = entry->fIdleCores.GetReverseIterator();
-            if (iterator.HasNext()) {
-                while (iterator.HasNext()) {
-                    CoreEntry* coreEntry = iterator.Next();
-                    kprintf("%" B_PRId32 "%s", coreEntry->fCoreID,
-                        iterator.HasNext() ? ", " : "");
-                }
-            } else
-                kprintf("-");
-            kprintf("\n");
-        }
+        while (idleIterator.HasNext())
+            DebugDumper::DumpIdleCoresInPackage(idleIterator.Next());
     } else
         kprintf("No idle packages.\n");
 
@@ -452,7 +506,7 @@ dump_idle_cores(int argc, char** argv)
 }
 
 
-void Scheduler::init_debug_commands(void)
+void Scheduler::init_debug_commands()
 {
     new(&sDebugCPUHeap) CPUPriorityHeap(smp_get_num_cpus());
     new(&sDebugCoreHeap) CoreLoadHeap(smp_get_num_cpus());
@@ -21,6 +21,8 @@
 namespace Scheduler {
 
 
+class DebugDumper;
+
 struct ThreadData;
 
 struct CPUEntry;
@@ -122,15 +124,32 @@ public:
 // packages can go to the deep state of sleep). The heap stores only packages
 // with at least one core active and one core idle. The packages with all cores
 // idle are stored in gPackageIdleList (in LIFO manner).
-struct PackageEntry : public DoublyLinkedListLinkImpl<PackageEntry> {
+class PackageEntry : public DoublyLinkedListLinkImpl<PackageEntry> {
+public:
     PackageEntry();
 
+    void Init(int32 id);
+
+    inline void CoreGoesIdle(CoreEntry* core);
+    inline void CoreWakesUp(CoreEntry* core);
+
+    inline CoreEntry* GetIdleCore() const;
+
+    void AddIdleCore(CoreEntry* core);
+    void RemoveIdleCore(CoreEntry* core);
+
+    static inline PackageEntry* GetMostIdlePackage();
+    static inline PackageEntry* GetLeastIdlePackage();
+
+private:
     int32 fPackageID;
 
     DoublyLinkedList<CoreEntry> fIdleCores;
     int32 fIdleCoreCount;
     int32 fCoreCount;
     rw_spinlock fCoreLock;
+
+    friend class DebugDumper;
 } CACHE_LINE_ALIGN;
 typedef DoublyLinkedList<PackageEntry> IdlePackageList;
 
@@ -163,6 +182,48 @@ CoreEntry::GetCore(int32 cpu)
 }
 
 
+inline CoreEntry*
+PackageEntry::GetIdleCore() const
+{
+    return fIdleCores.Last();
+}
+
+
+/* static */ inline PackageEntry*
+PackageEntry::GetMostIdlePackage()
+{
+    PackageEntry* current = &gPackageEntries[0];
+    for (int32 i = 1; i < gPackageCount; i++) {
+        if (gPackageEntries[i].fIdleCoreCount > current->fIdleCoreCount)
+            current = &gPackageEntries[i];
+    }
+
+    if (current->fIdleCoreCount == 0)
+        return NULL;
+
+    return current;
+}
+
+
+/* static */ inline PackageEntry*
+PackageEntry::GetLeastIdlePackage()
+{
+    PackageEntry* package = NULL;
+
+    for (int32 i = 0; i < gPackageCount; i++) {
+        PackageEntry* current = &gPackageEntries[i];
+
+        int32 currentIdleCoreCount = current->fIdleCoreCount;
+        if (currentIdleCoreCount != 0 && (package == NULL
+                || currentIdleCoreCount < package->fIdleCoreCount)) {
+            package = current;
+        }
+    }
+
+    return package;
+}
+
+
 } // namespace Scheduler
 
 
@@ -21,7 +21,7 @@ struct scheduler_mode_operations {
 
     bigtime_t maximum_latency;
 
-    void (*switch_to_mode)(void);
+    void (*switch_to_mode)();
     void (*set_cpu_enabled)(int32 cpu, bool enabled);
     bool (*has_cache_expired)(
         const Scheduler::ThreadData* threadData);
@@ -53,7 +53,7 @@ public:
     inline Thread* GetThread() const { return fThread; }
     inline int32 GetLoad() const { return fLoad; }
 
-    inline CoreEntry* GetCore() const { return fCore; }
+    inline CoreEntry* Core() const { return fCore; }
     inline void UnassignCore() { fCore = NULL; }
 
     bigtime_t fStolenTime;