scheduler: Calculate correct load on SMT processors
commit 1a7eb50254
parent 65741c8b56
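On SMT systems several logical CPUs share one core, and CoreEntry::fLoad accumulates the load of all of them, so it ranges up to fCPUCount times the per-CPU maximum while the thresholds it is compared against (kHighLoad, kLoadDifference, kMaxLoad) are per-CPU values. The hunks below therefore replace direct fLoad reads with a get_core_load() helper that divides the sum by the number of logical CPUs on the core. A minimal standalone sketch of the arithmetic; the kMaxLoad scale of 1000 and the simplified struct are assumptions for illustration, not the kernel's exact definitions:

#include <cstdint>
#include <cstdio>

typedef int32_t int32;

static const int32 kMaxLoad = 1000;	// assumed scale: 1000 == one fully busy CPU

struct CoreEntry {
	int32 fLoad;		// sum of the loads of all logical CPUs on the core
	int32 fCPUCount;	// number of logical CPUs (SMT threads) on the core
};

// Mirrors the helper this commit adds: core load normalized to the
// 0..kMaxLoad range, comparable across cores of any SMT width.
static inline int32
get_core_load(CoreEntry* core)
{
	return core->fLoad / core->fCPUCount;
}

int
main()
{
	CoreEntry smt = { 800, 2 };		// two SMT threads, each ~40% busy
	CoreEntry plain = { 800, 1 };	// one CPU, ~80% busy

	// Equal raw sums, but the SMT core is only half as loaded.
	printf("smt: raw %d, normalized %d\n", (int)smt.fLoad,
		(int)get_core_load(&smt));
	printf("plain: raw %d, normalized %d\n", (int)plain.fLoad,
		(int)get_core_load(&plain));
	return 0;
}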
@@ -92,19 +92,23 @@ should_rebalance(Thread* thread)
 
 	CoreEntry* coreEntry = &gCoreEntries[schedulerThreadData->previous_core];
 
+	int32 coreLoad = get_core_load(coreEntry);
+
 	// If the thread produces more than 50% of the load, leave it here. In
 	// such situation it is better to move other threads away.
-	if (schedulerThreadData->load >= coreEntry->fLoad / 2)
+	if (schedulerThreadData->load >= coreLoad / 2)
 		return false;
 
 	// If there is high load on this core but this thread does not contribute
 	// significantly consider giving it to someone less busy.
-	if (coreEntry->fLoad > kHighLoad) {
+	if (coreLoad > kHighLoad) {
 		ReadSpinLocker coreLocker(gCoreHeapsLock);
 
 		CoreEntry* other = gCoreLoadHeap->PeekMinimum();
-		if (other != NULL && coreEntry->fLoad - other->fLoad >= kLoadDifference)
+		if (other != NULL && coreLoad - get_core_load(other)
+				>= kLoadDifference) {
 			return true;
+		}
 	}
 
 	// No cpu bound threads - the situation is quite good. Make sure it
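Worked numbers for the 50% rule in the hunk above, with hypothetical values (a 2-way SMT core, thread load 450, both logical CPUs together at fLoad == 1800): the old comparison against the raw sum judged the thread insignificant, while against the normalized load it correctly turns out to produce half of the core's effective load and stays put. A compilable check of both comparisons:

#include <cassert>

int
main()
{
	const int threadLoad = 450;		// hypothetical thread load
	const int coreFLoad = 1800;		// raw sum over a 2-way SMT core
	const int cpuCount = 2;

	// Old check: half of the raw sum is inflated by the SMT width.
	assert(!(threadLoad >= coreFLoad / 2));		// 450 < 900

	// New check: half of the normalized core load.
	const int coreLoad = coreFLoad / cpuCount;
	assert(threadLoad >= coreLoad / 2);			// 450 >= 450
	return 0;
}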
@@ -115,7 +119,7 @@ should_rebalance(Thread* thread)
 	if (other == NULL)
 		other = gCoreHighLoadHeap->PeekMinimum();
 	ASSERT(other != NULL);
-	return coreEntry->fLoad - other->fLoad >= kLoadDifference * 2;
+	return coreLoad - get_core_load(other) >= kLoadDifference * 2;
 }
 
 
@@ -161,8 +165,10 @@ rebalance_irqs(bool idle)
 	if (other->fCoreID == thisCore)
 		return;
 
-	if (other->fLoad + kLoadDifference >= gCoreEntries[thisCore].fLoad)
+	if (get_core_load(other) + kLoadDifference
+			>= get_core_load(&gCoreEntries[thisCore])) {
 		return;
+	}
 
 	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
 }
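The same normalization matters when rebalance_irqs() compares two different cores, which may have different SMT widths. Hypothetical numbers where the decision actually flips (kLoadDifference == 200 is an assumed value): a 2-way SMT target core with fLoad == 900 looks busier than a non-SMT core at 800 when raw sums are compared, yet per CPU it is only at 450 and should receive the IRQ:

#include <cstdio>

struct Core {
	int fLoad;		// raw sum of per-CPU loads
	int fCPUCount;	// logical CPUs on the core
};

static inline int
get_core_load(Core* core)
{
	return core->fLoad / core->fCPUCount;
}

int
main()
{
	const int kLoadDifference = 200;	// assumed threshold

	Core other = { 900, 2 };	// candidate target: 2-way SMT, ~45% per CPU
	Core thisCore = { 800, 1 };	// current core: non-SMT, ~80% busy

	bool rawKeeps = other.fLoad + kLoadDifference
		>= thisCore.fLoad;					// 1100 >= 800: IRQ stays
	bool normalizedKeeps = get_core_load(&other) + kLoadDifference
		>= get_core_load(&thisCore);		// 650 < 800: IRQ moves

	// Raw sums would keep the IRQ on the busier core; normalized
	// loads migrate it to the SMT core that has capacity to spare.
	printf("raw keeps IRQ: %d, normalized keeps IRQ: %d\n",
		rawKeeps, normalizedKeeps);
	return 0;
}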
@@ -46,7 +46,7 @@ try_small_task_packing(Thread* thread)
 	int32 core = sSmallTaskCore;
 	return (core == -1 && gCoreLoadHeap->PeekMaximum() != NULL)
 		|| (core != -1
-			&& gCoreEntries[core].fLoad + thread->scheduler_data->load
+			&& get_core_load(&gCoreEntries[core]) + thread->scheduler_data->load
 				< kHighLoad);
 }
 
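For the packing condition above, the raw sum previously rejected threads that would actually fit on an SMT small-task core. With assumed values (kHighLoad == 700, a 2-way SMT core at fLoad == 600, candidate thread load 300):

#include <cassert>

int
main()
{
	const int kHighLoad = 700;	// assumed threshold value

	const int coreFLoad = 600;	// raw sum over a 2-way SMT core
	const int cpuCount = 2;
	const int threadLoad = 300;

	// Old check: raw sum plus thread load, inflated on SMT cores.
	assert(!(coreFLoad + threadLoad < kHighLoad));			// 900: refused

	// New check: normalized load plus thread load.
	assert(coreFLoad / cpuCount + threadLoad < kHighLoad);	// 600: packed
	return 0;
}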
@@ -139,21 +139,22 @@ should_rebalance(Thread* thread)
 	int32 core = schedulerThreadData->previous_core;
 	CoreEntry* coreEntry = &gCoreEntries[core];
 
-	if (coreEntry->fLoad > kHighLoad) {
+	int32 coreLoad = get_core_load(coreEntry);
+	if (coreLoad > kHighLoad) {
 		ReadSpinLocker coreLocker(gCoreHeapsLock);
 		if (sSmallTaskCore == core) {
-			if (coreEntry->fLoad - schedulerThreadData->load < kHighLoad)
+			if (coreLoad - schedulerThreadData->load < kHighLoad)
 				return true;
 
 			choose_small_task_core();
-			return coreEntry->fLoad > kVeryHighLoad;
+			return coreLoad > kVeryHighLoad;
 		}
 
 		CoreEntry* other = gCoreLoadHeap->PeekMaximum();
 		if (other == NULL)
 			other = gCoreHighLoadHeap->PeekMinimum();
 		ASSERT(other != NULL);
-		return coreEntry->fLoad - other->fLoad >= kLoadDifference / 2;
+		return coreLoad - get_core_load(other) >= kLoadDifference / 2;
 	}
 
 	int32 smallTaskCore = choose_small_task_core();
@@ -228,8 +229,10 @@ rebalance_irqs(bool idle)
 	if (other->fCoreID == thisCore)
 		return;
 
-	if (other->fLoad + kLoadDifference >= gCoreEntries[thisCore].fLoad)
+	if (get_core_load(other) + kLoadDifference
+			>= get_core_load(&gCoreEntries[thisCore])) {
 		return;
+	}
 
 	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
 }
@@ -116,6 +116,7 @@ CPUEntry::CPUEntry()
 
 CoreEntry::CoreEntry()
 	:
+	fCPUCount(0),
 	fActiveTime(0),
 	fLoad(0)
 {
@@ -280,13 +281,11 @@ dump_heap(CPUHeap* heap)
 static void
 dump_core_load_heap(CoreLoadHeap* heap)
 {
-	int32 cpuPerCore = smp_get_num_cpus() / gRunQueueCount;
-
 	CoreEntry* entry = heap->PeekMinimum();
 	while (entry) {
 		int32 key = CoreLoadHeap::GetKey(entry);
 		kprintf("%4" B_PRId32 " %3" B_PRId32 "%%\n", entry->fCoreID,
-			entry->fLoad / cpuPerCore / 10);
+			get_core_load(entry) / 10);
 
 		heap->RemoveMinimum();
 		sDebugCoreHeap->Insert(entry, key);
@@ -402,8 +401,7 @@ update_load_heaps(int32 core)
 
 	WriteSpinLocker coreLocker(gCoreHeapsLock);
 
-	int32 cpuPerCore = smp_get_num_cpus() / gRunQueueCount;
-	int32 newKey = entry->fLoad / cpuPerCore;
+	int32 newKey = get_core_load(entry);
 	int32 oldKey = CoreLoadHeap::GetKey(entry);
 
 	ASSERT(oldKey >= 0 && oldKey <= kMaxLoad);
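A side effect of switching the heap key to get_core_load(entry): the existing ASSERT below the change keeps holding on SMT cores. The raw fLoad of a fully busy 2-way SMT core can reach twice kMaxLoad, but the normalized key stays within 0..kMaxLoad (kMaxLoad == 1000 assumed for this check):

#include <cassert>

int
main()
{
	const int kMaxLoad = 1000;	// assumed per-CPU load scale

	const int fLoad = 2000;		// fully busy 2-way SMT core, raw sum
	const int fCPUCount = 2;

	assert(fLoad > kMaxLoad);	// raw sum would violate the heap-key range

	const int newKey = fLoad / fCPUCount;
	assert(newKey >= 0 && newKey <= kMaxLoad);	// normalized key stays valid
	return 0;
}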
@@ -1062,6 +1060,7 @@ update_cpu_performance(Thread* thread, int32 thisCore)
 {
	int32 load = max_c(thread->scheduler_data->load,
 		gCoreEntries[thisCore].fLoad);
+	load /= gCoreEntries[thisCore].fCPUCount;
 	load = min_c(max_c(load, 0), kMaxLoad);
 
 	if (load < kTargetLoad) {
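In update_cpu_performance() the load value drives frequency scaling and is clamped to 0..kMaxLoad, so before this hunk a multi-threaded core whose raw sum exceeded kMaxLoad always looked saturated. A sketch of the clamping with assumed values (kMaxLoad == 1000, max_c/min_c replaced by their standard-library equivalents):

#include <algorithm>
#include <cstdio>

int
main()
{
	const int kMaxLoad = 1000;	// assumed per-CPU load scale

	int threadLoad = 300;		// current thread's load
	int coreFLoad = 1200;		// raw sum over a 2-way SMT core
	int cpuCount = 2;

	int load = std::max(threadLoad, coreFLoad);
	load /= cpuCount;			// the division this hunk adds
	load = std::min(std::max(load, 0), kMaxLoad);

	// 600 rather than a clamped 1000: the core is not actually saturated,
	// so the CPU need not be driven at full performance.
	printf("load used for the performance decision: %d\n", load);
	return 0;
}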
@@ -1433,6 +1432,7 @@ _scheduler_init()
 
 	for (int32 i = 0; i < coreCount; i++) {
 		gCoreEntries[i].fCoreID = i;
+		gCoreEntries[i].fCPUCount = cpuCount / coreCount;
 
 		result = gCoreLoadHeap->Insert(&gCoreEntries[i], 0);
 		if (result != B_OK)
@@ -79,6 +79,8 @@ struct CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, int32>,
 
 	int32 fCoreID;
 
+	int32 fCPUCount;
+
 	spinlock fCPULock;
 	spinlock fQueueLock;
 
@@ -174,6 +176,13 @@ struct scheduler_thread_data {
 };
 
 
+static inline int32
+get_core_load(struct Scheduler::CoreEntry* core)
+{
+	return core->fLoad / core->fCPUCount;
+}
+
+
 /*! Switches the currently running thread.
 	This is a service function for scheduler implementations.
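A final caveat: CoreEntry's constructor initializes fCPUCount to 0 and _scheduler_init() only assigns the real count later, so get_core_load() must not run before initialization completes or the division faults. A standalone sketch of a defensive variant (hypothetical, not part of this commit), using assert() in place of the kernel's ASSERT():

#include <cassert>
#include <cstdint>

typedef int32_t int32;

struct CoreEntry {
	int32 fLoad;
	int32 fCPUCount;	// 0 from the constructor until _scheduler_init() runs
};

static inline int32
get_core_load(CoreEntry* core)
{
	// Hypothetical guard: calling this helper before fCPUCount is
	// assigned would otherwise divide by zero.
	assert(core->fCPUCount > 0);
	return core->fLoad / core->fCPUCount;
}

int
main()
{
	CoreEntry core = { 1500, 2 };
	return get_core_load(&core) == 750 ? 0 : 1;
}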