diff --git a/src/system/kernel/scheduler/low_latency.cpp b/src/system/kernel/scheduler/low_latency.cpp
index a3eb28be4a..3e4365768b 100644
--- a/src/system/kernel/scheduler/low_latency.cpp
+++ b/src/system/kernel/scheduler/low_latency.cpp
@@ -30,9 +30,6 @@ has_cache_expired(Thread* thread)
 {
 	ASSERT(!gSingleCore);

-	if (thread_is_idle_thread(thread))
-		return false;
-
 	scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
 	ASSERT(schedulerThreadData->previous_core >= 0);

diff --git a/src/system/kernel/scheduler/power_saving.cpp b/src/system/kernel/scheduler/power_saving.cpp
index 75d754f178..aa960e49cd 100644
--- a/src/system/kernel/scheduler/power_saving.cpp
+++ b/src/system/kernel/scheduler/power_saving.cpp
@@ -36,9 +36,6 @@ has_cache_expired(Thread* thread)
 {
 	ASSERT(!gSingleCore);

-	if (thread_is_idle_thread(thread))
-		return false;
-
 	scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
 	ASSERT(schedulerThreadData->previous_core >= 0);

@@ -46,19 +43,6 @@ has_cache_expired(Thread* thread)
 }


-static bool
-try_small_task_packing(Thread* thread)
-{
-	ReadSpinLocker locker(gCoreHeapsLock);
-
-	int32 core = sSmallTaskCore;
-	return (core == -1 && gCoreLoadHeap->PeekMaximum() != NULL)
-		|| (core != -1
-			&& get_core_load(&gCoreEntries[core]) + thread->scheduler_data->load
-				< kHighLoad);
-}
-
-
 static int32
 choose_small_task_core(void)
 {
@@ -107,24 +91,27 @@ choose_core(Thread* thread)
 {
 	CoreEntry* entry;

-	if (try_small_task_packing(thread)) {
-		// try to pack all threads on one core
-		entry = &gCoreEntries[choose_small_task_core()];
+	int32 core = -1;
+	// try to pack all threads on one core
+	core = choose_small_task_core();
+
+	if (core != -1
+		&& get_core_load(&gCoreEntries[core]) + thread->scheduler_data->load
+			< kHighLoad) {
+		entry = &gCoreEntries[core];
 	} else {
 		ReadSpinLocker coreLocker(gCoreHeapsLock);

-		if (gCoreLoadHeap->PeekMinimum() != NULL) {
-			// run immediately on already woken core
-			entry = gCoreLoadHeap->PeekMinimum();
-		} else {
+		// run immediately on already woken core
+		entry = gCoreLoadHeap->PeekMinimum();
+		if (entry == NULL) {
 			coreLocker.Unlock();

 			entry = choose_idle_core();

-			coreLocker.Lock();
-			if (entry == NULL)
-				entry = gCoreLoadHeap->PeekMinimum();
-			if (entry == NULL)
+			if (entry == NULL) {
+				coreLocker.Lock();
 				entry = gCoreHighLoadHeap->PeekMinimum();
+			}
 		}
 	}

@@ -138,9 +125,6 @@ should_rebalance(Thread* thread)
 {
 	ASSERT(!gSingleCore);

-	if (thread_is_idle_thread(thread))
-		return false;
-
 	scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
 	ASSERT(schedulerThreadData->previous_core >= 0);

@@ -151,13 +135,16 @@ should_rebalance(Thread* thread)
 	if (coreLoad > kHighLoad) {
 		ReadSpinLocker coreLocker(gCoreHeapsLock);
 		if (sSmallTaskCore == core) {
-			if (coreLoad - schedulerThreadData->load < kHighLoad)
-				return true;
-
+			sSmallTaskCore = -1;
 			choose_small_task_core();
+			if (schedulerThreadData->load > coreLoad / 3)
+				return false;
 			return coreLoad > kVeryHighLoad;
 		}

+		if (schedulerThreadData->load >= coreLoad / 2)
+			return false;
+
 		CoreEntry* other = gCoreLoadHeap->PeekMaximum();
 		if (other == NULL)
 			other = gCoreHighLoadHeap->PeekMinimum();
@@ -165,10 +152,15 @@ should_rebalance(Thread* thread)
 		return coreLoad - get_core_load(other) >= kLoadDifference / 2;
 	}

+	if (coreLoad >= kMediumLoad)
+		return false;
+
 	int32 smallTaskCore = choose_small_task_core();
 	if (smallTaskCore == -1)
 		return false;
-	return smallTaskCore != core;
+	return smallTaskCore != core
+		&& get_core_load(&gCoreEntries[smallTaskCore])
+			+ thread->scheduler_data->load < kHighLoad;
 }

diff --git a/src/system/kernel/scheduler/scheduler.cpp b/src/system/kernel/scheduler/scheduler.cpp
index b1bcef7d15..8fb6cf970f 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -120,7 +120,8 @@ CoreEntry::CoreEntry()
 	fCPUCount(0),
 	fThreadCount(0),
 	fActiveTime(0),
-	fLoad(0)
+	fLoad(0),
+	fHighLoad(false)
 {
 	B_INITIALIZE_SPINLOCK(&fCPULock);
 	B_INITIALIZE_SPINLOCK(&fQueueLock);
@@ -264,6 +265,7 @@ dump_cpu_heap(int argc, char** argv)
 {
 	kprintf("core load\n");
 	dump_core_load_heap(gCoreLoadHeap);
+	kprintf("\n");
 	dump_core_load_heap(gCoreHighLoadHeap);

 	for (int32 i = 0; i < gCoreCount; i++) {
@@ -376,23 +378,32 @@ update_load_heaps(int32 core)
 		return;

 	if (newKey > kHighLoad) {
-		if (oldKey <= kHighLoad) {
+		if (!entry->fHighLoad) {
 			gCoreLoadHeap->ModifyKey(entry, -1);
 			ASSERT(gCoreLoadHeap->PeekMinimum() == entry);
 			gCoreLoadHeap->RemoveMinimum();

 			gCoreHighLoadHeap->Insert(entry, newKey);
+
+			entry->fHighLoad = true;
 		} else
 			gCoreHighLoadHeap->ModifyKey(entry, newKey);
-	} else {
-		if (oldKey > kHighLoad) {
+	} else if (newKey < kMediumLoad) {
+		if (entry->fHighLoad) {
 			gCoreHighLoadHeap->ModifyKey(entry, -1);
 			ASSERT(gCoreHighLoadHeap->PeekMinimum() == entry);
 			gCoreHighLoadHeap->RemoveMinimum();

 			gCoreLoadHeap->Insert(entry, newKey);
+
+			entry->fHighLoad = false;
 		} else
 			gCoreLoadHeap->ModifyKey(entry, newKey);
+	} else {
+		if (entry->fHighLoad)
+			gCoreHighLoadHeap->ModifyKey(entry, newKey);
+		else
+			gCoreLoadHeap->ModifyKey(entry, newKey);
 	}
 }

diff --git a/src/system/kernel/scheduler/scheduler_common.h b/src/system/kernel/scheduler/scheduler_common.h
index f379ba9e36..e92b3a6763 100644
--- a/src/system/kernel/scheduler/scheduler_common.h
+++ b/src/system/kernel/scheduler/scheduler_common.h
@@ -36,6 +36,7 @@ const bigtime_t kCacheExpire = 100000;
 const int kLowLoad = kMaxLoad * 20 / 100;
 const int kTargetLoad = kMaxLoad * 55 / 100;
 const int kHighLoad = kMaxLoad * 70 / 100;
+const int kMediumLoad = (kHighLoad + kTargetLoad) / 2;
 const int kVeryHighLoad = (kMaxLoad + kHighLoad) / 2;
 const int kLoadDifference = kMaxLoad * 20 / 100;

@@ -83,6 +84,7 @@ struct CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, int32>,
 	bigtime_t	fActiveTime;

 	int32		fLoad;
+	bool		fHighLoad;
 } CACHE_LINE_ALIGN;

 typedef MinMaxHeap<CoreEntry, int32> CoreLoadHeap;
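Note on the heap changes: together, the scheduler_common.h and scheduler.cpp hunks give the core load heaps a hysteresis band. A core is moved to gCoreHighLoadHeap only when its load rises above kHighLoad, but it is moved back to gCoreLoadHeap only once its load drops below the new kMediumLoad, and CoreEntry::fHighLoad now records which heap the entry currently sits in instead of inferring that from the previous key. The following standalone sketch illustrates that two-threshold behaviour; the Core struct, update_bucket() helper, and the kMaxLoad value of 1000 are illustrative assumptions, not the kernel's actual types.

// Standalone illustration of the two-threshold (hysteresis) bucketing that
// update_load_heaps() performs with kHighLoad, kMediumLoad and fHighLoad.
// Hypothetical types and values; not kernel code.
#include <cstdio>

const int kMaxLoad = 1000;                              // assumed value
const int kTargetLoad = kMaxLoad * 55 / 100;            // 550
const int kHighLoad = kMaxLoad * 70 / 100;              // 700
const int kMediumLoad = (kHighLoad + kTargetLoad) / 2;  // 625

struct Core {
	int  load;
	bool highLoad;	// mirrors CoreEntry::fHighLoad: which heap the core is in
};

// Promote to the high-load bucket only above kHighLoad, demote only below
// kMediumLoad; in between, keep the current bucket.
void update_bucket(Core& core, int newLoad)
{
	core.load = newLoad;
	if (newLoad > kHighLoad)
		core.highLoad = true;
	else if (newLoad < kMediumLoad)
		core.highLoad = false;
	// kMediumLoad <= newLoad <= kHighLoad: no change (hysteresis band)
}

int main()
{
	Core core = { 0, false };
	const int samples[] = { 500, 690, 710, 680, 650, 600, 710 };
	for (int load : samples) {
		update_bucket(core, load);
		printf("load %4d -> %s heap\n", load,
			core.highLoad ? "high-load" : "regular");
	}
	return 0;
}

With a single threshold, a core whose load hovers around kHighLoad would be removed from one heap and reinserted into the other on nearly every load update; the band between kMediumLoad and kHighLoad keeps it where it is until the load moves decisively in one direction.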