scheduler: Improve power saving mode
* Remove the possibility to temporarily disable small task packing.
* When the small task packing target becomes overloaded, continue packing
  threads on another core, but avoid migrating the already packed ones.

The scheduler still tends to needlessly migrate threads to other cores
under heavier load, but it is now much better than before.
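For context, the packing decision the diff below introduces can be read in
isolation. This is a minimal user-space sketch, assuming a simplified scalar
stand-in for the kernel's CoreEntry/load-heap machinery; the kMaxLoad scale
and parameter names are illustrative, not the kernel's actual interfaces:

	#include <cstdint>

	constexpr int32_t kMaxLoad = 1000;                  // illustrative scale only
	constexpr int32_t kHighLoad = kMaxLoad * 70 / 100;

	// Pack a small thread onto the designated core as long as adding its
	// load keeps that core below kHighLoad. With no core designated yet
	// (-1), packing may start as soon as any non-idle core is available.
	bool try_small_task_packing(int32_t smallTaskCore, int32_t coreLoad,
		int32_t threadLoad, bool anyNonIdleCore)
	{
		if (smallTaskCore == -1)
			return anyNonIdleCore;
		return coreLoad + threadLoad < kHighLoad;
	}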
parent 3eb4224bf6
commit ecfd444935
@@ -13,7 +13,6 @@
 using namespace Scheduler;
 
 
-static bigtime_t sDisableSmallTaskPacking;
 static int32 sSmallTaskCore;
 
 
@@ -32,54 +31,47 @@ has_cache_expired(Thread* thread)
 }
 
 
-static inline bool
-is_small_task_packing_enabled(void)
-{
-	if (sDisableSmallTaskPacking == -1)
-		return false;
-	return sDisableSmallTaskPacking < system_time();
-}
-
-
-static inline void
-disable_small_task_packing(void)
-{
-	ASSERT(!gSingleCore);
-
-	ASSERT(is_small_task_packing_enabled());
-	ASSERT(sSmallTaskCore == gCPUToCore[smp_get_current_cpu()]);
-
-	sDisableSmallTaskPacking = system_time() + kThreadQuantum * 100;
-	sSmallTaskCore = -1;
-}
-
-
-static inline bool
-is_task_small(Thread* thread)
-{
-	return thread->scheduler_data->load <= 200;
-}
-
-
 static void
 switch_to_mode(void)
 {
-	sDisableSmallTaskPacking = -1;
 	sSmallTaskCore = -1;
 }
 
 
+static bool
+try_small_task_packing(Thread* thread)
+{
+	int32 core = sSmallTaskCore;
+	return (core == -1 && gCoreLoadHeap->PeekMaximum() != NULL)
+		|| (core != -1
+			&& gCoreEntries[core].fLoad + thread->scheduler_data->load
+				< kHighLoad);
+}
+
+
+static int32
+choose_small_task_core(void)
+{
+	CoreEntry* candidate = gCoreLoadHeap->PeekMaximum();
+	if (candidate == NULL)
+		return sSmallTaskCore;
+
+	int32 core = candidate->fCoreID;
+	int32 smallTaskCore = atomic_test_and_set(&sSmallTaskCore, core, -1);
+	if (smallTaskCore == -1)
+		return core;
+	return smallTaskCore;
+}
+
+
 static int32
 choose_core(Thread* thread)
 {
 	CoreEntry* entry;
 
-	if (is_small_task_packing_enabled() && is_task_small(thread)
-		&& gCoreLoadHeap->PeekMaximum() != NULL) {
+	if (try_small_task_packing(thread)) {
 		// try to pack all threads on one core
-		if (sSmallTaskCore < 0)
-			sSmallTaskCore = gCoreLoadHeap->PeekMaximum()->fCoreID;
-		entry = &gCoreEntries[sSmallTaskCore];
+		entry = &gCoreEntries[choose_small_task_core()];
 	} else if (gCoreLoadHeap->PeekMinimum() != NULL) {
 		// run immediately on already woken core
 		entry = gCoreLoadHeap->PeekMinimum();
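choose_small_task_core() elects the packing target lock-free:
atomic_test_and_set() stores the candidate only if sSmallTaskCore still
holds -1 and returns the previous value, so two CPUs racing to designate a
core always agree on one winner. A rough user-space equivalent, sketched
with std::atomic rather than the kernel's primitive:

	#include <atomic>
	#include <cstdint>

	std::atomic<int32_t> sSmallTaskCore{-1};

	// Returns the agreed-on packing core: our candidate if we won the
	// race to install it, otherwise whatever another CPU installed first.
	int32_t elect_small_task_core(int32_t candidate)
	{
		int32_t expected = -1;
		if (sSmallTaskCore.compare_exchange_strong(expected, candidate))
			return candidate;	// we installed the candidate
		return expected;		// lost the race; use the other CPU's choice
	}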
@@ -117,34 +109,28 @@ should_rebalance(Thread* thread)
 	int32 core = schedulerThreadData->previous_core;
 	CoreEntry* coreEntry = &gCoreEntries[core];
 
-	// If the thread produces more than 50% of the load, leave it here. In
-	// such situation it is better to move other threads away.
-	// Unless we are trying to pack small tasks here, in such case get rid
-	// of CPU hungry thread and continue packing.
-	if (schedulerThreadData->load >= coreEntry->fLoad / 2)
-		return is_small_task_packing_enabled() && sSmallTaskCore == core;
-
-	// All cores try to give us small tasks, check whether we have enough.
-	if (is_small_task_packing_enabled() && sSmallTaskCore == core) {
-		if (coreEntry->fLoad > kHighLoad) {
-			if (!is_task_small(thread))
-				return true;
-		} else if (coreEntry->fLoad > kVeryHighLoad)
-			disable_small_task_packing();
-	}
-
-	// Try small task packing.
-	if (is_small_task_packing_enabled() && is_task_small(thread))
-		return sSmallTaskCore != core;
-
-	// No cpu bound threads - the situation is quite good. Make sure it
-	// won't get much worse...
-	SpinLocker coreLocker(gCoreHeapsLock);
-
-	CoreEntry* other = gCoreLoadHeap->PeekMinimum();
+	if (coreEntry->fLoad > kHighLoad) {
+		if (sSmallTaskCore == core) {
+			SpinLocker coreLocker(gCoreHeapsLock);
+			CoreEntry* other = gCoreLoadHeap->PeekMaximum();
+
+			if (other == NULL)
+				sSmallTaskCore = -1;
+			else if (coreEntry->fLoad - schedulerThreadData->load < kHighLoad)
+				return true;
+			else
+				sSmallTaskCore = other->fCoreID;
+			return coreEntry->fLoad > kVeryHighLoad;
+		}
+	} else if (coreEntry->fLoad < kHighLoad) {
+		int32 newCore = choose_small_task_core();
+		return newCore != core;
+	}
+
+	CoreEntry* other = gCoreHighLoadHeap->PeekMinimum();
 	if (other == NULL)
-		other = gCoreHighLoadHeap->PeekMinimum();
-	return coreEntry->fLoad - other->fLoad >= kLoadDifference;
+		return false;
+	return coreEntry->fLoad - other->fLoad >= kLoadDifference / 2;
 }
 
 
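The new should_rebalance() path above is what implements the second bullet
of the commit message: when the packing target overflows kHighLoad, the
busiest other core (gCoreLoadHeap->PeekMaximum()) becomes the new target, so
threads already packed there stay put; only enough load to bring the old
target back under kHighLoad is moved away. A condensed sketch of that
hand-off, with simplified parameters standing in for the kernel structures
and with locking omitted:

	#include <cstdint>

	constexpr int32_t kMaxLoad = 1000;                      // illustrative
	constexpr int32_t kHighLoad = kMaxLoad * 70 / 100;      // 700
	constexpr int32_t kVeryHighLoad = (kMaxLoad + kHighLoad) / 2;  // 850

	// Decide whether a thread should leave an overloaded packing core.
	// busiestOtherCore is the most loaded other core, or -1 if none exists.
	bool handle_overloaded_packing_core(int32_t coreLoad, int32_t threadLoad,
		int32_t busiestOtherCore, int32_t& smallTaskCore)
	{
		if (busiestOtherCore == -1)
			smallTaskCore = -1;              // nowhere to continue packing
		else if (coreLoad - threadLoad < kHighLoad)
			return true;                     // evicting this thread is enough
		else
			smallTaskCore = busiestOtherCore;  // keep packing elsewhere
		return coreLoad > kVeryHighLoad;     // migrate only if severely loaded
	}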
@@ -175,12 +161,12 @@ pack_irqs(void)
 static void
 rebalance_irqs(bool idle)
 {
-	if (idle && !is_small_task_packing_enabled() && sSmallTaskCore != -1) {
+	if (idle && sSmallTaskCore != -1) {
 		pack_irqs();
 		return;
 	}
 
-	if (idle)
+	if (idle || sSmallTaskCore != -1)
 		return;
 
 	cpu_ent* cpu = get_cpu_struct();
@@ -1508,7 +1508,7 @@ _scheduler_init()
 	return result;
 }
 
-#if 1
+#if 0
 	scheduler_set_operation_mode(SCHEDULER_MODE_LOW_LATENCY);
 #else
 	scheduler_set_operation_mode(SCHEDULER_MODE_POWER_SAVING);
@@ -40,11 +40,12 @@ const bigtime_t kMinimalWaitTime = kThreadQuantum / 4;
 
 const bigtime_t kCacheExpire = 100000;
 
+const int kLowLoad = kMaxLoad * 20 / 100;
 const int kTargetLoad = kMaxLoad * 55 / 100;
 const int kHighLoad = kMaxLoad * 70 / 100;
 const int kVeryHighLoad = (kMaxLoad + kHighLoad) / 2;
 
 const int kLoadDifference = kMaxLoad * 20 / 100;
-const int kLowLoad = kLoadDifference / 2;
 
 
 extern bool gSingleCore;
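To make the thresholds concrete: assuming for illustration kMaxLoad = 1000
(the actual value is defined elsewhere in the scheduler headers), the
constants above work out as below; note that should_rebalance() now fires at
half the load difference:

	constexpr int kMaxLoad = 1000;	// assumed for illustration only

	constexpr int kLowLoad = kMaxLoad * 20 / 100;		// 200 (was 100)
	constexpr int kTargetLoad = kMaxLoad * 55 / 100;	// 550
	constexpr int kHighLoad = kMaxLoad * 70 / 100;		// 700
	constexpr int kVeryHighLoad = (kMaxLoad + kHighLoad) / 2;	// 850
	constexpr int kLoadDifference = kMaxLoad * 20 / 100;	// 200

	static_assert(kLoadDifference / 2 == 100, "rebalance threshold");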