scheduler: Move simple inline functions to scheduler_common.h

Pawel Dziepak 2013-11-27 03:50:43 +01:00
parent 28da7e985f
commit 7d7dc357bf
2 changed files with 48 additions and 50 deletions

scheduler.cpp

@@ -61,15 +61,15 @@ CoreEntry* gCoreEntries;
 CoreLoadHeap* gCoreLoadHeap;
 CoreLoadHeap* gCoreHighLoadHeap;
 rw_spinlock gCoreHeapsLock = B_RW_SPINLOCK_INITIALIZER;
+int32 gCoreCount;
 
 PackageEntry* gPackageEntries;
 IdlePackageList* gIdlePackageList;
-spinlock gIdlePackageLock;
-int32 gPackageCount = B_SPINLOCK_INITIALIZER;
+spinlock gIdlePackageLock = B_SPINLOCK_INITIALIZER;
+int32 gPackageCount;
 
 ThreadRunQueue* gRunQueues;
 ThreadRunQueue* gPinnedRunQueues;
-int32 gRunQueueCount;
 
 int32* gCPUToCore;
 int32* gCPUToPackage;
@@ -163,46 +163,6 @@ scheduler_thread_data::Init()
 }
-
-
-static inline int
-get_minimal_priority(Thread* thread)
-{
-	return max_c(min_c(thread->priority, 25) / 5, 1);
-}
-
-
-static inline int32
-get_thread_penalty(Thread* thread)
-{
-	int32 penalty = thread->scheduler_data->priority_penalty;
-
-	const int kMinimalPriority = get_minimal_priority(thread);
-	if (kMinimalPriority > 0) {
-		penalty
-			+= thread->scheduler_data->additional_penalty % kMinimalPriority;
-	}
-
-	return penalty;
-}
-
-
-static inline int32
-get_effective_priority(Thread* thread)
-{
-	if (thread->priority == B_IDLE_PRIORITY)
-		return thread->priority;
-	if (thread->priority >= B_FIRST_REAL_TIME_PRIORITY)
-		return thread->priority;
-
-	int32 effectivePriority = thread->priority;
-	effectivePriority -= get_thread_penalty(thread);
-
-	ASSERT(effectivePriority < B_FIRST_REAL_TIME_PRIORITY);
-	ASSERT(effectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
-
-	return effectivePriority;
-}
 
 
 static void
 dump_queue(ThreadRunQueue::ConstIterator& iterator)
 {
@@ -224,7 +184,7 @@ static int
 dump_run_queue(int argc, char **argv)
 {
 	int32 cpuCount = smp_get_num_cpus();
-	int32 coreCount = gRunQueueCount;
+	int32 coreCount = gCoreCount;
 	ThreadRunQueue::ConstIterator iterator;
 
 	for (int32 i = 0; i < coreCount; i++) {
@ -305,7 +265,7 @@ dump_cpu_heap(int argc, char** argv)
dump_core_load_heap(gCoreLoadHeap);
dump_core_load_heap(gCoreHighLoadHeap);
for (int32 i = 0; i < gRunQueueCount; i++) {
for (int32 i = 0; i < gCoreCount; i++) {
if (gCoreEntries[i].fCPUCount < 2)
continue;
@@ -559,7 +519,7 @@ choose_core_and_cpu(Thread* thread, int32& targetCore, int32& targetCPU)
 		targetCPU = choose_cpu(targetCore);
 	}
 
-	ASSERT(targetCore >= 0 && targetCore < gRunQueueCount);
+	ASSERT(targetCore >= 0 && targetCore < gCoreCount);
 	ASSERT(targetCPU >= 0 && targetCPU < smp_get_num_cpus());
 }
@@ -1598,7 +1558,7 @@ init()
 		packageCount);
 	if (result != B_OK)
 		return result;
 
-	gRunQueueCount = coreCount;
+	gCoreCount = coreCount;
 	gSingleCore = coreCount == 1;
 	gPackageCount = packageCount;

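The rename above makes the bookkeeping easier to follow: gCoreCount bounds every per-core array, and gCPUToCore translates a logical CPU ID into an index into those arrays. Below is a minimal sketch of that lookup pattern; the globals are simplified stand-ins with assumed shapes, not the actual kernel definitions.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the kernel globals (assumed shapes).
static int32_t gCoreCount;         // number of physical cores
static const int32_t* gCPUToCore;  // logical CPU ID -> core index

// Per-core state (gCoreEntries, gRunQueues, ...) is sized by gCoreCount,
// so a CPU reaches its core-local data in two steps.
static int32_t core_of_cpu(int32_t cpu)
{
	int32_t core = gCPUToCore[cpu];
	// The same invariant choose_core_and_cpu() asserts in the diff.
	assert(core >= 0 && core < gCoreCount);
	return core;
}

int main()
{
	// Two cores, four logical CPUs (e.g. SMT pairs 0/1 and 2/3).
	static const int32_t kCPUToCore[] = { 0, 0, 1, 1 };
	gCPUToCore = kCPUToCore;
	gCoreCount = 2;
	printf("CPU 3 belongs to core %d\n", (int)core_of_cpu(3));
	return 0;
}
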
scheduler_common.h

@@ -33,8 +33,6 @@ const bigtime_t kThreadQuantum = 1000;
 const bigtime_t kMinThreadQuantum = 3000;
 const bigtime_t kMaxThreadQuantum = 10000;
 
-const bigtime_t kMinimalWaitTime = kThreadQuantum / 4;
-
 const bigtime_t kCacheExpire = 100000;
 
 const int kLowLoad = kMaxLoad * 20 / 100;
@@ -94,6 +92,7 @@ extern CoreEntry* gCoreEntries;
 extern CoreLoadHeap* gCoreLoadHeap;
 extern CoreLoadHeap* gCoreHighLoadHeap;
 extern rw_spinlock gCoreHeapsLock;
+extern int32 gCoreCount;
 
 // gPackageEntries are used to decide which core should be woken up from the
 // idle state. When aiming for performance we should use as many packages as
@@ -132,7 +131,6 @@ typedef RunQueue<Thread, THREAD_MAX_SET_PRIORITY> CACHE_LINE_ALIGN
 extern ThreadRunQueue* gRunQueues;
 extern ThreadRunQueue* gPinnedRunQueues;
-extern int32 gRunQueueCount;
 
 // Since CPU IDs used internally by the kernel bear no relation to the actual
 // CPU topology the following arrays are used to efficiently get the core
@@ -178,4 +176,44 @@ get_core_load(struct Scheduler::CoreEntry* core)
 }
 
 
+static inline int32
+get_minimal_priority(Thread* thread)
+{
+	return max_c(min_c(thread->priority, 25) / 5, 1);
+}
+
+
+static inline int32
+get_thread_penalty(Thread* thread)
+{
+	int32 penalty = thread->scheduler_data->priority_penalty;
+
+	const int kMinimalPriority = get_minimal_priority(thread);
+	if (kMinimalPriority > 0) {
+		penalty
+			+= thread->scheduler_data->additional_penalty % kMinimalPriority;
+	}
+
+	return penalty;
+}
+
+
+static inline int32
+get_effective_priority(Thread* thread)
+{
+	if (thread->priority == B_IDLE_PRIORITY)
+		return thread->priority;
+	if (thread->priority >= B_FIRST_REAL_TIME_PRIORITY)
+		return thread->priority;
+
+	int32 effectivePriority = thread->priority;
+	effectivePriority -= get_thread_penalty(thread);
+
+	ASSERT(effectivePriority < B_FIRST_REAL_TIME_PRIORITY);
+	ASSERT(effectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
+
+	return effectivePriority;
+}
+
+
 #endif // KERNEL_SCHEDULER_COMMON_H
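The moved helpers are small enough to trace by hand. What follows is a standalone sketch, not the kernel code: Thread is flattened (the real fields live behind thread->scheduler_data), max_c/min_c are replaced by std::max/std::min, and the two priority constants carry values assumed from Haiku's OS.h.

#include <algorithm>
#include <cstdint>
#include <cstdio>

const int32_t B_IDLE_PRIORITY = 0;               // assumed value
const int32_t B_FIRST_REAL_TIME_PRIORITY = 100;  // assumed value

struct Thread {
	int32_t priority;            // static priority, set by the user
	int32_t priority_penalty;    // accumulated CPU-hog penalty
	int32_t additional_penalty;  // finer-grained penalty component
};

int32_t get_minimal_priority(const Thread& t)
{
	// Clamped to >= 1, so the guard in the kernel's get_thread_penalty()
	// always holds.
	return std::max(std::min(t.priority, int32_t(25)) / 5, int32_t(1));
}

int32_t get_thread_penalty(const Thread& t)
{
	// additional_penalty contributes only its remainder modulo the
	// minimal priority, bounding how far it can push a thread down.
	return t.priority_penalty
		+ t.additional_penalty % get_minimal_priority(t);
}

int32_t get_effective_priority(const Thread& t)
{
	// Idle and real-time threads are never penalized.
	if (t.priority == B_IDLE_PRIORITY
		|| t.priority >= B_FIRST_REAL_TIME_PRIORITY)
		return t.priority;
	return t.priority - get_thread_penalty(t);
}

int main()
{
	// priority 10: minimal = max(min(10, 25) / 5, 1) = 2,
	// penalty = 2 + (7 % 2) = 3, effective = 10 - 3 = 7.
	Thread t = { 10, 2, 7 };
	printf("effective priority: %d\n", (int)get_effective_priority(t));
	return 0;
}

Since the helpers are static inline, moving them into scheduler_common.h lets every translation unit that includes the header inline the computation instead of calling back into the file they came from.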