scheduler: Move simple inline functions to scheduler_common.h
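
Moves get_minimal_priority(), get_thread_penalty() and get_effective_priority() from scheduler.cpp into scheduler_common.h so they can be used by the rest of the scheduler; get_minimal_priority() now returns int32 instead of int. While at it, gRunQueueCount is renamed to gCoreCount, the B_SPINLOCK_INITIALIZER that was mistakenly applied to gPackageCount is moved to gIdlePackageLock, and the unused kMinimalWaitTime constant is removed.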
parent 28da7e985f
commit 7d7dc357bf
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -61,15 +61,15 @@ CoreEntry* gCoreEntries;
 CoreLoadHeap* gCoreLoadHeap;
 CoreLoadHeap* gCoreHighLoadHeap;
 rw_spinlock gCoreHeapsLock = B_RW_SPINLOCK_INITIALIZER;
+int32 gCoreCount;
 
 PackageEntry* gPackageEntries;
 IdlePackageList* gIdlePackageList;
-spinlock gIdlePackageLock;
-int32 gPackageCount = B_SPINLOCK_INITIALIZER;
+spinlock gIdlePackageLock = B_SPINLOCK_INITIALIZER;
+int32 gPackageCount;
 
 ThreadRunQueue* gRunQueues;
 ThreadRunQueue* gPinnedRunQueues;
-int32 gRunQueueCount;
 
 int32* gCPUToCore;
 int32* gCPUToPackage;
@@ -163,46 +163,6 @@ scheduler_thread_data::Init()
 }
 
 
-static inline int
-get_minimal_priority(Thread* thread)
-{
-	return max_c(min_c(thread->priority, 25) / 5, 1);
-}
-
-
-static inline int32
-get_thread_penalty(Thread* thread)
-{
-	int32 penalty = thread->scheduler_data->priority_penalty;
-
-	const int kMinimalPriority = get_minimal_priority(thread);
-	if (kMinimalPriority > 0) {
-		penalty
-			+= thread->scheduler_data->additional_penalty % kMinimalPriority;
-	}
-
-	return penalty;
-}
-
-
-static inline int32
-get_effective_priority(Thread* thread)
-{
-	if (thread->priority == B_IDLE_PRIORITY)
-		return thread->priority;
-	if (thread->priority >= B_FIRST_REAL_TIME_PRIORITY)
-		return thread->priority;
-
-	int32 effectivePriority = thread->priority;
-	effectivePriority -= get_thread_penalty(thread);
-
-	ASSERT(effectivePriority < B_FIRST_REAL_TIME_PRIORITY);
-	ASSERT(effectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
-
-	return effectivePriority;
-}
-
-
 static void
 dump_queue(ThreadRunQueue::ConstIterator& iterator)
 {
@@ -224,7 +184,7 @@ static int
 dump_run_queue(int argc, char **argv)
 {
 	int32 cpuCount = smp_get_num_cpus();
-	int32 coreCount = gRunQueueCount;
+	int32 coreCount = gCoreCount;
 
 	ThreadRunQueue::ConstIterator iterator;
 	for (int32 i = 0; i < coreCount; i++) {
@@ -305,7 +265,7 @@ dump_cpu_heap(int argc, char** argv)
 	dump_core_load_heap(gCoreLoadHeap);
 	dump_core_load_heap(gCoreHighLoadHeap);
 
-	for (int32 i = 0; i < gRunQueueCount; i++) {
+	for (int32 i = 0; i < gCoreCount; i++) {
 		if (gCoreEntries[i].fCPUCount < 2)
 			continue;
 
@@ -559,7 +519,7 @@ choose_core_and_cpu(Thread* thread, int32& targetCore, int32& targetCPU)
 		targetCPU = choose_cpu(targetCore);
 	}
 
-	ASSERT(targetCore >= 0 && targetCore < gRunQueueCount);
+	ASSERT(targetCore >= 0 && targetCore < gCoreCount);
 	ASSERT(targetCPU >= 0 && targetCPU < smp_get_num_cpus());
 }
 
@@ -1598,7 +1558,7 @@ init()
 		packageCount);
 	if (result != B_OK)
 		return result;
-	gRunQueueCount = coreCount;
+	gCoreCount = coreCount;
 	gSingleCore = coreCount == 1;
 	gPackageCount = packageCount;
 
--- a/src/system/kernel/scheduler/scheduler_common.h
+++ b/src/system/kernel/scheduler/scheduler_common.h
@@ -33,8 +33,6 @@ const bigtime_t kThreadQuantum = 1000;
 const bigtime_t kMinThreadQuantum = 3000;
 const bigtime_t kMaxThreadQuantum = 10000;
 
-const bigtime_t kMinimalWaitTime = kThreadQuantum / 4;
-
 const bigtime_t kCacheExpire = 100000;
 
 const int kLowLoad = kMaxLoad * 20 / 100;
@@ -94,6 +92,7 @@ extern CoreEntry* gCoreEntries;
 extern CoreLoadHeap* gCoreLoadHeap;
 extern CoreLoadHeap* gCoreHighLoadHeap;
 extern rw_spinlock gCoreHeapsLock;
+extern int32 gCoreCount;
 
 // gPackageEntries are used to decide which core should be woken up from the
 // idle state. When aiming for performance we should use as many packages as
@@ -132,7 +131,6 @@ typedef RunQueue<Thread, THREAD_MAX_SET_PRIORITY> CACHE_LINE_ALIGN
 
 extern ThreadRunQueue* gRunQueues;
 extern ThreadRunQueue* gPinnedRunQueues;
-extern int32 gRunQueueCount;
 
 // Since CPU IDs used internally by the kernel bear no relation to the actual
 // CPU topology the following arrays are used to efficiently get the core
@@ -178,4 +176,44 @@ get_core_load(struct Scheduler::CoreEntry* core)
 }
 
 
+static inline int32
+get_minimal_priority(Thread* thread)
+{
+	return max_c(min_c(thread->priority, 25) / 5, 1);
+}
+
+
+static inline int32
+get_thread_penalty(Thread* thread)
+{
+	int32 penalty = thread->scheduler_data->priority_penalty;
+
+	const int kMinimalPriority = get_minimal_priority(thread);
+	if (kMinimalPriority > 0) {
+		penalty
+			+= thread->scheduler_data->additional_penalty % kMinimalPriority;
+	}
+
+	return penalty;
+}
+
+
+static inline int32
+get_effective_priority(Thread* thread)
+{
+	if (thread->priority == B_IDLE_PRIORITY)
+		return thread->priority;
+	if (thread->priority >= B_FIRST_REAL_TIME_PRIORITY)
+		return thread->priority;
+
+	int32 effectivePriority = thread->priority;
+	effectivePriority -= get_thread_penalty(thread);
+
+	ASSERT(effectivePriority < B_FIRST_REAL_TIME_PRIORITY);
+	ASSERT(effectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
+
+	return effectivePriority;
+}
+
+
 #endif // KERNEL_SCHEDULER_COMMON_H
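
For context, the arithmetic performed by the moved helpers can be exercised in isolation. The sketch below is not part of the commit: Thread is reduced to a plain struct, max_c()/min_c() are replaced by std::max()/std::min(), and the priority constants are assumed values mirroring Haiku's OS.h (B_IDLE_PRIORITY 0, B_FIRST_REAL_TIME_PRIORITY 100).

	// Standalone sketch of the penalty math in scheduler_common.h; the Thread
	// struct and the constants below are simplified stand-ins, not the kernel's.
	#include <algorithm>
	#include <cstdint>
	#include <cstdio>

	typedef int32_t int32;

	static const int32 B_IDLE_PRIORITY = 0;               // assumed value
	static const int32 B_FIRST_REAL_TIME_PRIORITY = 100;  // assumed value

	struct Thread {
		int32 priority;             // static priority chosen by the user
		int32 priority_penalty;     // kept in scheduler_data in the kernel
		int32 additional_penalty;   // likewise
	};

	// Lowest priority the penalty may drive a thread down to:
	// priority / 5, clamped to the range [1, 5].
	static inline int32
	get_minimal_priority(const Thread& thread)
	{
		return std::max(std::min(thread.priority, int32(25)) / 5, int32(1));
	}

	static inline int32
	get_thread_penalty(const Thread& thread)
	{
		int32 penalty = thread.priority_penalty;
		const int32 kMinimalPriority = get_minimal_priority(thread);
		if (kMinimalPriority > 0)
			penalty += thread.additional_penalty % kMinimalPriority;
		return penalty;
	}

	static inline int32
	get_effective_priority(const Thread& thread)
	{
		// Idle and real-time threads are never penalized.
		if (thread.priority == B_IDLE_PRIORITY
			|| thread.priority >= B_FIRST_REAL_TIME_PRIORITY)
			return thread.priority;
		return thread.priority - get_thread_penalty(thread);
	}

	int main()
	{
		// A B_NORMAL_PRIORITY (10) thread with priority_penalty 4 and
		// additional_penalty 7: minimal priority is min(10, 25) / 5 = 2,
		// penalty is 4 + 7 % 2 = 5, effective priority is 10 - 5 = 5.
		Thread t = { 10, 4, 7 };
		printf("effective priority: %d\n", (int)get_effective_priority(t));
		return 0;
	}

Keeping these helpers inline in the shared header lets every scheduler translation unit compute effective priorities without duplicating this logic or paying a function-call cost.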