scheduler: Add scheduler profiler
A bit hackish implementation of a profiler for the scheduler. Sprinkling SCHEDULER_ENTER_FUNCTION at the beginning of each function isn't nice, and the use of __PRETTY_FUNCTION__ isn't any better (though both gcc and clang support it), but it was quick to implement and doesn't lose information on inlined functions. It's just a tool, not an integral part of the kernel anyway.
This commit is contained in:
parent ebe5420f84
commit 96dcc73b39
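
The mechanism is easiest to see outside the kernel: SCHEDULER_ENTER_FUNCTION() plants a stack-allocated RAII guard whose constructor reports function entry and whose destructor reports exit, with __PRETTY_FUNCTION__ supplying a name that survives inlining. A minimal userland sketch of that pattern follows; the ProfilerFunction class and the printf backend are stand-ins for the Scheduler::Profiling machinery this commit adds.

    #include <cstdio>

    // Plants a guard object; __PRETTY_FUNCTION__ expands at the use site, so
    // the guard captures the enclosing function's full signature even if the
    // function later gets inlined.
    #define SCHEDULER_ENTER_FUNCTION() \
        ProfilerFunction _schedulerProfiler(__PRETTY_FUNCTION__)

    class ProfilerFunction {
    public:
        ProfilerFunction(const char* name)
            :
            fName(name)
        {
            std::printf("enter: %s\n", fName);
        }

        ~ProfilerFunction()
        {
            // Runs on every exit path, including early returns.
            std::printf("exit: %s\n", fName);
        }

    private:
        const char* fName;
    };

    static int
    instrumented()
    {
        SCHEDULER_ENTER_FUNCTION();
        return 42;
    }

    int
    main()
    {
        return instrumented() == 42 ? 0 : 1;
    }
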
src/system/kernel/Jamfile
@@ -66,6 +66,7 @@ KernelMergeObject kernel_core.o :
    power_saving.cpp
    scheduler.cpp
    scheduler_cpu.cpp
    scheduler_profiler.cpp
    scheduler_thread.cpp
    scheduler_tracing.cpp
    scheduling_analysis.cpp

src/system/kernel/scheduler/low_latency.cpp
@@ -9,6 +9,7 @@
#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"


@@ -33,10 +34,9 @@ set_cpu_enabled(int32 /* cpu */, bool /* enabled */)
static bool
has_cache_expired(const ThreadData* threadData)
{
    ASSERT(!gSingleCore);
    SCHEDULER_ENTER_FUNCTION();

    CoreEntry* core = threadData->Core();

    bigtime_t activeTime = core->GetActiveTime();
    return activeTime - threadData->WentSleepActive() > kCacheExpire;
}
@@ -45,6 +45,8 @@ has_cache_expired(const ThreadData* threadData)
static CoreEntry*
choose_core(const ThreadData* /* threadData */)
{
    SCHEDULER_ENTER_FUNCTION();

    // wake new package
    PackageEntry* package = gIdlePackageList.Last();
    if (package == NULL) {
@@ -72,6 +74,8 @@ choose_core(const ThreadData* /* threadData */)
static bool
should_rebalance(const ThreadData* threadData)
{
    SCHEDULER_ENTER_FUNCTION();

    int32 coreLoad = threadData->Core()->GetLoad();

    // If the thread produces more than 50% of the load, leave it here. In
@@ -104,6 +108,8 @@ should_rebalance(const ThreadData* threadData)
static void
rebalance_irqs(bool idle)
{
    SCHEDULER_ENTER_FUNCTION();

    if (idle)
        return;

src/system/kernel/scheduler/power_saving.cpp
@@ -10,6 +10,7 @@
#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"


@@ -39,8 +40,7 @@ set_cpu_enabled(int32 cpu, bool enabled)
static bool
has_cache_expired(const ThreadData* threadData)
{
    ASSERT(!gSingleCore);

    SCHEDULER_ENTER_FUNCTION();
    return system_time() - threadData->WentSleep() > kCacheExpire;
}

@@ -48,6 +48,8 @@ has_cache_expired(const ThreadData* threadData)
static CoreEntry*
choose_small_task_core()
{
    SCHEDULER_ENTER_FUNCTION();

    ReadSpinLocker locker(gCoreHeapsLock);
    CoreEntry* core = gCoreLoadHeap.PeekMaximum();
    locker.Unlock();
@@ -66,6 +68,8 @@ choose_small_task_core()
static CoreEntry*
choose_idle_core()
{
    SCHEDULER_ENTER_FUNCTION();

    PackageEntry* package = PackageEntry::GetLeastIdlePackage();

    if (package == NULL)
@@ -81,6 +85,8 @@ choose_idle_core()
static CoreEntry*
choose_core(const ThreadData* threadData)
{
    SCHEDULER_ENTER_FUNCTION();

    CoreEntry* core = NULL;

    // try to pack all threads on one core
@@ -111,6 +117,8 @@ choose_core(const ThreadData* threadData)
static bool
should_rebalance(const ThreadData* threadData)
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(!gSingleCore);

    CoreEntry* core = threadData->Core();
@@ -151,6 +159,8 @@ should_rebalance(const ThreadData* threadData)
static inline void
pack_irqs()
{
    SCHEDULER_ENTER_FUNCTION();

    CoreEntry* smallTaskCore = atomic_pointer_get(&sSmallTaskCore);
    if (smallTaskCore == NULL)
        return;
@@ -177,6 +187,8 @@ pack_irqs()
static void
rebalance_irqs(bool idle)
{
    SCHEDULER_ENTER_FUNCTION();

    if (idle && sSmallTaskCore != NULL) {
        pack_irqs();
        return;

src/system/kernel/scheduler/scheduler.cpp
@@ -32,6 +32,7 @@
#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"
#include "scheduler_tracing.h"

@@ -192,7 +193,7 @@ scheduler_dump_thread_data(Thread* thread)
static void
enqueue(Thread* thread, bool newOne)
{
    ASSERT(thread != NULL);
    SCHEDULER_ENTER_FUNCTION();

    ThreadData* threadData = thread->scheduler_data;

@@ -243,10 +244,8 @@ enqueue(Thread* thread, bool newOne)
void
scheduler_enqueue_in_run_queue(Thread *thread)
{
#if KDEBUG
    if (are_interrupts_enabled())
        panic("scheduler_enqueue_in_run_queue: called with interrupts enabled");
#endif
    ASSERT(!are_interrupts_enabled());
    SCHEDULER_ENTER_FUNCTION();

    SchedulerModeLocker _;

@@ -267,14 +266,13 @@ scheduler_enqueue_in_run_queue(Thread *thread)
int32
scheduler_set_thread_priority(Thread *thread, int32 priority)
{
#if KDEBUG
    if (!are_interrupts_enabled())
        panic("scheduler_set_thread_priority: called with interrupts disabled");
#endif
    ASSERT(are_interrupts_enabled());

    InterruptsSpinLocker _(thread->scheduler_lock);
    SchedulerModeLocker modeLocker;

    SCHEDULER_ENTER_FUNCTION();

    ThreadData* threadData = thread->scheduler_data;
    int32 oldPriority = thread->priority;

@@ -431,6 +429,8 @@ switch_thread(Thread* fromThread, Thread* toThread)
static inline void
update_thread_times(Thread* oldThread, Thread* nextThread)
{
    SCHEDULER_ENTER_FUNCTION();

    bigtime_t now = system_time();
    if (oldThread == nextThread) {
        SpinLocker _(oldThread->time_lock);
@@ -459,6 +459,7 @@ static void
reschedule(int32 nextState)
{
    ASSERT(!are_interrupts_enabled());
    SCHEDULER_ENTER_FUNCTION();

    SchedulerModeLocker modeLocker;

@@ -584,6 +585,9 @@ reschedule(int32 nextState)
    nextThreadData->StartQuantum();

    modeLocker.Unlock();

    SCHEDULER_EXIT_FUNCTION();

    if (nextThread != oldThread)
        switch_thread(oldThread, nextThread);
}
@@ -596,10 +600,8 @@ reschedule(int32 nextState)
void
scheduler_reschedule(int32 nextState)
{
#if KDEBUG
    if (are_interrupts_enabled())
        panic("scheduler_reschedule: called with interrupts enabled");
#endif
    ASSERT(!are_interrupts_enabled());
    SCHEDULER_ENTER_FUNCTION();

    if (!sSchedulerEnabled) {
        Thread* thread = thread_get_current_thread();
@@ -652,6 +654,7 @@ void
scheduler_start()
{
    InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
    SCHEDULER_ENTER_FUNCTION();

    reschedule(B_THREAD_READY);
}
@@ -847,6 +850,10 @@ scheduler_init()
        " cache level%s\n", cpuCount, cpuCount != 1 ? "s" : "",
        gCPUCacheLevelCount, gCPUCacheLevelCount != 1 ? "s" : "");

#ifdef SCHEDULER_PROFILING
    Profiling::Profiler::Initialize();
#endif

    status_t result = init();
    if (result != B_OK)
        panic("scheduler_init: failed to initialize scheduler\n");
@@ -923,6 +930,10 @@ _user_estimate_max_scheduling_latency(thread_id id)
    }
    BReference<Thread> threadReference(thread, true);

#ifdef SCHEDULER_PROFILING
    InterruptsLocker _;
#endif

    ThreadData* threadData = thread->scheduler_data;
    CoreEntry* core = threadData->Core();
    if (core == NULL)

src/system/kernel/scheduler/scheduler_cpu.cpp
@@ -99,6 +99,7 @@ CPUEntry::Stop()
void
CPUEntry::PushFront(ThreadData* thread, int32 priority)
{
    SCHEDULER_ENTER_FUNCTION();
    fRunQueue.PushFront(thread, priority);
}

@@ -106,6 +107,7 @@ CPUEntry::PushFront(ThreadData* thread, int32 priority)
void
CPUEntry::PushBack(ThreadData* thread, int32 priority)
{
    SCHEDULER_ENTER_FUNCTION();
    fRunQueue.PushBack(thread, priority);
}

@@ -113,6 +115,7 @@ CPUEntry::PushBack(ThreadData* thread, int32 priority)
void
CPUEntry::Remove(ThreadData* thread)
{
    SCHEDULER_ENTER_FUNCTION();
    ASSERT(thread->IsEnqueued());
    thread->SetDequeued();
    fRunQueue.Remove(thread);
@@ -122,6 +125,7 @@ CPUEntry::Remove(ThreadData* thread)
inline ThreadData*
CPUEntry::PeekThread() const
{
    SCHEDULER_ENTER_FUNCTION();
    return fRunQueue.PeekMaximum();
}

@@ -129,6 +133,7 @@ CPUEntry::PeekThread() const
ThreadData*
CPUEntry::PeekIdleThread() const
{
    SCHEDULER_ENTER_FUNCTION();
    return fRunQueue.GetHead(B_IDLE_PRIORITY);
}

@@ -136,6 +141,8 @@ CPUEntry::PeekIdleThread() const
void
CPUEntry::UpdatePriority(int32 priority)
{
    SCHEDULER_ENTER_FUNCTION();

    if (gCPU[fCPUNumber].disabled)
        return;

@@ -161,6 +168,8 @@ CPUEntry::UpdatePriority(int32 priority)
void
CPUEntry::ComputeLoad()
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(!gSingleCore);
    ASSERT(fCPUNumber == smp_get_current_cpu());

@@ -181,6 +190,8 @@ CPUEntry::ComputeLoad()
ThreadData*
CPUEntry::ChooseNextThread(ThreadData* oldThread, bool putAtBack)
{
    SCHEDULER_ENTER_FUNCTION();

    CoreRunQueueLocker _(fCore);

    ThreadData* sharedThread = fCore->PeekThread();
@@ -217,6 +228,8 @@ CPUEntry::ChooseNextThread(ThreadData* oldThread, bool putAtBack)
void
CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
{
    SCHEDULER_ENTER_FUNCTION();

    cpu_ent* cpuEntry = &gCPU[fCPUNumber];

    Thread* oldThread = oldThreadData->GetThread();
@@ -252,6 +265,8 @@ CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
inline void
CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
{
    SCHEDULER_ENTER_FUNCTION();

    int32 load = std::max(threadData->GetLoad(), fCore->GetLoad());
    load = std::min(std::max(load, int32(0)), kMaxLoad);

@@ -332,6 +347,8 @@ CoreEntry::Init(int32 id, PackageEntry* package)
void
CoreEntry::PushFront(ThreadData* thread, int32 priority)
{
    SCHEDULER_ENTER_FUNCTION();

    fRunQueue.PushFront(thread, priority);
    atomic_add(&fThreadCount, 1);
}
@@ -340,6 +357,8 @@ CoreEntry::PushFront(ThreadData* thread, int32 priority)
void
CoreEntry::PushBack(ThreadData* thread, int32 priority)
{
    SCHEDULER_ENTER_FUNCTION();

    fRunQueue.PushBack(thread, priority);
    fThreadList.Insert(thread);

@@ -350,6 +369,8 @@ CoreEntry::PushBack(ThreadData* thread, int32 priority)
void
CoreEntry::Remove(ThreadData* thread)
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(thread->IsEnqueued());
    thread->SetDequeued();
    if (thread_is_idle_thread(thread->GetThread())
@@ -366,6 +387,7 @@ CoreEntry::Remove(ThreadData* thread)
inline ThreadData*
CoreEntry::PeekThread() const
{
    SCHEDULER_ENTER_FUNCTION();
    return fRunQueue.PeekMaximum();
}

@@ -373,6 +395,8 @@ CoreEntry::PeekThread() const
void
CoreEntry::UpdateLoad(int32 delta)
{
    SCHEDULER_ENTER_FUNCTION();

    if (fCPUCount == 0) {
        fLoad = 0;
        return;
@@ -544,6 +568,8 @@ PackageEntry::Init(int32 id)
inline void
PackageEntry::CoreGoesIdle(CoreEntry* core)
{
    SCHEDULER_ENTER_FUNCTION();

    WriteSpinLocker _(fCoreLock);

    ASSERT(fIdleCoreCount >= 0);
@@ -563,6 +589,8 @@ PackageEntry::CoreGoesIdle(CoreEntry* core)
inline void
PackageEntry::CoreWakesUp(CoreEntry* core)
{
    SCHEDULER_ENTER_FUNCTION();

    WriteSpinLocker _(fCoreLock);

    ASSERT(fIdleCoreCount > 0);

src/system/kernel/scheduler/scheduler_cpu.h
@@ -17,6 +17,7 @@
#include "RunQueue.h"
#include "scheduler_common.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"


namespace Scheduler {
@@ -277,6 +278,7 @@ extern int32 gPackageCount;
inline void
CPUEntry::EnterScheduler()
{
    SCHEDULER_ENTER_FUNCTION();
    acquire_read_spinlock(&fSchedulerModeLock);
}

@@ -284,6 +286,7 @@ CPUEntry::EnterScheduler()
inline void
CPUEntry::ExitScheduler()
{
    SCHEDULER_ENTER_FUNCTION();
    release_read_spinlock(&fSchedulerModeLock);
}

@@ -291,6 +294,7 @@ CPUEntry::ExitScheduler()
inline void
CPUEntry::LockScheduler()
{
    SCHEDULER_ENTER_FUNCTION();
    acquire_write_spinlock(&fSchedulerModeLock);
}

@@ -298,6 +302,7 @@ CPUEntry::LockScheduler()
inline void
CPUEntry::UnlockScheduler()
{
    SCHEDULER_ENTER_FUNCTION();
    release_write_spinlock(&fSchedulerModeLock);
}

@@ -305,6 +310,7 @@ CPUEntry::UnlockScheduler()
inline void
CPUEntry::IncreaseActiveTime(bigtime_t activeTime)
{
    SCHEDULER_ENTER_FUNCTION();
    fMeasureActiveTime += activeTime;
}

@@ -312,6 +318,7 @@ CPUEntry::IncreaseActiveTime(bigtime_t activeTime)
/* static */ inline CPUEntry*
CPUEntry::GetCPU(int32 cpu)
{
    SCHEDULER_ENTER_FUNCTION();
    return &gCPUEntries[cpu];
}

@@ -319,6 +326,7 @@ CPUEntry::GetCPU(int32 cpu)
inline void
CoreEntry::LockCPUHeap()
{
    SCHEDULER_ENTER_FUNCTION();
    acquire_spinlock(&fCPULock);
}

@@ -326,6 +334,7 @@ CoreEntry::LockCPUHeap()
inline void
CoreEntry::UnlockCPUHeap()
{
    SCHEDULER_ENTER_FUNCTION();
    release_spinlock(&fCPULock);
}

@@ -333,6 +342,7 @@ CoreEntry::UnlockCPUHeap()
inline CPUPriorityHeap*
CoreEntry::CPUHeap()
{
    SCHEDULER_ENTER_FUNCTION();
    return &fCPUHeap;
}

@@ -340,6 +350,7 @@ CoreEntry::CPUHeap()
inline void
CoreEntry::LockRunQueue()
{
    SCHEDULER_ENTER_FUNCTION();
    acquire_spinlock(&fQueueLock);
}

@@ -347,6 +358,7 @@ CoreEntry::LockRunQueue()
inline void
CoreEntry::UnlockRunQueue()
{
    SCHEDULER_ENTER_FUNCTION();
    release_spinlock(&fQueueLock);
}

@@ -354,6 +366,7 @@ CoreEntry::UnlockRunQueue()
inline void
CoreEntry::IncreaseActiveTime(bigtime_t activeTime)
{
    SCHEDULER_ENTER_FUNCTION();
    WriteSequentialLocker _(fActiveTimeLock);
    fActiveTime += activeTime;
}
@@ -362,14 +375,14 @@ CoreEntry::IncreaseActiveTime(bigtime_t activeTime)
inline bigtime_t
CoreEntry::GetActiveTime() const
{
    bigtime_t activeTime;
    SCHEDULER_ENTER_FUNCTION();

    bigtime_t activeTime;
    uint32 count;
    do {
        count = acquire_read_seqlock(&fActiveTimeLock);
        activeTime = fActiveTime;
    } while (!release_read_seqlock(&fActiveTimeLock, count));

    return activeTime;
}

@@ -377,6 +390,8 @@ CoreEntry::GetActiveTime() const
inline int32
CoreEntry::GetLoad() const
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(fCPUCount >= 0);
    return fLoad / fCPUCount;
}
@@ -385,6 +400,7 @@ CoreEntry::GetLoad() const
inline int32
CoreEntry::StarvationCounter() const
{
    SCHEDULER_ENTER_FUNCTION();
    return fStarvationCounter;
}

@@ -392,6 +408,7 @@ CoreEntry::StarvationCounter() const
/* static */ inline CoreEntry*
CoreEntry::GetCore(int32 cpu)
{
    SCHEDULER_ENTER_FUNCTION();
    return gCPUEntries[cpu].Core();
}

@@ -399,6 +416,7 @@ CoreEntry::GetCore(int32 cpu)
inline CoreEntry*
PackageEntry::GetIdleCore() const
{
    SCHEDULER_ENTER_FUNCTION();
    return fIdleCores.Last();
}

@@ -406,6 +424,8 @@ PackageEntry::GetIdleCore() const
/* static */ inline PackageEntry*
PackageEntry::GetMostIdlePackage()
{
    SCHEDULER_ENTER_FUNCTION();

    PackageEntry* current = &gPackageEntries[0];
    for (int32 i = 1; i < gPackageCount; i++) {
        if (gPackageEntries[i].fIdleCoreCount > current->fIdleCoreCount)
@@ -422,6 +442,8 @@ PackageEntry::GetMostIdlePackage()
/* static */ inline PackageEntry*
PackageEntry::GetLeastIdlePackage()
{
    SCHEDULER_ENTER_FUNCTION();

    PackageEntry* package = NULL;

    for (int32 i = 0; i < gPackageCount; i++) {

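The CoreEntry::GetActiveTime() hunk above reads fActiveTime under a sequential lock: the reader snapshots the sequence counter, copies the value, and retries if a writer ran in between. A sketch of that reader pattern, rebuilt on std::atomic purely for illustration; acquire_read()/release_read() are hypothetical stand-ins for the kernel's acquire_read_seqlock()/release_read_seqlock().

    #include <atomic>
    #include <cstdint>

    // Writers bump the counter to an odd value before modifying the data and
    // back to an even value afterwards; readers retry until they observe the
    // same even count before and after their read.
    struct seqlock {
        std::atomic<uint32_t> count{0};
    };

    static uint32_t
    acquire_read(seqlock& lock)
    {
        uint32_t count;
        while (((count = lock.count.load(std::memory_order_acquire)) & 1) != 0) {
            // A write is in progress; spin until it completes.
        }
        return count;
    }

    static bool
    release_read(seqlock& lock, uint32_t count)
    {
        // The snapshot is valid only if no writer ran in the meantime.
        return lock.count.load(std::memory_order_acquire) == count;
    }

    static seqlock sActiveTimeLock;
    // Atomic here only to keep the userland sketch free of formal data races;
    // the kernel version reads a plain field under the seqlock protocol.
    static std::atomic<int64_t> sActiveTime{0};

    int64_t
    get_active_time()
    {
        int64_t activeTime;
        uint32_t count;
        do {
            count = acquire_read(sActiveTimeLock);
            activeTime = sActiveTime.load(std::memory_order_relaxed);
        } while (!release_read(sActiveTimeLock, count));
        return activeTime;
    }
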
src/system/kernel/scheduler/scheduler_profiler.cpp (new file, 298 lines)
@@ -0,0 +1,298 @@
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */


#include "scheduler_profiler.h"

#include <debug.h>
#include <util/AutoLock.h>

#include <algorithm>


#ifdef SCHEDULER_PROFILING


using namespace Scheduler;
using namespace Scheduler::Profiling;


static Profiler* sProfiler;

static int dump_profiler(int argc, char** argv);


Profiler::Profiler()
    :
    kMaxFunctionEntries(1024),
    kMaxFunctionStackEntries(512),
    fFunctionData(new(std::nothrow) FunctionData[kMaxFunctionEntries]),
    fStatus(B_OK)
{
    B_INITIALIZE_SPINLOCK(&fFunctionLock);

    if (fFunctionData == NULL) {
        fStatus = B_NO_MEMORY;
        return;
    }
    memset(fFunctionData, 0, sizeof(FunctionData) * kMaxFunctionEntries);

    for (int32 i = 0; i < smp_get_num_cpus(); i++) {
        fFunctionStacks[i]
            = new(std::nothrow) FunctionEntry[kMaxFunctionStackEntries];
        if (fFunctionStacks[i] == NULL) {
            fStatus = B_NO_MEMORY;
            return;
        }
        memset(fFunctionStacks[i], 0,
            sizeof(FunctionEntry) * kMaxFunctionStackEntries);
    }
    memset(fFunctionStackPointers, 0, sizeof(int32) * smp_get_num_cpus());
}


void
Profiler::EnterFunction(int32 cpu, const char* functionName)
{
    FunctionData* function = _FindFunction(functionName);
    if (function == NULL)
        return;
    atomic_add((int32*)&function->fCalled, 1);

    FunctionEntry* stackEntry
        = &fFunctionStacks[cpu][fFunctionStackPointers[cpu]];
    fFunctionStackPointers[cpu]++;

    ASSERT(fFunctionStackPointers[cpu] < kMaxFunctionStackEntries);

    stackEntry->fFunction = function;
    stackEntry->fEntryTime = system_time();
    stackEntry->fOthersTime = 0;
}


void
Profiler::ExitFunction(int32 cpu, const char* functionName)
{
    ASSERT(fFunctionStackPointers[cpu] > 0);
    fFunctionStackPointers[cpu]--;
    FunctionEntry* stackEntry
        = &fFunctionStacks[cpu][fFunctionStackPointers[cpu]];

    bigtime_t timeSpent = system_time() - stackEntry->fEntryTime;
    atomic_add64(&stackEntry->fFunction->fTimeInclusive, timeSpent);
    atomic_add64(&stackEntry->fFunction->fTimeExclusive,
        timeSpent - stackEntry->fOthersTime);

    if (fFunctionStackPointers[cpu] > 0) {
        stackEntry = &fFunctionStacks[cpu][fFunctionStackPointers[cpu] - 1];
        stackEntry->fOthersTime += timeSpent;
    }
}


void
Profiler::DumpCalled(uint32 maxCount)
{
    uint32 count = _FunctionCount();

    qsort(fFunctionData, count, sizeof(FunctionData),
        &_CompareFunctions<uint32, &FunctionData::fCalled>);

    if (maxCount > 0)
        count = std::min(count, maxCount);
    _Dump(count);
}


void
Profiler::DumpTimeInclusive(uint32 maxCount)
{
    uint32 count = _FunctionCount();

    qsort(fFunctionData, count, sizeof(FunctionData),
        &_CompareFunctions<bigtime_t, &FunctionData::fTimeInclusive>);

    if (maxCount > 0)
        count = std::min(count, maxCount);
    _Dump(count);
}


void
Profiler::DumpTimeExclusive(uint32 maxCount)
{
    uint32 count = _FunctionCount();

    qsort(fFunctionData, count, sizeof(FunctionData),
        &_CompareFunctions<bigtime_t, &FunctionData::fTimeExclusive>);

    if (maxCount > 0)
        count = std::min(count, maxCount);
    _Dump(count);
}


void
Profiler::DumpTimeInclusivePerCall(uint32 maxCount)
{
    uint32 count = _FunctionCount();

    qsort(fFunctionData, count, sizeof(FunctionData),
        &_CompareFunctionsPerCall<bigtime_t, &FunctionData::fTimeInclusive>);

    if (maxCount > 0)
        count = std::min(count, maxCount);
    _Dump(count);
}


void
Profiler::DumpTimeExclusivePerCall(uint32 maxCount)
{
    uint32 count = _FunctionCount();

    qsort(fFunctionData, count, sizeof(FunctionData),
        &_CompareFunctionsPerCall<bigtime_t, &FunctionData::fTimeExclusive>);

    if (maxCount > 0)
        count = std::min(count, maxCount);
    _Dump(count);
}


/* static */ Profiler*
Profiler::Get()
{
    return sProfiler;
}


/* static */ void
Profiler::Initialize()
{
    sProfiler = new(std::nothrow) Profiler;
    if (sProfiler == NULL || sProfiler->GetStatus() != B_OK)
        panic("Scheduler::Profiling::Profiler: could not initialize profiler");

    add_debugger_command_etc("scheduler_profiler", &dump_profiler,
        "Show data collected by scheduler profiler",
        "[ <field> [ <count> ] ]\n"
        "Shows data collected by scheduler profiler\n"
        "  <field> - Field used to sort functions. Available: called,"
        " time-inclusive, time-inclusive-per-call, time-exclusive,"
        " time-exclusive-per-call.\n"
        "            (defaults to \"called\")\n"
        "  <count> - Maximum number of showed functions.\n", 0);
}


uint32
Profiler::_FunctionCount() const
{
    uint32 count;
    for (count = 0; count < kMaxFunctionEntries; count++) {
        if (fFunctionData[count].fFunction == NULL)
            break;
    }
    return count;
}


void
Profiler::_Dump(uint32 count)
{
    kprintf("Function calls (%" B_PRId32 " functions):\n", count);
    kprintf("    called time-inclusive per-call time-exclusive per-call "
        "function\n");
    for (uint32 i = 0; i < count; i++) {
        FunctionData* function = &fFunctionData[i];
        kprintf("%10" B_PRId32 " %14" B_PRId64 " %8" B_PRId64 " %14" B_PRId64
            " %8" B_PRId64 " %s\n", function->fCalled,
            function->fTimeInclusive,
            function->fTimeInclusive / function->fCalled,
            function->fTimeExclusive,
            function->fTimeExclusive / function->fCalled, function->fFunction);
    }
}


Profiler::FunctionData*
Profiler::_FindFunction(const char* function)
{
    for (uint32 i = 0; i < kMaxFunctionEntries; i++) {
        if (fFunctionData[i].fFunction == NULL)
            break;
        if (!strcmp(fFunctionData[i].fFunction, function))
            return fFunctionData + i;
    }

    SpinLocker _(fFunctionLock);
    for (uint32 i = 0; i < kMaxFunctionEntries; i++) {
        if (fFunctionData[i].fFunction == NULL) {
            fFunctionData[i].fFunction = function;
            return fFunctionData + i;
        }
        if (!strcmp(fFunctionData[i].fFunction, function))
            return fFunctionData + i;
    }

    return NULL;
}


template<typename Type, Type Profiler::FunctionData::*Member>
/* static */ int
Profiler::_CompareFunctions(const void* _a, const void* _b)
{
    const FunctionData* a = static_cast<const FunctionData*>(_a);
    const FunctionData* b = static_cast<const FunctionData*>(_b);

    return b->*Member - a->*Member;
}


template<typename Type, Type Profiler::FunctionData::*Member>
/* static */ int
Profiler::_CompareFunctionsPerCall(const void* _a, const void* _b)
{
    const FunctionData* a = static_cast<const FunctionData*>(_a);
    const FunctionData* b = static_cast<const FunctionData*>(_b);

    return b->*Member / b->fCalled - a->*Member / a->fCalled;
}


static int
dump_profiler(int argc, char** argv)
{
    if (argc < 2) {
        Profiler::Get()->DumpCalled(0);
        return 0;
    }

    int32 count = 0;
    if (argc >= 3)
        count = parse_expression(argv[2]);
    count = std::max(count, int32(0));

    if (!strcmp(argv[1], "called"))
        Profiler::Get()->DumpCalled(count);
    else if (!strcmp(argv[1], "time-inclusive"))
        Profiler::Get()->DumpTimeInclusive(count);
    else if (!strcmp(argv[1], "time-inclusive-per-call"))
        Profiler::Get()->DumpTimeInclusivePerCall(count);
    else if (!strcmp(argv[1], "time-exclusive"))
        Profiler::Get()->DumpTimeExclusive(count);
    else if (!strcmp(argv[1], "time-exclusive-per-call"))
        Profiler::Get()->DumpTimeExclusivePerCall(count);
    else
        print_debugger_command_usage(argv[0]);

    return 0;
}


#endif	// SCHEDULER_PROFILING

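To make the fOthersTime bookkeeping in EnterFunction()/ExitFunction() above concrete, here is a hypothetical timeline with made-up microsecond values: outer() runs for 100, of which a nested inner() call takes 30.

    // EnterFunction(cpu, "outer")  t = 0    stack: [outer]
    // EnterFunction(cpu, "inner")  t = 50   stack: [outer, inner]
    // ExitFunction(cpu, "inner")   t = 80   timeSpent = 30:
    //                                       inner.fTimeInclusive += 30
    //                                       inner.fTimeExclusive += 30 - 0
    //                                       outer.fOthersTime    += 30
    // ExitFunction(cpu, "outer")   t = 100  timeSpent = 100:
    //                                       outer.fTimeInclusive += 100
    //                                       outer.fTimeExclusive += 100 - 30 = 70

So inclusive time charges a function for everything that ran while it was on the per-CPU stack, while exclusive time subtracts whatever its callees accumulated.
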
src/system/kernel/scheduler/scheduler_profiler.h (new file, 135 lines)
@@ -0,0 +1,135 @@
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_PROFILER_H
#define KERNEL_SCHEDULER_PROFILER_H


#include <smp.h>


//#define SCHEDULER_PROFILING
#ifdef SCHEDULER_PROFILING


#define SCHEDULER_ENTER_FUNCTION() \
    Scheduler::Profiling::Function schedulerProfiler(__PRETTY_FUNCTION__)

#define SCHEDULER_EXIT_FUNCTION() \
    schedulerProfiler.Exit()


namespace Scheduler {

namespace Profiling {

class Profiler {
public:
    Profiler();

    void EnterFunction(int32 cpu, const char* function);
    void ExitFunction(int32 cpu, const char* function);

    void DumpCalled(uint32 count);
    void DumpTimeInclusive(uint32 count);
    void DumpTimeExclusive(uint32 count);
    void DumpTimeInclusivePerCall(uint32 count);
    void DumpTimeExclusivePerCall(uint32 count);

    status_t GetStatus() const { return fStatus; }

    static Profiler* Get();
    static void Initialize();

private:
    struct FunctionData {
        const char* fFunction;

        uint32 fCalled;

        bigtime_t fTimeInclusive;
        bigtime_t fTimeExclusive;
    };

    struct FunctionEntry {
        FunctionData* fFunction;

        bigtime_t fEntryTime;
        bigtime_t fOthersTime;
    };

    uint32 _FunctionCount() const;
    void _Dump(uint32 count);

    FunctionData* _FindFunction(const char* function);

    template<typename Type, Type FunctionData::*Member>
    static int _CompareFunctions(const void* a, const void* b);

    template<typename Type, Type FunctionData::*Member>
    static int _CompareFunctionsPerCall(const void* a,
        const void* b);

    const uint32 kMaxFunctionEntries;
    const uint32 kMaxFunctionStackEntries;

    FunctionEntry* fFunctionStacks[SMP_MAX_CPUS];
    uint32 fFunctionStackPointers[SMP_MAX_CPUS];

    FunctionData* fFunctionData;
    spinlock fFunctionLock;

    status_t fStatus;
};

class Function {
public:
    inline Function(const char* functionName);
    inline ~Function();

    inline void Exit();

private:
    const char* fFunctionName;
};


Function::Function(const char* functionName)
    :
    fFunctionName(functionName)
{
    Profiler::Get()->EnterFunction(smp_get_current_cpu(), fFunctionName);
}


Function::~Function()
{
    if (fFunctionName != NULL)
        Exit();
}


void
Function::Exit()
{
    Profiler::Get()->ExitFunction(smp_get_current_cpu(), fFunctionName);
    fFunctionName = NULL;
}


}	// namespace Profiling

}	// namespace Scheduler


#else	// SCHEDULER_PROFILING

#define SCHEDULER_ENTER_FUNCTION()	(void)0
#define SCHEDULER_EXIT_FUNCTION()	(void)0

#endif	// !SCHEDULER_PROFILING


#endif	// KERNEL_SCHEDULER_PROFILER_H

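Note the contract visible in Function above: Exit() reports the exit and clears fFunctionName, so the destructor stays silent afterwards; that is what lets reschedule() call SCHEDULER_EXIT_FUNCTION() explicitly before switch_thread() hands off the CPU. With the //#define SCHEDULER_PROFILING line above uncommented and the kernel rebuilt, the collected counters would be inspected from the kernel debugger through the scheduler_profiler command registered in scheduler_profiler.cpp. Hypothetical invocations, following the registered usage text (no argument defaults to sorting by call count; the optional second argument caps how many functions are shown):

    kdebug> scheduler_profiler
    kdebug> scheduler_profiler time-exclusive 10
    kdebug> scheduler_profiler time-inclusive-per-call
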
src/system/kernel/scheduler/scheduler_thread.cpp
@@ -78,6 +78,8 @@ ThreadData::Dump() const
bool
ThreadData::ChooseCoreAndCPU(CoreEntry*& targetCore, CPUEntry*& targetCPU)
{
    SCHEDULER_ENTER_FUNCTION();

    bool rescheduleNeeded = false;

    if (targetCore == NULL && targetCPU != NULL)
@@ -100,6 +102,8 @@ ThreadData::ChooseCoreAndCPU(CoreEntry*& targetCore, CPUEntry*& targetCPU)
bigtime_t
ThreadData::ComputeQuantum()
{
    SCHEDULER_ENTER_FUNCTION();

    bigtime_t quantum;
    if (fTimeLeft != 0)
        quantum = fTimeLeft;
@@ -127,6 +131,8 @@ ThreadData::ComputeQuantum()
/* static */ void
ThreadData::ComputeQuantumLengths()
{
    SCHEDULER_ENTER_FUNCTION();

    for (int32 priority = 0; priority <= THREAD_MAX_SET_PRIORITY; priority++) {
        const bigtime_t kQuantum0 = gCurrentMode->base_quantum;
        if (priority >= B_URGENT_DISPLAY_PRIORITY) {
@@ -153,6 +159,8 @@ ThreadData::ComputeQuantumLengths()
void
ThreadData::_ComputeEffectivePriority() const
{
    SCHEDULER_ENTER_FUNCTION();

    if (thread_is_idle_thread(fThread))
        fEffectivePriority = B_IDLE_PRIORITY;
    else if (fThread->priority >= B_FIRST_REAL_TIME_PRIORITY)
@@ -170,6 +178,8 @@ ThreadData::_ComputeEffectivePriority() const
inline CoreEntry*
ThreadData::_ChooseCore() const
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(!gSingleCore);
    return gCurrentMode->choose_core(this);
}
@@ -178,6 +188,8 @@ ThreadData::_ChooseCore() const
inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
    SCHEDULER_ENTER_FUNCTION();

    int32 threadPriority = GetEffectivePriority();

    if (fThread->previous_cpu != NULL) {
@@ -209,6 +221,8 @@ ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
inline bigtime_t
ThreadData::_GetBaseQuantum() const
{
    SCHEDULER_ENTER_FUNCTION();

    return sQuantumLengths[GetEffectivePriority()];
}

@@ -217,6 +231,8 @@ ThreadData::_GetBaseQuantum() const
ThreadData::_ScaleQuantum(bigtime_t maxQuantum, bigtime_t minQuantum,
    int32 maxPriority, int32 minPriority, int32 priority)
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(priority <= maxPriority);
    ASSERT(priority >= minPriority);

src/system/kernel/scheduler/scheduler_thread.h
@@ -11,6 +11,7 @@

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_profiler.h"


namespace Scheduler {
@@ -123,6 +124,7 @@ public:
inline bool
ThreadData::HasCacheExpired() const
{
    SCHEDULER_ENTER_FUNCTION();
    return gCurrentMode->has_cache_expired(this);
}

@@ -130,6 +132,8 @@ ThreadData::HasCacheExpired() const
inline bool
ThreadData::ShouldRebalance() const
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(!gSingleCore);
    return gCurrentMode->should_rebalance(this);
}
@@ -138,6 +142,8 @@ ThreadData::ShouldRebalance() const
inline int32
ThreadData::GetEffectivePriority() const
{
    SCHEDULER_ENTER_FUNCTION();

    if (fEffectivePriority == -1)
        _ComputeEffectivePriority();
    return fEffectivePriority;
@@ -147,6 +153,8 @@ ThreadData::GetEffectivePriority() const
inline void
ThreadData::IncreasePenalty()
{
    SCHEDULER_ENTER_FUNCTION();

    if (fThread->priority < B_LOWEST_ACTIVE_PRIORITY)
        return;
    if (fThread->priority >= B_FIRST_REAL_TIME_PRIORITY)
@@ -170,6 +178,8 @@ ThreadData::IncreasePenalty()
inline void
ThreadData::CancelPenalty()
{
    SCHEDULER_ENTER_FUNCTION();

    if (fPriorityPenalty != 0) {
        TRACE("cancelling thread %ld penalty\n", fThread->id);
        fEffectivePriority = -1;
@@ -183,6 +193,8 @@ ThreadData::CancelPenalty()
inline bool
ThreadData::ShouldCancelPenalty() const
{
    SCHEDULER_ENTER_FUNCTION();

    if (fCore == NULL)
        return false;

@@ -194,6 +206,7 @@ ThreadData::ShouldCancelPenalty() const
inline void
ThreadData::IncreaseStolenTime(bigtime_t stolenTime)
{
    SCHEDULER_ENTER_FUNCTION();
    fStolenTime += stolenTime;
}

@@ -201,6 +214,8 @@ ThreadData::IncreaseStolenTime(bigtime_t stolenTime)
inline void
ThreadData::GoesAway()
{
    SCHEDULER_ENTER_FUNCTION();

    fLastInterruptTime = 0;

    fWentSleep = system_time();
@@ -212,6 +227,8 @@ ThreadData::GoesAway()
inline void
ThreadData::PutBack()
{
    SCHEDULER_ENTER_FUNCTION();

    ComputeLoad();
    fWentSleepCount = -1;

@@ -234,6 +251,8 @@ ThreadData::PutBack()
inline void
ThreadData::Enqueue()
{
    SCHEDULER_ENTER_FUNCTION();

    fThread->state = B_THREAD_READY;
    ComputeLoad();
    fWentSleepCount = 0;
@@ -256,6 +275,8 @@ ThreadData::Enqueue()
inline bool
ThreadData::Dequeue()
{
    SCHEDULER_ENTER_FUNCTION();

    CoreRunQueueLocker _(fCore);
    if (!fEnqueued)
        return false;
@@ -278,6 +299,8 @@ ThreadData::Dequeue()
inline void
ThreadData::UpdateActivity(bigtime_t active)
{
    SCHEDULER_ENTER_FUNCTION();

    fMeasureActiveTime += active;
    CPUEntry::GetCPU(smp_get_current_cpu())->IncreaseActiveTime(active);
    fCore->IncreaseActiveTime(active);
@@ -287,6 +310,8 @@ ThreadData::UpdateActivity(bigtime_t active)
inline void
ThreadData::ComputeLoad()
{
    SCHEDULER_ENTER_FUNCTION();

    if (fLastInterruptTime > 0) {
        bigtime_t interruptTime = gCPU[smp_get_current_cpu()].interrupt_time;
        interruptTime -= fLastInterruptTime;
@@ -300,6 +325,8 @@ ThreadData::ComputeLoad()
inline bool
ThreadData::HasQuantumEnded(bool wasPreempted, bool hasYielded)
{
    SCHEDULER_ENTER_FUNCTION();

    if (hasYielded) {
        fTimeLeft = 0;
        return true;
@@ -322,6 +349,7 @@ ThreadData::HasQuantumEnded(bool wasPreempted, bool hasYielded)
inline void
ThreadData::StartQuantum()
{
    SCHEDULER_ENTER_FUNCTION();
    fQuantumStart = system_time();
}

@@ -329,6 +357,8 @@ ThreadData::StartQuantum()
inline int32
ThreadData::_GetPenalty() const
{
    SCHEDULER_ENTER_FUNCTION();

    int32 penalty = fPriorityPenalty;

    const int kMinimalPriority = _GetMinimalPriority();
@@ -342,6 +372,8 @@ ThreadData::_GetPenalty() const
inline int32
ThreadData::_GetMinimalPriority() const
{
    SCHEDULER_ENTER_FUNCTION();

    const int32 kDivisor = 5;

    const int32 kMaximalPriority = 25;