kernel: Add another penalty for CPU bound threads
Each thread has its minimal priority that depends on the static priority. However, it is still able to starve threads with even lower priority (e.g. CPU bound threads with lower static priority). To prevent this, another penalty is introduced. When the minimal priority is reached, the penalty (count mod minimal_priority) is added, where count is the number of time slices since the thread reached its minimal priority. This prevents starvation of lower-priority threads (since all CPU bound threads may have their priority temporarily reduced to 1) but preserves the relation between static priorities - when there are two CPU bound threads, the one with higher static priority would get more CPU time.
This commit is contained in:
parent
4ade765cab
commit
fee8009184
@ -70,7 +70,7 @@ struct scheduler_ops {
|
||||
|
||||
/*! Dumps scheduler specific thread information.
|
||||
*/
|
||||
void (*dump_thread_data)(struct scheduler_thread_data* scheduler_data);
|
||||
void (*dump_thread_data)(Thread* thread);
|
||||
};
|
||||
|
||||
extern struct scheduler_ops* gScheduler;
|
||||
|
@ -55,6 +55,7 @@ struct scheduler_thread_data {
|
||||
void Init();
|
||||
|
||||
int32 priority_penalty;
|
||||
int32 additional_penalty;
|
||||
int32 forced_yield_count;
|
||||
|
||||
bool lost_cpu;
|
||||
@ -72,6 +73,7 @@ void
|
||||
scheduler_thread_data::Init()
|
||||
{
|
||||
priority_penalty = 0;
|
||||
additional_penalty = 0;
|
||||
forced_yield_count = 0;
|
||||
|
||||
time_left = 0;
|
||||
@ -84,6 +86,28 @@ scheduler_thread_data::Init()
|
||||
}
|
||||
|
||||
|
||||
static inline int
|
||||
simple_get_minimal_priority(Thread* thread)
|
||||
{
|
||||
return min_c(thread->priority, 25) / 5;
|
||||
}
|
||||
|
||||
|
||||
static inline int32
simple_get_thread_penalty(Thread* thread)
{
	// Start from the penalty accumulated for using up whole quantums.
	int32 totalPenalty = thread->scheduler_data->priority_penalty;

	// Once the thread has hit its minimal priority, fold in the cycling
	// additional penalty (count mod minimal priority). This temporarily
	// lowers the effective priority of CPU bound threads so that threads
	// with lower static priority are not starved, while threads with a
	// higher static priority still spend more time at higher levels.
	const int minimalPriority = simple_get_minimal_priority(thread);
	if (minimalPriority > 0) {
		totalPenalty
			+= thread->scheduler_data->additional_penalty % minimalPriority;
	}

	return totalPenalty;
}
|
||||
|
||||
static inline void
|
||||
dump_queue(SimpleRunQueue::ConstIterator& iterator)
|
||||
{
|
||||
@ -95,7 +119,7 @@ dump_queue(SimpleRunQueue::ConstIterator& iterator)
|
||||
Thread* thread = iterator.Next();
|
||||
kprintf("%p %-7" B_PRId32 " %-8" B_PRId32 " %-8" B_PRId32 " %s\n",
|
||||
thread, thread->id, thread->priority,
|
||||
thread->scheduler_data->priority_penalty, thread->name);
|
||||
simple_get_thread_penalty(thread), thread->name);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -118,10 +142,21 @@ dump_run_queue(int argc, char** argv)
|
||||
|
||||
|
||||
static void
|
||||
simple_dump_thread_data(scheduler_thread_data* schedulerThreadData)
|
||||
simple_dump_thread_data(Thread* thread)
|
||||
{
|
||||
scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
|
||||
|
||||
kprintf("\tpriority_penalty:\t%" B_PRId32 "\n",
|
||||
schedulerThreadData->priority_penalty);
|
||||
|
||||
int32 additionalPenalty = 0;
|
||||
const int kMinimalPriority = simple_get_minimal_priority(thread);
|
||||
if (kMinimalPriority > 0) {
|
||||
additionalPenalty
|
||||
= schedulerThreadData->additional_penalty % kMinimalPriority;
|
||||
}
|
||||
kprintf("\tadditional_penalty:\t%" B_PRId32 " (%" B_PRId32 ")\n",
|
||||
additionalPenalty, schedulerThreadData->additional_penalty);
|
||||
kprintf("\tforced_yield_count:\t%" B_PRId32 "\n",
|
||||
schedulerThreadData->forced_yield_count);
|
||||
kprintf("\tstolen_time:\t\t%" B_PRId64 "\n",
|
||||
@ -138,7 +173,7 @@ simple_get_effective_priority(Thread* thread)
|
||||
return thread->priority;
|
||||
|
||||
int32 effectivePriority = thread->priority;
|
||||
effectivePriority -= thread->scheduler_data->priority_penalty;
|
||||
effectivePriority -= simple_get_thread_penalty(thread);
|
||||
|
||||
ASSERT(effectivePriority < B_FIRST_REAL_TIME_PRIORITY);
|
||||
ASSERT(effectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
|
||||
@ -183,11 +218,11 @@ simple_increase_penalty(Thread* thread)
|
||||
int32 oldPenalty = schedulerThreadData->priority_penalty++;
|
||||
|
||||
ASSERT(thread->priority - oldPenalty >= B_LOWEST_ACTIVE_PRIORITY);
|
||||
const int kMinimalPriority
|
||||
= min_c(thread->priority, 25) / 5;
|
||||
const int kMinimalPriority = simple_get_minimal_priority(thread);
|
||||
if (thread->priority - oldPenalty <= kMinimalPriority) {
|
||||
schedulerThreadData->priority_penalty = oldPenalty;
|
||||
schedulerThreadData->forced_yield_count++;
|
||||
schedulerThreadData->additional_penalty++;
|
||||
}
|
||||
}
|
||||
|
||||
@ -200,6 +235,7 @@ simple_cancel_penalty(Thread* thread)
|
||||
if (schedulerThreadData->priority_penalty != 0)
|
||||
TRACE("cancelling thread %ld penalty\n", thread->id);
|
||||
schedulerThreadData->priority_penalty = 0;
|
||||
schedulerThreadData->additional_penalty = 0;
|
||||
schedulerThreadData->forced_yield_count = 0;
|
||||
}
|
||||
|
||||
|
@ -1788,7 +1788,7 @@ _dump_thread_info(Thread *thread, bool shortInfo)
|
||||
arch_thread_dump_info(&thread->arch_info);
|
||||
if (gScheduler->dump_thread_data != NULL) {
|
||||
kprintf("scheduler data:\n");
|
||||
gScheduler->dump_thread_data(thread->scheduler_data);
|
||||
gScheduler->dump_thread_data(thread);
|
||||
}
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user