kernel: Support sched_yield() properly
sched_yield() should not yield the CPU to threads with lower priority.
With this change a yield ends the calling thread's quantum, but the
thread still only gives way to threads of equal or higher priority.
commit 0896565a6e
parent ee69e53630
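For context, a minimal userland sketch (not part of this commit; the loop is hypothetical) of the behaviour the change establishes. Assuming sched_yield() reaches _user_thread_yield() at the bottom of this diff, each call now ends the caller's quantum so equal-priority threads may run, while lower-priority threads still do not get the CPU in its place:

// Hypothetical illustration only, not from the Haiku sources.
#include <sched.h>

int
main()
{
	for (int i = 0; i < 1000; i++) {
		// ... one unit of work would go here ...
		sched_yield();
			// quantum ends; equal-priority threads may run first,
			// but no lower-priority thread is scheduled instead
	}
	return 0;
}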
@@ -443,7 +443,8 @@ struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable,
     bool            in_kernel;          // protected by time_lock, only written by
                                         // this thread
-    bool            was_yielded;        // protected by scheduler lock
+    bool            has_yielded;        // protected by scheduler lock
+    bool            has_fully_yielded;  // protected by scheduler lock
     struct scheduler_thread_data* scheduler_data;  // protected by scheduler lock

     struct user_thread* user_thread;    // write-protected by fLock, only
@@ -447,7 +447,7 @@ affine_reschedule(void)

     nextThread->state = B_THREAD_RUNNING;
     nextThread->next_state = B_THREAD_READY;
-    oldThread->was_yielded = false;
+    oldThread->has_yielded = false;

     // track kernel time (user time is tracked in thread_at_kernel_entry())
     scheduler_update_thread_times(oldThread, nextThread);
@@ -324,11 +324,17 @@ reschedule_event(timer* /* unused */)
 }


-static inline bool simple_quantum_ended(Thread* thread, bool wasPreempted)
+static inline bool
+simple_quantum_ended(Thread* thread, bool wasPreempted, bool hasYielded)
 {
     scheduler_thread_data* schedulerThreadData
         = reinterpret_cast<scheduler_thread_data*>(thread->scheduler_data);

+    if (hasYielded) {
+        schedulerThreadData->time_left = 0;
+        return true;
+    }
+
     bigtime_t time_used = system_time() - schedulerThreadData->quantum_start;
     schedulerThreadData->time_left -= time_used;
     schedulerThreadData->time_left = max_c(0, schedulerThreadData->time_left);
@@ -343,7 +349,8 @@ static inline bool simple_quantum_ended(Thread* thread, bool wasPreempted)
 }


-static inline bigtime_t simple_compute_quantum(Thread* thread)
+static inline bigtime_t
+simple_compute_quantum(Thread* thread)
 {
     scheduler_thread_data* schedulerThreadData
         = reinterpret_cast<scheduler_thread_data*>(thread->scheduler_data);
@@ -424,13 +431,12 @@ simple_reschedule(void)
     if (!schedulerOldThreadData->lost_cpu)
         schedulerOldThreadData->cpu_bound = false;

-    if (simple_quantum_ended(oldThread, oldThread->cpu->preempted)) {
+    if (simple_quantum_ended(oldThread, oldThread->cpu->preempted,
+            oldThread->has_yielded)) {
         if (schedulerOldThreadData->cpu_bound)
             simple_increase_penalty(oldThread);
         else
             simple_cancel_penalty(oldThread);

-        if (oldThread->was_yielded)
+        if (oldThread->has_fully_yielded)
             simple_yield(oldThread);

         TRACE("enqueueing thread %ld into run queue priority = %ld\n",
@@ -457,7 +463,8 @@ simple_reschedule(void)
         break;
     }

-    oldThread->was_yielded = false;
+    oldThread->has_yielded = false;
+    oldThread->has_fully_yielded = false;
     schedulerOldThreadData->lost_cpu = false;

     // select thread with the biggest priority
@@ -384,7 +384,7 @@ reschedule(void)

     nextThread->state = B_THREAD_RUNNING;
     nextThread->next_state = B_THREAD_READY;
-    oldThread->was_yielded = false;
+    oldThread->has_yielded = false;

     // track kernel time (user time is tracked in thread_at_kernel_entry())
     scheduler_update_thread_times(oldThread, nextThread);
@@ -178,7 +178,8 @@ Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
     signal_stack_size(0),
     signal_stack_enabled(false),
     in_kernel(true),
-    was_yielded(false),
+    has_yielded(false),
+    has_fully_yielded(false),
     user_thread(NULL),
     fault_handler(0),
     page_faults_allowed(1),
@@ -2443,25 +2444,15 @@ peek_next_thread_id()
 void
 thread_yield(bool force)
 {
-    if (force) {
-        Thread *thread = thread_get_current_thread();
-        if (thread == NULL)
-            return;
+    Thread *thread = thread_get_current_thread();
+    if (thread == NULL)
+        return;

-        InterruptsSpinLocker _(gSchedulerLock);
+    InterruptsSpinLocker _(gSchedulerLock);

-        // mark the thread as yielded, so it will not be scheduled next
-        thread->was_yielded = true;
-        scheduler_reschedule();
-    } else {
-        Thread *thread = thread_get_current_thread();
-        if (thread == NULL)
-            return;
-
-        // Don't force the thread off the CPU, just reschedule.
-        InterruptsSpinLocker _(gSchedulerLock);
-        scheduler_reschedule();
-    }
+    thread->has_yielded = true;
+    thread->has_fully_yielded = force;
+    scheduler_reschedule();
 }
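Kernel-side callers keep the force parameter: a non-forced yield merely ends the quantum, while a forced one also steps behind equal-priority threads (has_fully_yielded triggers simple_yield() in the simple scheduler). A hypothetical in-kernel sketch, with sWorkDone and wait_for_other_workers() invented for illustration:

static volatile bool sWorkDone = false;
    // hypothetical flag, set by another thread when the work is finished

static void
wait_for_other_workers(void)
{
    while (!sWorkDone) {
        // Fully yield: end this thread's quantum and also step behind
        // equal-priority threads instead of merely rescheduling.
        thread_yield(true);
    }
}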
@@ -3512,7 +3503,7 @@ _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
 void
 _user_thread_yield(void)
 {
-    thread_yield(true);
+    thread_yield(false);
 }