diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h
index 3e4ec90261..23e10e9138 100644
--- a/headers/private/kernel/thread_types.h
+++ b/headers/private/kernel/thread_types.h
@@ -443,7 +443,8 @@ struct Thread : TeamThreadIteratorEntry, KernelReferenceable,
 	bool			in_kernel;			// protected by time_lock, only written by
 										// this thread
-	bool			was_yielded;		// protected by scheduler lock
+	bool			has_yielded;		// protected by scheduler lock
+	bool			has_fully_yielded;	// protected by scheduler lock
 	struct scheduler_thread_data* scheduler_data;
 										// protected by scheduler lock
 
 	struct user_thread*	user_thread;	// write-protected by fLock, only
diff --git a/src/system/kernel/scheduler/scheduler_affine.cpp b/src/system/kernel/scheduler/scheduler_affine.cpp
index f5b7294b10..fde10f0d59 100644
--- a/src/system/kernel/scheduler/scheduler_affine.cpp
+++ b/src/system/kernel/scheduler/scheduler_affine.cpp
@@ -447,7 +447,7 @@ affine_reschedule(void)
 	nextThread->state = B_THREAD_RUNNING;
 	nextThread->next_state = B_THREAD_READY;
 
-	oldThread->was_yielded = false;
+	oldThread->has_yielded = false;
 
 	// track kernel time (user time is tracked in thread_at_kernel_entry())
 	scheduler_update_thread_times(oldThread, nextThread);
diff --git a/src/system/kernel/scheduler/scheduler_simple.cpp b/src/system/kernel/scheduler/scheduler_simple.cpp
index bf1a402e0b..de317f1cb6 100644
--- a/src/system/kernel/scheduler/scheduler_simple.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple.cpp
@@ -324,11 +324,17 @@ reschedule_event(timer* /* unused */)
 }
 
 
-static inline bool simple_quantum_ended(Thread* thread, bool wasPreempted)
+static inline bool
+simple_quantum_ended(Thread* thread, bool wasPreempted, bool hasYielded)
 {
 	scheduler_thread_data* schedulerThreadData
 		= reinterpret_cast<scheduler_thread_data*>(thread->scheduler_data);
 
+	if (hasYielded) {
+		schedulerThreadData->time_left = 0;
+		return true;
+	}
+
 	bigtime_t time_used = system_time() - schedulerThreadData->quantum_start;
 	schedulerThreadData->time_left -= time_used;
 	schedulerThreadData->time_left = max_c(0, schedulerThreadData->time_left);
@@ -343,7 +349,8 @@ static inline bool simple_quantum_ended(Thread* thread, bool wasPreempted)
 }
 
 
-static inline bigtime_t simple_compute_quantum(Thread* thread)
+static inline bigtime_t
+simple_compute_quantum(Thread* thread)
 {
 	scheduler_thread_data* schedulerThreadData
 		= reinterpret_cast<scheduler_thread_data*>(thread->scheduler_data);
@@ -424,13 +431,12 @@ simple_reschedule(void)
 	if (!schedulerOldThreadData->lost_cpu)
 		schedulerOldThreadData->cpu_bound = false;
 
-	if (simple_quantum_ended(oldThread, oldThread->cpu->preempted)) {
+	if (simple_quantum_ended(oldThread, oldThread->cpu->preempted,
+			oldThread->has_yielded)) {
 		if (schedulerOldThreadData->cpu_bound)
 			simple_increase_penalty(oldThread);
-		else
-			simple_cancel_penalty(oldThread);
 
-		if (oldThread->was_yielded)
+		if (oldThread->has_fully_yielded)
 			simple_yield(oldThread);
 
 		TRACE("enqueueing thread %ld into run queue priority = %ld\n",
@@ -457,7 +463,8 @@ simple_reschedule(void)
 			break;
 	}
 
-	oldThread->was_yielded = false;
+	oldThread->has_yielded = false;
+	oldThread->has_fully_yielded = false;
 	schedulerOldThreadData->lost_cpu = false;
 
 	// select thread with the biggest priority
diff --git a/src/system/kernel/scheduler/scheduler_simple_smp.cpp b/src/system/kernel/scheduler/scheduler_simple_smp.cpp
index 092fd70176..d30ce9c3a7 100644
--- a/src/system/kernel/scheduler/scheduler_simple_smp.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple_smp.cpp
@@ -384,7 +384,7 @@ reschedule(void)
 	nextThread->state = B_THREAD_RUNNING;
 	nextThread->next_state = B_THREAD_READY;
 
-	oldThread->was_yielded = false;
+	oldThread->has_yielded = false;
 
 	// track kernel time (user time is tracked in thread_at_kernel_entry())
 	scheduler_update_thread_times(oldThread, nextThread);
diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp
index e2dc89c439..781720c1c6 100644
--- a/src/system/kernel/thread.cpp
+++ b/src/system/kernel/thread.cpp
@@ -178,7 +178,8 @@ Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
 	signal_stack_size(0),
 	signal_stack_enabled(false),
 	in_kernel(true),
-	was_yielded(false),
+	has_yielded(false),
+	has_fully_yielded(false),
 	user_thread(NULL),
 	fault_handler(0),
 	page_faults_allowed(1),
@@ -2443,25 +2444,15 @@ peek_next_thread_id()
 void
 thread_yield(bool force)
 {
-	if (force) {
-		Thread *thread = thread_get_current_thread();
-		if (thread == NULL)
-			return;
+	Thread *thread = thread_get_current_thread();
+	if (thread == NULL)
+		return;
 
-		InterruptsSpinLocker _(gSchedulerLock);
+	InterruptsSpinLocker _(gSchedulerLock);
 
-		// mark the thread as yielded, so it will not be scheduled next
-		thread->was_yielded = true;
-		scheduler_reschedule();
-	} else {
-		Thread *thread = thread_get_current_thread();
-		if (thread == NULL)
-			return;
-
-		// Don't force the thread off the CPU, just reschedule.
-		InterruptsSpinLocker _(gSchedulerLock);
-		scheduler_reschedule();
-	}
+	thread->has_yielded = true;
+	thread->has_fully_yielded = force;
+	scheduler_reschedule();
 }
 
 
@@ -3512,7 +3503,7 @@ _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
 void
 _user_thread_yield(void)
 {
-	thread_yield(true);
+	thread_yield(false);
 }
 
 
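Reviewer note: the patch splits the old was_yielded flag into has_yielded
(give up the rest of the current quantum) and has_fully_yielded (additionally
step behind ready threads of the same priority via simple_yield()). Below is
a minimal sketch of the resulting semantics as plain user-land C++, not
kernel code; model_thread_yield() and the trimmed Thread struct are
illustrative stand-ins for the patched kernel functions.

// Sketch only: models the single code path the rewritten thread_yield()
// now takes, with `force` controlling only the strength of the yield.
#include <cstdio>

struct Thread {
	bool has_yielded;        // treat the remaining quantum as used up
	bool has_fully_yielded;  // also step behind same-priority ready threads
};

static void
model_thread_yield(Thread* thread, bool force)
{
	thread->has_yielded = true;
	thread->has_fully_yielded = force;
	// In the kernel, scheduler_reschedule() runs here. With the patch,
	// simple_quantum_ended() sees has_yielded, zeroes time_left and reports
	// the quantum as ended, so the thread is re-enqueued rather than allowed
	// to continue; a non-forced yielder may still be picked again right away
	// if it remains the highest-priority ready thread.
}

int
main()
{
	Thread thread = {};
	model_thread_yield(&thread, false);
		// what _user_thread_yield() now requests
	printf("has_yielded=%d has_fully_yielded=%d\n",
		thread.has_yielded, thread.has_fully_yielded);
	return 0;
}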