anevilyak+mmlr:
* scheduler_enqueue_in_run_queue() now allows the scheduler to return a hint
  as to whether a reschedule is desirable or not. This is used in a few other
  places in order to relegate scheduling decisions entirely to the scheduler,
  rather than the priority hacks previously used. There are probably other
  places in the kernel that could now make use of that information to call
  reschedule() more intelligently.
* Switch the default scheduler over to scheduler_affine().

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@32554 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 009ccc2962
parent e58f5f3d02
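In short, each scheduler's enqueue hook now doubles as a preemption hint: it
returns true when the thread it just enqueued should preempt whatever is
running on the current CPU. A minimal sketch of the wrapper side, assuming
the usual kscheduler.h inline-wrapper pattern — only the bool hook signature
and gScheduler are confirmed by this diff; the wrapper body shown is
illustrative:

    /* Sketch, not verbatim from the tree: forwards the enqueue call to the
     * active scheduler and passes its reschedule hint back to the caller. */
    static inline bool
    scheduler_enqueue_in_run_queue(struct thread* thread)
    {
        // true: a reschedule on the current CPU is desirable, i.e. the
        // enqueued thread should preempt the currently running one
        return gScheduler->enqueue_in_run_queue(thread);
    }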
@@ -16,7 +16,7 @@ struct SchedulerListener;
 
 
 struct scheduler_ops {
-	void (*enqueue_in_run_queue)(struct thread* thread);
+	bool (*enqueue_in_run_queue)(struct thread* thread);
 	void (*reschedule)(void);
 	void (*set_thread_priority)(struct thread* thread, int32 priority);
 	// called when the thread structure is first created -
@@ -196,9 +196,9 @@ thread_unblock_locked(struct thread* thread, status_t status)
 
 	// wake up the thread, if it is sleeping
 	if (thread->state == B_THREAD_WAITING)
-		scheduler_enqueue_in_run_queue(thread);
-
-	return true;
+		return scheduler_enqueue_in_run_queue(thread);
+
+	return false;
 }
 
 
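The semantic shift here is easy to miss: thread_unblock_locked() used to
return whether the thread was actually woken; it now returns true only when
the thread was woken and the scheduler also considers a reschedule desirable.
A hedged caller-side sketch — SpinLocker, gThreadSpinlock and B_OK come from
the surrounding kernel code, scheduler_reschedule() is assumed to be the
matching wrapper, and this exact wake-up site is illustrative, not part of
the commit:

    // Wake a blocked thread and act on the scheduler's hint.
    SpinLocker threadLocker(gThreadSpinlock);
    if (thread_unblock_locked(thread, B_OK)) {
        // the scheduler flagged the woken thread as worth running right away
        scheduler_reschedule();
    }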
@@ -14,7 +14,7 @@
 // 0 - Auto-select scheduler based on detected core count
 // 1 - Always use the simple scheduler
 // 2 - Always use the affine scheduler
-#define SCHEDULER_TYPE 1
+#define SCHEDULER_TYPE 2
 
 struct scheduler_ops* gScheduler;
 SchedulerListenerList gSchedulerListeners;
@@ -145,7 +145,7 @@ affine_get_most_idle_cpu()
 /*!	Enqueues the thread into the run queue.
 	Note: thread lock must be held when entering this function
 */
-static void
+static bool
 affine_enqueue_in_run_queue(struct thread *thread)
 {
 	int32 targetCPU = -1;
@@ -191,15 +191,15 @@ affine_enqueue_in_run_queue(struct thread *thread)
 
 	if (sRunningThreads[targetCPU] != NULL
 		&& thread->priority > sRunningThreads[targetCPU]->priority) {
-		int32 currentCPU = smp_get_current_cpu();
-		if (targetCPU == currentCPU) {
-			// TODO: we want to inform the caller somehow that it should
-			// trigger a reschedule
+		if (targetCPU == smp_get_current_cpu()) {
+			return true;
 		} else {
 			smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
 				SMP_MSG_FLAG_ASYNC);
 		}
 	}
+
+	return false;
 }
 
 static inline struct thread *
@@ -77,7 +77,7 @@ dump_run_queue(int argc, char **argv)
 /*!	Enqueues the thread into the run queue.
 	Note: thread lock must be held when entering this function
 */
-static void
+static bool
 simple_enqueue_in_run_queue(struct thread *thread)
 {
 	if (thread->state == B_THREAD_RUNNING) {
@@ -85,7 +85,7 @@ simple_enqueue_in_run_queue(struct thread *thread)
 		// insert it into the run queue. Set the next state to ready so the
 		// thread is inserted into the run queue on the next reschedule.
 		thread->next_state = B_THREAD_READY;
-		return;
+		return false;
 	}
 
 	thread->state = thread->next_state = B_THREAD_READY;
@@ -150,6 +150,7 @@ simple_enqueue_in_run_queue(struct thread *thread)
 	// notify listeners
 	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
 		thread);
 
+	return false;
 }
 
@@ -939,7 +939,6 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
 		flags |= B_RELEASE_IF_WAITING_ONLY;
 	}
 
-	struct thread* currentThread = thread_get_current_thread();
 	bool reschedule = false;
 
 	SpinLocker threadLocker(gThreadSpinlock);
@@ -963,13 +962,12 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
 				break;
 			}
 
-			thread_unblock_locked(entry->thread, B_OK);
+			reschedule |= thread_unblock_locked(entry->thread, B_OK);
 
 			int delta = min_c(count, entry->count);
 			sSems[slot].u.used.count += delta;
 			sSems[slot].u.used.net_count += delta - entry->count;
 			count -= delta;
-			reschedule |= entry->thread->priority > currentThread->priority;
 		} else {
 			// The thread is no longer waiting, but still queued, which
 			// means acquiration failed and we can just remove it.
@@ -2275,14 +2275,10 @@ thread_block_timeout(timer* timer)
 	// easy.
 
 	struct thread* thread = (struct thread*)timer->user_data;
-	if (thread_unblock_locked(thread, B_TIMED_OUT)) {
-		// We actually woke up the thread. If it has a higher priority than the
-		// currently running thread, we invoke the scheduler.
-		// TODO: Is this really such a good idea or should we do that only when
-		// the woken up thread has realtime priority?
-		if (thread->priority > thread_get_current_thread()->priority)
-			return B_INVOKE_SCHEDULER;
-	}
+	// the scheduler will tell us whether to reschedule or not via
+	// thread_unblock_locked's return
+	if (thread_unblock_locked(thread, B_TIMED_OUT))
+		return B_INVOKE_SCHEDULER;
 
 	return B_HANDLED_INTERRUPT;
 }