0338371f26
* enqueue_in_run_queue() no longer returns whether rescheduling is supposed
  to happen. Instead it sets cpu_ent::invoke_scheduler on the current CPU.
* reschedule() now handles cpu_ent::invoke_scheduler_if_idle(). No need to
  let all callers do that.
* thread_unblock[_locked]() no longer return whether rescheduling is
  supposed to happen.
* Got rid of the B_INVOKE_SCHEDULER handling. The interrupt hooks really
  can't know when it makes sense to reschedule or not.
* Introduced scheduler_reschedule_if_necessary[_locked]() functions for
  checking and invoking the scheduler.
* Some semaphore functions (e.g. delete_sem()) now invoke the scheduler if
  they wake up anything with greater priority.

I've also tried to add scheduler invocations in the condition variable and
mutex/rw_lock code, but that actually has a negative impact on performance,
probably because it causes too much ping-ponging between threads when
multiple locking primitives are involved.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34657 a95241bf-73f2-0310-859d-f6bbb57e9c96
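
As an illustration of the new protocol, a hypothetical wakeup path might look like the sketch below. The helper name wake_up_one_waiter() and the exact thread_unblock_locked() signature are assumptions made for the example; the point is that unblocking no longer returns a reschedule hint, so the caller asks the scheduler afterwards via scheduler_reschedule_if_necessary().

static void
wake_up_one_waiter(struct thread* waiter)
{
    cpu_status state = disable_interrupts();
    GRAB_THREAD_LOCK();

    // Unblocking may enqueue the thread, which can set
    // cpu_ent::invoke_scheduler on the current CPU; it no longer
    // returns a "please reschedule" flag.
    thread_unblock_locked(waiter, B_OK);

    RELEASE_THREAD_LOCK();
    restore_interrupts(state);

    // Act on the flag only after our own locks have been dropped.
    scheduler_reschedule_if_necessary();
}
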
/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_H
#define KERNEL_SCHEDULER_H


#include <cpu.h>
#include <int.h>
#include <smp.h>
#include <thread_types.h>


struct scheduling_analysis;
struct thread;
struct SchedulerListener;


struct scheduler_ops {
    void (*enqueue_in_run_queue)(struct thread* thread);
    void (*reschedule)(void);
    void (*set_thread_priority)(struct thread* thread, int32 priority);
    // called when the thread structure is first created -
    // initialization of per-thread housekeeping data structures should
    // be done here
    void (*on_thread_create)(struct thread* thread);
    // called when a thread structure is initialized and made ready for
    // use - should be used to reset the housekeeping data structures
    // if needed
    void (*on_thread_init)(struct thread* thread);
    // called when a thread structure is freed - freeing up any allocated
    // mem on the scheduler's part should be done here
    void (*on_thread_destroy)(struct thread* thread);

    void (*start)(void);
};

extern struct scheduler_ops* gScheduler;

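/* Illustrative sketch: a scheduler implementation would typically provide
 * its own scheduler_ops table and install it in gScheduler while the kernel
 * brings the scheduler up. All "my_" names below are placeholders, not
 * existing functions.
 *
 *    static struct scheduler_ops sMyOps = {
 *        my_enqueue_in_run_queue,
 *        my_reschedule,
 *        my_set_thread_priority,
 *        my_on_thread_create,
 *        my_on_thread_init,
 *        my_on_thread_destroy,
 *        my_start
 *    };
 *
 *    gScheduler = &sMyOps;
 */
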
#define scheduler_enqueue_in_run_queue(thread) \
    gScheduler->enqueue_in_run_queue(thread)
#define scheduler_set_thread_priority(thread, priority) \
    gScheduler->set_thread_priority(thread, priority)
#define scheduler_reschedule()  gScheduler->reschedule()
#define scheduler_start()   gScheduler->start()
#define scheduler_on_thread_create(thread) \
    gScheduler->on_thread_create(thread)
#define scheduler_on_thread_init(thread) \
    gScheduler->on_thread_init(thread)
#define scheduler_on_thread_destroy(thread) \
    gScheduler->on_thread_destroy(thread)

#ifdef __cplusplus
extern "C" {
#endif

void scheduler_add_listener(struct SchedulerListener* listener);
void scheduler_remove_listener(struct SchedulerListener* listener);

void scheduler_init(void);

status_t _user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
    size_t size, struct scheduling_analysis* analysis);

#ifdef __cplusplus
}
#endif


/*! Reschedules, if necessary.
    The thread spinlock must be held.
*/
static inline void
scheduler_reschedule_if_necessary_locked()
{
    if (gCPU[smp_get_current_cpu()].invoke_scheduler)
        scheduler_reschedule();
}


/*! Reschedules, if necessary.
    This is a no-op if interrupts are disabled.
*/
static inline void
scheduler_reschedule_if_necessary()
{
    if (are_interrupts_enabled()) {
        cpu_status state = disable_interrupts();
        GRAB_THREAD_LOCK();
        scheduler_reschedule_if_necessary_locked();
        RELEASE_THREAD_LOCK();
        restore_interrupts(state);
    }
}
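
/* scheduler_reschedule_if_necessary() is meant for callers that are not
 * holding the thread spinlock and have interrupts enabled; code that already
 * runs with interrupts disabled and the thread spinlock held should call
 * scheduler_reschedule_if_necessary_locked() directly, since the unlocked
 * variant is a no-op in that situation.
 */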


#endif  /* KERNEL_SCHEDULER_H */