/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2005-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_H
#define KERNEL_SCHEDULER_H


#include <cpu.h>
#include <int.h>
#include <smp.h>
#include <thread_types.h>


struct scheduling_analysis;
struct thread;
struct SchedulerListener;


struct scheduler_ops {
	void (*enqueue_in_run_queue)(struct thread* thread);
	void (*reschedule)(void);
	void (*set_thread_priority)(struct thread* thread, int32 priority);
	bigtime_t (*estimate_max_scheduling_latency)(struct thread* thread);

	void (*on_thread_create)(struct thread* thread);
		// called when the thread structure is first created -
		// initialization of per-thread housekeeping data structures should
		// be done here
	void (*on_thread_init)(struct thread* thread);
		// called when a thread structure is initialized and made ready for
		// use - should be used to reset the housekeeping data structures
		// if needed
	void (*on_thread_destroy)(struct thread* thread);
		// called when a thread structure is freed - any memory the scheduler
		// allocated for the thread should be freed here

	void (*start)(void);
};
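/*!	Illustrative sketch, not part of this header: a scheduler implementation
	would typically provide its hook functions in a scheduler_ops table and
	install it as gScheduler during initialization. All names below
	(sSimpleOps, simple_*) are hypothetical.

		static void
		simple_enqueue_in_run_queue(struct thread* thread)
		{
			// insert the thread into this implementation's run queue
		}

		// ... remaining hooks defined analogously ...

		static struct scheduler_ops sSimpleOps = {
			simple_enqueue_in_run_queue,
			simple_reschedule,
			simple_set_thread_priority,
			simple_estimate_max_scheduling_latency,
			simple_on_thread_create,
			simple_on_thread_init,
			simple_on_thread_destroy,
			simple_start
		};

		// in the implementation's init code:
		//	gScheduler = &sSimpleOps;
*/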

extern struct scheduler_ops* gScheduler;

#define scheduler_enqueue_in_run_queue(thread)	\
	gScheduler->enqueue_in_run_queue(thread)
#define scheduler_set_thread_priority(thread, priority)	\
	gScheduler->set_thread_priority(thread, priority)
#define scheduler_reschedule() gScheduler->reschedule()
#define scheduler_start() gScheduler->start()
#define scheduler_on_thread_create(thread)	\
	gScheduler->on_thread_create(thread)
#define scheduler_on_thread_init(thread)	\
	gScheduler->on_thread_init(thread)
#define scheduler_on_thread_destroy(thread)	\
	gScheduler->on_thread_destroy(thread)
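/*!	Illustrative sketch, not part of this header: kernel code calls the
	scheduler only through the wrapper macros above, so the implementation
	behind gScheduler can be swapped without touching callers. The helper
	below is hypothetical; it assumes the caller already holds the thread
	spinlock and that the thread's state field and B_THREAD_READY apply.

		static void
		make_thread_ready(struct thread* thread)
		{
			// mark the thread runnable and hand it to the active scheduler
			thread->state = B_THREAD_READY;
			scheduler_enqueue_in_run_queue(thread);
		}
*/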


#ifdef __cplusplus
extern "C" {
#endif

void scheduler_add_listener(struct SchedulerListener* listener);
void scheduler_remove_listener(struct SchedulerListener* listener);

void scheduler_init(void);
void scheduler_enable_scheduling(void);

bigtime_t _user_estimate_max_scheduling_latency(thread_id thread);
status_t _user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
	size_t size, struct scheduling_analysis* analysis);

#ifdef __cplusplus
}
#endif


/*!	Reschedules, if necessary.
	The thread spinlock must be held.
*/
static inline void
scheduler_reschedule_if_necessary_locked()
{
	if (gCPU[smp_get_current_cpu()].invoke_scheduler)
		scheduler_reschedule();
}


/*!	Reschedules, if necessary.
	Is a no-op, if interrupts are disabled.
*/
static inline void
scheduler_reschedule_if_necessary()
{
	if (are_interrupts_enabled()) {
		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();
		scheduler_reschedule_if_necessary_locked();
		RELEASE_THREAD_LOCK();
		restore_interrupts(state);
	}
}
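/*!	Illustrative sketch, not part of this header: a typical caller marks the
	current CPU for rescheduling and lets one of the inline helpers above act
	on the request at a safe point. The function below is hypothetical.

		static void
		request_reschedule_example(void)
		{
			// ask for a reschedule on this CPU ...
			gCPU[smp_get_current_cpu()].invoke_scheduler = true;

			// ... and honor the request once it is safe; this is a no-op
			// while interrupts are disabled, so it can be called
			// unconditionally from most contexts.
			scheduler_reschedule_if_necessary();
		}
*/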


#endif	/* KERNEL_SCHEDULER_H */