2005-10-25 20:59:12 +04:00
|
|
|
/*
|
2011-01-11 00:54:38 +03:00
|
|
|
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
|
2010-04-12 00:40:58 +04:00
|
|
|
* Copyright 2005-2010, Axel Dörfler, axeld@pinc-software.de.
|
2005-10-25 20:59:12 +04:00
|
|
|
* Distributed under the terms of the MIT License.
|
|
|
|
*/
|
|
|
|
#ifndef KERNEL_SCHEDULER_H
|
|
|
|
#define KERNEL_SCHEDULER_H
|
|
|
|
|
|
|
|
|
2009-12-14 00:18:27 +03:00
|
|
|
#include <cpu.h>
|
|
|
|
#include <int.h>
|
|
|
|
#include <smp.h>
|
|
|
|
#include <thread_types.h>
|
2008-09-03 19:10:44 +04:00
|
|
|
|
|
|
|
|
|
|
|
struct scheduling_analysis;
|
2009-04-18 21:24:58 +04:00
|
|
|
struct SchedulerListener;
|
|
|
|
|
|
|
|
|
2013-10-21 04:17:00 +04:00
|
|
|
typedef enum scheduler_mode {
|
2013-11-07 04:50:20 +04:00
|
|
|
SCHEDULER_MODE_LOW_LATENCY,
|
2013-10-21 04:17:00 +04:00
|
|
|
SCHEDULER_MODE_POWER_SAVING,
|
|
|
|
// ...
|
|
|
|
SCHEDULER_MODE_COUNT
|
|
|
|
} scheduler_mode;
|
|
|
|
|
2008-10-21 16:37:13 +04:00
|
|
|
|
2005-10-25 20:59:12 +04:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2013-10-24 04:04:03 +04:00
|
|
|
/*! Enqueues the thread in the ready-to-run queue.
|
2013-11-08 05:41:26 +04:00
|
|
|
	The caller must hold the enqueued thread's \c scheduler_lock.
|
2013-10-24 04:04:03 +04:00
|
|
|
*/
|
|
|
|
void scheduler_enqueue_in_run_queue(Thread* thread);
|
|
|
|
|
|
|
|
/*! Selects a thread from the ready-to-run queue and, if that's not the
|
|
|
|
calling thread, switches the current CPU's context to run the selected
|
|
|
|
thread.
|
|
|
|
If it's the same thread, the thread will just continue to run.
|
|
|
|
In either case, unless the thread is dead or is sleeping/waiting
|
|
|
|
indefinitely, the function will eventually return.
|
2013-11-08 05:41:26 +04:00
|
|
|
	The caller must hold the current thread's \c scheduler_lock.
|
2013-10-24 04:04:03 +04:00
|
|
|
*/
|
|
|
|
void scheduler_reschedule(void);
|
|
|
|
|
|
|
|
/*! Sets the given thread's priority.
|
|
|
|
The thread may be running or may be in the ready-to-run queue.
|
|
|
|
*/
|
2013-11-08 05:41:26 +04:00
|
|
|
int32 scheduler_set_thread_priority(Thread* thread, int32 priority);
|
2013-10-24 04:04:03 +04:00
|
|
|
|
|
|
|
/*! Called when the Thread structure is first created.
|
|
|
|
Per-thread housekeeping resources can be allocated.
|
|
|
|
Interrupts must be enabled.
|
|
|
|
*/
|
|
|
|
status_t scheduler_on_thread_create(Thread* thread, bool idleThread);
|
|
|
|
|
|
|
|
/*! Called when a Thread structure is initialized and made ready for
|
|
|
|
use.
|
|
|
|
The per-thread housekeeping data structures are reset, if needed.
|
|
|
|
*/
|
|
|
|
void scheduler_on_thread_init(Thread* thread);
|
|
|
|
|
|
|
|
/*! Called when a Thread structure is freed.
|
|
|
|
Frees up any per-thread resources allocated on the scheduler's part. The
|
|
|
|
function may be called even if on_thread_create() failed.
|
|
|
|
Interrupts must be enabled.
|
|
|
|
*/
|
|
|
|
void scheduler_on_thread_destroy(Thread* thread);
|
|
|
|
|
|
|
|
/*! Called in the early boot process to start thread scheduling on the
|
|
|
|
current CPU.
|
|
|
|
The function is called once for each CPU.
|
|
|
|
*/
|
|
|
|
void scheduler_start(void);
|
|
|
|
|
|
|
|
/*! Sets scheduler operation mode.
|
|
|
|
*/
|
|
|
|
status_t scheduler_set_operation_mode(scheduler_mode mode);
|
|
|
|
|
|
|
|
/*! Dumps scheduler specific thread information.
|
|
|
|
*/
|
|
|
|
void scheduler_dump_thread_data(Thread* thread);
|
|
|
|
|
2009-04-18 21:24:58 +04:00
|
|
|
void scheduler_add_listener(struct SchedulerListener* listener);
|
|
|
|
void scheduler_remove_listener(struct SchedulerListener* listener);
|
|
|
|
|
2005-10-25 20:59:12 +04:00
|
|
|
void scheduler_init(void);
|
2010-04-29 19:10:37 +04:00
|
|
|
void scheduler_enable_scheduling(void);
|
2005-10-25 20:59:12 +04:00
|
|
|
|
2010-04-12 00:40:58 +04:00
|
|
|
bigtime_t _user_estimate_max_scheduling_latency(thread_id thread);
|
2008-09-03 19:10:44 +04:00
|
|
|
status_t _user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
|
|
|
|
size_t size, struct scheduling_analysis* analysis);
|
|
|
|
|
2005-10-25 20:59:12 +04:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-12-14 00:18:27 +03:00
|
|
|
|
|
|
|
/*! Reschedules, if necessary.
|
2011-06-12 04:00:23 +04:00
|
|
|
The caller must hold the scheduler lock (with disabled interrupts).
|
2009-12-14 00:18:27 +03:00
|
|
|
*/
|
|
|
|
static inline void
|
|
|
|
scheduler_reschedule_if_necessary_locked()
|
|
|
|
{
|
|
|
|
if (gCPU[smp_get_current_cpu()].invoke_scheduler)
|
|
|
|
scheduler_reschedule();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*! Reschedules, if necessary.
|
|
|
|
Is a no-op, if interrupts are disabled.
|
|
|
|
*/
|
|
|
|
static inline void
|
|
|
|
scheduler_reschedule_if_necessary()
|
|
|
|
{
|
|
|
|
if (are_interrupts_enabled()) {
|
|
|
|
cpu_status state = disable_interrupts();
|
2013-11-08 05:41:26 +04:00
|
|
|
|
|
|
|
Thread* thread = get_cpu_struct()->running_thread;
|
|
|
|
acquire_spinlock(&thread->scheduler_lock);
|
2011-06-12 04:00:23 +04:00
|
|
|
|
2009-12-14 00:18:27 +03:00
|
|
|
scheduler_reschedule_if_necessary_locked();
|
2011-06-12 04:00:23 +04:00
|
|
|
|
2013-11-08 05:41:26 +04:00
|
|
|
release_spinlock(&thread->scheduler_lock);
|
|
|
|
|
2009-12-14 00:18:27 +03:00
|
|
|
restore_interrupts(state);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-10-25 20:59:12 +04:00
|
|
|
#endif /* KERNEL_SCHEDULER_H */
|