/* Policy info for timers */

/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/

#include <kernel.h>
#include <console.h>
#include <debug.h>
#include <thread.h>
#include <arch/int.h>
#include <smp.h>
#include <vm.h>
#include <int.h>
#include <timer.h>
#include <Errors.h>
#include <stage2.h>
#include <OS.h>

#include <arch/cpu.h>
#include <arch/timer.h>
#include <arch/smp.h>

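/* Pending timer events are kept in one sorted queue per cpu (soonest wakeup
   first), each guarded by its own spinlock, so the timer interrupt normally
   only has to touch the current cpu's data. */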
static timer * volatile events[SMP_MAX_CPUS] = { NULL, };
static spinlock timer_spinlock[SMP_MAX_CPUS] = { 0, };

int timer_init(kernel_args *ka)
{
	dprintf("timer_init: entry\n");

	return arch_init_timer(ka);
}

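// Inserts the event into the given list, which is kept sorted by entry.key
// (the absolute wakeup time in microseconds), so the head of the list is
// always the next event due to fire.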
// NOTE: expects interrupts to be off
static void add_event_to_list(timer *event, timer * volatile *list)
{
	timer *next;
	timer *last = NULL;

	// stick it in the event list
	for (next = *list; next; last = next, next = (timer *)next->entry.next) {
		if ((bigtime_t)next->entry.key >= (bigtime_t)event->entry.key)
			break;
	}

	if (last != NULL) {
		event->entry.next = last->entry.next;
		last->entry.next = (void *)event;
	}
	else {
		event->entry.next = (void *)next;
		*list = event;
	}
}

int timer_interrupt()
{
	bigtime_t sched_time;
	timer *event;
	spinlock *spinlock;
	int curr_cpu = smp_get_current_cpu();
	int rc = B_HANDLED_INTERRUPT;

//	dprintf("timer_interrupt: time 0x%x 0x%x, cpu %d\n", system_time(), smp_get_current_cpu());

	spinlock = &timer_spinlock[curr_cpu];

	acquire_spinlock(spinlock);

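	// pop events off the head of this cpu's queue and fire them for as long
	// as the head's wakeup time has already passed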
restart_scan:
	event = events[curr_cpu];
	if ((event) && ((bigtime_t)event->entry.key < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		events[curr_cpu] = (timer *)event->entry.next;
		event->entry.key = 0;

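		// drop the queue lock while the hook runs; the hook may take a while
		// and may itself want to add or cancel timers on this cpu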
		release_spinlock(spinlock);

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook) {
			rc = event->hook(event);
//			if (event->func(event->data) == INT_RESCHEDULE)
//				rc = INT_RESCHEDULE;
		}

		acquire_spinlock(spinlock);

		if (mode == B_PERIODIC_TIMER) {
			// we need to adjust it and add it back to the list
			sched_time = system_time() + event->period;
			if (sched_time == 0)
				sched_time = 1;	// if we wrapped around and happen
								// to hit zero, set it to one, since
								// zero represents not scheduled
			event->entry.key = (int64)sched_time;
			add_event_to_list(event, &events[curr_cpu]);
		}

		goto restart_scan; // the list may have changed
	}

	// setup the next hardware timer
	if (events[curr_cpu] != NULL)
		arch_timer_set_hardware_timer((bigtime_t)events[curr_cpu]->entry.key - system_time());

	release_spinlock(spinlock);

	return rc;
}

status_t add_timer(timer *t, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t sched_time;
	bigtime_t curr_time = system_time();
	int state;
	int curr_cpu;

	if ((!t) || (!hook) || (period < 0))
		return B_BAD_VALUE;

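	// turn the period into an absolute wakeup time: a B_ONE_SHOT_ABSOLUTE_TIMER
	// already carries an absolute system_time() value, everything else is
	// relative to now (zero is reserved for "not scheduled", hence the nudge to 1)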
	sched_time = period;
	if (flags != B_ONE_SHOT_ABSOLUTE_TIMER)
		sched_time += curr_time;
	if (sched_time == 0)
		sched_time = 1;

	t->entry.key = (int64)sched_time;
	t->period = period;
	t->hook = hook;
	t->flags = flags;

	state = disable_interrupts();
	curr_cpu = smp_get_current_cpu();
	acquire_spinlock(&timer_spinlock[curr_cpu]);

	add_event_to_list(t, &events[curr_cpu]);
	t->cpu = curr_cpu;

	// if the new event ended up at the head of the list, reprogram the hardware timer
	if (t == events[curr_cpu])
		arch_timer_set_hardware_timer(sched_time - curr_time);

	release_spinlock(&timer_spinlock[curr_cpu]);
	restore_interrupts(state);

	return B_OK;
}

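/*
 * Usage sketch (illustrative only, not part of this file): a caller embeds a
 * timer in its own data and arms it with add_timer(). The hook runs from the
 * timer interrupt, so it should stay short. The names my_timer and
 * my_tick_hook are made up for the example, and the hook assumes timer_hook
 * takes the timer and returns an interrupt code such as B_HANDLED_INTERRUPT;
 * the period is in microseconds, like system_time().
 *
 *	static timer my_timer;
 *
 *	static int32 my_tick_hook(timer *t)
 *	{
 *		// runs every 10 ms until cancel_timer(&my_timer) is called
 *		return B_HANDLED_INTERRUPT;
 *	}
 *
 *	add_timer(&my_timer, &my_tick_hook, 10000, B_PERIODIC_TIMER);
 */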
/* this is a fast path to be called from reschedule and from cancel_timer */
/* must always be invoked with interrupts disabled */
int _local_timer_cancel_event(int curr_cpu, timer *event)
{
	timer *last = NULL;
	timer *e;

	acquire_spinlock(&timer_spinlock[curr_cpu]);
	e = events[curr_cpu];
	while (e != NULL) {
		if (e == event) {
			// we found it
			if (e == events[curr_cpu])
				events[curr_cpu] = (timer *)e->entry.next;
			else
				last->entry.next = e->entry.next;
			e->entry.next = NULL;
			// break out of the whole thing
			break;
		}
		last = e;
		e = (timer *)e->entry.next;
	}

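	// the head of the queue may just have changed, so reprogram (or clear)
	// the hardware timer to match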
	if (events[curr_cpu] == NULL)
		arch_timer_clear_hardware_timer();
	else
		arch_timer_set_hardware_timer((bigtime_t)events[curr_cpu]->entry.key - system_time());

	release_spinlock(&timer_spinlock[curr_cpu]);

	return (e == event ? 0 : B_ERROR);
}

int local_timer_cancel_event(timer *event)
{
	return _local_timer_cancel_event(smp_get_current_cpu(), event);
}

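/*
 * Removes a pending timer event from whichever cpu's queue holds it.
 * The return value is only true when the event was found on another cpu's
 * queue and its wakeup time had already passed; an event cancelled from the
 * current cpu's queue yields false.
 */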
bool cancel_timer(timer *event)
{
	int state;
	timer *last = NULL;
	timer *e;
	bool foundit = false;
	int num_cpus = smp_get_num_cpus();
	int cpu = 0;
	int curr_cpu;

//	if (event->sched_time == 0)
//		return 0; // it's not scheduled

	state = disable_interrupts();
	curr_cpu = smp_get_current_cpu();

	// walk through all of the cpus' timer queues
	//
	// We start by peeking our own queue, aiming for
	// a cheap match. If this fails, we start harassing
	// other cpus.
	//
	if (_local_timer_cancel_event(curr_cpu, event) < 0) {
		for (cpu = 0; cpu < num_cpus; cpu++) {
			if (cpu == curr_cpu)
				continue;
			acquire_spinlock(&timer_spinlock[cpu]);
			e = events[cpu];
			while (e != NULL) {
				if (e == event) {
					// we found it
					foundit = true;
					if (e == events[cpu])
						events[cpu] = (timer *)e->entry.next;
					else
						last->entry.next = e->entry.next;
					e->entry.next = NULL;
					// break out of the whole thing
					goto done;
				}
				last = e;
				e = (timer *)e->entry.next;
			}
			release_spinlock(&timer_spinlock[cpu]);
		}
	}
done:

	if (foundit)
		release_spinlock(&timer_spinlock[cpu]);
	restore_interrupts(state);

	if (foundit && ((bigtime_t)event->entry.key < system_time()))
		return true;
	return false;
}

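// Busy-waits for the given number of microseconds; only suitable for very
// short delays since it keeps the cpu spinning the whole time.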
void spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds)
		;
}