haiku/src/system/kernel/timer.cpp
Ingo Weinhold (e01cebeb0a)

* cancel_timer():
  - If the hook of the timer we're cancelling is currently being
    executed, we now wait until it has finished. This is how the BeBook
    specifies the function to behave.
  - Periodic timers would not be cancelled if their hook was being
    invoked at the same time, since they aren't in the queue during
    that time.
  - Since we know the CPU on which the timer is scheduled (timer::cpu),
    we don't have to look through any other CPU's queue to find it.
  - Fixed the return value. It should report whether the timer had
    already fired, which it was not always doing.
* Added private add_timer() flag B_TIMER_ACQUIRE_THREAD_LOCK. It causes
  the thread spinlock to be acquired before the event hook is called.
  cancel_timer() doesn't wait for timers with this flag set; instead,
  the timer interrupt function checks, after acquiring the thread
  spinlock, whether the timer was cancelled in the meantime. Calling
  cancel_timer() with the thread spinlock held thus avoids any race
  conditions and won't deadlock if the event hook needs to acquire the
  thread spinlock, too (see the sketch after this list). This feature
  proves handy for some kernel-internal needs.
* The scheduler now uses a B_TIMER_ACQUIRE_THREAD_LOCK timer and
  cancel_timer() instead of the no longer needed
  _local_timer_cancel_event().
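
A minimal usage sketch (illustration only, not part of this commit; the
names sMyTimer, my_hook, etc. are hypothetical):

    static timer sMyTimer;

    static int32
    my_hook(timer* event)
    {
        // The thread spinlock is already held here, since the timer was
        // added with B_TIMER_ACQUIRE_THREAD_LOCK.
        return B_HANDLED_INTERRUPT;
    }

    static void
    arm_my_timer()
    {
        add_timer(&sMyTimer, &my_hook, 1000,
            B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
    }

    static void
    cancel_with_thread_lock_held()
    {
        // Safe even with the thread spinlock held: cancel_timer() does
        // not wait for the hook of such a timer, and timer_interrupt()
        // re-checks whether the timer was cancelled before calling it.
        cancel_timer(&sMyTimer);
    }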


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25098 a95241bf-73f2-0310-859d-f6bbb57e9c96
2008-04-22 15:29:22 +00:00


/*
 * Copyright 2002-2008, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*!	Policy info for timers */

#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>


struct per_cpu_timer_data {
	spinlock		lock;
	timer* volatile	events;			// sorted by ascending schedule_time
	timer* volatile	current_event;	// event whose hook is being invoked
	vint32			current_event_in_progress;
};

static per_cpu_timer_data sPerCPU[B_MAX_CPU_COUNT];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#if __INTEL__
#	define PAUSE() asm volatile ("pause;")
#else
#	define PAUSE()
#endif
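

/*!	Initializes the hardware timer; simply delegates to the
	architecture-specific timer backend.
*/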
status_t
timer_init(kernel_args *args)
{
	TRACE(("timer_init: entry\n"));

	return arch_init_timer(args);
}


/*!	Inserts \a event into \a list, which is kept sorted by ascending
	schedule_time.
	NOTE: expects interrupts to be off
*/
static void
add_event_to_list(timer *event, timer * volatile *list)
{
	timer *next;
	timer *last = NULL;

	// stick it in the event list
	for (next = *list; next; last = next, next = (timer *)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}
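

/*!	Called by the architecture-specific hardware timer interrupt handler.
	Dispatches all events that have expired on this CPU, reschedules
	periodic timers, and programs the hardware timer for the next event.
*/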
int32
timer_interrupt()
{
	timer *event;
	spinlock *spinlock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time 0x%x 0x%x, cpu %d\n", system_time(),
		smp_get_current_cpu()));

	spinlock = &cpuData.lock;

	acquire_spinlock(spinlock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer *)event->next;
		cpuData.current_event = event;
		cpuData.current_event_in_progress = 1;
		event->schedule_time = 0;

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook) {
			bool callHook = true;

			// we may need to acquire the thread spinlock
			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0) {
				GRAB_THREAD_LOCK();

				// If the event has been cancelled in the meantime, we don't
				// call the hook anymore.
				if (cpuData.current_event == NULL)
					callHook = false;
			}

			if (callHook)
				rc = event->hook(event);

			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0)
				RELEASE_THREAD_LOCK();
		}

		cpuData.current_event_in_progress = 0;

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			bigtime_t scheduleTime = system_time() + event->period;
			if (scheduleTime == 0) {
				// if we wrapped around and happen to hit zero, set
				// it to one, since zero represents not scheduled
				scheduleTime = 1;
			}
			event->schedule_time = (int64)scheduleTime;
			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// setup the next hardware timer
	if (cpuData.events != NULL) {
		arch_timer_set_hardware_timer(
			(bigtime_t)cpuData.events->schedule_time - system_time());
	}

	release_spinlock(spinlock);

	return rc;
}
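

/*!	Schedules \a event to fire after/at \a period microseconds (relative
	or absolute, depending on \a flags) on the current CPU. \a flags is
	one of B_ONE_SHOT_ABSOLUTE_TIMER, B_ONE_SHOT_RELATIVE_TIMER, or
	B_PERIODIC_TIMER, optionally or'ed with B_TIMER_ACQUIRE_THREAD_LOCK.
*/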
status_t
add_timer(timer *event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t scheduleTime;
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	scheduleTime = period;
	if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
		scheduleTime += currentTime;
	if (scheduleTime == 0)
		scheduleTime = 1;

	event->schedule_time = (int64)scheduleTime;
	event->period = period;
	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the new event is at the head of the list, we have to reprogram
	// the hardware timer
	if (event == cpuData.events)
		arch_timer_set_hardware_timer(scheduleTime - currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}
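

/*!	Removes \a event from the queue of the CPU it is scheduled on.
	Returns \c true, if the timer has already fired (i.e. its hook has
	been or is being invoked), \c false otherwise.
*/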
bool
cancel_timer(timer *event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= B_MAX_CPU_COUNT)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	timer *current = cpuData.events;

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer *last = NULL;
		while (current != NULL) {
			if (current == event) {
				// we found it
				if (current == cpuData.events)
					cpuData.events = current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}
			last = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer and has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else {
				arch_timer_set_hardware_timer(
					(bigtime_t)cpuData.events->schedule_time - system_time());
			}
		}

		return false;
	} else {
		// The timer hook is currently being executed. We clear the current
		// event so that timer_interrupt() will not reschedule periodic
		// timers.
		cpuData.current_event = NULL;
		current = event;

		// Unless this is a kernel-private timer that also requires the
		// thread lock to be held while calling the event hook, we'll have
		// to wait for the hook to complete. When called from the timer hook
		// we don't wait either, of course. Waiting on the current CPU would
		// deadlock, since the in-progress flag was set by this very CPU.
		if ((event->flags & B_TIMER_ACQUIRE_THREAD_LOCK) == 0
			&& cpu != smp_get_current_cpu()) {
			spinLocker.Unlock();

			while (cpuData.current_event_in_progress == 1)
				PAUSE();
		}

		return true;
	}
}
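

/*!	Busy-waits for the given number of microseconds, using the PAUSE()
	spin-loop hint on x86.
*/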
void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds) {
		PAUSE();
	}
}