kernel: Remove B_TIMER_ACQUIRE_SCHEDULER_LOCK flag

The flag's main purpose is to avoid race conditions between the event
handler and cancel_timer(). However, cancel_timer() is safe even
without gSchedulerLock.

If the event is scheduled on a CPU other than the one that invokes
cancel_timer(), then cancel_timer() either disables the event before
its handler starts executing or waits until the event handler is done.

If the event is scheduled on the same CPU that calls cancel_timer(),
then, since cancel_timer() disables interrupts, either the event
handler has already run before cancel_timer() is entered, or the event
is already disabled by the time the timer interrupt handler starts
running.
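
The argument can be summarized in a short sketch. This is not the actual
cancel_timer() implementation: the names current_event and
current_event_in_progress mirror per-CPU fields visible in the timer.cpp
hunks below, while the function, its parameters and the per_cpu_timer_data
layout shown here are hypothetical and only illustrate the two cases above.

#include <cstddef>

struct timer_event;

// Hypothetical per-CPU bookkeeping, modeled on the fields that appear in
// the timer.cpp hunks of this commit.
struct per_cpu_timer_data {
	timer_event* volatile	current_event;
	volatile int			current_event_in_progress;
};

// Sketch only: assumes interrupts are disabled and the per-CPU timer
// spinlock is held on entry, as in the real cancel_timer().
static void
cancel_timer_sketch(per_cpu_timer_data& cpuData, timer_event* event,
	int eventCPU, int currentCPU)
{
	if (cpuData.current_event != event) {
		// The hook is not running (or has already finished); the event can
		// simply be unlinked from the per-CPU queue.
		return;
	}

	// Mark the event cancelled so that a hook invocation which has not
	// started yet will be skipped by the timer interrupt handler.
	cpuData.current_event = NULL;

	if (eventCPU != currentCPU) {
		// Cross-CPU case: the hook may be executing right now, so wait
		// until the other CPU reports completion. (The real code drops the
		// spinlock before spinning, as the cancel_timer() hunk at the end
		// of the diff shows.)
		while (cpuData.current_event_in_progress == 1)
			;
	}

	// Same-CPU case: with interrupts disabled here, the hook either already
	// ran before this function was entered, or it will see
	// current_event == NULL when the timer interrupt eventually fires.
}

With this reasoning the scheduler lock no longer needs to be taken by
timer_interrupt() on the hooks' behalf; hooks that really need it, such as
UserTimer::HandleTimerHook() in the diff below, acquire it themselves.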
Pawel Dziepak 2013-10-31 01:49:43 +01:00
parent c8dd9f7780
commit d8fcc8a825
5 changed files with 12 additions and 56 deletions


@@ -23,13 +23,8 @@ struct kernel_args;
 #define B_TIMER_USE_TIMER_STRUCT_TIMES 0x4000
 	// For add_timer(): Use the timer::schedule_time (absolute time) and
 	// timer::period values instead of the period parameter.
-#define B_TIMER_ACQUIRE_SCHEDULER_LOCK 0x8000
-	// The timer hook is invoked with the scheduler lock held. When invoking
-	// cancel_timer() with the scheduler lock held, too, this helps to avoid
-	// race conditions.
 #define B_TIMER_FLAGS \
-	(B_TIMER_USE_TIMER_STRUCT_TIMES | B_TIMER_ACQUIRE_SCHEDULER_LOCK \
-		| B_TIMER_REAL_TIME_BASE)
+	(B_TIMER_USE_TIMER_STRUCT_TIMES | B_TIMER_REAL_TIME_BASE)
 /* Timer info structure */
 struct timer_info {


@@ -191,6 +191,7 @@ UserTimer::Cancel()
 /*static*/ int32
 UserTimer::HandleTimerHook(struct timer* timer)
 {
+	InterruptsSpinLocker _(gSchedulerLock);
 	((UserTimer*)timer->user_data)->HandleTimer();
 	return B_HANDLED_INTERRUPT;
 }
@@ -352,9 +353,7 @@ SystemTimeUserTimer::ScheduleKernelTimer(bigtime_t now,
 	CheckPeriodicOverrun(now);
 	uint32 timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER
-		| B_TIMER_USE_TIMER_STRUCT_TIMES | B_TIMER_ACQUIRE_SCHEDULER_LOCK;
-		// We use B_TIMER_ACQUIRE_SCHEDULER_LOCK to avoid race conditions
-		// between setting/canceling the timer and the event handler.
+		| B_TIMER_USE_TIMER_STRUCT_TIMES;
 	fTimer.schedule_time = std::max(fNextTime, (bigtime_t)0);
 	fTimer.period = 0;
@@ -692,10 +691,7 @@ TeamTimeUserTimer::_Update(bool unscheduling)
 		// rounding errors.
 	add_timer(&fTimer, &HandleTimerHook, fTimer.schedule_time,
-		B_ONE_SHOT_ABSOLUTE_TIMER | B_TIMER_USE_TIMER_STRUCT_TIMES
-			| B_TIMER_ACQUIRE_SCHEDULER_LOCK);
-		// We use B_TIMER_ACQUIRE_SCHEDULER_LOCK to avoid race conditions
-		// between setting/canceling the timer and the event handler.
+		B_ONE_SHOT_ABSOLUTE_TIMER | B_TIMER_USE_TIMER_STRUCT_TIMES);
 		// We use B_TIMER_USE_TIMER_STRUCT_TIMES, so period remains 0, which
 		// our base class expects.
@@ -989,11 +985,7 @@ ThreadTimeUserTimer::Start()
 	fTimer.schedule_time = 0;
 	fTimer.period = 0;
-	uint32 flags = B_ONE_SHOT_ABSOLUTE_TIMER
-		| B_TIMER_USE_TIMER_STRUCT_TIMES | B_TIMER_ACQUIRE_SCHEDULER_LOCK;
-		// We use B_TIMER_ACQUIRE_SCHEDULER_LOCK to avoid race conditions
-		// between setting/canceling the timer and the event handler.
+	uint32 flags = B_ONE_SHOT_ABSOLUTE_TIMER | B_TIMER_USE_TIMER_STRUCT_TIMES;
 	add_timer(&fTimer, &HandleTimerHook, fTimer.schedule_time, flags);
 	fScheduled = true;


@@ -1507,7 +1507,7 @@ _scheduler_reschedule(void)
 	if (!thread_is_idle_thread(nextThread)) {
 		bigtime_t quantum = compute_quantum(oldThread);
 		add_timer(quantumTimer, &reschedule_event, quantum,
-			B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_SCHEDULER_LOCK);
+			B_ONE_SHOT_RELATIVE_TIMER);
 		update_cpu_performance(nextThread, thisCore);
 	} else


@@ -1923,13 +1923,8 @@ thread_exit(void)
 	}
 	if (team != kernelTeam) {
-		// Cancel previously installed alarm timer, if any. Hold the scheduler
-		// lock to make sure that when cancel_timer() returns, the alarm timer
-		// hook will not be invoked anymore (since
-		// B_TIMER_ACQUIRE_SCHEDULER_LOCK is used).
-		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+		// Cancel previously installed alarm timer, if any.
 		cancel_timer(&thread->alarm);
-		schedulerLocker.Unlock();
 		// Delete all user timers associated with the thread.
 		ThreadLocker threadLocker(thread);
@@ -2782,12 +2777,8 @@ thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
 static status_t
 thread_block_timeout(timer* timer)
 {
-	// The timer has been installed with B_TIMER_ACQUIRE_SCHEDULER_LOCK, so
-	// we're holding the scheduler lock already. This makes things comfortably
-	// easy.
 	Thread* thread = (Thread*)timer->user_data;
-	thread_unblock_locked(thread, B_TIMED_OUT);
+	thread_unblock(thread, B_TIMED_OUT);
 	return B_HANDLED_INTERRUPT;
 }
@@ -2858,10 +2849,7 @@ thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
 		&& timeout != B_INFINITE_TIMEOUT;
 	if (useTimer) {
-		// Timer flags: absolute/relative + "acquire thread lock". The latter
-		// avoids nasty race conditions and deadlock problems that could
-		// otherwise occur between our cancel_timer() and a concurrently
-		// executing thread_block_timeout().
+		// Timer flags: absolute/relative.
 		uint32 timerFlags;
 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
@@ -2870,7 +2858,6 @@ thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
 				timerFlags |= B_TIMER_REAL_TIME_BASE;
 		}
-		timerFlags |= B_TIMER_ACQUIRE_SCHEDULER_LOCK;
 		// install the timer
 		thread->wait.unblock_timer.user_data = thread;


@@ -274,25 +274,8 @@ timer_interrupt()
 		// call the callback
 		// note: if the event is not periodic, it is ok
 		// to delete the event structure inside the callback
-		if (event->hook) {
-			bool callHook = true;
-			// we may need to acquire the scheduler lock
-			if ((mode & B_TIMER_ACQUIRE_SCHEDULER_LOCK) != 0) {
-				acquire_spinlock(&gSchedulerLock);
-				// If the event has been cancelled in the meantime, we don't
-				// call the hook anymore.
-				if (cpuData.current_event == NULL)
-					callHook = false;
-			}
-			if (callHook)
-				rc = event->hook(event);
-			if ((mode & B_TIMER_ACQUIRE_SCHEDULER_LOCK) != 0)
-				release_spinlock(&gSchedulerLock);
-		}
+		if (event->hook)
+			rc = event->hook(event);
 		cpuData.current_event_in_progress = 0;
@@ -461,8 +444,7 @@ cancel_timer(timer* event)
 	// lock to be held while calling the event hook, we'll have to wait
 	// for the hook to complete. When called from the timer hook we don't
 	// wait either, of course.
-	if ((event->flags & B_TIMER_ACQUIRE_SCHEDULER_LOCK) == 0
-		&& cpu != smp_get_current_cpu()) {
+	if (cpu != smp_get_current_cpu()) {
 		spinLocker.Unlock();
 		while (cpuData.current_event_in_progress == 1) {