* All scheduler implementations:

  - enqueue_in_run_queue() no longer returns whether rescheduling is supposed
    to happen. Instead it sets cpu_ent::invoke_scheduler on the current CPU.
  - reschedule() now handles cpu_ent::invoke_scheduler_if_idle. No need to
    let all callers do that.
* thread_unblock[_locked]() no longer return whether rescheduling is supposed
  to happen.
* Got rid of the B_INVOKE_SCHEDULER handling. The interrupt hooks really
  can't know whether it makes sense to reschedule or not.
* Introduced scheduler_reschedule_if_necessary[_locked]() functions for
  checking+invoking the scheduler.
* Some semaphore functions (e.g. delete_sem()) now invoke the scheduler if
  they wake up anything with greater priority.
  I've also tried to add scheduler invocations in the condition variable and
  mutex/rw_lock code, but that actually has a negative impact on performance,
  probably because it causes too much ping-ponging between threads when
  multiple locking primitives are involved.
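
  A before/after sketch of the new calling convention (the function names
  are the real ones from this change; the two-liner itself is illustrative,
  not code from the commit):

	// Old contract: every wakeup reported back and every caller decided:
	//
	//     if (thread_unblock_locked(thread, B_OK))
	//         scheduler_reschedule();
	//
	// New contract: the wakeup merely flags the CPU, the caller asks once:
	thread_unblock_locked(thread, B_OK);
	scheduler_reschedule_if_necessary_locked();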


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34657 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2009-12-13 21:18:27 +00:00
parent 89e87505cf
commit 0338371f26
15 changed files with 180 additions and 136 deletions


@ -7,7 +7,10 @@
#define KERNEL_SCHEDULER_H
#include <SupportDefs.h>
#include <cpu.h>
#include <int.h>
#include <smp.h>
#include <thread_types.h>
struct scheduling_analysis;
@ -16,7 +19,7 @@ struct SchedulerListener;
struct scheduler_ops {
bool (*enqueue_in_run_queue)(struct thread* thread);
void (*enqueue_in_run_queue)(struct thread* thread);
void (*reschedule)(void);
void (*set_thread_priority)(struct thread* thread, int32 priority);
// called when the thread structure is first created -
@ -65,4 +68,32 @@ status_t _user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
}
#endif
/*!	Reschedules, if necessary.
	The thread spinlock must be held.
*/
static inline void
scheduler_reschedule_if_necessary_locked()
{
	if (gCPU[smp_get_current_cpu()].invoke_scheduler)
		scheduler_reschedule();
}


/*!	Reschedules, if necessary.
	Is a no-op, if interrupts are disabled.
*/
static inline void
scheduler_reschedule_if_necessary()
{
	if (are_interrupts_enabled()) {
		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		scheduler_reschedule_if_necessary_locked();

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);
	}
}
#endif /* KERNEL_SCHEDULER_H */
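
A hedged usage sketch for the unlocked variant (my_object and its waiter
field are invented): no locks are needed at the call site, since both
functions grab the thread lock themselves.

	static void
	notify_io_done(struct my_object* object)
	{
		// Grabs the thread lock internally; the enqueue may set this
		// CPU's cpu_ent::invoke_scheduler flag.
		thread_unblock(object->waiter, B_OK);

		// Honors the flag; a no-op while interrupts are disabled.
		scheduler_reschedule_if_necessary();
	}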


@ -1,4 +1,5 @@
/*
* Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -97,7 +98,7 @@ status_t thread_block();
status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
status_t thread_block_with_timeout_locked(uint32 timeoutFlags,
bigtime_t timeout);
bool thread_unblock(status_t threadID, status_t status);
void thread_unblock(status_t threadID, status_t status);
// used in syscalls.c
status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
@ -188,17 +189,15 @@ thread_block_locked(struct thread* thread)
}
static inline bool
static inline void
thread_unblock_locked(struct thread* thread, status_t status)
{
if (atomic_test_and_set(&thread->wait.status, status, 1) != 1)
return false;
return;
// wake up the thread, if it is sleeping
if (thread->state == B_THREAD_WAITING)
return scheduler_enqueue_in_run_queue(thread);
return false;
scheduler_enqueue_in_run_queue(thread);
}
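
Why the atomic_test_and_set() above is enough synchronization, sketched
with the actual protocol: thread_prepare_to_block() sets wait.status to 1
("still blocked"), and only the first waker swaps its own status in; every
later waker sees a value != 1 and returns without touching the run queue.

	// Two racing wakers, e.g. thread_block_timeout() against a regular
	// wakeup. Exactly one of them enqueues the thread:
	thread_unblock_locked(thread, B_TIMED_OUT);	// swaps 1 -> B_TIMED_OUT
	thread_unblock_locked(thread, B_OK);		// sees != 1, does nothing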


@ -1,5 +1,5 @@
/*
* Copyright 2003-2006, Haiku Inc. All rights reserved.
* Copyright 2003-2009, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
@ -167,7 +167,6 @@ void
m68k_exception_entry(struct iframe *iframe)
{
int vector = iframe->cpu.vector >> 2;
int ret = B_HANDLED_INTERRUPT;
bool hardwareInterrupt = false;
if (vector != -1) {
@ -234,7 +233,7 @@ m68k_exception_entry(struct iframe *iframe)
addr_t newip;
ret = vm_page_fault(fault_address(iframe), iframe->cpu.pc,
vm_page_fault(fault_address(iframe), iframe->cpu.pc,
fault_was_write(iframe), // store or load
iframe->cpu.sr & SR_S, // was the system in user or supervisor
&newip);
@ -261,18 +260,17 @@ m68k_exception_entry(struct iframe *iframe)
if (!sPIC) {
panic("m68k_exception_entry(): external interrupt although we "
"don't have a PIC driver!");
ret = B_HANDLED_INTERRUPT;
break;
}
#endif
M68KPlatform::Default()->AcknowledgeIOInterrupt(vector);
dprintf("handling I/O interrupts...\n");
ret = int_io_interrupt_handler(vector, true);
int_io_interrupt_handler(vector, true);
#if 0
while ((irq = sPIC->acknowledge_io_interrupt(sPICCookie)) >= 0) {
// TODO: correctly pass level-triggered vs. edge-triggered to the handler!
ret = int_io_interrupt_handler(irq, true);
int_io_interrupt_handler(irq, true);
}
#endif
dprintf("handling I/O interrupts done\n");
@ -285,7 +283,7 @@ dprintf("handling I/O interrupts done\n");
// vectors >= 64 are user defined vectors, used for IRQ
if (vector >= 64) {
if (M68KPlatform::Default()->AcknowledgeIOInterrupt(vector)) {
ret = int_io_interrupt_handler(vector, true);
int_io_interrupt_handler(vector, true);
break;
}
}
@ -294,13 +292,14 @@ dprintf("handling I/O interrupts done\n");
panic("unhandled exception type\n");
}
if (ret == B_INVOKE_SCHEDULER) {
int state = disable_interrupts();
int state = disable_interrupts();
if (thread->cpu->invoke_scheduler) {
GRAB_THREAD_LOCK();
scheduler_reschedule();
RELEASE_THREAD_LOCK();
restore_interrupts(state);
} else if (hardwareInterrupt && thread->post_interrupt_callback != NULL) {
restore_interrupts(state);
void (*callback)(void*) = thread->post_interrupt_callback;
void* data = thread->post_interrupt_data;


@ -1,5 +1,5 @@
/*
* Copyright 2003-2006, Haiku Inc. All rights reserved.
* Copyright 2003-2009, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
@ -96,8 +96,6 @@ extern "C" void ppc_exception_entry(int vector, struct iframe *iframe);
void
ppc_exception_entry(int vector, struct iframe *iframe)
{
int ret = B_HANDLED_INTERRUPT;
if (vector != 0x900) {
dprintf("ppc_exception_entry: time %lld vector 0x%x, iframe %p, "
"srr0: %p\n", system_time(), vector, iframe, (void*)iframe->srr0);
@ -154,7 +152,7 @@ ppc_exception_entry(int vector, struct iframe *iframe)
addr_t newip;
ret = vm_page_fault(iframe->dar, iframe->srr0,
vm_page_fault(iframe->dar, iframe->srr0,
iframe->dsisr & (1 << 25), // store or load
iframe->srr1 & (1 << 14), // was the system in user or supervisor
&newip);
@ -171,7 +169,6 @@ ppc_exception_entry(int vector, struct iframe *iframe)
if (!sPIC) {
panic("ppc_exception_entry(): external interrupt although we "
"don't have a PIC driver!");
ret = B_HANDLED_INTERRUPT;
break;
}
@ -179,7 +176,7 @@ dprintf("handling I/O interrupts...\n");
int irq;
while ((irq = sPIC->acknowledge_io_interrupt(sPICCookie)) >= 0) {
// TODO: correctly pass level-triggered vs. edge-triggered to the handler!
ret = int_io_interrupt_handler(irq, true);
int_io_interrupt_handler(irq, true);
}
dprintf("handling I/O interrupts done\n");
break;
@ -195,7 +192,7 @@ dprintf("handling I/O interrupts done\n");
panic("FP unavailable exception: unimplemented\n");
break;
case 0x900: // decrementer exception
ret = timer_interrupt();
timer_interrupt();
break;
case 0xc00: // system call
panic("system call exception: unimplemented\n");
@ -235,12 +232,21 @@ dprintf("handling I/O interrupts done\n");
panic("unhandled exception type\n");
}
if (ret == B_INVOKE_SCHEDULER) {
int state = disable_interrupts();
cpu_status state = disable_interrupts();
if (thread->cpu->invoke_scheduler) {
GRAB_THREAD_LOCK();
scheduler_reschedule();
RELEASE_THREAD_LOCK();
restore_interrupts(state);
} else if (thread->post_interrupt_callback != NULL) {
restore_interrupts(state);
void (*callback)(void*) = thread->post_interrupt_callback;
void* data = thread->post_interrupt_data;
thread->post_interrupt_callback = NULL;
thread->post_interrupt_data = NULL;
callback(data);
}
// pop iframe


@ -987,7 +987,6 @@ hardware_interrupt(struct iframe* frame)
{
int32 vector = frame->vector - ARCH_INTERRUPT_BASE;
bool levelTriggered = false;
int ret;
struct thread* thread = thread_get_current_thread();
if (sCurrentPIC->is_spurious_interrupt(vector)) {
@ -1001,23 +1000,19 @@ hardware_interrupt(struct iframe* frame)
if (!levelTriggered)
sCurrentPIC->end_of_interrupt(vector);
ret = int_io_interrupt_handler(vector, levelTriggered);
int_io_interrupt_handler(vector, levelTriggered);
if (levelTriggered)
sCurrentPIC->end_of_interrupt(vector);
if (ret == B_INVOKE_SCHEDULER || thread->cpu->invoke_scheduler) {
cpu_status state = disable_interrupts();
cpu_status state = disable_interrupts();
if (thread->cpu->invoke_scheduler) {
GRAB_THREAD_LOCK();
if (ret == B_INVOKE_SCHEDULER || !thread->cpu->invoke_scheduler_if_idle
|| thread->priority == B_IDLE_PRIORITY) {
scheduler_reschedule();
}
scheduler_reschedule();
RELEASE_THREAD_LOCK();
restore_interrupts(state);
} else if (thread->post_interrupt_callback != NULL) {
restore_interrupts(state);
void (*callback)(void*) = thread->post_interrupt_callback;
void* data = thread->post_interrupt_data;


@ -2892,6 +2892,7 @@ _user_debug_thread(thread_id threadID)
// about to acquire a semaphore (before
// thread_prepare_to_block()), we won't interrupt it.
// Maybe we should rather send a signal (SIGTRAP).
scheduler_reschedule_if_necessary_locked();
break;
}
}


@ -1,5 +1,5 @@
/*
* Copyright 2007-2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2007-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/


@ -88,7 +88,7 @@ dump_int_statistics(int argc, char **argv)
if (error == B_OK && exactMatch) {
if (strchr(imageName, '/') != NULL)
imageName = strrchr(imageName, '/') + 1;
int length = 4 + strlen(imageName);
kprintf(" %s:%-*s (%p)", imageName, 45 - length, symbol,
io->func);
@ -101,7 +101,7 @@ dump_int_statistics(int argc, char **argv)
else
kprintf("%8lld\n", io->handled_count);
}
kprintf("\n");
}
return 0;
@ -173,7 +173,7 @@ int_io_interrupt_handler(int vector, bool levelTriggered)
{
int status = B_UNHANDLED_INTERRUPT;
struct io_handler *io;
bool invokeScheduler = false, handled = false;
bool handled = false;
if (!sVectors[vector].no_lock_vector)
acquire_spinlock(&sVectors[vector].vector_lock);
@ -194,7 +194,7 @@ int_io_interrupt_handler(int vector, bool levelTriggered)
// For edge-triggered interrupts, however, we always need to call
// all handlers, as multiple interrupts cannot be identified. We
// still make sure the return code of this function will issue
// whatever the driver thought would be useful (ie. B_INVOKE_SCHEDULER)
// whatever the driver thought would be useful.
for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
status = io->func(io->data);
@ -206,15 +206,13 @@ int_io_interrupt_handler(int vector, bool levelTriggered)
if (levelTriggered && status != B_UNHANDLED_INTERRUPT)
break;
if (status == B_HANDLED_INTERRUPT)
if (status == B_HANDLED_INTERRUPT || status == B_INVOKE_SCHEDULER)
handled = true;
else if (status == B_INVOKE_SCHEDULER)
invokeScheduler = true;
}
#if DEBUG_INTERRUPTS
sVectors[vector].trigger_count++;
if (status != B_UNHANDLED_INTERRUPT || handled || invokeScheduler) {
if (status != B_UNHANDLED_INTERRUPT || handled) {
sVectors[vector].handled_count++;
} else {
sVectors[vector].unhandled_count++;
@ -260,8 +258,6 @@ int_io_interrupt_handler(int vector, bool levelTriggered)
// edge triggered return value
if (invokeScheduler)
return B_INVOKE_SCHEDULER;
if (handled)
return B_HANDLED_INTERRUPT;
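
What this means for a driver's interrupt hook, sketched (the device
helpers are invented): returning B_INVOKE_SCHEDULER is still legal, but it
now only counts as "handled"; the reschedule decision belongs to the
generic interrupt exit code.

	static int32
	my_device_interrupt(void* data)
	{
		struct my_device* device = (struct my_device*)data;

		if (!my_device_caused_interrupt(device))
			return B_UNHANDLED_INTERRUPT;

		my_device_acknowledge(device);

		// Waking a waiter may set cpu_ent::invoke_scheduler; the
		// actual switch is deferred to the interrupt exit path.
		release_sem_etc(device->io_done_sem, 1, B_DO_NOT_RESCHEDULE);

		// Equivalent to B_HANDLED_INTERRUPT since this change.
		return B_INVOKE_SCHEDULER;
	}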


@ -1,6 +1,6 @@
/*
* Copyright 2009, Rene Gollent, rene@gollent.com.
* Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002, Angelo Mottola, a.mottola@libero.it.
* Distributed under the terms of the MIT License.
@ -147,7 +147,7 @@ affine_get_most_idle_cpu()
/*! Enqueues the thread into the run queue.
Note: thread lock must be held when entering this function
*/
static bool
static void
affine_enqueue_in_run_queue(struct thread *thread)
{
int32 targetCPU = -1;
@ -193,14 +193,13 @@ affine_enqueue_in_run_queue(struct thread *thread)
if (thread->priority > gCPU[targetCPU].running_thread->priority) {
if (targetCPU == smp_get_current_cpu()) {
return true;
gCPU[targetCPU].invoke_scheduler = true;
gCPU[targetCPU].invoke_scheduler_if_idle = false;
} else {
smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
SMP_MSG_FLAG_ASYNC);
}
}
return false;
}
@ -347,12 +346,12 @@ context_switch(struct thread *fromThread, struct thread *toThread)
static int32
reschedule_event(timer *unused)
{
// this function is called as a result of the timer event set by the
// scheduler returning this causes a reschedule on the timer event
// This function is called as a result of the timer event set by the
// scheduler. Make sure the reschedule() is invoked.
thread_get_current_thread()->cpu->invoke_scheduler = true;
thread_get_current_thread()->cpu->invoke_scheduler_if_idle = false;
thread_get_current_thread()->cpu->preempted = 1;
return B_INVOKE_SCHEDULER;
return B_HANDLED_INTERRUPT;
}
@ -363,14 +362,23 @@ static void
affine_reschedule(void)
{
int32 currentCPU = smp_get_current_cpu();
struct thread *oldThread = thread_get_current_thread();
// check whether we're only supposed to reschedule, if the current thread
// is idle
if (oldThread->cpu->invoke_scheduler) {
oldThread->cpu->invoke_scheduler = false;
if (oldThread->cpu->invoke_scheduler_if_idle
&& oldThread->priority != B_IDLE_PRIORITY) {
oldThread->cpu->invoke_scheduler_if_idle = false;
return;
}
}
struct thread *nextThread, *prevThread;
TRACE(("reschedule(): cpu %ld, cur_thread = %ld\n", currentCPU, oldThread->id));
oldThread->cpu->invoke_scheduler = false;
oldThread->state = oldThread->next_state;
switch (oldThread->next_state) {
case B_THREAD_RUNNING:


@ -77,7 +77,7 @@ dump_run_queue(int argc, char **argv)
/*! Enqueues the thread into the run queue.
Note: thread lock must be held when entering this function
*/
static bool
static void
simple_enqueue_in_run_queue(struct thread *thread)
{
thread->state = thread->next_state = B_THREAD_READY;
@ -106,7 +106,10 @@ simple_enqueue_in_run_queue(struct thread *thread)
NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
thread);
return thread->priority > thread_get_current_thread()->priority;
if (thread->priority > thread_get_current_thread()->priority) {
gCPU[0].invoke_scheduler = true;
gCPU[0].invoke_scheduler_if_idle = false;
}
}
@ -182,12 +185,12 @@ context_switch(struct thread *fromThread, struct thread *toThread)
static int32
reschedule_event(timer *unused)
{
// this function is called as a result of the timer event set by the
// scheduler returning this causes a reschedule on the timer event
// This function is called as a result of the timer event set by the
// scheduler. Make sure the reschedule() is invoked.
thread_get_current_thread()->cpu->invoke_scheduler = true;
thread_get_current_thread()->cpu->invoke_scheduler_if_idle = false;
thread_get_current_thread()->cpu->preempted = 1;
return B_INVOKE_SCHEDULER;
return B_HANDLED_INTERRUPT;
}
@ -200,9 +203,18 @@ simple_reschedule(void)
struct thread *oldThread = thread_get_current_thread();
struct thread *nextThread, *prevThread;
TRACE(("reschedule(): current thread = %ld\n", oldThread->id));
// check whether we're only supposed to reschedule, if the current thread
// is idle
if (oldThread->cpu->invoke_scheduler) {
oldThread->cpu->invoke_scheduler = false;
if (oldThread->cpu->invoke_scheduler_if_idle
&& oldThread->priority != B_IDLE_PRIORITY) {
oldThread->cpu->invoke_scheduler_if_idle = false;
return;
}
}
oldThread->cpu->invoke_scheduler = false;
TRACE(("reschedule(): current thread = %ld\n", oldThread->id));
oldThread->state = oldThread->next_state;
switch (oldThread->next_state) {
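
The invoke_scheduler_if_idle handling above, distilled (same logic with
abbreviated names; all three reschedule() implementations now start this
way):

	// An SMP_MSG_RESCHEDULE_IF_IDLE request is only honored when the
	// current thread really is the idle thread; otherwise the request
	// is swallowed and the current thread keeps running.
	if (cpu->invoke_scheduler) {
		cpu->invoke_scheduler = false;
		if (cpu->invoke_scheduler_if_idle
			&& thread->priority != B_IDLE_PRIORITY) {
			cpu->invoke_scheduler_if_idle = false;
			return;
		}
	}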


@ -120,17 +120,9 @@ select_cpu(int32 currentCPU, struct thread* thread, int32& targetPriority)
/*! Enqueues the thread into the run queue.
Note: thread lock must be held when entering this function
*/
static bool
static void
enqueue_in_run_queue(struct thread *thread)
{
if (thread->state == B_THREAD_RUNNING) {
// The thread is currently running (on another CPU) and we cannot
// insert it into the run queue. Set the next state to ready so the
// thread is inserted into the run queue on the next reschedule.
thread->next_state = B_THREAD_READY;
return false;
}
thread->state = thread->next_state = B_THREAD_READY;
struct thread *curr, *prev;
@ -153,7 +145,6 @@ enqueue_in_run_queue(struct thread *thread)
thread->next_priority = thread->priority;
bool reschedule = false;
if (thread->priority != B_IDLE_PRIORITY) {
// Select a CPU for the thread to run on. It's not certain that the
// thread will actually run on it, but we will notify the CPU to
@ -167,7 +158,8 @@ enqueue_in_run_queue(struct thread *thread)
// reschedule.
if (thread->priority > targetPriority) {
if (targetCPU == currentCPU) {
reschedule = true;
gCPU[targetCPU].invoke_scheduler = true;
gCPU[targetCPU].invoke_scheduler_if_idle = false;
} else {
if (targetPriority == B_IDLE_PRIORITY) {
smp_send_ici(targetCPU, SMP_MSG_RESCHEDULE_IF_IDLE, 0, 0,
@ -183,8 +175,6 @@ enqueue_in_run_queue(struct thread *thread)
// notify listeners
NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
thread);
return reschedule;
}
@ -260,12 +250,12 @@ context_switch(struct thread *fromThread, struct thread *toThread)
static int32
reschedule_event(timer *unused)
{
// this function is called as a result of the timer event set by the
// scheduler returning this causes a reschedule on the timer event
// This function is called as a result of the timer event set by the
// scheduler. Make sure the reschedule() is invoked.
thread_get_current_thread()->cpu->invoke_scheduler = true;
thread_get_current_thread()->cpu->invoke_scheduler_if_idle = false;
thread_get_current_thread()->cpu->preempted = 1;
return B_INVOKE_SCHEDULER;
return B_HANDLED_INTERRUPT;
}
@ -278,9 +268,18 @@ reschedule(void)
struct thread *oldThread = thread_get_current_thread();
struct thread *nextThread, *prevThread;
TRACE(("reschedule(): cpu %ld, cur_thread = %ld\n", smp_get_current_cpu(), thread_get_current_thread()->id));
// check whether we're only supposed to reschedule, if the current thread
// is idle
if (oldThread->cpu->invoke_scheduler) {
oldThread->cpu->invoke_scheduler = false;
if (oldThread->cpu->invoke_scheduler_if_idle
&& oldThread->priority != B_IDLE_PRIORITY) {
oldThread->cpu->invoke_scheduler_if_idle = false;
return;
}
}
oldThread->cpu->invoke_scheduler = false;
TRACE(("reschedule(): cpu %ld, cur_thread = %ld\n", smp_get_current_cpu(), thread_get_current_thread()->id));
oldThread->state = oldThread->next_state;
switch (oldThread->next_state) {


@ -1,5 +1,5 @@
/*
* Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -11,31 +11,33 @@
/*! Semaphore code */
#include <sem.h>
#include <stdlib.h>
#include <string.h>
#include <OS.h>
#include <sem.h>
#include <kernel.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <smp.h>
#include <int.h>
#include <arch/int.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <ksignal.h>
#include <kscheduler.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <smp.h>
#include <syscall_restart.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vfs.h>
#include <vm/vm_page.h>
#include <boot/kernel_args.h>
#include <syscall_restart.h>
#include <wait_for_objects.h>
#include <string.h>
#include <stdlib.h>
#include "kernel_debug_config.h"
@ -383,6 +385,10 @@ delete_sem_internal(sem_id id, bool checkPermission)
char* name;
uninit_sem_locked(sSems[slot], &name);
GRAB_THREAD_LOCK();
scheduler_reschedule_if_necessary_locked();
RELEASE_THREAD_LOCK();
restore_interrupts(state);
free(name);
@ -709,6 +715,8 @@ sem_delete_owned_sems(struct team* team)
free(name);
}
scheduler_reschedule_if_necessary();
}
@ -977,8 +985,6 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
flags |= B_RELEASE_IF_WAITING_ONLY;
}
bool reschedule = false;
SpinLocker threadLocker(gThreadSpinlock);
while (count > 0) {
@ -1000,7 +1006,7 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
break;
}
reschedule |= thread_unblock_locked(entry->thread, B_OK);
thread_unblock_locked(entry->thread, B_OK);
int delta = min_c(count, entry->count);
sSems[slot].u.used.count += delta;
@ -1023,10 +1029,10 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
// If we've unblocked another thread reschedule, if we've not explicitly
// been told not to.
if (reschedule && (flags & B_DO_NOT_RESCHEDULE) == 0) {
if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
semLocker.Unlock();
threadLocker.Lock();
scheduler_reschedule();
scheduler_reschedule_if_necessary_locked();
}
return B_OK;
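
A hedged usage note: since release_sem_etc() now ends in
scheduler_reschedule_if_necessary_locked(), a plain release_sem() may
context-switch immediately when it wakes a higher-priority waiter. Code
that must not be preempted at that point keeps the old escape hatch
(finish_critical_work() is an invented placeholder):

	release_sem_etc(sem, 1, B_DO_NOT_RESCHEDULE);
	finish_critical_work();
	scheduler_reschedule_if_necessary();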


@ -17,6 +17,7 @@
#include <OS.h>
#include <KernelExport.h>
#include <cpu.h>
#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
@ -61,7 +62,7 @@ const char * const sigstr[NSIG] = {
static status_t deliver_signal(struct thread *thread, uint signal,
uint32 flags, bool &reschedule);
uint32 flags);
@ -379,12 +380,8 @@ handle_signals(struct thread *thread)
= thread->team->parent->main_thread;
struct sigaction& parentHandler
= parentThread->sig_action[SIGCHLD - 1];
// TODO: do we need to worry about rescheduling here?
bool unused = false;
if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
deliver_signal(parentThread, SIGCHLD, 0,
unused);
}
if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
deliver_signal(parentThread, SIGCHLD, 0);
}
return true;
@ -490,8 +487,7 @@ is_signal_blocked(int signal)
thread lock held.
*/
static status_t
deliver_signal(struct thread *thread, uint signal, uint32 flags,
bool &reschedule)
deliver_signal(struct thread *thread, uint signal, uint32 flags)
{
if (flags & B_CHECK_PERMISSION) {
// ToDo: introduce euid & uid fields to the team and check permission
@ -503,7 +499,7 @@ deliver_signal(struct thread *thread, uint signal, uint32 flags,
if (thread->team == team_get_kernel_team()) {
// Signals to kernel threads will only wake them up
if (thread->state == B_THREAD_SUSPENDED)
reschedule |= scheduler_enqueue_in_run_queue(thread);
scheduler_enqueue_in_run_queue(thread);
return B_OK;
}
@ -518,7 +514,7 @@ deliver_signal(struct thread *thread, uint signal, uint32 flags,
// Wake up main thread
if (mainThread->state == B_THREAD_SUSPENDED)
reschedule |= scheduler_enqueue_in_run_queue(mainThread);
scheduler_enqueue_in_run_queue(mainThread);
else
thread_interrupt(mainThread, true);
@ -529,7 +525,7 @@ deliver_signal(struct thread *thread, uint signal, uint32 flags,
case SIGKILLTHR:
// Wake up suspended threads and interrupt waiting ones
if (thread->state == B_THREAD_SUSPENDED)
reschedule |= scheduler_enqueue_in_run_queue(thread);
scheduler_enqueue_in_run_queue(thread);
else
thread_interrupt(thread, true);
break;
@ -537,7 +533,7 @@ deliver_signal(struct thread *thread, uint signal, uint32 flags,
case SIGCONT:
// Wake up thread if it was suspended
if (thread->state == B_THREAD_SUSPENDED)
reschedule |= scheduler_enqueue_in_run_queue(thread);
scheduler_enqueue_in_run_queue(thread);
if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
@ -567,7 +563,6 @@ send_signal_etc(pid_t id, uint signal, uint32 flags)
status_t status = B_BAD_THREAD_ID;
struct thread *thread;
cpu_status state = 0;
bool reschedule = false;
if (signal < 0 || signal > MAX_SIGNO)
return B_BAD_VALUE;
@ -584,7 +579,7 @@ send_signal_etc(pid_t id, uint signal, uint32 flags)
thread = thread_get_thread_struct_locked(id);
if (thread != NULL)
status = deliver_signal(thread, signal, flags, reschedule);
status = deliver_signal(thread, signal, flags);
} else {
// send a signal to the specified process group
// (the absolute value of the id)
@ -617,7 +612,7 @@ send_signal_etc(pid_t id, uint signal, uint32 flags)
if (thread != NULL) {
// we don't stop because of an error sending the signal; we
// rather want to send as much signals as possible
status = deliver_signal(thread, signal, flags, reschedule);
status = deliver_signal(thread, signal, flags);
}
RELEASE_THREAD_LOCK();
@ -630,9 +625,8 @@ send_signal_etc(pid_t id, uint signal, uint32 flags)
GRAB_THREAD_LOCK();
}
if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0
&& reschedule)
scheduler_reschedule();
if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
scheduler_reschedule_if_necessary_locked();
RELEASE_THREAD_LOCK();
@ -777,7 +771,7 @@ alarm_event(timer *t)
TRACE(("alarm_event: thread = %p\n", thread));
send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);
return B_INVOKE_SCHEDULER;
return B_HANDLED_INTERRUPT;
}
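
The same retirement applies to timer hooks, as alarm_event() above shows: a
hook no longer returns B_INVOKE_SCHEDULER but defers the decision and
reports B_HANDLED_INTERRUPT. A sketch mirroring alarm_event() (the hook
name is invented):

	static int32
	my_alarm_hook(timer* event)
	{
		struct thread* thread = (struct thread*)event->user_data;

		// Delivery may set cpu_ent::invoke_scheduler; the interrupt
		// exit path acts on the flag.
		send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);
		return B_HANDLED_INTERRUPT;
	}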


@ -1,4 +1,5 @@
/*
* Copyright 2005-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -2269,10 +2270,7 @@ thread_block_timeout(timer* timer)
// easy.
struct thread* thread = (struct thread*)timer->user_data;
// the scheduler will tell us whether to reschedule or not via
// thread_unblock_locked's return
if (thread_unblock_locked(thread, B_TIMED_OUT))
return B_INVOKE_SCHEDULER;
thread_unblock_locked(thread, B_TIMED_OUT);
return B_HANDLED_INTERRUPT;
}
@ -2286,15 +2284,14 @@ thread_block()
}
bool
void
thread_unblock(status_t threadID, status_t status)
{
InterruptsSpinLocker _(gThreadSpinlock);
struct thread* thread = thread_get_thread_struct_locked(threadID);
if (thread == NULL)
return false;
return thread_unblock_locked(thread, status);
if (thread != NULL)
thread_unblock_locked(thread, status);
}
@ -2989,7 +2986,9 @@ status_t
_user_unblock_thread(thread_id threadID, status_t status)
{
InterruptsSpinLocker locker(gThreadSpinlock);
return user_unblock_thread(threadID, status);
status_t error = user_unblock_thread(threadID, status);
scheduler_reschedule_if_necessary_locked();
return error;
}
@ -3009,9 +3008,12 @@ _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
return B_BAD_ADDRESS;
InterruptsSpinLocker locker(gThreadSpinlock);
for (uint32 i = 0; i < count; i++)
user_unblock_thread(threads[i], status);
scheduler_reschedule_if_necessary_locked();
return B_OK;
}
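
The _user_unblock_threads() pattern above generalizes under the new
contract (hedged sketch, the waiter list type is invented): wake any number
of threads during a single thread-lock acquisition, then make one
reschedule check at the end instead of one decision per thread.

	InterruptsSpinLocker locker(gThreadSpinlock);

	for (struct waiter* w = waiters; w != NULL; w = w->next)
		thread_unblock_locked(w->thread, B_OK);

	// Each unblock at most set this CPU's invoke_scheduler flag; one
	// check afterwards suffices.
	scheduler_reschedule_if_necessary_locked();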


@ -1,5 +1,5 @@
/*
* Copyright 2002-2008, Haiku. All rights reserved.
* Copyright 2002-2009, Haiku. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
@ -77,7 +77,6 @@ timer_interrupt()
spinlock *spinlock;
per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
int32 rc = B_HANDLED_INTERRUPT;
bool invokeScheduler = false;
TRACE(("timer_interrupt: time %lld, cpu %ld\n", system_time(),
smp_get_current_cpu()));
@ -122,9 +121,6 @@ timer_interrupt()
if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0)
RELEASE_THREAD_LOCK();
if (rc == B_INVOKE_SCHEDULER)
invokeScheduler = true;
}
cpuData.current_event_in_progress = 0;
@ -157,7 +153,7 @@ timer_interrupt()
release_spinlock(spinlock);
return invokeScheduler ? B_INVOKE_SCHEDULER : rc;
return rc;
}