Changed the signal handling code so that handle_signals() can be called
without having the thread lock held and interrupts disabled.
Cleaned up the signal handling code, and fixed some minor bugs with blockable
vs. non-blockable signals.
thread_debug_info was using uint64 for signal sets instead of sigset_t.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@14457 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2005-10-20 16:56:04 +00:00
parent 7121425eb5
commit 50374cbdca
5 changed files with 159 additions and 184 deletions
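
In short, the contract of handle_signals() changes as follows (declarations
taken from the header diff below; the comments paraphrase the commit message
and are not part of the source):

    /* before: the caller had to hold the thread lock with interrupts
       disabled, and the function could temporarily drop both */
    extern int handle_signals(struct thread *t, cpu_status *state);

    /* after: callable without the thread lock and with interrupts enabled;
       the return value tells the caller whether it should reschedule */
    extern bool handle_signals(struct thread *thread);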

View File

@@ -11,29 +11,29 @@
 #define KILL_SIGNALS ((1L << (SIGKILL - 1)) | (1L << (SIGKILLTHR - 1)))
-#define BLOCKABLE_SIGS (~(KILL_SIGNALS | (1L << (SIGSTOP - 1))))
 #ifdef __cplusplus
 extern "C" {
 #endif
-extern int handle_signals(struct thread *t, cpu_status *state);
+extern bool handle_signals(struct thread *thread);
 extern bool is_kill_signal_pending(void);
 extern int has_signals_pending(void *_thread);
 extern int sigaction_etc(thread_id threadID, int signal,
-    const struct sigaction *act, struct sigaction *oact);
+    const struct sigaction *act, struct sigaction *oldAction);
 extern int _user_send_signal(pid_t tid, uint sig);
 extern int _user_sigprocmask(int how, const sigset_t *set, sigset_t *oldSet);
-extern int _user_sigaction(int sig, const struct sigaction *action, struct sigaction *oldAction);
+extern int _user_sigaction(int sig, const struct sigaction *action,
+    struct sigaction *oldAction);
 extern bigtime_t _user_set_alarm(bigtime_t time, uint32 mode);
 extern int _user_sigsuspend(const sigset_t *mask);
 extern int _user_sigpending(sigset_t *set);
 #ifdef __cplusplus
-} // extern "C"
+}
 #endif
 #endif /* _KERNEL_SIGNAL_H */
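
The mask macros above encode signal number n as bit (n - 1) of the set. A
stand-alone sketch of the same arithmetic, using POSIX signal numbers; the
SIGKILLTHR value is Haiku-specific and assumed here for illustration:

    #include <signal.h>
    #include <stdio.h>

    #define SIGKILLTHR 21    /* Haiku's thread-kill signal (assumed value) */
    #define SIGNAL_TO_MASK(sig) (1L << ((sig) - 1))
    #define KILL_SIGNALS (SIGNAL_TO_MASK(SIGKILL) | SIGNAL_TO_MASK(SIGKILLTHR))
    #define BLOCKABLE_SIGS (~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))

    int main(void)
    {
        /* SIGKILL and SIGSTOP fall outside the blockable set... */
        printf("SIGKILL blockable: %d\n",
            (BLOCKABLE_SIGS & SIGNAL_TO_MASK(SIGKILL)) != 0);
        /* ...while ordinary signals such as SIGUSR1 remain blockable */
        printf("SIGUSR1 blockable: %d\n",
            (BLOCKABLE_SIGS & SIGNAL_TO_MASK(SIGUSR1)) != 0);
        return 0;
    }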

View File

@@ -55,9 +55,9 @@ struct thread_debug_info {
     port_id debug_port;
         // the port the thread is waiting on for commands from the nub thread
-    uint64 ignore_signals;
+    sigset_t ignore_signals;
         // the signals the debugger is not interested in
-    uint64 ignore_signals_once;
+    sigset_t ignore_signals_once;
         // the signals the debugger wishes not to be notified of, when they
         // occur the next time
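
The switch from uint64 to sigset_t matters because these masks are now
updated with the kernel's lock-free atomic primitives, as in
notify_debugger() further below. A user-space model of the ignore-once
check, with C11 atomics standing in for Haiku's atomic_and() (an
assumption for illustration, not the kernel implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SIGNAL_TO_MASK(sig) (1u << ((sig) - 1))

    static _Atomic uint32_t ignore_signals_once;    /* models the sigset_t field */

    /* Consume the one-shot "ignore" flag for a signal: clear the bit and
       report whether it had been set, in one atomic read-modify-write. */
    static bool consume_ignore_once(int sig)
    {
        uint32_t mask = SIGNAL_TO_MASK(sig);
        uint32_t old = atomic_fetch_and(&ignore_signals_once, ~mask);
        return (old & mask) != 0;
    }

    int main(void)
    {
        atomic_fetch_or(&ignore_signals_once, SIGNAL_TO_MASK(5));
        printf("first: %d, second: %d\n",
            consume_ignore_once(5), consume_ignore_once(5));    /* 1, then 0 */
        return 0;
    }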

View File

@@ -396,7 +396,7 @@ arch_setup_signal_frame(struct thread *t, struct sigaction *sa, int sig, int sig
 int64
 arch_restore_signal_frame(void)
 {
-    struct thread *t = thread_get_current_thread();
+    struct thread *thread = thread_get_current_thread();
     struct iframe *frame = i386_get_current_iframe();
     uint32 *stack;
     struct vregs *regs;
@@ -404,7 +404,7 @@ arch_restore_signal_frame(void)
     TRACE(("### arch_restore_signal_frame: entry\n"));
     stack = (uint32 *)frame->user_esp;
-    t->sig_block_mask = stack[0];
+    atomic_set(&thread->sig_block_mask, stack[0]);
     regs = (struct vregs *)stack[1];
     frame->eip = regs->eip;
@@ -429,18 +429,19 @@ arch_restore_signal_frame(void)
 void
-arch_check_syscall_restart(struct thread *t)
+arch_check_syscall_restart(struct thread *thread)
 {
     struct iframe *frame = i386_get_current_iframe();
-    if (frame == NULL)
+    if (frame == NULL) {
+        // this thread is obviously new; we didn't come from an interrupt
         return;
+    }
     if ((status_t)frame->orig_eax >= 0 && (status_t)frame->eax == EINTR) {
         frame->eax = frame->orig_eax;
         frame->edx = frame->orig_edx;
         frame->eip -= 2;
-            // undos the "int $99" syscall interrupt (so that it'll be called again)
+            // undoes the "int $99" syscall interrupt (so that it'll be called again)
     }
 }
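
The "eip -= 2" rewind works because "int $99" assembles to exactly two
bytes: 0xCD (the INT opcode) followed by the immediate 0x63 (decimal 99).
A small illustration, with a hypothetical address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t int99[] = { 0xCD, 0x63 };    /* encoding of "int $99" */
        uint32_t eip = 0x1002;    /* hypothetical: address just after the trap */

        /* stepping back over the instruction makes the kernel return to the
           trap itself, so the interrupted syscall is issued again */
        eip -= sizeof(int99);
        printf("restart at 0x%x\n", (unsigned)eip);
        return 0;
    }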

View File

@@ -31,6 +31,9 @@
 #define SIGNAL_TO_MASK(signal) (1LL << (signal - 1))
+#define BLOCKABLE_SIGNALS (~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
+#define DEFAULT_IGNORE_SIGNALS \
+    (SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) | SIGNAL_TO_MASK(SIGCONT))
 const char * const sigstr[NSIG] = {
@@ -42,15 +45,14 @@ const char * const sigstr[NSIG] = {
 static bool
 notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
-    bool deadly, cpu_status *state)
+    bool deadly)
 {
-    bool result;
     uint64 signalMask = SIGNAL_TO_MASK(signal);
     // first check the ignore signal masks the debugger specified for the thread
     if (thread->debug_info.ignore_signals_once & signalMask) {
-        thread->debug_info.ignore_signals_once &= ~signalMask;
+        atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
         return true;
     }
@@ -58,61 +60,52 @@ notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
         return true;
     // deliver the event
-    RELEASE_THREAD_LOCK();
-    restore_interrupts(*state);
-    result = user_debug_handle_signal(signal, handler, deadly);
-    *state = disable_interrupts();
-    GRAB_THREAD_LOCK();
-    return result;
+    return user_debug_handle_signal(signal, handler, deadly);
 }
-/**
- *    Expects interrupts off and thread lock held.
- *    The function may release the lock and enable interrupts temporarily, so the
- *    caller must be aware that operations before calling this function and after
- *    its return might not belong to the same atomic section.
+/** Actually handles the signal - ie. the thread will exit, a custom signal
+ *    handler is prepared, or whatever the signal demands.
  */
-int
-handle_signals(struct thread *thread, cpu_status *state)
+bool
+handle_signals(struct thread *thread)
 {
-    uint32 signalMask = thread->sig_pending & (~thread->sig_block_mask);
-    int i, signal, global_resched = 0;
+    uint32 signalMask = atomic_get(&thread->sig_pending)
+        & ~atomic_get(&thread->sig_block_mask);
     struct sigaction *handler;
+    bool reschedule = false;
+    int32 i;
     // If SIGKILL[THR] are pending, we ignore other signals.
     // Otherwise check, if the thread shall stop for debugging.
     if (signalMask & KILL_SIGNALS) {
        signalMask &= KILL_SIGNALS;
     } else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
-        RELEASE_THREAD_LOCK();
-        restore_interrupts(*state);
         user_debug_stop_thread();
-        *state = disable_interrupts();
-        GRAB_THREAD_LOCK();
         signalMask = thread->sig_pending & (~thread->sig_block_mask);
     }
     if (signalMask == 0)
         return 0;
     for (i = 0; i < NSIG; i++) {
-        if (signalMask & 0x1) {
-            bool debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
+        bool debugSignal;
+        int32 signal = i + 1;
+        if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
+            continue;
+        // clear the signal that we will handle
+        atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
+        debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
             & (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));
-            signal = i + 1;
+        // ToDo: since sigaction_etc() could clobber the fields at any time,
+        //    we should actually copy the relevant fields atomically before
+        //    accessing them (only the debugger is calling sigaction_etc()
+        //    right now).
         handler = &thread->sig_action[i];
-            signalMask >>= 1;
-            thread->sig_pending &= ~SIGNAL_TO_MASK(signal);
         TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));
@@ -122,7 +115,7 @@ handle_signals(struct thread *thread, cpu_status *state)
             // notify the debugger
             if (debugSignal)
-                notify_debugger(thread, signal, handler, false, state);
+                notify_debugger(thread, signal, handler, false);
             continue;
         }
         if (handler->sa_handler == SIG_DFL) {
@@ -135,22 +128,18 @@ handle_signals(struct thread *thread, cpu_status *state)
             case SIGTTOU:
             case SIGCONT:
                 // notify the debugger
-                if (debugSignal) {
-                    notify_debugger(thread, signal, handler, false, state);
-                }
+                if (debugSignal)
+                    notify_debugger(thread, signal, handler, false);
                 continue;
             case SIGSTOP:
                 // notify the debugger
-                if (debugSignal) {
-                    if (!notify_debugger(thread, signal, handler, false,
-                            state)) {
+                if (debugSignal
+                    && !notify_debugger(thread, signal, handler, false))
                     continue;
-                    }
-                }
                 thread->next_state = B_THREAD_SUSPENDED;
-                global_resched = 1;
+                reschedule = true;
                 continue;
             case SIGQUIT:
@@ -159,7 +148,8 @@ handle_signals(struct thread *thread, cpu_status *state)
             case SIGABRT:
             case SIGFPE:
             case SIGSEGV:
-                TRACE(("Shutting down thread 0x%lx due to signal #%d\n", thread->id, signal));
+                TRACE(("Shutting down thread 0x%lx due to signal #%d\n",
+                    thread->id, signal));
             case SIGKILL:
             case SIGKILLTHR:
             default:
@@ -167,76 +157,58 @@ handle_signals(struct thread *thread, cpu_status *state)
                     thread->exit.reason = THREAD_RETURN_INTERRUPTED;
                 // notify the debugger
-                if (debugSignal && signal != SIGKILL
-                    && signal != SIGKILLTHR) {
-                    if (!notify_debugger(thread, signal, handler, true,
-                            state)) {
+                if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
+                    && !notify_debugger(thread, signal, handler, true))
                     continue;
-                    }
-                }
-                RELEASE_THREAD_LOCK();
-                restore_interrupts(*state);
-                    // ToDo: when we have more than a thread per process,
-                    // it can likely happen (for any thread other than the first)
-                    // that here, interrupts are still disabled.
-                    // Changing the above line with "enable_interrupts()" fixes
-                    // the problem, though we should find its cause.
-                    // We absolutely need interrupts enabled when we enter
-                    // thread_exit().
-                    // Just search for the cause if it still happens!
                 thread_exit();
                     // won't return
         }
     }
         // notify the debugger
-        if (debugSignal) {
-            if (!notify_debugger(thread, signal, handler, false, state))
+        if (debugSignal && !notify_debugger(thread, signal, handler, false))
             continue;
-        }
+        // ToDo: it's not safe to call arch_setup_signal_frame with
+        //    interrupts disabled since it writes to the user stack
+        //    and may page fault.
        // User defined signal handler
        TRACE(("### Setting up custom signal handler frame...\n"));
-        arch_setup_signal_frame(thread, handler, signal, thread->sig_block_mask);
+        arch_setup_signal_frame(thread, handler, signal, atomic_get(&thread->sig_block_mask));
        if (handler->sa_flags & SA_ONESHOT)
            handler->sa_handler = SIG_DFL;
-        if (!(handler->sa_flags & SA_NOMASK))
-            thread->sig_block_mask |= (handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGS;
-        return global_resched;
-        } else
-            signalMask >>= 1;
+        if ((handler->sa_flags & SA_NOMASK) == 0) {
+            // Update the block mask while the signal handler is running - it
+            // will be automatically restored when the signal frame is left.
+            atomic_or(&thread->sig_block_mask,
+                (handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
+        }
+        arch_check_syscall_restart(thread);
+        return reschedule;
     }
     arch_check_syscall_restart(thread);
-    return global_resched;
+    return reschedule;
 }
 bool
-is_kill_signal_pending()
+is_kill_signal_pending(void)
 {
-    bool result;
-    struct thread *thread = thread_get_current_thread();
-    cpu_status state = disable_interrupts();
-    GRAB_THREAD_LOCK();
-    result = (thread->sig_pending & KILL_SIGNALS);
-    RELEASE_THREAD_LOCK();
-    restore_interrupts(state);
-    return result;
+    return (atomic_get(&thread_get_current_thread()->sig_pending) & KILL_SIGNALS) != 0;
 }
 /** Delivers the \a signal to the \a thread, but doesn't handle the signal -
  *    it just makes sure the thread gets the signal, ie. unblocks it if needed.
+ *    This function must be called with interrupts disabled and the
+ *    thread lock held.
  */
 static status_t
 deliver_signal(struct thread *thread, uint signal, uint32 flags)
 {
@@ -253,7 +225,7 @@ deliver_signal(struct thread *thread, uint signal, uint32 flags)
         return B_OK;
     }
-    thread->sig_pending |= SIGNAL_TO_MASK(signal);
+    atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));
     switch (signal) {
         case SIGKILL:
@@ -398,7 +370,7 @@ has_signals_pending(void *_thread)
     if (thread == NULL)
         thread = thread_get_current_thread();
-    return thread->sig_pending & ~thread->sig_block_mask;
+    return atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask);
 }
@@ -406,20 +378,17 @@
 int
 sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
 {
     struct thread *thread = thread_get_current_thread();
-    sigset_t oldMask = thread->sig_block_mask;
-        // ToDo: "sig_block_mask" is probably not the right thing to change?
-        // At least it's often changed at other places...
+    sigset_t oldMask = atomic_get(&thread->sig_block_mask);
     switch (how) {
         case SIG_BLOCK:
-            atomic_or(&thread->sig_block_mask, *set);
+            atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
             break;
         case SIG_UNBLOCK:
             atomic_and(&thread->sig_block_mask, ~*set);
             break;
         case SIG_SETMASK:
-            atomic_set(&thread->sig_block_mask, *set);
+            atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
             break;
         default:
@@ -433,7 +402,7 @@ sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
 }
-/** \brief Similar to sigaction(), just for a specified thread.
+/** \brief sigaction() for the specified thread.
  *
  *    A \a threadID is < 0 specifies the current thread.
  *
@@ -441,15 +410,15 @@ sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
 int
 sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
-    struct sigaction *oact)
+    struct sigaction *oldAction)
 {
     struct thread *thread;
     cpu_status state;
     status_t error = B_OK;
     if (signal < 1 || signal > MAX_SIGNO
-        || signal == SIGKILL || signal == SIGKILLTHR || signal == SIGSTOP)
-        return EINVAL;
+        || (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
+        return B_BAD_VALUE;
     state = disable_interrupts();
     GRAB_THREAD_LOCK();
@@ -459,25 +428,28 @@ sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
         : thread_get_thread_struct_locked(threadID));
     if (thread) {
-        if (oact) {
-            memcpy(oact, &thread->sig_action[signal - 1],
+        if (oldAction) {
+            // save previous sigaction structure
+            memcpy(oldAction, &thread->sig_action[signal - 1],
                 sizeof(struct sigaction));
         }
         if (act) {
+            // set new sigaction structure
             memcpy(&thread->sig_action[signal - 1], act,
                 sizeof(struct sigaction));
+            thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
         }
-        if (act && act->sa_handler == SIG_IGN)
-            thread->sig_pending &= ~SIGNAL_TO_MASK(signal);
-        else if (act && act->sa_handler == SIG_DFL) {
-            if (signal == SIGCONT || signal == SIGCHLD
-                || signal == SIGWINCH) {
-                thread->sig_pending &= ~SIGNAL_TO_MASK(signal);
+        if (act && act->sa_handler == SIG_IGN) {
+            // remove pending signal if it should now be ignored
+            atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
+        } else if (act && act->sa_handler == SIG_DFL
+            && (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != NULL) {
+            // remove pending signal for those signals whose default
+            // action is to ignore them
+            atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
             }
-        } /*else
-            dprintf("### custom signal handler set\n");*/
     } else
         error = B_BAD_THREAD_ID;
@@ -489,9 +461,9 @@ sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
 int
-sigaction(int signal, const struct sigaction *act, struct sigaction *oact)
+sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
 {
-    return sigaction_etc(-1, signal, act, oact);
+    return sigaction_etc(-1, signal, act, oldAction);
 }
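
A user-space view of the sigprocmask() semantics above: SIG_BLOCK and
SIG_SETMASK are now filtered through BLOCKABLE_SIGNALS, so an attempt to
block SIGKILL or SIGSTOP is silently dropped. A minimal POSIX sketch (the
silent-drop behavior on Haiku is what this commit implements):

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigaddset(&set, SIGKILL);    /* will not actually become blocked */

        if (sigprocmask(SIG_BLOCK, &set, NULL) != 0)
            perror("sigprocmask");

        /* query the resulting mask; only SIGUSR1 should be in it */
        sigprocmask(SIG_SETMASK, NULL, &set);
        printf("SIGUSR1 blocked: %d\n", sigismember(&set, SIGUSR1));
        printf("SIGKILL blocked: %d\n", sigismember(&set, SIGKILL));
        return 0;
    }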

View File

@@ -1097,16 +1097,18 @@ thread_at_kernel_exit(void)
     TRACE(("thread_atkernel_exit: exit thread 0x%lx\n", thread->id));
+    if (handle_signals(thread)) {
         state = disable_interrupts();
         GRAB_THREAD_LOCK();
-    if (handle_signals(thread, &state))
-        scheduler_reschedule();
             // was: smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
+        thread->in_kernel = false;
+        scheduler_reschedule();
+        RELEASE_THREAD_LOCK();
+    } else
+        state = disable_interrupts();
     thread->in_kernel = false;
     // track kernel time
     now = system_time();
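
Taken together, the thread lock becomes unnecessary here because both sides
now touch sig_pending only through atomic read-modify-write operations:
deliver_signal() sets bits with atomic_or() while handle_signals() reads
and clears them with atomic_get()/atomic_and(). A user-space model of that
protocol, with C11 atomics standing in for the kernel's primitives (an
assumption for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SIGNAL_TO_MASK(sig) (1u << ((sig) - 1))

    static _Atomic uint32_t sig_pending;
    static _Atomic uint32_t sig_block_mask;

    /* sender side, cf. deliver_signal(): mark the signal pending */
    static void deliver(int sig)
    {
        atomic_fetch_or(&sig_pending, SIGNAL_TO_MASK(sig));
    }

    /* receiver side, cf. handle_signals(): take one deliverable signal,
       clearing its bit so no other path handles it twice */
    static bool take(int sig)
    {
        uint32_t mask = SIGNAL_TO_MASK(sig);

        if ((atomic_load(&sig_pending) & ~atomic_load(&sig_block_mask) & mask) == 0)
            return false;

        atomic_fetch_and(&sig_pending, ~mask);
        return true;
    }

    int main(void)
    {
        deliver(10);                  /* 10 is just an example signal number */
        return take(10) ? 0 : 1;      /* bit observed and cleared exactly once */
    }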