d648afb8d7
address space that is fully locked and marked B_KERNEL_AREA. It can thus be accessed by the kernel without additional checks. * For each userland thread we do create a user_thread structure in that area. The structure is accessible from userland via TLS, using the private get_user_thread() function. * Introduced private userland functions [un]defer_signals(). They can be used to cheaply disable/re-enable signal delivery. They use the user_thread::defer_signals/pending_signals fields which are checked/updated by the kernel. git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25451 a95241bf-73f2-0310-859d-f6bbb57e9c96
1050 lines
25 KiB
C++
1050 lines
25 KiB
C++
/*
|
|
* Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
|
|
* Copyright 2002, Angelo Mottola, a.mottola@libero.it.
|
|
*
|
|
* Distributed under the terms of the MIT License.
|
|
*/
|
|
|
|
/*! POSIX signals handling routines */
|
|
|
|
#include <ksignal.h>
|
|
|
|
#include <stddef.h>
|
|
#include <string.h>
|
|
|
|
#include <OS.h>
|
|
#include <KernelExport.h>
|
|
|
|
#include <debug.h>
|
|
#include <kernel.h>
|
|
#include <kscheduler.h>
|
|
#include <sem.h>
|
|
#include <syscall_restart.h>
|
|
#include <team.h>
|
|
#include <thread.h>
|
|
#include <tracing.h>
|
|
#include <user_debugger.h>
|
|
#include <user_thread.h>
|
|
#include <util/AutoLock.h>
|
|
|
|
|
|
// Define TRACE_SIGNAL to get dprintf() tracing of this file's code paths.
//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#   define TRACE(x) dprintf x
#else
#   define TRACE(x) ;
#endif


// Signals that a thread may block: everything except the kill signals and
// SIGSTOP, which must always be deliverable.
#define BLOCKABLE_SIGNALS       (~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
// The job-control stop signals; a SIGCONT removes any of these from the
// pending set (see deliver_signal()).
#define STOP_SIGNALS \
    (SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
    | SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
// Signals whose default action is to be ignored; a pending instance is
// discarded when the handler is reset to SIG_DFL (see sigaction_etc()).
#define DEFAULT_IGNORE_SIGNALS \
    (SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
    | SIGNAL_TO_MASK(SIGCONT))
// Signals that are delivered even while the userland thread has deferred
// signal delivery via defer_signals (see handle_signals()).
#define NON_DEFERRABLE_SIGNALS \
    (KILL_SIGNALS \
    | SIGNAL_TO_MASK(SIGILL) \
    | SIGNAL_TO_MASK(SIGFPE) \
    | SIGNAL_TO_MASK(SIGSEGV))
|
|
|
|
|
|
// Human-readable signal names, indexed by signal number (index 0 is the
// unused "NONE" slot, so sigstr[signal] works for signal 1..NSIG-1).
// NOTE(review): the order reflects this platform's signal numbering
// (e.g. SIGCHLD == 5 here), not the traditional UNIX numbering.
const char * const sigstr[NSIG] = {
    "NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
    "FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
    "TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
    "POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
};
|
|
|
|
|
|
static status_t deliver_signal(struct thread *thread, uint signal,
|
|
uint32 flags);
|
|
|
|
|
|
|
|
// #pragma mark - signal tracing
|
|
|
|
|
|
#if SIGNAL_TRACING
|
|
|
|
namespace SignalTracing {
|
|
|
|
|
|
// Trace entry recording the mask of signals that handle_signals() is about
// to process.
class HandleSignals : public AbstractTraceEntry {
    public:
        HandleSignals(uint32 signals)
            :
            fSignals(signals)
        {
            Initialized();
        }

        virtual void AddDump(TraceOutput& out)
        {
            out.Print("signal handle: 0x%lx", fSignals);
        }

    private:
        uint32 fSignals;    // pending & unblocked signal mask at handling time
};
|
|
|
|
|
|
// Trace entry recording that a userland signal handler frame is about to be
// set up for \a signal with the given \a handler.
class ExecuteSignalHandler : public AbstractTraceEntry {
    public:
        ExecuteSignalHandler(int signal, struct sigaction* handler)
            :
            fSignal(signal),
            fHandler((void*)handler->sa_handler)
        {
            Initialized();
        }

        virtual void AddDump(TraceOutput& out)
        {
            out.Print("signal exec handler: signal: %d, handler: %p",
                fSignal, fHandler);
        }

    private:
        int     fSignal;    // signal number being delivered
        void*   fHandler;   // userland handler function address
};
|
|
|
|
|
|
// Trace entry recording a send_signal_etc() call: target thread/team/group
// id, signal number, and flags.
class SendSignal : public AbstractTraceEntry {
    public:
        SendSignal(pid_t target, uint32 signal, uint32 flags)
            :
            fTarget(target),
            fSignal(signal),
            fFlags(flags)
        {
            Initialized();
        }

        virtual void AddDump(TraceOutput& out)
        {
            out.Print("signal send: target: %ld, signal: %lu (%s), "
                "flags: 0x%lx", fTarget, fSignal,
                (fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
        }

    private:
        pid_t   fTarget;    // id as passed to send_signal_etc() (may be <= 0)
        uint32  fSignal;
        uint32  fFlags;
};
|
|
|
|
|
|
// Trace entry recording a sigaction_etc() update: the affected thread, the
// signal, and a copy of the new action (taken at construction time, since
// the caller's structure may change afterwards).
class SigAction : public AbstractTraceEntry {
    public:
        SigAction(struct thread* thread, uint32 signal,
                const struct sigaction* act)
            :
            fThread(thread->id),
            fSignal(signal),
            fAction(*act)
        {
            Initialized();
        }

        virtual void AddDump(TraceOutput& out)
        {
            out.Print("signal action: thread: %ld, signal: %lu (%s), "
                "action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
                fThread, fSignal,
                (fSignal < NSIG ? sigstr[fSignal] : "invalid"),
                fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
        }

    private:
        thread_id           fThread;
        uint32              fSignal;
        struct sigaction    fAction;    // by-value snapshot of the new action
};
|
|
|
|
|
|
// Trace entry recording a sigprocmask() call. The old mask is captured from
// the current thread at construction time, i.e. before the change is applied.
class SigProcMask : public AbstractTraceEntry {
    public:
        SigProcMask(int how, sigset_t mask)
            :
            fHow(how),
            fMask(mask),
            fOldMask(thread_get_current_thread()->sig_block_mask)
        {
            Initialized();
        }

        virtual void AddDump(TraceOutput& out)
        {
            // translate the "how" constant into a readable verb
            const char* how = "invalid";
            switch (fHow) {
                case SIG_BLOCK:
                    how = "block";
                    break;
                case SIG_UNBLOCK:
                    how = "unblock";
                    break;
                case SIG_SETMASK:
                    how = "set";
                    break;
            }

            out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
                fOldMask);
        }

    private:
        int         fHow;       // SIG_BLOCK, SIG_UNBLOCK, or SIG_SETMASK
        sigset_t    fMask;      // mask argument passed by the caller
        sigset_t    fOldMask;   // block mask before the change
};
|
|
|
|
} // namespace SignalTracing
|
|
|
|
# define T(x) new(std::nothrow) SignalTracing::x
|
|
|
|
#else
|
|
# define T(x)
|
|
#endif // SIGNAL_TRACING
|
|
|
|
|
|
// #pragma mark -
|
|
|
|
|
|
/*! Updates the thread::flags field according to what signals are pending.
    Sets THREAD_FLAGS_SIGNALS_PENDING iff at least one pending signal is not
    blocked by the thread's block mask, so fast paths can test a single flag
    instead of recomputing the mask intersection.
    Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
    if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
        atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
    else
        atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}
|
|
|
|
|
|
/*! Convenience wrapper around update_thread_signals_flag() for the current
    thread; acquires the thread spinlock (with interrupts disabled) itself.
*/
void
update_current_thread_signals_flag()
{
    InterruptsSpinLocker locker(thread_spinlock);

    update_thread_signals_flag(thread_get_current_thread());
}
|
|
|
|
|
|
/*! Notifies the debugger about a signal to be delivered to \a thread.
    First honors the per-thread ignore masks the debugger may have set:
    ignore_signals_once entries are consumed (cleared) on first match,
    ignore_signals entries persist. In either ignore case the signal is
    treated as if the debugger had approved it.
    \return \c true if the signal should be delivered to the thread,
        \c false if the debugger swallowed it (per user_debug_handle_signal()).
*/
static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
    bool deadly)
{
    uint64 signalMask = SIGNAL_TO_MASK(signal);

    // first check the ignore signal masks the debugger specified for the thread

    if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
        // one-shot ignore: consume the mask bit
        atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
        return true;
    }

    if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
        return true;

    // deliver the event
    return user_debug_handle_signal(signal, handler, deadly);
}
|
|
|
|
|
|
/*! Actually handles the signal - ie. the thread will exit, a custom signal
|
|
handler is prepared, or whatever the signal demands.
|
|
*/
|
|
bool
|
|
handle_signals(struct thread *thread)
|
|
{
|
|
uint32 signalMask = atomic_get(&thread->sig_pending)
|
|
& ~atomic_get(&thread->sig_block_mask);
|
|
|
|
// If SIGKILL[THR] are pending, we ignore other signals.
|
|
// Otherwise check, if the thread shall stop for debugging.
|
|
if (signalMask & KILL_SIGNALS) {
|
|
signalMask &= KILL_SIGNALS;
|
|
} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
|
|
user_debug_stop_thread();
|
|
}
|
|
|
|
if (signalMask == 0)
|
|
return 0;
|
|
|
|
if (thread->user_thread->defer_signals > 0
|
|
&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
|
|
thread->user_thread->pending_signals = signalMask;
|
|
return 0;
|
|
}
|
|
|
|
thread->user_thread->pending_signals = 0;
|
|
|
|
bool restart = (atomic_and(&thread->flags,
|
|
~THREAD_FLAGS_DONT_RESTART_SYSCALL)
|
|
& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
|
|
|
|
T(HandleSignals(signalMask));
|
|
|
|
for (int32 i = 0; i < NSIG; i++) {
|
|
bool debugSignal;
|
|
int32 signal = i + 1;
|
|
|
|
if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
|
|
continue;
|
|
|
|
// clear the signal that we will handle
|
|
atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
|
|
|
|
debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
|
|
& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));
|
|
|
|
// TODO: since sigaction_etc() could clobber the fields at any time,
|
|
// we should actually copy the relevant fields atomically before
|
|
// accessing them (only the debugger is calling sigaction_etc()
|
|
// right now).
|
|
// Update: sigaction_etc() is only used by the userland debugger
|
|
// support. We can just as well restrict getting/setting signal
|
|
// handlers to work only when the respective thread is stopped.
|
|
// Then sigaction() could be used instead and we could get rid of
|
|
// sigaction_etc().
|
|
struct sigaction* handler = &thread->sig_action[i];
|
|
|
|
TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));
|
|
|
|
if (handler->sa_handler == SIG_IGN) {
|
|
// signal is to be ignored
|
|
// ToDo: apply zombie cleaning on SIGCHLD
|
|
|
|
// notify the debugger
|
|
if (debugSignal)
|
|
notify_debugger(thread, signal, handler, false);
|
|
continue;
|
|
} else if (handler->sa_handler == SIG_DFL) {
|
|
// default signal behaviour
|
|
switch (signal) {
|
|
case SIGCHLD:
|
|
case SIGWINCH:
|
|
case SIGURG:
|
|
// notify the debugger
|
|
if (debugSignal)
|
|
notify_debugger(thread, signal, handler, false);
|
|
continue;
|
|
|
|
case SIGCONT:
|
|
// notify the debugger
|
|
if (debugSignal
|
|
&& !notify_debugger(thread, signal, handler, false))
|
|
continue;
|
|
|
|
// notify threads waiting for team state changes
|
|
if (thread == thread->team->main_thread) {
|
|
InterruptsSpinLocker locker(team_spinlock);
|
|
team_set_job_control_state(thread->team,
|
|
JOB_CONTROL_STATE_CONTINUED, signal, false);
|
|
|
|
// The standard states that the system *may* send a
|
|
// SIGCHLD when a child is continued. I haven't found
|
|
// a good reason why we would want to, though.
|
|
}
|
|
continue;
|
|
|
|
case SIGSTOP:
|
|
case SIGTSTP:
|
|
case SIGTTIN:
|
|
case SIGTTOU:
|
|
// notify the debugger
|
|
if (debugSignal
|
|
&& !notify_debugger(thread, signal, handler, false))
|
|
continue;
|
|
|
|
thread->next_state = B_THREAD_SUSPENDED;
|
|
|
|
// notify threads waiting for team state changes
|
|
if (thread == thread->team->main_thread) {
|
|
InterruptsSpinLocker locker(team_spinlock);
|
|
team_set_job_control_state(thread->team,
|
|
JOB_CONTROL_STATE_STOPPED, signal, false);
|
|
|
|
// send a SIGCHLD to the parent (if it does have
|
|
// SA_NOCLDSTOP defined)
|
|
SpinLocker _(thread_spinlock);
|
|
struct thread* parentThread
|
|
= thread->team->parent->main_thread;
|
|
struct sigaction& parentHandler
|
|
= parentThread->sig_action[SIGCHLD - 1];
|
|
if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
|
|
deliver_signal(parentThread, SIGCHLD, 0);
|
|
}
|
|
|
|
return true;
|
|
|
|
case SIGQUIT:
|
|
case SIGILL:
|
|
case SIGTRAP:
|
|
case SIGABRT:
|
|
case SIGFPE:
|
|
case SIGSEGV:
|
|
case SIGPOLL:
|
|
case SIGPROF:
|
|
case SIGSYS:
|
|
case SIGVTALRM:
|
|
case SIGXCPU:
|
|
case SIGXFSZ:
|
|
TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
|
|
thread->id, signal));
|
|
case SIGKILL:
|
|
case SIGKILLTHR:
|
|
default:
|
|
// if the thread exited normally, the exit reason is already set
|
|
if (thread->exit.reason != THREAD_RETURN_EXIT) {
|
|
thread->exit.reason = THREAD_RETURN_INTERRUPTED;
|
|
thread->exit.signal = (uint16)signal;
|
|
}
|
|
|
|
// notify the debugger
|
|
if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
|
|
&& !notify_debugger(thread, signal, handler, true))
|
|
continue;
|
|
|
|
thread_exit();
|
|
// won't return
|
|
}
|
|
}
|
|
|
|
// User defined signal handler
|
|
|
|
// notify the debugger
|
|
if (debugSignal && !notify_debugger(thread, signal, handler, false))
|
|
continue;
|
|
|
|
if (!restart || (handler->sa_flags & SA_RESTART) == 0)
|
|
atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
|
|
|
|
T(ExecuteSignalHandler(signal, handler));
|
|
|
|
TRACE(("### Setting up custom signal handler frame...\n"));
|
|
arch_setup_signal_frame(thread, handler, signal,
|
|
atomic_get(&thread->sig_block_mask));
|
|
|
|
if (handler->sa_flags & SA_ONESHOT)
|
|
handler->sa_handler = SIG_DFL;
|
|
if ((handler->sa_flags & SA_NOMASK) == 0) {
|
|
// Update the block mask while the signal handler is running - it
|
|
// will be automatically restored when the signal frame is left.
|
|
atomic_or(&thread->sig_block_mask,
|
|
(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
|
|
}
|
|
|
|
update_current_thread_signals_flag();
|
|
|
|
return false;
|
|
}
|
|
|
|
// clear syscall restart thread flag, if we're not supposed to restart the
|
|
// syscall
|
|
if (!restart)
|
|
atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
|
|
|
|
update_current_thread_signals_flag();
|
|
|
|
return false;
|
|
}
|
|
|
|
|
|
bool
|
|
is_kill_signal_pending(void)
|
|
{
|
|
return (atomic_get(&thread_get_current_thread()->sig_pending)
|
|
& KILL_SIGNALS) != 0;
|
|
}
|
|
|
|
|
|
bool
|
|
is_signal_blocked(int signal)
|
|
{
|
|
return (atomic_get(&thread_get_current_thread()->sig_block_mask)
|
|
& SIGNAL_TO_MASK(signal)) != 0;
|
|
}
|
|
|
|
|
|
/*! Delivers the \a signal to the \a thread, but doesn't handle the signal -
|
|
it just makes sure the thread gets the signal, ie. unblocks it if needed.
|
|
This function must be called with interrupts disabled and the
|
|
thread lock held.
|
|
*/
|
|
static status_t
|
|
deliver_signal(struct thread *thread, uint signal, uint32 flags)
|
|
{
|
|
if (flags & B_CHECK_PERMISSION) {
|
|
// ToDo: introduce euid & uid fields to the team and check permission
|
|
}
|
|
|
|
if (signal == 0)
|
|
return B_OK;
|
|
|
|
if (thread->team == team_get_kernel_team()) {
|
|
// Signals to kernel threads will only wake them up
|
|
if (thread->state == B_THREAD_SUSPENDED)
|
|
scheduler_enqueue_in_run_queue(thread);
|
|
return B_OK;
|
|
}
|
|
|
|
atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));
|
|
|
|
switch (signal) {
|
|
case SIGKILL:
|
|
{
|
|
struct thread *mainThread = thread->team->main_thread;
|
|
// Forward KILLTHR to the main thread of the team
|
|
|
|
mainThread->sig_pending |= SIGNAL_TO_MASK(SIGKILLTHR);
|
|
// Wake up main thread
|
|
if (mainThread->state == B_THREAD_SUSPENDED)
|
|
scheduler_enqueue_in_run_queue(mainThread);
|
|
else
|
|
thread_interrupt(mainThread, true);
|
|
|
|
// Supposed to fall through
|
|
}
|
|
case SIGKILLTHR:
|
|
// Wake up suspended threads and interrupt waiting ones
|
|
if (thread->state == B_THREAD_SUSPENDED)
|
|
scheduler_enqueue_in_run_queue(thread);
|
|
else
|
|
thread_interrupt(thread, true);
|
|
break;
|
|
|
|
case SIGCONT:
|
|
// Wake up thread if it was suspended
|
|
if (thread->state == B_THREAD_SUSPENDED)
|
|
scheduler_enqueue_in_run_queue(thread);
|
|
|
|
if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
|
|
atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
|
|
|
|
atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
|
|
// remove any pending stop signals
|
|
break;
|
|
|
|
default:
|
|
if (thread->sig_pending
|
|
& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
|
|
// Interrupt thread if it was waiting
|
|
thread_interrupt(thread, false);
|
|
}
|
|
break;
|
|
}
|
|
|
|
update_thread_signals_flag(thread);
|
|
|
|
return B_OK;
|
|
}
|
|
|
|
|
|
int
|
|
send_signal_etc(pid_t id, uint signal, uint32 flags)
|
|
{
|
|
status_t status = B_BAD_THREAD_ID;
|
|
struct thread *thread;
|
|
cpu_status state = 0;
|
|
|
|
if (signal < 0 || signal > MAX_SIGNO)
|
|
return B_BAD_VALUE;
|
|
|
|
T(SendSignal(id, signal, flags));
|
|
|
|
if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
|
|
state = disable_interrupts();
|
|
|
|
if (id > 0) {
|
|
// send a signal to the specified thread
|
|
|
|
GRAB_THREAD_LOCK();
|
|
|
|
thread = thread_get_thread_struct_locked(id);
|
|
if (thread != NULL)
|
|
status = deliver_signal(thread, signal, flags);
|
|
} else {
|
|
// send a signal to the specified process group
|
|
// (the absolute value of the id)
|
|
|
|
struct process_group *group;
|
|
|
|
// TODO: handle -1 correctly
|
|
if (id == 0 || id == -1) {
|
|
// send a signal to the current team
|
|
id = thread_get_current_thread()->team->id;
|
|
} else
|
|
id = -id;
|
|
|
|
if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
|
|
GRAB_TEAM_LOCK();
|
|
|
|
group = team_get_process_group_locked(NULL, id);
|
|
if (group != NULL) {
|
|
struct team *team, *next;
|
|
|
|
// Send a signal to all teams in this process group
|
|
|
|
for (team = group->teams; team != NULL; team = next) {
|
|
next = team->group_next;
|
|
id = team->id;
|
|
|
|
GRAB_THREAD_LOCK();
|
|
|
|
thread = thread_get_thread_struct_locked(id);
|
|
if (thread != NULL) {
|
|
// we don't stop because of an error sending the signal; we
|
|
// rather want to send as much signals as possible
|
|
status = deliver_signal(thread, signal, flags);
|
|
}
|
|
|
|
RELEASE_THREAD_LOCK();
|
|
}
|
|
}
|
|
|
|
if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
|
|
RELEASE_TEAM_LOCK();
|
|
|
|
GRAB_THREAD_LOCK();
|
|
}
|
|
|
|
// ToDo: maybe the scheduler should only be invoked if there is reason to do it?
|
|
// (ie. deliver_signal() moved some threads in the running queue?)
|
|
if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
|
|
scheduler_reschedule();
|
|
|
|
RELEASE_THREAD_LOCK();
|
|
|
|
if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
|
|
restore_interrupts(state);
|
|
|
|
return status;
|
|
}
|
|
|
|
|
|
int
|
|
send_signal(pid_t threadID, uint signal)
|
|
{
|
|
// The BeBook states that this function wouldn't be exported
|
|
// for drivers, but, of course, it's wrong.
|
|
return send_signal_etc(threadID, signal, 0);
|
|
}
|
|
|
|
|
|
int
|
|
has_signals_pending(void *_thread)
|
|
{
|
|
struct thread *thread = (struct thread *)_thread;
|
|
if (thread == NULL)
|
|
thread = thread_get_current_thread();
|
|
|
|
return atomic_get(&thread->sig_pending)
|
|
& ~atomic_get(&thread->sig_block_mask);
|
|
}
|
|
|
|
|
|
/*! Kernel-level sigprocmask(): adjusts the current thread's signal block
    mask according to \a how (SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK). The
    non-blockable signals (kill signals, SIGSTOP) are filtered out of any
    new mask. If \a oldSet is given, it receives the mask as it was before
    the change. \a set may be \c NULL to only query the current mask.
    \return \c B_OK, or \c B_BAD_VALUE for an unknown \a how.
*/
int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
    struct thread *thread = thread_get_current_thread();
    // snapshot the old mask before applying any change
    sigset_t oldMask = atomic_get(&thread->sig_block_mask);

    if (set != NULL) {
        T(SigProcMask(how, *set));

        switch (how) {
            case SIG_BLOCK:
                atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
                break;
            case SIG_UNBLOCK:
                atomic_and(&thread->sig_block_mask, ~*set);
                break;
            case SIG_SETMASK:
                atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
                break;
            default:
                return B_BAD_VALUE;
        }

        // mask changed - recompute THREAD_FLAGS_SIGNALS_PENDING
        update_current_thread_signals_flag();
    }

    if (oldSet != NULL)
        *oldSet = oldMask;

    return B_OK;
}
|
|
|
|
|
|
/*! \brief sigaction() for the specified thread.
    A \a threadID is < 0 specifies the current thread.
    Gets and/or sets the action for \a signal on the given thread. Signals
    that cannot be caught (the non-blockable ones) are rejected. Setting
    SIG_IGN - or SIG_DFL for a default-ignored signal - also discards a
    pending instance of that signal.
    \return \c B_OK, \c B_BAD_VALUE for an invalid/uncatchable signal, or
        \c B_BAD_THREAD_ID if the thread doesn't exist.
*/
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
    struct sigaction *oldAction)
{
    struct thread *thread;
    cpu_status state;
    status_t error = B_OK;

    // reject invalid signal numbers and the non-blockable (uncatchable) ones
    if (signal < 1 || signal > MAX_SIGNO
        || (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
        return B_BAD_VALUE;

    state = disable_interrupts();
    GRAB_THREAD_LOCK();

    thread = (threadID < 0
        ? thread_get_current_thread()
        : thread_get_thread_struct_locked(threadID));

    if (thread) {
        if (oldAction) {
            // save previous sigaction structure
            memcpy(oldAction, &thread->sig_action[signal - 1],
                sizeof(struct sigaction));
        }

        if (act) {
            T(SigAction(thread, signal, act));

            // set new sigaction structure
            memcpy(&thread->sig_action[signal - 1], act,
                sizeof(struct sigaction));
            // the handler must not mask out unblockable signals
            thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
        }

        if (act && act->sa_handler == SIG_IGN) {
            // remove pending signal if it should now be ignored
            atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
        } else if (act && act->sa_handler == SIG_DFL
            && (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
            // remove pending signal for those signals whose default
            // action is to ignore them
            atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
        }
    } else
        error = B_BAD_THREAD_ID;

    RELEASE_THREAD_LOCK();
    restore_interrupts(state);

    return error;
}
|
|
|
|
|
|
/*! Kernel-level sigaction() for the current thread; see sigaction_etc()
    (a negative thread ID means "current thread").
*/
int
sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
{
    return sigaction_etc(-1, signal, act, oldAction);
}
|
|
|
|
|
|
/*! Triggers a SIGALRM to the thread that issued the timer and reschedules */
static int32
alarm_event(timer *t)
{
    // The hook can be called from any context, but we have to
    // deliver the signal to the thread that originally called
    // set_alarm().
    // Since thread->alarm is this timer structure, we can just
    // cast it back - ugly but it works for now
    struct thread *thread = (struct thread *)((uint8 *)t
        - offsetof(struct thread, alarm));
        // ToDo: investigate adding one user parameter to the timer structure to fix this hack

    TRACE(("alarm_event: thread = %p\n", thread));
    // B_DO_NOT_RESCHEDULE: we're in the timer hook; the scheduler is
    // invoked via the B_INVOKE_SCHEDULER return value instead
    send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);

    return B_INVOKE_SCHEDULER;
}
|
|
|
|
|
|
/*! Sets the alarm timer for the current thread. The timer fires at the
    specified time in the future, periodically or just once, as determined
    by \a mode.
    Passing \c B_INFINITE_TIMEOUT as \a time cancels a pending alarm
    without scheduling a new one.
    \return the time left until a previous set alarm would have fired.
*/
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
    struct thread *thread = thread_get_current_thread();
    bigtime_t remainingTime = 0;

    ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
        // just to be sure no one changes the headers some day

    TRACE(("set_alarm: thread = %p\n", thread));

    // a non-zero period marks the alarm as armed (see below)
    if (thread->alarm.period)
        remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();

    cancel_timer(&thread->alarm);

    if (time != B_INFINITE_TIMEOUT)
        add_timer(&thread->alarm, &alarm_event, time, mode);
    else {
        // this marks the alarm as canceled (for returning the remaining time)
        thread->alarm.period = 0;
    }

    return remainingTime;
}
|
|
|
|
|
|
/*! Wait for the specified signals, and return the signal retrieved in
    \a _signal.
    Blocks (interruptibly) until the thread has any pending, unblocked
    signal, then returns the lowest-numbered pending signal that is also in
    \a set.
    NOTE(review): if the wakeup was caused by a pending signal NOT in
    \a set, this returns B_INTERRUPTED rather than continuing to wait, and
    the returned signal is not removed from the pending set - both look
    like deviations from POSIX sigwait(); confirm intended semantics.
*/
int
sigwait(const sigset_t *set, int *_signal)
{
    struct thread *thread = thread_get_current_thread();

    while (!has_signals_pending(thread)) {
        thread_prepare_to_block(thread, B_CAN_INTERRUPT,
            THREAD_BLOCK_TYPE_SIGNAL, NULL);
        thread_block();
    }

    // intersect what is pending with what the caller waits for
    int signalsPending = atomic_get(&thread->sig_pending) & *set;

    update_current_thread_signals_flag();

    if (signalsPending) {
        // select the lowest pending signal to return in _signal
        for (int signal = 1; signal < NSIG; signal++) {
            if ((SIGNAL_TO_MASK(signal) & signalsPending) != 0) {
                *_signal = signal;
                return B_OK;
            }
        }
    }

    return B_INTERRUPTED;
}
|
|
|
|
|
|
/*! Replace the current signal block mask and wait for any event to happen.
    Before returning, the original signal block mask is reinstantiated.
    Always returns \c B_INTERRUPTED, as required for sigsuspend(): the
    function only ever ends because a signal arrived.
*/
int
sigsuspend(const sigset_t *mask)
{
    struct thread *thread = thread_get_current_thread();
    sigset_t oldMask = atomic_get(&thread->sig_block_mask);

    // Set the new block mask and block until interrupted.

    atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);

    while (!has_signals_pending(thread)) {
        thread_prepare_to_block(thread, B_CAN_INTERRUPT,
            THREAD_BLOCK_TYPE_SIGNAL, NULL);
        thread_block();
    }

    // restore the original block mask
    atomic_set(&thread->sig_block_mask, oldMask);

    update_current_thread_signals_flag();

    // we're not supposed to actually succeed
    return B_INTERRUPTED;
}
|
|
|
|
|
|
int
|
|
sigpending(sigset_t *set)
|
|
{
|
|
struct thread *thread = thread_get_current_thread();
|
|
|
|
if (set == NULL)
|
|
return B_BAD_VALUE;
|
|
|
|
*set = atomic_get(&thread->sig_pending);
|
|
return B_OK;
|
|
}
|
|
|
|
|
|
// #pragma mark -
|
|
|
|
|
|
/*! Syscall entry for set_alarm(); marks the syscall as returning a 64 bit
    value before delegating.
*/
bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
    syscall_64_bit_return_value();

    return set_alarm(time, mode);
}
|
|
|
|
|
|
/*! Syscall entry for send_signal(); unlike the kernel-internal variant,
    permission checking is requested via B_CHECK_PERMISSION.
*/
status_t
_user_send_signal(pid_t team, uint signal)
{
    return send_signal_etc(team, signal, B_CHECK_PERMISSION);
}
|
|
|
|
|
|
/*! Syscall entry for sigprocmask(): copies the userland sets into kernel
    space, delegates to sigprocmask(), and copies the old mask back if
    requested.
    \return the sigprocmask() result, or \c B_BAD_ADDRESS for invalid
        userland pointers.
*/
status_t
_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
    sigset_t set, oldSet;
    status_t status;

    // validate/copy both user buffers up front (userOldSet is probed here
    // so a bad pointer fails before the mask is modified)
    if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
        || (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
                sizeof(sigset_t)) < B_OK))
        return B_BAD_ADDRESS;

    status = sigprocmask(how, userSet ? &set : NULL,
        userOldSet ? &oldSet : NULL);

    // copy old set if asked for
    if (status >= B_OK && userOldSet != NULL
        && user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
        return B_BAD_ADDRESS;

    return status;
}
|
|
|
|
|
|
/*! Syscall entry for sigaction(): copies the userland structures into
    kernel space, delegates to sigaction(), and copies the old action back
    if requested.
    \return the sigaction() result, or \c B_BAD_ADDRESS for invalid
        userland pointers.
*/
status_t
_user_sigaction(int signal, const struct sigaction *userAction,
    struct sigaction *userOldAction)
{
    struct sigaction act, oact;
    status_t status;

    // validate/copy both user buffers before touching any state
    if ((userAction != NULL && user_memcpy(&act, userAction,
                sizeof(struct sigaction)) < B_OK)
        || (userOldAction != NULL && user_memcpy(&oact, userOldAction,
                sizeof(struct sigaction)) < B_OK))
        return B_BAD_ADDRESS;

    status = sigaction(signal, userAction ? &act : NULL,
        userOldAction ? &oact : NULL);

    // only copy the old action if a pointer has been given
    if (status >= B_OK && userOldAction != NULL
        && user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
        return B_BAD_ADDRESS;

    return status;
}
|
|
|
|
|
|
/*! Syscall entry for sigwait(): copies the signal set from userland,
    delegates to sigwait(), and copies the retrieved signal back.
    On failure the syscall-restart handling is applied, so an interrupted
    call may be restarted transparently.
*/
status_t
_user_sigwait(const sigset_t *userSet, int *_userSignal)
{
    if (userSet == NULL || _userSignal == NULL)
        return B_BAD_VALUE;

    sigset_t set;
    if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
        return B_BAD_ADDRESS;

    int signal;
    status_t status = sigwait(&set, &signal);
    if (status < B_OK)
        return syscall_restart_handle_post(status);

    // user_memcpy() returns an error for a bad user pointer
    return user_memcpy(_userSignal, &signal, sizeof(int));
}
|
|
|
|
|
|
/*! Syscall entry for sigsuspend(): copies the mask from userland and
    delegates to sigsuspend() (which always returns \c B_INTERRUPTED).
*/
status_t
_user_sigsuspend(const sigset_t *userMask)
{
    sigset_t mask;

    if (userMask == NULL)
        return B_BAD_VALUE;
    if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
        return B_BAD_ADDRESS;

    return sigsuspend(&mask);
}
|
|
|
|
|
|
/*! Syscall entry for sigpending(): retrieves the pending signal mask via
    sigpending() and copies it out to userland.
*/
status_t
_user_sigpending(sigset_t *userSet)
{
    sigset_t set;
    int status;

    if (userSet == NULL)
        return B_BAD_VALUE;
    if (!IS_USER_ADDRESS(userSet))
        return B_BAD_ADDRESS;

    status = sigpending(&set);
    if (status == B_OK
        && user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
        return B_BAD_ADDRESS;

    return status;
}
|
|
|
|
|
|
/*! Syscall entry implementing sigaltstack(): gets and/or sets the current
    thread's alternate signal stack. The old configuration (including
    SS_DISABLE/SS_ONSTACK flags) is returned via \a oldUserStack if given;
    a new stack is installed from \a newUserStack unless it carries
    SS_DISABLE, in which case the signal stack is disabled.
    \return \c B_OK, \c B_BAD_ADDRESS for invalid userland pointers,
        \c B_BAD_VALUE for invalid flags or stack pointer, \c B_NO_MEMORY
        for a too small stack, \c B_NOT_ALLOWED when trying to change the
        stack while running on it.
*/
status_t
_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
{
    struct thread *thread = thread_get_current_thread();
    struct stack_t newStack, oldStack;
    bool onStack = false;

    if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
                sizeof(stack_t)) < B_OK)
        || (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
                sizeof(stack_t)) < B_OK))
        return B_BAD_ADDRESS;

    if (thread->signal_stack_enabled) {
        // determine whether or not the user thread is currently
        // on the active signal stack
        onStack = arch_on_signal_stack(thread);
    }

    if (oldUserStack != NULL) {
        oldStack.ss_sp = (void *)thread->signal_stack_base;
        oldStack.ss_size = thread->signal_stack_size;
        oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
            | (onStack ? SS_ONSTACK : 0);
    }

    if (newUserStack != NULL) {
        // no flags other than SS_DISABLE are allowed
        if ((newStack.ss_flags & ~SS_DISABLE) != 0)
            return B_BAD_VALUE;

        if ((newStack.ss_flags & SS_DISABLE) == 0) {
            // check if the size is valid
            if (newStack.ss_size < MINSIGSTKSZ)
                return B_NO_MEMORY;
            // changing the stack we are currently running on is not allowed
            if (onStack)
                return B_NOT_ALLOWED;
            if (!IS_USER_ADDRESS(newStack.ss_sp))
                return B_BAD_VALUE;

            thread->signal_stack_base = (addr_t)newStack.ss_sp;
            thread->signal_stack_size = newStack.ss_size;
            thread->signal_stack_enabled = true;
        } else
            thread->signal_stack_enabled = false;
    }

    // only copy the old stack info if a pointer has been given
    if (oldUserStack != NULL
        && user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
        return B_BAD_ADDRESS;

    return B_OK;
}
|
|
|