/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 *
 * Distributed under the terms of the MIT License.
 */

/* POSIX signal handling routines */
#include <stddef.h>
#include <string.h>

#include <OS.h>
#include <KernelExport.h>

#include <condition_variable.h>
#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <util/AutoLock.h>


//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define BLOCKABLE_SIGNALS (~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT))
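
/* Note on the mask macros above: SIGNAL_TO_MASK() (from ksignal.h) is
 * assumed here to map a signal number to a single bit, essentially
 * (1LL << (signal - 1)). Under that assumption, a property such as
 * "can this signal be blocked?" is a plain mask test, e.g.:
 *
 *	if ((SIGNAL_TO_MASK(SIGKILL) & BLOCKABLE_SIGNALS) == 0)
 *		; // SIGKILL can never be blocked
 */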


const char * const sigstr[NSIG] = {
	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
};


static status_t deliver_signal(struct thread *thread, uint signal,
	uint32 flags);


static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal);

	// first check the ignore signal masks the debugger specified for the thread

	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
		return true;
	}

	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
		return true;

	// deliver the event
	return user_debug_handle_signal(signal, handler, deadly);
}


/*! Actually handles the signal - ie. the thread will exit, a custom signal
	handler is prepared, or whatever the signal demands.
*/
bool
handle_signals(struct thread *thread)
{
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
	struct sigaction *handler;
	bool reschedule = false;
	bool restart = false;
	int32 i;

	// If SIGKILL[THR] are pending, we ignore other signals.
	// Otherwise check whether the thread shall stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return 0;

	for (i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
			& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		//	we should actually copy the relevant fields atomically before
		//	accessing them (only the debugger is calling sigaction_etc()
		//	right now).
		//	Update: sigaction_etc() is only used by the userland debugger
		//	support. We can just as well restrict getting/setting signal
		//	handlers to work only when the respective thread is stopped.
		//	Then sigaction() could be used instead and we could get rid of
		//	sigaction_etc().
		handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if ((handler->sa_flags & SA_RESTART) != 0)
			restart = true;

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		}

		if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(team_spinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					thread->next_state = B_THREAD_SUSPENDED;
					reschedule = true;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(team_spinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (unless it has
						// SA_NOCLDSTOP set for its SIGCHLD action)
						SpinLocker _(thread_spinlock);
						struct thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
							deliver_signal(parentThread, SIGCHLD, 0);
					}
					continue;

				case SIGQUIT:
				case SIGILL:
				case SIGTRAP:
				case SIGABRT:
				case SIGFPE:
				case SIGSEGV:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		// User defined signal handler
		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
		}

		return reschedule;
	}

	// only restart if SA_RESTART was set on at least one handler
	if (restart)
		arch_check_syscall_restart(thread);

	return reschedule;
}


bool
is_kill_signal_pending(void)
{
	return (atomic_get(&thread_get_current_thread()->sig_pending)
		& KILL_SIGNALS) != 0;
}


bool
is_signal_blocked(int signal)
{
	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
		& SIGNAL_TO_MASK(signal)) != 0;
}


/*! Tries to interrupt a thread waiting for a semaphore or a condition
	variable. Interrupts must be disabled and the thread lock must be held.
	Note that the thread lock may temporarily be released.
*/
static status_t
signal_interrupt_thread(struct thread* thread)
{
	if (thread->sem.blocking >= 0)
		return sem_interrupt_thread(thread);
	else if (thread->condition_variable_entry)
		return condition_variable_interrupt_thread(thread);

	return B_BAD_VALUE;
}


/*! Delivers the \a signal to the \a thread, but doesn't handle the signal -
	it just makes sure the thread gets the signal, ie. unblocks it if needed.
	This function must be called with interrupts disabled and the
	thread lock held.
*/
static status_t
deliver_signal(struct thread *thread, uint signal, uint32 flags)
{
	if (flags & B_CHECK_PERMISSION) {
		// ToDo: introduce euid & uid fields to the team and check permission
	}

	if (signal == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED) {
			thread->state = thread->next_state = B_THREAD_READY;
			scheduler_enqueue_in_run_queue(thread);
		}
		return B_OK;
	}

	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));

	switch (signal) {
		case SIGKILL:
		{
			struct thread *mainThread = thread->team->main_thread;
			// Forward KILLTHR to the main thread of the team

			mainThread->sig_pending |= SIGNAL_TO_MASK(SIGKILLTHR);

			// Wake up main thread
			if (mainThread->state == B_THREAD_SUSPENDED) {
				mainThread->state = mainThread->next_state = B_THREAD_READY;
				scheduler_enqueue_in_run_queue(mainThread);
			} else if (mainThread->state == B_THREAD_WAITING)
				signal_interrupt_thread(mainThread);

			// Supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED) {
				thread->state = thread->next_state = B_THREAD_READY;
				scheduler_enqueue_in_run_queue(thread);
			} else if (thread->state == B_THREAD_WAITING)
				signal_interrupt_thread(thread);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended
			if (thread->state == B_THREAD_SUSPENDED) {
				thread->state = thread->next_state = B_THREAD_READY;
				scheduler_enqueue_in_run_queue(thread);
			}

			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
				// remove any pending stop signals
			break;

		default:
			if (thread->sig_pending
				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
				// Interrupt thread if it was waiting
				if (thread->state == B_THREAD_WAITING)
					signal_interrupt_thread(thread);
			}
			break;
	}
	return B_OK;
}


int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	struct thread *thread;
	cpu_status state = 0;

	if (signal < 0 || signal > MAX_SIGNO)
		return B_BAD_VALUE;

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread

		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL)
			status = deliver_signal(thread, signal, flags);
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)

		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			struct team *team, *next;

			// Send a signal to all teams in this process group

			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// we don't stop because of an error sending the signal;
					// we rather want to send as many signals as possible
					status = deliver_signal(thread, signal, flags);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

		GRAB_THREAD_LOCK();
	}

	// ToDo: maybe the scheduler should only be invoked if there is reason to do it?
	//	(ie. deliver_signal() moved some threads in the running queue?)
	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
		scheduler_reschedule();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}
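
/* Usage sketch (illustrative only): a positive \a id addresses a single
 * thread; 0 and -1 address the current team, and any other negative value
 * the process group -id. E.g. killing a whole process group (groupID is a
 * hypothetical variable) without forcing an immediate reschedule:
 *
 *	send_signal_etc(-groupID, SIGKILL, B_DO_NOT_RESCHEDULE);
 */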


int
send_signal(pid_t threadID, uint signal)
{
	// The BeBook states that this function wouldn't be exported
	// for drivers, but, of course, it's wrong.
	return send_signal_etc(threadID, signal, 0);
}


int
has_signals_pending(void *_thread)
{
	struct thread *thread = (struct thread *)_thread;
	if (thread == NULL)
		thread = thread_get_current_thread();

	return atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
}


int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}
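
/* Usage sketch (illustrative only): block all blockable signals around a
 * sensitive section and restore the previous mask afterwards. KILL_SIGNALS
 * and SIGSTOP stay deliverable, since the new mask is filtered through
 * BLOCKABLE_SIGNALS above.
 *
 *	sigset_t all = ~(sigset_t)0;
 *	sigset_t oldMask;
 *	sigprocmask(SIG_BLOCK, &all, &oldMask);
 *	// ... work that should not be disturbed by handled signals ...
 *	sigprocmask(SIG_SETMASK, &oldMask, NULL);
 */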


/** \brief sigaction() for the specified thread.
 *
 *	A \a threadID < 0 specifies the current thread.
 */
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	struct thread *thread;
	cpu_status state;
	status_t error = B_OK;

	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			// set new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}


int
sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
{
	return sigaction_etc(-1, signal, act, oldAction);
}
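
/* Usage sketch (illustrative only; my_handler is a hypothetical function):
 * install a one-shot handler for SIGUSR1 that also restarts interrupted
 * syscalls. SA_ONESHOT makes handle_signals() reset the action to SIG_DFL
 * after the first delivery; SA_RESTART feeds arch_check_syscall_restart().
 *
 *	struct sigaction action;
 *	action.sa_handler = my_handler;
 *	action.sa_mask = 0;
 *	action.sa_flags = SA_ONESHOT | SA_RESTART;
 *	sigaction(SIGUSR1, &action, NULL);
 */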


/** Sends a SIGALRM signal to the thread that set the alarm and reschedules */
static int32
alarm_event(timer *t)
{
	// The hook can be called from any context, but we have to
	// deliver the signal to the thread that originally called
	// set_alarm().
	// Since thread->alarm is this timer structure, we can just
	// cast it back - ugly but it works for now
	struct thread *thread
		= (struct thread *)((uint8 *)t - offsetof(struct thread, alarm));
		// ToDo: investigate adding one user parameter to the timer structure
		//	to fix this hack

	TRACE(("alarm_event: thread = %p\n", thread));
	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);

	return B_INVOKE_SCHEDULER;
}


/** Sets the alarm timer for the current thread. The timer fires at the
 *	specified time in the future, periodically or just once, as determined
 *	by \a mode.
 *	\return the time left until a previously set alarm would have fired.
 */
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
	struct thread *thread = thread_get_current_thread();
	bigtime_t remainingTime = 0;

	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
		// just to be sure no one changes the headers some day

	TRACE(("set_alarm: thread = %p\n", thread));

	if (thread->alarm.period)
		remainingTime = (bigtime_t)thread->alarm.entry.key - system_time();

	cancel_timer(&thread->alarm);

	if (time != B_INFINITE_TIMEOUT)
		add_timer(&thread->alarm, &alarm_event, time, mode);
	else {
		// this marks the alarm as canceled (for returning the remaining time)
		thread->alarm.period = 0;
	}

	return remainingTime;
}
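
/* Usage sketch (illustrative only): arm a one-shot alarm that delivers
 * SIGALRM to the current thread in one second, then cancel it again.
 * Passing B_INFINITE_TIMEOUT only cancels; both calls return the time the
 * previously set alarm still had left.
 *
 *	set_alarm(1000000, B_ONE_SHOT_RELATIVE_ALARM);
 *	bigtime_t left = set_alarm(B_INFINITE_TIMEOUT, B_ONE_SHOT_RELATIVE_ALARM);
 */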


/** Replaces the current signal block mask and waits for any event to happen.
 *	Before returning, the original signal block mask is restored.
 */
int
sigsuspend(const sigset_t *mask)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);
	cpu_status state;

	// set the new block mask and suspend ourselves - we cannot use
	// SIGSTOP for this, as signals are only handled upon kernel exit

	atomic_set(&thread->sig_block_mask, *mask);

	while (true) {
		thread->next_state = B_THREAD_SUSPENDED;

		state = disable_interrupts();
		GRAB_THREAD_LOCK();

		scheduler_reschedule();

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		if (has_signals_pending(thread))
			break;
	}

	// restore the original block mask
	atomic_set(&thread->sig_block_mask, oldMask);

	// we're not supposed to actually succeed
	// ToDo: could this get us into trouble with SA_RESTART handlers?
	return B_INTERRUPTED;
}
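
/* Usage sketch (illustrative only): atomically unblock SIGUSR1 and wait
 * until some unblocked signal is pending; the call always returns
 * B_INTERRUPTED.
 *
 *	sigset_t waitMask
 *		= atomic_get(&thread_get_current_thread()->sig_block_mask);
 *	waitMask &= ~SIGNAL_TO_MASK(SIGUSR1);
 *	sigsuspend(&waitMask);
 */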


int
sigpending(sigset_t *set)
{
	struct thread *thread = thread_get_current_thread();

	if (set == NULL)
		return B_BAD_VALUE;

	*set = atomic_get(&thread->sig_pending);
	return B_OK;
}


//	#pragma mark -


bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
	return set_alarm(time, mode);
}


status_t
_user_send_signal(pid_t team, uint signal)
{
	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
}


status_t
_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
	sigset_t set, oldSet;
	status_t status;

	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
				sizeof(sigset_t)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigprocmask(how, userSet ? &set : NULL,
		userOldSet ? &oldSet : NULL);

	// copy old set if asked for
	if (status >= B_OK && userOldSet != NULL
		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigaction(int signal, const struct sigaction *userAction,
	struct sigaction *userOldAction)
{
	struct sigaction act, oact;
	status_t status;

	if ((userAction != NULL && user_memcpy(&act, userAction,
			sizeof(struct sigaction)) < B_OK)
		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
			sizeof(struct sigaction)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigaction(signal, userAction ? &act : NULL,
		userOldAction ? &oact : NULL);

	// only copy the old action if a pointer has been given
	if (status >= B_OK && userOldAction != NULL
		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigsuspend(const sigset_t *userMask)
{
	sigset_t mask;

	if (userMask == NULL)
		return B_BAD_VALUE;
	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return sigsuspend(&mask);
}


status_t
_user_sigpending(sigset_t *userSet)
{
	sigset_t set;
	int status;

	if (userSet == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userSet))
		return B_BAD_ADDRESS;

	status = sigpending(&set);
	if (status == B_OK
		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
{
	struct thread *thread = thread_get_current_thread();
	struct stack_t newStack, oldStack;
	bool onStack = false;

	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
			sizeof(stack_t)) < B_OK)
		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
			sizeof(stack_t)) < B_OK))
		return B_BAD_ADDRESS;

	if (thread->signal_stack_enabled) {
		// determine whether or not the user thread is currently
		// on the active signal stack
		onStack = arch_on_signal_stack(thread);
	}

	if (oldUserStack != NULL) {
		oldStack.ss_sp = (void *)thread->signal_stack_base;
		oldStack.ss_size = thread->signal_stack_size;
		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
			| (onStack ? SS_ONSTACK : 0);
	}

	if (newUserStack != NULL) {
		// no flags other than SS_DISABLE are allowed
		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
			return B_BAD_VALUE;

		if ((newStack.ss_flags & SS_DISABLE) == 0) {
			// check if the size is valid
			if (newStack.ss_size < MINSIGSTKSZ)
				return B_NO_MEMORY;
			if (onStack)
				return B_NOT_ALLOWED;
			if (!IS_USER_ADDRESS(newStack.ss_sp))
				return B_BAD_VALUE;

			thread->signal_stack_base = (addr_t)newStack.ss_sp;
			thread->signal_stack_size = newStack.ss_size;
			thread->signal_stack_enabled = true;
		} else
			thread->signal_stack_enabled = false;
	}

	// only copy the old stack info if a pointer has been given
	if (oldUserStack != NULL
		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
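
/* Usage sketch (illustrative only, assuming the userland wrapper for this
 * syscall is sigaltstack()): enable a dedicated signal stack for the
 * current thread.
 *
 *	stack_t stack;
 *	stack.ss_sp = malloc(MINSIGSTKSZ);
 *	stack.ss_size = MINSIGSTKSZ;
 *	stack.ss_flags = 0;
 *	sigaltstack(&stack, NULL);
 */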