* Implemented sigaltstack() and set_signal_stack(), thus closing bug #1401.
* On exec(), the new function thread_reset_for_exec() is called; it clears the signal state and cancels any pending alarm. Neither was done before.
* Some minor cleanups.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@21989 a95241bf-73f2-0310-859d-f6bbb57e9c96
Parent: f2286d02b9
Commit: 0b70ea5992
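For context on what this commit enables: sigaltstack() installs a dedicated stack on which handlers flagged with SA_ONSTACK are delivered, which is what makes catching a stack overflow from a signal handler possible at all. A minimal, hedged usage sketch (plain POSIX, not code from this commit; raise() stands in for a real fault):

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void
segv_handler(int sig)
{
	(void)sig;
	// async-signal-safe output; this runs on the alternate stack,
	// so it has room to work even if the main stack is exhausted
	write(STDERR_FILENO, "caught SIGSEGV\n", 15);
	_exit(1);
}

int
main(void)
{
	stack_t stack;
	struct sigaction action;

	// install a dedicated stack for signal delivery
	stack.ss_sp = malloc(SIGSTKSZ);
	stack.ss_size = SIGSTKSZ;
	stack.ss_flags = 0;
	if (sigaltstack(&stack, NULL) != 0)
		return 1;

	// SA_ONSTACK routes this handler onto the stack installed above
	sigemptyset(&action.sa_mask);
	action.sa_flags = SA_ONSTACK;
	action.sa_handler = segv_handler;
	sigaction(SIGSEGV, &action, NULL);

	raise(SIGSEGV);	// stand-in for a real stack overflow
	return 0;
}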
@@ -21,10 +21,13 @@ status_t arch_team_init_team_struct(struct team *t, bool kernel);
 status_t arch_thread_init_thread_struct(struct thread *t);
 status_t arch_thread_init_tls(struct thread *thread);
 void arch_thread_context_switch(struct thread *t_from, struct thread *t_to);
-status_t arch_thread_init_kthread_stack(struct thread *t, int (*start_func)(void), void (*entry_func)(void), void (*exit_func)(void));
+status_t arch_thread_init_kthread_stack(struct thread *t,
+	int (*start_func)(void), void (*entry_func)(void), void (*exit_func)(void));
 void arch_thread_dump_info(void *info);
-status_t arch_thread_enter_userspace(struct thread *t, addr_t entry, void *args1, void *args2);
-void arch_thread_switch_kstack_and_call(struct thread *t, addr_t new_kstack, void (*func)(void *), void *arg);
+status_t arch_thread_enter_userspace(struct thread *t, addr_t entry,
+	void *args1, void *args2);
+void arch_thread_switch_kstack_and_call(struct thread *t, addr_t new_kstack,
+	void (*func)(void *), void *arg);
 
 // ToDo: doing this this way is an ugly hack - please fix me!
 // (those functions are "static inline" for x86 - since
@@ -34,7 +37,9 @@ struct thread *arch_thread_get_current_thread(void);
 void arch_thread_set_current_thread(struct thread *t);
 #endif
 
-status_t arch_setup_signal_frame(struct thread *t, struct sigaction *sa, int sig, int sig_mask);
+bool arch_on_signal_stack(struct thread *thread);
+status_t arch_setup_signal_frame(struct thread *t, struct sigaction *sa,
+	int signal, int signalMask);
 int64 arch_restore_signal_frame(void);
 void arch_check_syscall_restart(struct thread *t);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
+ * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
  * Distributed under the terms of the MIT License.
  */
 #ifndef _KERNEL_SIGNAL_H
@@ -22,15 +22,18 @@ extern bool is_kill_signal_pending(void);
 extern int has_signals_pending(void *_thread);
 
 extern int sigaction_etc(thread_id threadID, int signal,
-	const struct sigaction *act, struct sigaction *oldAction);
+	const struct sigaction *newAction, struct sigaction *oldAction);
 
-extern int _user_send_signal(pid_t tid, uint sig);
-extern int _user_sigprocmask(int how, const sigset_t *set, sigset_t *oldSet);
-extern int _user_sigaction(int sig, const struct sigaction *action,
-	struct sigaction *oldAction);
+extern status_t _user_send_signal(pid_t tid, uint sig);
+extern status_t _user_sigprocmask(int how, const sigset_t *set,
+	sigset_t *oldSet);
+extern status_t _user_sigaction(int sig, const struct sigaction *newAction,
+	struct sigaction *oldAction);
 extern bigtime_t _user_set_alarm(bigtime_t time, uint32 mode);
-extern int _user_sigsuspend(const sigset_t *mask);
-extern int _user_sigpending(sigset_t *set);
+extern status_t _user_sigsuspend(const sigset_t *mask);
+extern status_t _user_sigpending(sigset_t *set);
+extern status_t _user_set_signal_stack(const stack_t *newUserStack,
+	stack_t *oldUserStack);
 
 #ifdef __cplusplus
 }
@@ -110,12 +110,16 @@ extern status_t _kern_get_next_team_info(int32 *cookie, team_info *info);
 extern status_t _kern_get_team_usage_info(team_id team, int32 who, team_usage_info *info, size_t size);
 
 // signal functions
-extern int _kern_send_signal(pid_t tid, uint sig);
-extern int _kern_sigprocmask(int how, const sigset_t *set, sigset_t *oldSet);
-extern int _kern_sigaction(int sig, const struct sigaction *action, struct sigaction *oldAction);
+extern status_t _kern_send_signal(pid_t tid, uint sig);
+extern status_t _kern_sigprocmask(int how, const sigset_t *set,
+	sigset_t *oldSet);
+extern status_t _kern_sigaction(int sig, const struct sigaction *action,
+	struct sigaction *oldAction);
 extern bigtime_t _kern_set_alarm(bigtime_t time, uint32 mode);
-extern int _kern_sigsuspend(const sigset_t *mask);
-extern int _kern_sigpending(sigset_t *set);
+extern status_t _kern_sigsuspend(const sigset_t *mask);
+extern status_t _kern_sigpending(sigset_t *set);
+extern status_t _kern_set_signal_stack(const stack_t *newStack,
+	stack_t *oldStack);
 
 // image functions
 extern image_id _kern_register_image(image_info *info, size_t size);
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -23,11 +23,12 @@ extern "C" {
 void thread_enqueue(struct thread *t, struct thread_queue *q);
 struct thread *thread_lookat_queue(struct thread_queue *q);
 struct thread *thread_dequeue(struct thread_queue *q);
-struct thread *thread_dequeue_id(struct thread_queue *q, thread_id thr_id);
+struct thread *thread_dequeue_id(struct thread_queue *q, thread_id id);
 
 void thread_at_kernel_entry(void);
 	// called when the thread enters the kernel on behalf of the thread
 void thread_at_kernel_exit(void);
+void thread_reset_for_exec(void);
 
 status_t thread_init(struct kernel_args *args);
 status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
@@ -140,6 +140,9 @@ struct thread {
 	sigset_t	sig_pending;
 	sigset_t	sig_block_mask;
 	struct sigaction sig_action[32];
+	addr_t		signal_stack_base;
+	size_t		signal_stack_size;
+	bool		signal_stack_enabled;
 
 	bool		in_kernel;
 	bool		was_yielded;
@@ -214,6 +214,13 @@ arch_thread_enter_userspace(struct thread *thread, addr_t entry, void *arg1, void *arg2)
 }
 
 
+bool
+arch_on_signal_stack(struct thread *thread)
+{
+	return false;
+}
+
+
 status_t
 arch_setup_signal_frame(struct thread *thread, struct sigaction *sa, int sig, int sigMask)
 {
@@ -76,29 +76,30 @@ x86_pop_iframe(struct iframe_stack *stack)
 }
 
 
-/** Returns the current iframe structure of the running thread.
- *	This function must only be called in a context where it's actually
- *	sure that such iframe exists; ie. from syscalls, but usually not
- *	from standard kernel threads.
- */
+/*!
+	Returns the current iframe structure of the running thread.
+	This function must only be called in a context where it's actually
+	sure that such iframe exists; ie. from syscalls, but usually not
+	from standard kernel threads.
+*/
 static struct iframe *
-i386_get_current_iframe(void)
+get_current_iframe(void)
 {
 	struct thread *thread = thread_get_current_thread();
 
 	ASSERT(thread->arch_info.iframes.index >= 0);
-	return thread->arch_info.iframes.frames[thread->arch_info.iframes.index - 1];
+	return thread->arch_info.iframes.frames[
+		thread->arch_info.iframes.index - 1];
 }
 
 
-/** \brief Returns the current thread's topmost (i.e. most recent)
- *	userland->kernel transition iframe (usually the first one, save for
- *	interrupts in signal handlers).
- *	\return The iframe, or \c NULL, if there is no such iframe (e.g. when
- *	the thread is a kernel thread).
- */
+/*!
+	\brief Returns the current thread's topmost (i.e. most recent)
+	userland->kernel transition iframe (usually the first one, save for
+	interrupts in signal handlers).
+	\return The iframe, or \c NULL, if there is no such iframe (e.g. when
+	the thread is a kernel thread).
+*/
 struct iframe *
 i386_get_user_iframe(void)
 {
@@ -158,6 +159,16 @@ set_tls_context(struct thread *thread)
 static uint32 *
 get_signal_stack(struct thread *thread, struct iframe *frame, int signal)
 {
+	// use the alternate signal stack if we should and can
+	if (thread->signal_stack_enabled
+		&& (thread->sig_action[signal].sa_flags & SA_ONSTACK) != 0
+		&& (frame->user_esp < thread->signal_stack_base
+			|| frame->user_esp > thread->signal_stack_base
+				+ thread->signal_stack_size)) {
+		return (uint32 *)(thread->signal_stack_base
+			+ thread->signal_stack_size);
+	}
+
 	return (uint32 *)frame->user_esp;
 }
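A note on get_signal_stack() above: x86 stacks grow downward, so the usable top of the alternate stack is signal_stack_base + signal_stack_size, and the switch only happens when the interrupted esp lies outside the alternate range. A nested signal already executing on the alternate stack keeps using it instead of restarting from the top, which would clobber live handler frames. A standalone sketch of the same decision, with hypothetical names (alt_stack, choose_handler_sp) standing in for the thread fields used above:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t addr_t;	// stand-in for the kernel typedef

// Hypothetical container mirroring the thread fields used above.
struct alt_stack {
	addr_t base;	// signal_stack_base
	size_t size;	// signal_stack_size
	bool enabled;	// signal_stack_enabled
	bool on_stack_requested;	// sig_action[sig].sa_flags & SA_ONSTACK
};

// Returns the stack pointer on which the handler frame is built.
static addr_t
choose_handler_sp(const struct alt_stack *alt, addr_t current_sp)
{
	bool outside = current_sp < alt->base
		|| current_sp > alt->base + alt->size;

	if (alt->enabled && alt->on_stack_requested && outside) {
		// switch stacks: start at the top, since x86 stacks grow down
		return alt->base + alt->size;
	}

	// no switch requested, or already on the alternate stack:
	// keep building frames where the interrupted code left off
	return current_sp;
}

int
main(void)
{
	struct alt_stack alt = { 0x10000, 0x4000, true, true };

	printf("%#lx\n", (unsigned long)choose_handler_sp(&alt, 0x80000));
		// prints 0x14000: switched to the top of the alternate stack
	printf("%#lx\n", (unsigned long)choose_handler_sp(&alt, 0x12000));
		// prints 0x12000: already inside the range, no switch
	return 0;
}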
@@ -360,11 +371,22 @@ arch_thread_enter_userspace(struct thread *t, addr_t entry, void *args1, void *args2)
 }
 
 
+bool
+arch_on_signal_stack(struct thread *thread)
+{
+	struct iframe *frame = get_current_iframe();
+
+	return frame->user_esp >= thread->signal_stack_base
+		&& frame->user_esp < thread->signal_stack_base
+			+ thread->signal_stack_size;
+}
+
+
 status_t
-arch_setup_signal_frame(struct thread *thread, struct sigaction *sa, int sig, int sigMask)
+arch_setup_signal_frame(struct thread *thread, struct sigaction *sa,
+	int signal, int signalMask)
 {
-	struct iframe *frame = i386_get_current_iframe();
+	struct iframe *frame = get_current_iframe();
 	uint32 *userStack = (uint32 *)frame->user_esp;
 	uint32 *signalCode;
 	uint32 *userRegs;
@@ -444,7 +466,7 @@ int64
 arch_restore_signal_frame(void)
 {
 	struct thread *thread = thread_get_current_thread();
-	struct iframe *frame = i386_get_current_iframe();
+	struct iframe *frame = get_current_iframe();
 	int32 signalMask;
 	uint32 *userStack;
 	struct vregs regs;
@@ -483,7 +505,7 @@ arch_restore_signal_frame(void)
 void
 arch_check_syscall_restart(struct thread *thread)
 {
-	struct iframe *frame = i386_get_current_iframe();
+	struct iframe *frame = get_current_iframe();
 	if (frame == NULL) {
 		// this thread is obviously new; we didn't come from an interrupt
 		return;
@@ -506,7 +528,7 @@ arch_check_syscall_restart(struct thread *thread)
 void
 arch_store_fork_frame(struct arch_fork_arg *arg)
 {
-	struct iframe *frame = i386_get_current_iframe();
+	struct iframe *frame = get_current_iframe();
 
 	// we need to copy the threads current iframe
 	arg->iframe = *frame;
@@ -591,44 +591,51 @@ _user_set_alarm(bigtime_t time, uint32 mode)
 }
 
 
-int
+status_t
 _user_send_signal(pid_t team, uint signal)
 {
 	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
 }
 
 
-int
+status_t
 _user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
 {
 	sigset_t set, oldSet;
 	status_t status;
 
 	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
-		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet, sizeof(sigset_t)) < B_OK))
+		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
+				sizeof(sigset_t)) < B_OK))
 		return B_BAD_ADDRESS;
 
-	status = sigprocmask(how, userSet ? &set : NULL, userOldSet ? &oldSet : NULL);
+	status = sigprocmask(how, userSet ? &set : NULL,
+		userOldSet ? &oldSet : NULL);
 
 	// copy old set if asked for
-	if (status >= B_OK && userOldSet != NULL && user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
+	if (status >= B_OK && userOldSet != NULL
+		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
 		return B_BAD_ADDRESS;
 
 	return status;
 }
 
 
-int
-_user_sigaction(int signal, const struct sigaction *userAction, struct sigaction *userOldAction)
+status_t
+_user_sigaction(int signal, const struct sigaction *userAction,
+	struct sigaction *userOldAction)
 {
 	struct sigaction act, oact;
 	status_t status;
 
-	if ((userAction != NULL && user_memcpy(&act, userAction, sizeof(struct sigaction)) < B_OK)
-		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction, sizeof(struct sigaction)) < B_OK))
+	if ((userAction != NULL && user_memcpy(&act, userAction,
+			sizeof(struct sigaction)) < B_OK)
+		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
+				sizeof(struct sigaction)) < B_OK))
 		return B_BAD_ADDRESS;
 
-	status = sigaction(signal, userAction ? &act : NULL, userOldAction ? &oact : NULL);
+	status = sigaction(signal, userAction ? &act : NULL,
+		userOldAction ? &oact : NULL);
 
 	// only copy the old action if a pointer has been given
 	if (status >= B_OK && userOldAction != NULL
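All of the _user_*() wrappers above follow the same shape: copy userland arguments into kernel buffers with user_memcpy() before touching them, call the in-kernel implementation, then copy results back out, returning B_BAD_ADDRESS on any faulting copy. A hedged sketch of that pattern in isolation; the typedefs, user_memcpy() stub and kernel_op() are stand-ins so it compiles outside the kernel tree:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins: in the real kernel these come from the support headers
 * and the VM layer, and B_BAD_ADDRESS has its own numeric value. */
typedef long status_t;
#define B_OK			0
#define B_BAD_ADDRESS	(-1)

/* In the kernel, user_memcpy() returns an error instead of faulting
 * when a userland pointer is bad; plain memcpy stands in here. */
static status_t
user_memcpy(void *to, const void *from, size_t size)
{
	memcpy(to, from, size);
	return B_OK;
}

/* Placeholder for the kernel-internal implementation. */
static status_t
kernel_op(const int *in, int *out)
{
	if (out != NULL)
		*out = (in != NULL ? *in : 0) + 1;
	return B_OK;
}

/* The copy-in/copy-out pattern shared by the wrappers above. */
static status_t
user_wrapper(const int *userIn, int *userOut)
{
	int in, out;
	status_t status;

	// copy-in: never dereference a userland pointer directly
	if (userIn != NULL && user_memcpy(&in, userIn, sizeof(in)) < B_OK)
		return B_BAD_ADDRESS;

	status = kernel_op(userIn != NULL ? &in : NULL,
		userOut != NULL ? &out : NULL);

	// copy-out only on success, and only if the caller asked for it
	if (status >= B_OK && userOut != NULL
		&& user_memcpy(userOut, &out, sizeof(out)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}

int
main(void)
{
	int in = 41, out = 0;
	status_t status = user_wrapper(&in, &out);
	printf("status %ld, out %d\n", status, out);	// status 0, out 42
	return 0;
}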
@@ -639,7 +646,7 @@ _user_sigaction(int signal, const struct sigaction *userAction, struct sigaction *userOldAction)
 }
 
 
-int
+status_t
 _user_sigsuspend(const sigset_t *userMask)
 {
 	sigset_t mask;
@@ -653,7 +660,7 @@ _user_sigsuspend(const sigset_t *userMask)
 }
 
 
-int
+status_t
 _user_sigpending(sigset_t *userSet)
 {
 	sigset_t set;
@@ -672,3 +679,59 @@ _user_sigpending(sigset_t *userSet)
 
 	return status;
 }
+
+
+status_t
+_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
+{
+	struct thread *thread = thread_get_current_thread();
+	struct stack_t newStack, oldStack;
+	bool onStack = false;
+
+	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
+			sizeof(stack_t)) < B_OK)
+		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
+				sizeof(stack_t)) < B_OK))
+		return B_BAD_ADDRESS;
+
+	if (thread->signal_stack_enabled) {
+		// determine whether or not the user thread is currently
+		// on the active signal stack
+		onStack = arch_on_signal_stack(thread);
+	}
+
+	if (oldUserStack != NULL) {
+		oldStack.ss_sp = (void *)thread->signal_stack_base;
+		oldStack.ss_size = thread->signal_stack_size;
+		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
+			| (onStack ? SS_ONSTACK : 0);
+	}
+
+	if (newUserStack != NULL) {
+		// no flags other than SS_DISABLE are allowed
+		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
+			return B_BAD_VALUE;
+
+		if ((newStack.ss_flags & SS_DISABLE) == 0) {
+			// check if the size is valid
+			if (newStack.ss_size < MINSIGSTKSZ)
+				return B_NO_MEMORY;
+			if (onStack)
+				return B_NOT_ALLOWED;
+			if (!IS_USER_ADDRESS(newStack.ss_sp))
+				return B_BAD_VALUE;
+
+			thread->signal_stack_base = (addr_t)newStack.ss_sp;
+			thread->signal_stack_size = newStack.ss_size;
+			thread->signal_stack_enabled = true;
+		} else
+			thread->signal_stack_enabled = false;
+	}
+
+	// only copy the old stack info if a pointer has been given
+	if (oldUserStack != NULL
+		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
+		return B_BAD_ADDRESS;
+
+	return B_OK;
+}
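The syscall above also encodes the POSIX sigaltstack() error contract: ss_flags may carry nothing but SS_DISABLE, a stack smaller than MINSIGSTKSZ is rejected (B_NO_MEMORY, i.e. ENOMEM), and the stack cannot be replaced while the thread is executing on it (B_NOT_ALLOWED, i.e. EPERM). From userland, a hedged sketch of those semantics (plain POSIX, not code from this commit):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	stack_t tiny, old;

	// querying without installing: pass NULL for the new stack
	if (sigaltstack(NULL, &old) == 0) {
		printf("enabled: %s, currently on it: %s\n",
			(old.ss_flags & SS_DISABLE) ? "no" : "yes",
			(old.ss_flags & SS_ONSTACK) ? "yes" : "no");
	}

	// a stack below MINSIGSTKSZ must be rejected with ENOMEM
	tiny.ss_sp = malloc(MINSIGSTKSZ / 2);
	tiny.ss_size = MINSIGSTKSZ / 2;
	tiny.ss_flags = 0;
	if (sigaltstack(&tiny, NULL) != 0 && errno == ENOMEM)
		printf("undersized stack rejected, as required\n");

	return 0;
}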
@@ -984,7 +984,8 @@ exec_team(const char *path, int32 argCount, char * const *args,
 	struct thread *thread;
 	thread_id nubThreadID = -1;
 
-	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %lx\n", args[0], argCount, envCount, team->id));
+	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %lx\n",
+		args[0], argCount, envCount, team->id));
 
 	// switching the kernel at run time is probably not a good idea :)
 	if (team == team_get_kernel_team())
@@ -1031,9 +1032,10 @@ exec_team(const char *path, int32 argCount, char * const *args,
 	teamArgs->args[0] = strdup(path);
 
 	// ToDo: remove team resources if there are any left
-	//	alarm, signals
 	//	thread_atkernel_exit() might not be called at all
 
+	thread_reset_for_exec();
+
 	user_debug_prepare_for_exec();
 
 	vm_delete_areas(team->address_space);
@@ -166,6 +166,18 @@ thread_struct_hash(void *_t, const void *_key, uint32 range)
 }
 
 
+static void
+reset_signals(struct thread *thread)
+{
+	thread->sig_pending = 0;
+	thread->sig_block_mask = 0;
+	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
+	thread->signal_stack_base = 0;
+	thread->signal_stack_size = 0;
+	thread->signal_stack_enabled = false;
+}
+
+
 /*!
 	Allocates and fills in thread structure (or reuses one from the
 	dead queue).
@@ -224,9 +236,7 @@ create_thread_struct(struct thread *inthread, const char *name,
 	thread->priority = thread->next_priority = -1;
 	thread->args1 = NULL;  thread->args2 = NULL;
 	thread->alarm.period = 0;
-	thread->sig_pending = 0;
-	thread->sig_block_mask = 0;
-	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
+	reset_signals(thread);
 	thread->in_kernel = true;
 	thread->was_yielded = false;
 	thread->user_time = 0;
@@ -499,6 +509,143 @@ create_thread(const char *name, team_id teamID, thread_entry_func entry,
 }
 
 
+/*!
+	Finds a free death stack for us and allocates it.
+	Must be called with interrupts enabled.
+*/
+static uint32
+get_death_stack(void)
+{
+	cpu_status state;
+	uint32 bit;
+	int32 i;
+
+	acquire_sem(sDeathStackSem);
+
+	state = disable_interrupts();
+
+	// grab the thread lock, find a free spot and release
+	GRAB_THREAD_LOCK();
+	bit = sDeathStackBitmap;
+	bit = (~bit) & ~((~bit) - 1);
+	sDeathStackBitmap |= bit;
+	RELEASE_THREAD_LOCK();
+
+	restore_interrupts(state);
+
+	// sanity checks
+	if (!bit)
+		panic("get_death_stack: couldn't find free stack!\n");
+
+	if (bit & (bit - 1))
+		panic("get_death_stack: impossible bitmap result!\n");
+
+	// bit to number
+	for (i = -1; bit; i++) {
+		bit >>= 1;
+	}
+
+	TRACE(("get_death_stack: returning 0x%lx\n", sDeathStacks[i].address));
+
+	return (uint32)i;
+}
+
+
+/*! Returns the thread's death stack to the pool. */
+static void
+put_death_stack(uint32 index)
+{
+	cpu_status state;
+
+	TRACE(("put_death_stack...: passed %lu\n", index));
+
+	if (index >= sNumDeathStacks)
+		panic("put_death_stack: passed invalid stack index %ld\n", index);
+
+	if (!(sDeathStackBitmap & (1 << index)))
+		panic("put_death_stack: passed invalid stack index %ld\n", index);
+
+	state = disable_interrupts();
+
+	GRAB_THREAD_LOCK();
+	sDeathStackBitmap &= ~(1 << index);
+	RELEASE_THREAD_LOCK();
+
+	restore_interrupts(state);
+
+	release_sem_etc(sDeathStackSem, 1, B_DO_NOT_RESCHEDULE);
+		// we must not have acquired the thread lock when releasing a semaphore
+}
+
+
+static void
+thread_exit2(void *_args)
+{
+	struct thread_exit_args args;
+
+	// copy the arguments over, since the source is probably on the kernel
+	// stack we're about to delete
+	memcpy(&args, _args, sizeof(struct thread_exit_args));
+
+	// we can't leave the interrupts disabled at this point
+	enable_interrupts();
+
+	TRACE(("thread_exit2, running on death stack 0x%lx\n", args.death_stack));
+
+	// delete the old kernel stack area
+	TRACE(("thread_exit2: deleting old kernel stack id 0x%lx for thread 0x%lx\n",
+		args.old_kernel_stack, args.thread->id));
+
+	delete_area(args.old_kernel_stack);
+
+	// remove this thread from all of the global lists
+	TRACE(("thread_exit2: removing thread 0x%lx from global lists\n",
+		args.thread->id));
+
+	disable_interrupts();
+	GRAB_TEAM_LOCK();
+
+	remove_thread_from_team(team_get_kernel_team(), args.thread);
+
+	RELEASE_TEAM_LOCK();
+	enable_interrupts();
+		// needed for the debugger notification below
+
+	TRACE(("thread_exit2: done removing thread from lists\n"));
+
+	if (args.death_sem >= 0)
+		release_sem_etc(args.death_sem, 1, B_DO_NOT_RESCHEDULE);
+
+	// notify the debugger
+	if (args.original_team_id >= 0
+		&& args.original_team_id != team_get_kernel_team_id()) {
+		user_debug_thread_deleted(args.original_team_id, args.thread->id);
+	}
+
+	disable_interrupts();
+
+	// Set the next state to be gone: this will cause the thread structure
+	// to be returned to a ready pool upon reschedule.
+	// Note, we need to have disabled interrupts at this point, or else
+	// we could get rescheduled too early.
+	args.thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
+
+	// return the death stack and reschedule one last time
+
+	put_death_stack(args.death_stack);
+
+	GRAB_THREAD_LOCK();
+	scheduler_reschedule();
+		// requires thread lock to be held
+
+	// never get to here
+	panic("thread_exit2: made it where it shouldn't have!\n");
+}
+
+
+// #pragma mark - debugger calls
+
+
 static int
 make_thread_unreal(int argc, char **argv)
 {
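The free-slot search in get_death_stack() above packs a classic bitmap trick into one line: with bit = sDeathStackBitmap, the expression (~bit) & ~((~bit) - 1) isolates the lowest clear bit, i.e. the first free stack, because x & ~(x - 1) keeps only the lowest set bit of x. A hedged standalone demonstration:

#include <stdint.h>
#include <stdio.h>

// Lowest set bit of x is x & ~(x - 1) (equivalently x & -x); applied
// to ~bitmap it isolates the lowest *clear* bit, i.e. the first free
// slot, exactly as get_death_stack() above computes it.
static uint32_t
lowest_free_slot_mask(uint32_t bitmap)
{
	return ~bitmap & ~(~bitmap - 1);
}

int
main(void)
{
	uint32_t bitmap = 0x0b;	// binary 1011: slots 0, 1 and 3 taken
	uint32_t mask = lowest_free_slot_mask(bitmap);
	int index = -1;

	// single-bit mask to index, like the kernel's "bit to number" loop
	for (uint32_t m = mask; m != 0; m >>= 1)
		index++;

	printf("mask %#x -> first free slot %d\n", mask, index);
		// prints: mask 0x4 -> first free slot 2
	return 0;
}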
@@ -927,138 +1074,7 @@ dump_next_thread_in_team(int argc, char **argv)
 }
 
 
-/*!
-	Finds a free death stack for us and allocates it.
-	Must be called with interrupts enabled.
-*/
-static uint32
-get_death_stack(void)
-{
-	cpu_status state;
-	uint32 bit;
-	int32 i;
-
-	acquire_sem(sDeathStackSem);
-
-	state = disable_interrupts();
-
-	// grab the thread lock, find a free spot and release
-	GRAB_THREAD_LOCK();
-	bit = sDeathStackBitmap;
-	bit = (~bit) & ~((~bit) - 1);
-	sDeathStackBitmap |= bit;
-	RELEASE_THREAD_LOCK();
-
-	restore_interrupts(state);
-
-	// sanity checks
-	if (!bit)
-		panic("get_death_stack: couldn't find free stack!\n");
-
-	if (bit & (bit - 1))
-		panic("get_death_stack: impossible bitmap result!\n");
-
-	// bit to number
-	for (i = -1; bit; i++) {
-		bit >>= 1;
-	}
-
-	TRACE(("get_death_stack: returning 0x%lx\n", sDeathStacks[i].address));
-
-	return (uint32)i;
-}
-
-
-/*! Returns the thread's death stack to the pool. */
-static void
-put_death_stack(uint32 index)
-{
-	cpu_status state;
-
-	TRACE(("put_death_stack...: passed %lu\n", index));
-
-	if (index >= sNumDeathStacks)
-		panic("put_death_stack: passed invalid stack index %ld\n", index);
-
-	if (!(sDeathStackBitmap & (1 << index)))
-		panic("put_death_stack: passed invalid stack index %ld\n", index);
-
-	state = disable_interrupts();
-
-	GRAB_THREAD_LOCK();
-	sDeathStackBitmap &= ~(1 << index);
-	RELEASE_THREAD_LOCK();
-
-	restore_interrupts(state);
-
-	release_sem_etc(sDeathStackSem, 1, B_DO_NOT_RESCHEDULE);
-		// we must not have acquired the thread lock when releasing a semaphore
-}
-
-
-static void
-thread_exit2(void *_args)
-{
-	struct thread_exit_args args;
-
-	// copy the arguments over, since the source is probably on the kernel
-	// stack we're about to delete
-	memcpy(&args, _args, sizeof(struct thread_exit_args));
-
-	// we can't leave the interrupts disabled at this point
-	enable_interrupts();
-
-	TRACE(("thread_exit2, running on death stack 0x%lx\n", args.death_stack));
-
-	// delete the old kernel stack area
-	TRACE(("thread_exit2: deleting old kernel stack id 0x%lx for thread 0x%lx\n",
-		args.old_kernel_stack, args.thread->id));
-
-	delete_area(args.old_kernel_stack);
-
-	// remove this thread from all of the global lists
-	TRACE(("thread_exit2: removing thread 0x%lx from global lists\n",
-		args.thread->id));
-
-	disable_interrupts();
-	GRAB_TEAM_LOCK();
-
-	remove_thread_from_team(team_get_kernel_team(), args.thread);
-
-	RELEASE_TEAM_LOCK();
-	enable_interrupts();
-		// needed for the debugger notification below
-
-	TRACE(("thread_exit2: done removing thread from lists\n"));
-
-	if (args.death_sem >= 0)
-		release_sem_etc(args.death_sem, 1, B_DO_NOT_RESCHEDULE);
-
-	// notify the debugger
-	if (args.original_team_id >= 0
-		&& args.original_team_id != team_get_kernel_team_id()) {
-		user_debug_thread_deleted(args.original_team_id, args.thread->id);
-	}
-
-	disable_interrupts();
-
-	// Set the next state to be gone: this will cause the thread structure
-	// to be returned to a ready pool upon reschedule.
-	// Note, we need to have disabled interrupts at this point, or else
-	// we could get rescheduled too early.
-	args.thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
-
-	// return the death stack and reschedule one last time
-
-	put_death_stack(args.death_stack);
-
-	GRAB_THREAD_LOCK();
-	scheduler_reschedule();
-		// requires thread lock to be held
-
-	// never get to here
-	panic("thread_exit2: made it where it shouldn't have!\n");
-}
 // #pragma mark - private kernel API
 
 
 void
@@ -1358,7 +1374,14 @@ thread_at_kernel_exit(void)
 }
 
 
 // #pragma mark - private kernel API
 
 
+void
+thread_reset_for_exec(void)
+{
+	struct thread *thread = thread_get_current_thread();
+
+	cancel_timer(&thread->alarm);
+	reset_signals(thread);
+}
+
+
 /*! Insert a thread to the tail of a queue */
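thread_reset_for_exec() implements the POSIX rule that signals set to be caught are reset to the default action across exec() (dispositions of SIG_IGN and SIG_DFL are kept); Haiku additionally cancels the thread's set_alarm() timer here. A hedged userland illustration; re-running the binary from argv[0] and the "after-exec" flag are only scaffolding for the demo:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig) { (void)sig; }

int
main(int argc, char **argv)
{
	struct sigaction action;

	if (argc > 1 && strcmp(argv[1], "after-exec") == 0) {
		// the caught disposition must not survive exec()
		sigaction(SIGUSR1, NULL, &action);
		printf("handler after exec: %s\n",
			action.sa_handler == SIG_DFL ? "reset to SIG_DFL" : "leaked!");
		return 0;
	}

	sigemptyset(&action.sa_mask);
	action.sa_flags = 0;
	action.sa_handler = handler;
	sigaction(SIGUSR1, &action, NULL);

	// re-run ourselves; the kernel resets caught signals on exec
	// (assumes the program was invoked by a path usable as argv[0])
	execl(argv[0], argv[0], "after-exec", (char *)NULL);
	perror("execl");
	return 1;
}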
@@ -6,7 +6,9 @@ MergeObject posix_signal.o :
	kill.c
	raise.c
	send_signal.c
+	set_signal_stack.c
	sigaction.c
+	sigaltstack.c
	signal.c
	sigpending.c
	sigprocmask.c
src/system/libroot/posix/signal/set_signal_stack.c (new file, 28 lines)
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2007, Axel Dörfler, axeld@pinc-software.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <errno.h>
+#include <signal.h>
+
+#include <syscalls.h>
+
+
+void
+set_signal_stack(void *ptr, size_t size)
+{
+	stack_t alternateStack;
+	status_t status;
+
+	alternateStack.ss_sp = ptr;
+	alternateStack.ss_size = size;
+	alternateStack.ss_flags = 0;
+
+	status = _kern_set_signal_stack(&alternateStack, NULL);
+	if (status < B_OK)
+		errno = status;
+}
src/system/libroot/posix/signal/sigaltstack.c (new file, 23 lines)
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2007, Axel Dörfler, axeld@pinc-software.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <errno.h>
+#include <signal.h>
+
+#include <syscalls.h>
+
+
+int
+sigaltstack(const stack_t *alternateStack, stack_t *oldAlternateStack)
+{
+	status_t status = _kern_set_signal_stack(alternateStack, oldAlternateStack);
+	if (status < B_OK) {
+		errno = status;
+		return -1;
+	}
+
+	return 0;
+}
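Both new wrappers lean on the Haiku convention that status_t error codes are negative (B_OK is 0) and that POSIX errno values share those same codes, so the status can be stored into errno unchanged; sigaltstack() additionally returns -1 as POSIX requires. To close the loop, a hedged sketch showing how a handler can confirm it really runs on the stack installed through this path (plain POSIX; sigaltstack() is async-signal-safe, so querying it from a handler is allowed):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
on_usr1(int sig)
{
	stack_t current;

	(void)sig;
	// inside an SA_ONSTACK handler the kernel reports SS_ONSTACK,
	// exactly as _user_set_signal_stack() computes it above
	if (sigaltstack(NULL, &current) == 0
		&& (current.ss_flags & SS_ONSTACK) != 0)
		write(STDOUT_FILENO, "running on the alternate stack\n", 31);
}

int
main(void)
{
	stack_t stack;
	struct sigaction action;

	stack.ss_sp = malloc(SIGSTKSZ);
	stack.ss_size = SIGSTKSZ;
	stack.ss_flags = 0;
	if (sigaltstack(&stack, NULL) != 0) {
		perror("sigaltstack");
		return 1;
	}

	sigemptyset(&action.sa_mask);
	action.sa_flags = SA_ONSTACK;
	action.sa_handler = on_usr1;
	sigaction(SIGUSR1, &action, NULL);

	raise(SIGUSR1);
	return 0;
}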