/*
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*! Threading routines */


#include <thread.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

#include <OS.h>

#include <util/AutoLock.h>
#include <util/khash.h>

#include <boot/kernel_args.h>
#include <condition_variable.h>
#include <cpu.h>
#include <int.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <smp.h>
#include <syscalls.h>
#include <syscall_restart.h>
#include <team.h>
#include <tls.h>
#include <user_runtime.h>
#include <vfs.h>
#include <vm.h>
#include <vm_address_space.h>
#include <wait_for_objects.h>


//#define TRACE_THREAD
#ifdef TRACE_THREAD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define THREAD_MAX_MESSAGE_SIZE		65536
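	// 64 kB upper bound on send_data()/receive_data() messages,
	// enforced in send_data_etc() below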


// used to pass messages between thread_exit and thread_exit2
struct thread_exit_args {
	struct thread *thread;
	area_id old_kernel_stack;
	uint32 death_stack;
	sem_id death_sem;
	team_id original_team_id;
};

struct thread_key {
	thread_id id;
};

// global
spinlock thread_spinlock = 0;
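	// guards the thread structures and the global thread hash table; it is
	// taken via the GRAB_THREAD_LOCK()/RELEASE_THREAD_LOCK() macros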

// thread list
static struct thread sIdleThreads[B_MAX_CPU_COUNT];
static hash_table *sThreadHash = NULL;
static thread_id sNextThreadID = 1;

// some arbitrarily chosen limits - should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxThreads = 4096;
static int32 sUsedThreads = 0;

static sem_id sSnoozeSem = -1;

// death stacks - used temporarily as a thread cleans itself up
struct death_stack {
	area_id area;
	addr_t address;
	bool in_use;
};
static struct death_stack *sDeathStacks;
static unsigned int sNumDeathStacks;
static unsigned int volatile sDeathStackBitmap;
static sem_id sDeathStackSem;
static spinlock sDeathStackLock = 0;
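	// A dying thread cannot delete the kernel stack it is still running on,
	// so thread_exit() switches to one of these preallocated death stacks
	// for its final cleanup; sDeathStackBitmap marks the stacks in use and
	// sDeathStackSem counts the free ones.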

// The dead queue is used as a pool from which to retrieve and reuse previously
// allocated thread structs when creating a new thread. It should be gone once
// the slab allocator is in.
struct thread_queue dead_q;

static void thread_kthread_entry(void);
static void thread_kthread_exit(void);


/*!
	Inserts a thread into a team.
	You must hold the team lock when you call this function.
*/
static void
insert_thread_into_team(struct team *team, struct thread *thread)
{
	thread->team_next = team->thread_list;
	team->thread_list = thread;
	team->num_threads++;

	if (team->num_threads == 1) {
		// this was the first thread
		team->main_thread = thread;
	}
	thread->team = team;
}


/*!
	Removes a thread from a team.
	You must hold the team lock when you call this function.
*/
static void
remove_thread_from_team(struct team *team, struct thread *thread)
{
	struct thread *temp, *last = NULL;

	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
		if (temp == thread) {
			if (last == NULL)
				team->thread_list = temp->team_next;
			else
				last->team_next = temp->team_next;

			team->num_threads--;
			break;
		}
		last = temp;
	}
}


static int
thread_struct_compare(void *_t, const void *_key)
{
	struct thread *thread = (struct thread*)_t;
	const struct thread_key *key = (const struct thread_key*)_key;

	if (thread->id == key->id)
		return 0;

	return 1;
}


static uint32
thread_struct_hash(void *_t, const void *_key, uint32 range)
{
	struct thread *thread = (struct thread*)_t;
	const struct thread_key *key = (const struct thread_key*)_key;

	if (thread != NULL)
		return thread->id % range;

	return (uint32)key->id % range;
}


static void
reset_signals(struct thread *thread)
{
	thread->sig_pending = 0;
	thread->sig_block_mask = 0;
	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
	thread->signal_stack_base = 0;
	thread->signal_stack_size = 0;
	thread->signal_stack_enabled = false;
}


/*!
	Allocates and fills in thread structure (or reuses one from the
	dead queue).

	\param threadID The ID to be assigned to the new thread. If
		\code < 0 \endcode a fresh one is allocated.
	\param inthread The thread structure to initialize; if NULL, one is
		allocated anew (or recycled from the dead queue).
*/
static struct thread *
create_thread_struct(struct thread *inthread, const char *name,
	thread_id threadID, struct cpu_ent *cpu)
{
	struct thread *thread;
	cpu_status state;
	char temp[64];

	if (inthread == NULL) {
		// try to recycle one from the dead queue first
		state = disable_interrupts();
		GRAB_THREAD_LOCK();
		thread = thread_dequeue(&dead_q);
		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		// if not, create a new one
		if (thread == NULL) {
			thread = (struct thread *)malloc(sizeof(struct thread));
			if (thread == NULL)
				return NULL;
		}
	} else {
		thread = inthread;
	}

	if (name != NULL)
		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
	else
		strcpy(thread->name, "unnamed thread");

	thread->flags = 0;
	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
	thread->team = NULL;
	thread->cpu = cpu;
	thread->sem.blocking = -1;
	thread->condition_variable_entry = NULL;
	thread->fault_handler = 0;
	thread->page_faults_allowed = 1;
	thread->kernel_stack_area = -1;
	thread->kernel_stack_base = 0;
	thread->user_stack_area = -1;
	thread->user_stack_base = 0;
	thread->user_local_storage = 0;
	thread->kernel_errno = 0;
	thread->team_next = NULL;
	thread->queue_next = NULL;
	thread->priority = thread->next_priority = -1;
	thread->args1 = NULL; thread->args2 = NULL;
	thread->alarm.period = 0;
	reset_signals(thread);
	thread->in_kernel = true;
	thread->was_yielded = false;
	thread->user_time = 0;
	thread->kernel_time = 0;
	thread->last_time = 0;
	thread->exit.status = 0;
	thread->exit.reason = 0;
	thread->exit.signal = 0;
	list_init(&thread->exit.waiters);
	thread->select_infos = NULL;

	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
	thread->exit.sem = create_sem(0, temp);
	if (thread->exit.sem < B_OK)
		goto err1;

	sprintf(temp, "%s send", thread->name);
	thread->msg.write_sem = create_sem(1, temp);
	if (thread->msg.write_sem < B_OK)
		goto err2;

	sprintf(temp, "%s receive", thread->name);
	thread->msg.read_sem = create_sem(0, temp);
	if (thread->msg.read_sem < B_OK)
		goto err3;

	if (arch_thread_init_thread_struct(thread) < B_OK)
		goto err4;

	return thread;

err4:
	delete_sem(thread->msg.read_sem);
err3:
	delete_sem(thread->msg.write_sem);
err2:
	delete_sem(thread->exit.sem);
err1:
	// ToDo: put them in the dead queue instead?
	if (inthread == NULL)
		free(thread);
	return NULL;
}


static void
delete_thread_struct(struct thread *thread)
{
	delete_sem(thread->exit.sem);
	delete_sem(thread->msg.write_sem);
	delete_sem(thread->msg.read_sem);

	// ToDo: put them in the dead queue instead?
	free(thread);
}


/*! This function gets run by a new thread before anything else */
static void
thread_kthread_entry(void)
{
	struct thread *thread = thread_get_current_thread();

	// simulates the thread spinlock release that would occur if the thread
	// had been rescheduled from. The reschedule didn't happen because the
	// thread is new.
	RELEASE_THREAD_LOCK();

	// start tracking time
	thread->last_time = system_time();

	enable_interrupts(); // this essentially simulates a return-from-interrupt
}


static void
thread_kthread_exit(void)
{
	struct thread *thread = thread_get_current_thread();

	thread->exit.reason = THREAD_RETURN_EXIT;
	thread_exit();
}


/*!
	Initializes the thread and jumps to its userspace entry point.
	This function is called at creation time of every user thread,
	but not for a team's main thread.
*/
static int
_create_user_thread_kentry(void)
{
	struct thread *thread = thread_get_current_thread();

	// a signal may have been delivered here
	thread_at_kernel_exit();

	// jump to the entry point in user space
	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
		thread->args1, thread->args2);

	// only get here if the above call fails
	return 0;
}


/*! Initializes the thread and calls its kernel space entry point. */
static int
_create_kernel_thread_kentry(void)
{
	struct thread *thread = thread_get_current_thread();
	int (*func)(void *args) = (int (*)(void *))thread->entry;

	// call the entry function with the appropriate args
	return func(thread->args1);
}


/*!
	Creates a new thread in the team with the specified team ID.

	\param threadID The ID to be assigned to the new thread. If
		\code < 0 \endcode a fresh one is allocated.
*/
static thread_id
create_thread(const char *name, team_id teamID, thread_entry_func entry,
	void *args1, void *args2, int32 priority, bool kernel, thread_id threadID)
{
	struct thread *thread, *currentThread;
	struct team *team;
	cpu_status state;
	char stack_name[B_OS_NAME_LENGTH];
	status_t status;
	bool abort = false;
	bool debugNewThread = false;

	TRACE(("create_thread(%s, id = %ld, %s)\n", name, threadID,
		kernel ? "kernel" : "user"));

	thread = create_thread_struct(NULL, name, threadID, NULL);
	if (thread == NULL)
		return B_NO_MEMORY;

	thread->priority = priority == -1 ? B_NORMAL_PRIORITY : priority;
	thread->next_priority = thread->priority;
	// ToDo: this could be dangerous in case someone calls resume_thread() on us
	thread->state = B_THREAD_SUSPENDED;
	thread->next_state = B_THREAD_SUSPENDED;

	// init debug structure
	clear_thread_debug_info(&thread->debug_info, false);

	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", name, thread->id);
	thread->kernel_stack_area = create_area(stack_name,
		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
		KERNEL_STACK_SIZE, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);

	if (thread->kernel_stack_area < 0) {
		// we're not yet part of a team, so we can just bail out
		status = thread->kernel_stack_area;

		dprintf("create_thread: error creating kernel stack: %s!\n",
			strerror(status));

		delete_thread_struct(thread);
		return status;
	}

	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	// If the new thread belongs to the same team as the current thread,
	// it may inherit some of the thread debug flags.
	currentThread = thread_get_current_thread();
	if (currentThread && currentThread->team->id == teamID) {
		// inherit all user flags...
		int32 debugFlags = currentThread->debug_info.flags
			& B_THREAD_DEBUG_USER_FLAG_MASK;

		// ... save the syscall tracing flags, unless explicitly specified
		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
				| B_THREAD_DEBUG_POST_SYSCALL);
		}

		thread->debug_info.flags = debugFlags;

		// stop the new thread, if desired
		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
	}

	// insert into global list
	hash_insert(sThreadHash, thread);
	sUsedThreads++;
	RELEASE_THREAD_LOCK();

	GRAB_TEAM_LOCK();
	// look at the team, make sure it's not being deleted
	team = team_get_team_struct_locked(teamID);
	if (team != NULL && team->state != TEAM_STATE_DEATH) {
		// Debug the new thread, if the parent thread required that (see above),
		// or the respective global team debug flag is set. But only, if a
		// debugger is installed for the team.
		debugNewThread |= (atomic_get(&team->debug_info.flags)
			& B_TEAM_DEBUG_STOP_NEW_THREADS);
		if (debugNewThread
			&& (atomic_get(&team->debug_info.flags)
				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
		}

		insert_thread_into_team(team, thread);
	} else
		abort = true;

	RELEASE_TEAM_LOCK();
	if (abort) {
		GRAB_THREAD_LOCK();
		hash_remove(sThreadHash, thread);
		RELEASE_THREAD_LOCK();
	}
	restore_interrupts(state);
	if (abort) {
		delete_area(thread->kernel_stack_area);
		delete_thread_struct(thread);
		return B_BAD_TEAM_ID;
	}

	thread->args1 = args1;
	thread->args2 = args2;
	thread->entry = entry;
	status = thread->id;

	if (kernel) {
		// this sets up an initial kthread stack that runs the entry

		// Note: whatever function wants to set up a user stack later for this
		// thread must initialize the TLS for it
		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
			&thread_kthread_entry, &thread_kthread_exit);
	} else {
		// create user stack

		// the stack will be between USER_STACK_REGION and the main thread
		// stack area (the user stack of the main thread is created in
		// team_create_team())
		thread->user_stack_base = USER_STACK_REGION;
		thread->user_stack_size = USER_STACK_SIZE;
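
		// Note: the area below is sized for the stack plus the thread-local
		// storage block; arch_thread_init_tls() then sets up the TLS right
		// above the stack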
		snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack", name, thread->id);
		thread->user_stack_area = create_area_etc(team, stack_name,
			(void **)&thread->user_stack_base, B_BASE_ADDRESS,
			thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
			B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
		if (thread->user_stack_area < B_OK
			|| arch_thread_init_tls(thread) < B_OK) {
			// great, we have a fully running thread without a (usable) stack
			dprintf("create_thread: unable to create proper user stack!\n");
			status = thread->user_stack_area;
			kill_thread(thread->id);
		}

		user_debug_update_new_thread_flags(thread->id);

		// copy the user entry over to the args field in the thread struct
		// the function this will call will immediately switch the thread into
		// user space.
		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
			&thread_kthread_entry, &thread_kthread_exit);
	}

	return status;
}


/*!
	Finds a free death stack for us and allocates it.
	Must be called with interrupts enabled.
*/
static uint32
get_death_stack(void)
{
	cpu_status state;
	uint32 bit;
	int32 i;

	acquire_sem(sDeathStackSem);

	// grab the death stack and thread locks, find a free spot and release

	state = disable_interrupts();

	acquire_spinlock(&sDeathStackLock);
	GRAB_THREAD_LOCK();

	bit = sDeathStackBitmap;
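	// isolate the lowest clear bit, i.e. the first free stack: ~bit has
	// the free slots set, and x & ~(x - 1) keeps only the lowest set bit
	// of x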
	bit = (~bit) & ~((~bit) - 1);
	sDeathStackBitmap |= bit;

	RELEASE_THREAD_LOCK();
	release_spinlock(&sDeathStackLock);

	restore_interrupts(state);

	// sanity checks
	if (!bit)
		panic("get_death_stack: couldn't find free stack!\n");

	if (bit & (bit - 1))
		panic("get_death_stack: impossible bitmap result!\n");

	// bit to number
	for (i = -1; bit; i++) {
		bit >>= 1;
	}

	TRACE(("get_death_stack: returning %#lx\n", sDeathStacks[i].address));

	return (uint32)i;
}


/*! Returns the thread's death stack to the pool.
	Interrupts must be disabled and the sDeathStackLock be held.
*/
static void
put_death_stack(uint32 index)
{
	TRACE(("put_death_stack...: passed %lu\n", index));

	if (index >= sNumDeathStacks)
		panic("put_death_stack: passed invalid stack index %ld\n", index);

	if (!(sDeathStackBitmap & (1 << index)))
		panic("put_death_stack: passed invalid stack index %ld\n", index);

	GRAB_THREAD_LOCK();
	sDeathStackBitmap &= ~(1 << index);
	RELEASE_THREAD_LOCK();

	release_sem_etc(sDeathStackSem, 1, B_DO_NOT_RESCHEDULE);
		// we must not hold the thread lock when releasing a semaphore
}


static void
thread_exit2(void *_args)
{
	struct thread_exit_args args;

	// copy the arguments over, since the source is probably on the kernel
	// stack we're about to delete
	memcpy(&args, _args, sizeof(struct thread_exit_args));

	// we can't leave interrupts disabled at this point
	enable_interrupts();

	TRACE(("thread_exit2, running on death stack %#lx\n", args.death_stack));

	// delete the old kernel stack area
	TRACE(("thread_exit2: deleting old kernel stack id %ld for thread %ld\n",
		args.old_kernel_stack, args.thread->id));

	delete_area(args.old_kernel_stack);

	// remove this thread from all of the global lists
	TRACE(("thread_exit2: removing thread %ld from global lists\n",
		args.thread->id));

	disable_interrupts();
	GRAB_TEAM_LOCK();

	remove_thread_from_team(team_get_kernel_team(), args.thread);

	RELEASE_TEAM_LOCK();
	enable_interrupts();
		// needed for the debugger notification below

	TRACE(("thread_exit2: done removing thread from lists\n"));

	if (args.death_sem >= 0)
		release_sem_etc(args.death_sem, 1, B_DO_NOT_RESCHEDULE);

	// notify the debugger
	if (args.original_team_id >= 0
		&& args.original_team_id != team_get_kernel_team_id()) {
		user_debug_thread_deleted(args.original_team_id, args.thread->id);
	}

	disable_interrupts();

	// Set the next state to be gone: this will cause the thread structure
	// to be returned to a ready pool upon reschedule.
	// Note, we need to have disabled interrupts at this point, or else
	// we could get rescheduled too early.
	args.thread->next_state = THREAD_STATE_FREE_ON_RESCHED;

	// return the death stack and reschedule one last time

	// Note that we need to hold sDeathStackLock until we've got the thread
	// lock. Otherwise someone else might grab our stack in the meantime.
	acquire_spinlock(&sDeathStackLock);
	put_death_stack(args.death_stack);

	GRAB_THREAD_LOCK();
	release_spinlock(&sDeathStackLock);

	scheduler_reschedule();
		// requires thread lock to be held

	// never get to here
	panic("thread_exit2: made it where it shouldn't have!\n");
}


/*!
	Fills the thread_info structure with information from the specified
	thread.
	The thread lock must be held when called.
*/
static void
fill_thread_info(struct thread *thread, thread_info *info, size_t size)
{
	info->thread = thread->id;
	info->team = thread->team->id;

	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
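
	// the generic waiting state is refined by the semaphore being blocked
	// on: the global snooze semaphore means the thread is asleep, its own
	// message read semaphore means it sits in receive_data()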
	if (thread->state == B_THREAD_WAITING) {
		if (thread->sem.blocking == sSnoozeSem)
			info->state = B_THREAD_ASLEEP;
		else if (thread->sem.blocking == thread->msg.read_sem)
			info->state = B_THREAD_RECEIVING;
		else
			info->state = B_THREAD_WAITING;
	} else
		info->state = (thread_state)thread->state;

	info->priority = thread->priority;
	info->sem = thread->sem.blocking;
	info->user_time = thread->user_time;
	info->kernel_time = thread->kernel_time;
	info->stack_base = (void *)thread->user_stack_base;
	info->stack_end = (void *)(thread->user_stack_base
		+ thread->user_stack_size);
}


static status_t
send_data_etc(thread_id id, int32 code, const void *buffer,
	size_t bufferSize, int32 flags)
{
	struct thread *target;
	sem_id cachedSem;
	cpu_status state;
	status_t status;
	cbuf *data;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();
	target = thread_get_thread_struct_locked(id);
	if (!target) {
		RELEASE_THREAD_LOCK();
		restore_interrupts(state);
		return B_BAD_THREAD_ID;
	}
	cachedSem = target->msg.write_sem;
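		// cache the semaphore ID so we can block on it below without
		// holding the thread lock; should the target die in the meantime,
		// acquiring the then-deleted semaphore simply fails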
	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
		return B_NO_MEMORY;

	status = acquire_sem_etc(cachedSem, 1, flags, 0);
	if (status == B_INTERRUPTED) {
		// We got interrupted by a signal
		return status;
	}
	if (status != B_OK) {
		// Any other acquisition problems may be due to thread deletion
		return B_BAD_THREAD_ID;
	}

	if (bufferSize > 0) {
		data = cbuf_get_chain(bufferSize);
		if (data == NULL)
			return B_NO_MEMORY;
		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
		if (status < B_OK) {
			cbuf_free_chain(data);
			return B_NO_MEMORY;
		}
	} else
		data = NULL;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	// The target thread could have been deleted at this point
	target = thread_get_thread_struct_locked(id);
	if (target == NULL) {
		RELEASE_THREAD_LOCK();
		restore_interrupts(state);
		cbuf_free_chain(data);
		return B_BAD_THREAD_ID;
	}

	// Save message information
	target->msg.sender = thread_get_current_thread()->id;
	target->msg.code = code;
	target->msg.size = bufferSize;
	target->msg.buffer = data;
	cachedSem = target->msg.read_sem;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	release_sem(cachedSem);
	return B_OK;
}
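

// Each thread owns a single-message mailbox: msg.write_sem serializes
// senders (it is created with a count of 1 and re-released by the
// receiver), while msg.read_sem tells the receiver that a message has
// been stored.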
static int32
receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
	int32 flags)
{
	struct thread *thread = thread_get_current_thread();
	status_t status;
	size_t size;
	int32 code;

	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
	if (status < B_OK) {
		// Actually, we're not supposed to return error codes
		// but since the only reason this can fail is that we
		// were killed, it's probably okay to do so (but also
		// meaningless).
		return status;
	}

	if (buffer != NULL && bufferSize != 0) {
		size = min_c(bufferSize, thread->msg.size);
		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
			0, size);
		if (status < B_OK) {
			cbuf_free_chain(thread->msg.buffer);
			release_sem(thread->msg.write_sem);
			return status;
		}
	}

	*_sender = thread->msg.sender;
	code = thread->msg.code;

	cbuf_free_chain(thread->msg.buffer);
	release_sem(thread->msg.write_sem);

	return code;
}


// #pragma mark - debugger calls
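
// The commands below run inside the kernel debugger; they are presumably
// registered with add_debugger_command() during thread initialization.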


static int
make_thread_unreal(int argc, char **argv)
{
	struct thread *thread;
	struct hash_iterator i;
	int32 id = -1;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc > 1)
		id = strtoul(argv[1], NULL, 0);

	hash_open(sThreadHash, &i);

	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
		if (id != -1 && thread->id != id)
			continue;

		if (thread->priority > B_DISPLAY_PRIORITY) {
			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
			kprintf("thread %ld made unreal\n", thread->id);
		}
	}

	hash_close(sThreadHash, &i, false);
	return 0;
}


static int
set_thread_prio(int argc, char **argv)
{
	struct thread *thread;
	struct hash_iterator i;
	int32 id;
	int32 prio;

	if (argc > 3 || argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	prio = strtoul(argv[1], NULL, 0);
	if (prio > B_MAX_PRIORITY)
		prio = B_MAX_PRIORITY;
	if (prio < B_MIN_PRIORITY)
		prio = B_MIN_PRIORITY;

	if (argc > 2)
		id = strtoul(argv[2], NULL, 0);
	else
		id = thread_get_current_thread()->id;

	hash_open(sThreadHash, &i);

	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
		if (thread->id != id)
			continue;
		thread->priority = thread->next_priority = prio;
		kprintf("thread %ld set to priority %ld\n", id, prio);
		break;
	}
	if (!thread)
		kprintf("thread %ld (%#lx) not found\n", id, id);

	hash_close(sThreadHash, &i, false);
	return 0;
}


static int
make_thread_suspended(int argc, char **argv)
{
	struct thread *thread;
	struct hash_iterator i;
	int32 id;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1)
		id = thread_get_current_thread()->id;
	else
		id = strtoul(argv[1], NULL, 0);

	hash_open(sThreadHash, &i);

	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
		if (thread->id != id)
			continue;

		thread->next_state = B_THREAD_SUSPENDED;
		kprintf("thread %ld suspended\n", id);
		break;
	}
	if (!thread)
		kprintf("thread %ld (%#lx) not found\n", id, id);

	hash_close(sThreadHash, &i, false);
	return 0;
}


static int
make_thread_resumed(int argc, char **argv)
{
	struct thread *thread;
	struct hash_iterator i;
	int32 id;

	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	// force user to enter a thread id, as using
	// the current thread is usually not intended
	id = strtoul(argv[1], NULL, 0);

	hash_open(sThreadHash, &i);

	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
		if (thread->id != id)
			continue;

		if (thread->state == B_THREAD_SUSPENDED) {
			scheduler_enqueue_in_run_queue(thread);
			kprintf("thread %ld resumed\n", thread->id);
		}
		break;
	}
	if (!thread)
		kprintf("thread %ld (%#lx) not found\n", id, id);

	hash_close(sThreadHash, &i, false);
	return 0;
}


static int
drop_into_debugger(int argc, char **argv)
{
	status_t err;
	int32 id;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1)
		id = thread_get_current_thread()->id;
	else
		id = strtoul(argv[1], NULL, 0);

	err = _user_debug_thread(id);
	if (err)
		kprintf("drop failed\n");
	else
		kprintf("thread %ld dropped into user debugger\n", id);

	return 0;
}


static const char *
state_to_text(struct thread *thread, int32 state)
{
	switch (state) {
		case B_THREAD_READY:
			return "ready";

		case B_THREAD_RUNNING:
			return "running";

		case B_THREAD_WAITING:
			if (thread->sem.blocking == sSnoozeSem)
				return "zzz";
			if (thread->sem.blocking == thread->msg.read_sem)
				return "receive";

			return "waiting";

		case B_THREAD_SUSPENDED:
			return "suspended";

		case THREAD_STATE_FREE_ON_RESCHED:
			return "death";

		default:
			return "UNKNOWN";
	}
}


static void
_dump_thread_info(struct thread *thread)
{
	struct death_entry *death = NULL;

	kprintf("THREAD: %p\n", thread);
	kprintf("id: %ld (%#lx)\n", thread->id, thread->id);
	kprintf("name: \"%s\"\n", thread->name);
	kprintf("all_next: %p\nteam_next: %p\nq_next: %p\n",
		thread->all_next, thread->team_next, thread->queue_next);
	kprintf("priority: %ld (next %ld)\n", thread->priority,
		thread->next_priority);
	kprintf("state: %s\n", state_to_text(thread, thread->state));
	kprintf("next_state: %s\n", state_to_text(thread, thread->next_state));
	kprintf("cpu: %p ", thread->cpu);
	if (thread->cpu)
		kprintf("(%d)\n", thread->cpu->cpu_num);
	else
		kprintf("\n");
	kprintf("sig_pending: %#lx\n", thread->sig_pending);
	kprintf("in_kernel: %d\n", thread->in_kernel);
	kprintf("  sem.blocking: %ld\n", thread->sem.blocking);
	kprintf("  sem.count: %ld\n", thread->sem.count);
	kprintf("  sem.acquire_status: %#lx\n", thread->sem.acquire_status);
	kprintf("  sem.flags: %#lx\n", thread->sem.flags);

	kprintf("condition variables:");
	PrivateConditionVariableEntry* entry = thread->condition_variable_entry;
	while (entry != NULL) {
		kprintf(" %p", entry->Variable());
		entry = entry->ThreadNext();
	}
	kprintf("\n");

	kprintf("fault_handler: %p\n", (void *)thread->fault_handler);
	kprintf("args: %p %p\n", thread->args1, thread->args2);
	kprintf("entry: %p\n", (void *)thread->entry);
	kprintf("team: %p, \"%s\"\n", thread->team, thread->team->name);
	kprintf("  exit.sem: %ld\n", thread->exit.sem);
	kprintf("  exit.status: %#lx (%s)\n", thread->exit.status,
		strerror(thread->exit.status));
	kprintf("  exit.reason: %#x\n", thread->exit.reason);
	kprintf("  exit.signal: %#x\n", thread->exit.signal);
	kprintf("  exit.waiters:\n");
	while ((death = (struct death_entry*)list_get_next_item(
			&thread->exit.waiters, death)) != NULL) {
		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id,
			death->thread);
	}

	kprintf("kernel_stack_area: %ld\n", thread->kernel_stack_area);
	kprintf("kernel_stack_base: %p\n", (void *)thread->kernel_stack_base);
	kprintf("user_stack_area: %ld\n", thread->user_stack_area);
	kprintf("user_stack_base: %p\n", (void *)thread->user_stack_base);
	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
	kprintf("kernel_errno: %#x (%s)\n", thread->kernel_errno,
		strerror(thread->kernel_errno));
	kprintf("kernel_time: %Ld\n", thread->kernel_time);
	kprintf("user_time: %Ld\n", thread->user_time);
	kprintf("flags: 0x%lx\n", thread->flags);
	kprintf("architecture dependent section:\n");
	arch_thread_dump_info(&thread->arch_info);
}


static int
dump_thread_info(int argc, char **argv)
{
	const char *name = NULL;
	struct thread *thread;
	int32 id = -1;
	struct hash_iterator i;
	bool found = false;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1) {
		_dump_thread_info(thread_get_current_thread());
		return 0;
	} else {
		name = argv[1];
		id = strtoul(argv[1], NULL, 0);

		if (IS_KERNEL_ADDRESS(id)) {
			// semi-hack
			_dump_thread_info((struct thread *)id);
			return 0;
		}
	}

	// walk through the thread list, trying to match name or id
	hash_open(sThreadHash, &i);
	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
		if ((name != NULL && !strcmp(name, thread->name)) || thread->id == id) {
			_dump_thread_info(thread);
			found = true;
			break;
		}
	}
	hash_close(sThreadHash, &i, false);

	if (!found)
		kprintf("thread \"%s\" (%ld) doesn't exist!\n", argv[1], id);
	return 0;
}


static int
dump_thread_list(int argc, char **argv)
{
	struct thread *thread;
	struct hash_iterator i;
	bool realTimeOnly = false;
	int32 requiredState = 0;
	team_id team = -1;
	sem_id sem = -1;

	if (!strcmp(argv[0], "realtime"))
		realTimeOnly = true;
	else if (!strcmp(argv[0], "ready"))
		requiredState = B_THREAD_READY;
	else if (!strcmp(argv[0], "running"))
		requiredState = B_THREAD_RUNNING;
	else if (!strcmp(argv[0], "waiting")) {
		requiredState = B_THREAD_WAITING;

		if (argc > 1) {
			sem = strtoul(argv[1], NULL, 0);
			if (sem == 0)
				kprintf("ignoring invalid semaphore argument.\n");
		}
	} else if (argc > 1) {
		team = strtoul(argv[1], NULL, 0);
		if (team == 0)
			kprintf("ignoring invalid team argument.\n");
	}

	kprintf("thread id state sem/cv cpu pri stack team "
		"name\n");

	hash_open(sThreadHash, &i);
	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
		// filter out threads not matching the search criteria
		if ((requiredState && thread->state != requiredState)
			|| (sem > 0 && thread->sem.blocking != sem)
			|| (team > 0 && thread->team->id != team)
			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
			continue;

		kprintf("%p %6ld %-9s", thread, thread->id, state_to_text(thread,
			thread->state));

		// does it block on a semaphore or a condition variable?
		if (thread->state == B_THREAD_WAITING) {
			if (thread->condition_variable_entry)
				kprintf("%p ", thread->condition_variable_entry->Variable());
			else
				kprintf("%10ld ", thread->sem.blocking);
		} else
			kprintf(" - ");

		// on which CPU does it run?
		if (thread->cpu)
			kprintf("%2d", thread->cpu->cpu_num);
		else
			kprintf(" -");

		kprintf("%4ld %p%5ld %s\n", thread->priority,
			(void *)thread->kernel_stack_base, thread->team->id,
			thread->name != NULL ? thread->name : "<NULL>");
	}
	hash_close(sThreadHash, &i, false);
	return 0;
}


// #pragma mark - private kernel API


void
thread_exit(void)
{
	cpu_status state;
	struct thread *thread = thread_get_current_thread();
	struct process_group *freeGroup = NULL;
	struct team *team = thread->team;
	thread_id parentID = -1;
	bool deleteTeam = false;
	sem_id cachedDeathSem = -1;
	status_t status;
	struct thread_debug_info debugInfo;
	team_id teamID = team->id;

	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
		thread->exit.reason == THREAD_RETURN_INTERRUPTED
			? "due to signal" : "normally", thread->exit.status));

	if (!are_interrupts_enabled())
		panic("thread_exit() called with interrupts disabled!\n");

	// boost our priority to get this over with
	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;

	// Cancel previously installed alarm timer, if any
	cancel_timer(&thread->alarm);

	// delete the user stack area first, we won't need it anymore
	if (team->address_space != NULL && thread->user_stack_area >= 0) {
		area_id area = thread->user_stack_area;
		thread->user_stack_area = -1;
		delete_area_etc(team, area);
	}

	struct job_control_entry *death = NULL;
|
2007-11-28 03:07:32 +03:00
|
|
|
struct death_entry* threadDeathEntry = NULL;
|
|
|
|
|
2003-01-07 12:48:01 +03:00
|
|
|
if (team != team_get_kernel_team()) {
|
2004-10-14 18:46:12 +04:00
|
|
|
if (team->main_thread == thread) {
|
|
|
|
// this was the main thread in this team, so we will delete that as well
|
|
|
|
deleteTeam = true;
|
2007-11-28 03:07:32 +03:00
|
|
|
} else
|
|
|
|
threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2002-08-03 04:41:27 +04:00
|
|
|
// remove this thread from the current team and add it to the kernel
|
|
|
|
// team; the thread stays there until it dies
|
2002-07-25 05:05:51 +04:00
|
|
|
state = disable_interrupts();
|
2002-08-03 04:41:27 +04:00
|
|
|
GRAB_TEAM_LOCK();
|
2007-01-15 15:41:08 +03:00
|
|
|
GRAB_THREAD_LOCK();
|
|
|
|
// removing the thread and putting its death entry to the parent
|
|
|
|
// team needs to be an atomic operation
|
2004-03-16 05:50:25 +03:00
|
|
|
|
2004-11-26 17:58:01 +03:00
|
|
|
// remember how long this thread lasted
|
|
|
|
team->dead_threads_kernel_time += thread->kernel_time;
|
|
|
|
team->dead_threads_user_time += thread->user_time;
|
|
|
|
|
2004-10-13 18:52:52 +04:00
|
|
|
remove_thread_from_team(team, thread);
|
|
|
|
insert_thread_into_team(team_get_kernel_team(), thread);
|
2004-03-16 05:50:25 +03:00
|
|
|
|
2005-03-25 21:28:24 +03:00
|
|
|
cachedDeathSem = team->death_sem;
|
|
|
|
|
2004-10-14 18:46:12 +04:00
|
|
|
if (deleteTeam) {
|
|
|
|
struct team *parent = team->parent;
|
2004-03-16 05:50:25 +03:00
|
|
|
|
|
|
|
// remember who our parent was so we can send a signal
|
2007-01-13 01:54:21 +03:00
|
|
|
parentID = parent->id;
|
2004-03-16 05:50:25 +03:00
|
|
|
|
2007-08-28 07:29:14 +04:00
|
|
|
// Set the team job control state to "dead" and detach the job
|
|
|
|
// control entry from our team struct.
|
|
|
|
team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
|
|
|
|
death = team->job_control_entry;
|
|
|
|
team->job_control_entry = NULL;
|
|
|
|
|
2004-10-14 18:46:12 +04:00
|
|
|
if (death != NULL) {
|
2007-08-28 07:29:14 +04:00
|
|
|
death->team = NULL;
|
|
|
|
death->group_id = team->group_id;
|
|
|
|
death->thread = thread->id;
|
|
|
|
death->status = thread->exit.status;
|
|
|
|
death->reason = thread->exit.reason;
|
|
|
|
death->signal = thread->exit.signal;
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2007-08-28 07:29:14 +04:00
|
|
|
// team_set_job_control_state() already moved our entry
|
|
|
|
// into the parent's list. We just check the soft limit of
|
|
|
|
// death entries.
|
|
|
|
if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
|
|
|
|
death = parent->dead_children->entries.RemoveHead();
|
2007-08-28 00:30:34 +04:00
|
|
|
parent->dead_children->count--;
|
2004-10-14 18:46:12 +04:00
|
|
|
} else
|
|
|
|
death = NULL;
|
|
|
|
|
2007-01-15 15:41:08 +03:00
|
|
|
RELEASE_THREAD_LOCK();
|
|
|
|
} else
|
|
|
|
RELEASE_THREAD_LOCK();
|
2004-10-14 18:46:12 +04:00
|
|
|
|
|
|
|
team_remove_team(team, &freeGroup);
|
2008-01-19 02:39:13 +03:00
|
|
|
|
|
|
|
send_signal_etc(parentID, SIGCHLD,
|
|
|
|
SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
|
2007-11-28 03:07:32 +03:00
|
|
|
} else {
|
|
|
|
// The thread is not the main thread. We store a thread death
|
|
|
|
// entry for it, unless someone is already waiting for it.
|
|
|
|
if (threadDeathEntry != NULL
|
|
|
|
&& list_is_empty(&thread->exit.waiters)) {
|
|
|
|
threadDeathEntry->thread = thread->id;
|
|
|
|
threadDeathEntry->status = thread->exit.status;
|
|
|
|
threadDeathEntry->reason = thread->exit.reason;
|
|
|
|
threadDeathEntry->signal = thread->exit.signal;
|
|
|
|
|
|
|
|
// add entry -- remove an old one, if we hit the limit
|
|
|
|
list_add_item(&team->dead_threads, threadDeathEntry);
|
|
|
|
team->dead_threads_count++;
|
|
|
|
threadDeathEntry = NULL;
|
|
|
|
|
|
|
|
if (team->dead_threads_count > MAX_DEAD_THREADS) {
|
|
|
|
threadDeathEntry = (death_entry*)list_remove_head_item(
|
|
|
|
&team->dead_threads);
|
|
|
|
team->dead_threads_count--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-01-15 15:41:08 +03:00
|
|
|
RELEASE_THREAD_LOCK();
|
2007-11-28 03:07:32 +03:00
|
|
|
}
|
2007-01-15 15:41:08 +03:00
|
|
|
|
2002-08-03 04:41:27 +04:00
|
|
|
RELEASE_TEAM_LOCK();
|
2007-01-15 15:41:08 +03:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
// swap address spaces, to make sure we're running on the kernel's pgdir
|
2005-12-20 16:29:11 +03:00
|
|
|
vm_swap_address_space(vm_kernel_address_space());
|
2002-07-25 05:05:51 +04:00
|
|
|
restore_interrupts(state);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2007-10-11 11:46:55 +04:00
|
|
|
TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
2002-10-05 23:38:42 +04:00
|
|
|
|
2007-11-28 03:07:32 +03:00
|
|
|
if (threadDeathEntry != NULL)
|
|
|
|
free(threadDeathEntry);
|
|
|
|
|
2004-03-16 05:50:25 +03:00
|
|
|
// delete the team if we're its main thread
|
2004-10-13 17:10:27 +04:00
|
|
|
if (deleteTeam) {
|
2008-02-21 04:00:14 +03:00
|
|
|
// TODO: Deleting the process group is actually a problem. According to
|
|
|
|
// the POSIX standard the process should become a zombie and live on
|
|
|
|
// until it is reaped. Hence the process group would continue to exist
|
|
|
|
// for that time as well. That is moving processes to it (setpgid())
|
|
|
|
// should work. This can actually happen e.g. when executing something
|
|
|
|
// like "echo foobar | wc" in the shell. The built-in "echo" could
|
|
|
|
// exit() even before setpgid() has been invoked for the "wc" child.
|
|
|
|
// Cf. bug #1799.
|
2004-10-14 18:46:12 +04:00
|
|
|
team_delete_process_group(freeGroup);
|
2004-03-16 05:50:25 +03:00
|
|
|
team_delete_team(team);
|
2003-01-12 19:30:09 +03:00
|
|
|
|
2007-08-28 07:29:14 +04:00
|
|
|
// we need to delete any death entry that made it to here
|
2004-10-14 18:46:12 +04:00
|
|
|
if (death != NULL)
|
2007-08-28 07:29:14 +04:00
|
|
|
delete death;
|
2004-10-14 18:46:12 +04:00
|
|
|
|
|
|
|
cachedDeathSem = -1;
|
2005-03-25 21:28:24 +03:00
|
|
|
}
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2007-04-25 12:25:10 +04:00
|
|
|
state = disable_interrupts();
|
|
|
|
GRAB_THREAD_LOCK();
|
|
|
|
|
|
|
|
// remove thread from hash, so it's no longer accessible
|
|
|
|
hash_remove(sThreadHash, thread);
|
|
|
|
sUsedThreads--;
|
|
|
|
|
|
|
|
// Stop debugging for this thread
|
|
|
|
debugInfo = thread->debug_info;
|
|
|
|
clear_thread_debug_info(&thread->debug_info, true);
|
|
|
|
|
2007-10-02 23:47:31 +04:00
|
|
|
// Remove the select infos. We notify them a little later.
|
|
|
|
select_info* selectInfos = thread->select_infos;
|
|
|
|
thread->select_infos = NULL;
|
|
|
|
|
2007-04-25 12:25:10 +04:00
|
|
|
RELEASE_THREAD_LOCK();
|
|
|
|
restore_interrupts(state);
|
|
|
|
|
|
|
|
destroy_thread_debug_info(&debugInfo);
|
|
|
|
|
2007-10-02 23:47:31 +04:00
|
|
|
// notify select infos
|
|
|
|
select_info* info = selectInfos;
|
|
|
|
while (info != NULL) {
|
|
|
|
select_sync* sync = info->sync;
|
|
|
|
|
|
|
|
notify_select_events(info, B_EVENT_INVALID);
|
|
|
|
info = info->next;
|
|
|
|
put_select_sync(sync);
|
|
|
|
}
|
|
|
|
|
2007-04-25 12:25:10 +04:00
|
|
|
// shutdown the thread messaging
|
|
|
|
|
|
|
|
status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
|
|
|
|
if (status == B_WOULD_BLOCK) {
|
|
|
|
// there is data waiting for us, so let us eat it
|
|
|
|
thread_id sender;
|
|
|
|
|
|
|
|
delete_sem(thread->msg.write_sem);
|
|
|
|
// first, let's remove all possibly waiting writers
|
|
|
|
receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
|
|
|
|
} else {
|
|
|
|
// we probably own the semaphore here, and we're the last to do so
|
|
|
|
delete_sem(thread->msg.write_sem);
|
|
|
|
}
|
|
|
|
// now we can safely remove the msg.read_sem
|
|
|
|
delete_sem(thread->msg.read_sem);
|
|
|
|
|
2004-10-13 18:52:52 +04:00
|
|
|
// fill all death entries and delete the sem that others will use to wait on us
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2004-10-13 18:52:52 +04:00
|
|
|
sem_id cachedExitSem = thread->exit.sem;
|
|
|
|
cpu_status state;
|
|
|
|
|
|
|
|
state = disable_interrupts();
|
|
|
|
GRAB_THREAD_LOCK();
|
|
|
|
|
|
|
|
// make sure no one will grab this semaphore again
|
|
|
|
thread->exit.sem = -1;
|
|
|
|
|
|
|
|
// fill all death entries
|
2007-08-28 07:29:14 +04:00
|
|
|
death_entry* entry = NULL;
|
|
|
|
while ((entry = (struct death_entry*)list_get_next_item(
|
|
|
|
&thread->exit.waiters, entry)) != NULL) {
|
|
|
|
entry->status = thread->exit.status;
|
|
|
|
entry->reason = thread->exit.reason;
|
|
|
|
entry->signal = thread->exit.signal;
|
2004-10-13 18:52:52 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
RELEASE_THREAD_LOCK();
|
|
|
|
restore_interrupts(state);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2004-10-13 18:52:52 +04:00
|
|
|
delete_sem(cachedExitSem);
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
struct thread_exit_args args;
|
|
|
|
|
2004-10-13 18:52:52 +04:00
|
|
|
args.thread = thread;
|
2004-11-08 16:52:11 +03:00
|
|
|
args.old_kernel_stack = thread->kernel_stack_area;
|
2006-05-30 18:17:09 +04:00
|
|
|
args.death_stack = get_death_stack();
|
2004-10-14 18:46:12 +04:00
|
|
|
args.death_sem = cachedDeathSem;
|
2005-02-28 03:39:51 +03:00
|
|
|
args.original_team_id = teamID;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2007-10-02 02:46:56 +04:00
|
|
|
|
2006-05-30 18:17:09 +04:00
|
|
|
disable_interrupts();
|
|
|
|
|
|
|
|
// set the new kernel stack officially to the death stack; it won't be
|
|
|
|
// switched until the next function is called. This must be done now
|
|
|
|
// before a context switch, or we'll stay on the old stack
|
|
|
|
thread->kernel_stack_area = sDeathStacks[args.death_stack].area;
|
|
|
|
thread->kernel_stack_base = sDeathStacks[args.death_stack].address;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
// we will continue in thread_exit2(), on the new stack
|
2007-06-01 02:05:57 +04:00
|
|
|
arch_thread_switch_kstack_and_call(thread, thread->kernel_stack_base
|
|
|
|
+ KERNEL_STACK_SIZE, thread_exit2, &args);
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
panic("never can get here\n");
|
|
|
|
}
|
|
|
|
|
2002-09-24 04:08:50 +04:00
|
|
|
|
|
|
|
struct thread *
|
|
|
|
thread_get_thread_struct(thread_id id)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2005-11-02 13:24:37 +03:00
|
|
|
struct thread *thread;
|
2002-12-04 19:40:07 +03:00
|
|
|
cpu_status state;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2002-07-25 05:05:51 +04:00
|
|
|
state = disable_interrupts();
|
2002-07-09 16:24:59 +04:00
|
|
|
GRAB_THREAD_LOCK();
|
|
|
|
|
2005-11-02 13:24:37 +03:00
|
|
|
thread = thread_get_thread_struct_locked(id);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
RELEASE_THREAD_LOCK();
|
2002-07-25 05:05:51 +04:00
|
|
|
restore_interrupts(state);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2005-11-02 13:24:37 +03:00
|
|
|
return thread;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2002-10-23 21:31:10 +04:00
|
|
|
|
|
|
|
struct thread *
|
|
|
|
thread_get_thread_struct_locked(thread_id id)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
struct thread_key key;
|
|
|
|
|
|
|
|
key.id = id;
|
|
|
|
|
2007-08-27 00:37:54 +04:00
|
|
|
return (struct thread*)hash_lookup(sThreadHash, &key);
|
2002-07-09 16:24:59 +04:00
|
|
|
}
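/*	A minimal usage sketch for the locked lookup above (illustrative only;
	"someID" is a hypothetical caller-supplied thread ID):
	\code
	cpu_status state = disable_interrupts();
	GRAB_THREAD_LOCK();

	struct thread *thread = thread_get_thread_struct_locked(someID);
	if (thread != NULL) {
		// the structure may only be inspected while the lock is held
	}

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);
	\endcode
	This is exactly the discipline thread_get_thread_struct() implements.
*/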
|
|
|
|
|
2002-09-24 04:08:50 +04:00
|
|
|
|
2007-06-01 02:05:57 +04:00
|
|
|
/*!
|
|
|
|
Called in the interrupt handler code when a thread enters
|
|
|
|
the kernel for any reason.
|
|
|
|
Only tracks time for now.
|
2008-01-11 03:36:44 +03:00
|
|
|
Interrupts are disabled.
|
2007-06-01 02:05:57 +04:00
|
|
|
*/
|
2002-09-24 04:08:50 +04:00
|
|
|
void
|
2008-01-11 03:36:44 +03:00
|
|
|
thread_at_kernel_entry(bigtime_t now)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2005-02-11 06:10:21 +03:00
|
|
|
struct thread *thread = thread_get_current_thread();
|
2004-03-16 06:14:48 +03:00
|
|
|
|
2008-01-11 03:36:44 +03:00
|
|
|
TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
// track user time
|
2005-02-11 06:10:21 +03:00
|
|
|
thread->user_time += now - thread->last_time;
|
|
|
|
thread->last_time = now;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2005-02-11 06:10:21 +03:00
|
|
|
thread->in_kernel = true;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2002-09-24 04:08:50 +04:00
|
|
|
|
2007-06-01 02:05:57 +04:00
|
|
|
/*!
|
|
|
|
Called whenever a thread exits kernel space to user space.
|
|
|
|
Tracks time, handles signals, ...
|
|
|
|
*/
|
2002-09-24 04:08:50 +04:00
|
|
|
void
|
2005-02-11 06:10:21 +03:00
|
|
|
thread_at_kernel_exit(void)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2005-02-11 06:10:21 +03:00
|
|
|
struct thread *thread = thread_get_current_thread();
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2008-01-11 03:36:44 +03:00
|
|
|
TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2008-02-17 18:48:30 +03:00
|
|
|
while (handle_signals(thread)) {
|
|
|
|
InterruptsSpinLocker _(thread_spinlock);
|
2004-06-11 05:45:33 +04:00
|
|
|
scheduler_reschedule();
|
2008-02-17 18:48:30 +03:00
|
|
|
}
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2008-02-17 18:48:30 +03:00
|
|
|
cpu_status state = disable_interrupts();
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2005-10-20 20:56:04 +04:00
|
|
|
thread->in_kernel = false;
|
2004-06-11 05:45:33 +04:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
// track kernel time
|
2008-02-17 18:48:30 +03:00
|
|
|
bigtime_t now = system_time();
|
2005-02-11 06:10:21 +03:00
|
|
|
thread->kernel_time += now - thread->last_time;
|
|
|
|
thread->last_time = now;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2002-07-25 05:05:51 +04:00
|
|
|
restore_interrupts(state);
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2002-08-04 06:04:37 +04:00
|
|
|
|
2008-01-11 03:36:44 +03:00
|
|
|
/*! The quick version of thread_at_kernel_exit(), in case no signals are pending
|
|
|
|
and no debugging shall be done.
|
|
|
|
Interrupts are disabled in this case.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
thread_at_kernel_exit_no_signals(void)
|
|
|
|
{
|
|
|
|
struct thread *thread = thread_get_current_thread();
|
|
|
|
|
|
|
|
TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
|
|
|
|
|
|
|
|
thread->in_kernel = false;
|
|
|
|
|
|
|
|
// track kernel time
|
|
|
|
bigtime_t now = system_time();
|
|
|
|
thread->kernel_time += now - thread->last_time;
|
|
|
|
thread->last_time = now;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-08-16 22:01:47 +04:00
|
|
|
void
|
|
|
|
thread_reset_for_exec(void)
|
|
|
|
{
|
|
|
|
struct thread *thread = thread_get_current_thread();
|
|
|
|
|
|
|
|
cancel_timer(&thread->alarm);
|
|
|
|
reset_signals(thread);
|
|
|
|
}
|
2002-10-05 23:38:42 +04:00
|
|
|
|
2003-01-27 06:17:36 +03:00
|
|
|
|
2007-06-01 02:05:57 +04:00
|
|
|
/*! Insert a thread to the tail of a queue */
|
2003-01-27 06:17:36 +03:00
|
|
|
void
|
|
|
|
thread_enqueue(struct thread *thread, struct thread_queue *queue)
|
|
|
|
{
|
|
|
|
thread->queue_next = NULL;
|
|
|
|
if (queue->head == NULL) {
|
|
|
|
queue->head = thread;
|
|
|
|
queue->tail = thread;
|
|
|
|
} else {
|
|
|
|
queue->tail->queue_next = thread;
|
|
|
|
queue->tail = thread;
|
2002-08-19 12:28:39 +04:00
|
|
|
}
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
2002-10-05 23:38:42 +04:00
|
|
|
|
2002-08-19 12:28:39 +04:00
|
|
|
|
2003-01-27 06:17:36 +03:00
|
|
|
struct thread *
|
|
|
|
thread_lookat_queue(struct thread_queue *queue)
|
|
|
|
{
|
|
|
|
return queue->head;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
struct thread *
|
|
|
|
thread_dequeue(struct thread_queue *queue)
|
|
|
|
{
|
|
|
|
struct thread *thread = queue->head;
|
|
|
|
|
|
|
|
if (thread != NULL) {
|
|
|
|
queue->head = thread->queue_next;
|
|
|
|
if (queue->tail == thread)
|
|
|
|
queue->tail = NULL;
|
|
|
|
}
|
|
|
|
return thread;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
struct thread *
|
2005-11-02 13:24:37 +03:00
|
|
|
thread_dequeue_id(struct thread_queue *q, thread_id id)
|
2003-01-27 06:17:36 +03:00
|
|
|
{
|
2005-11-02 13:24:37 +03:00
|
|
|
struct thread *thread;
|
2003-01-27 06:17:36 +03:00
|
|
|
struct thread *last = NULL;
|
|
|
|
|
2005-11-02 13:24:37 +03:00
|
|
|
thread = q->head;
|
|
|
|
while (thread != NULL) {
|
|
|
|
if (thread->id == id) {
|
2003-01-27 06:17:36 +03:00
|
|
|
if (last == NULL)
|
2005-11-02 13:24:37 +03:00
|
|
|
q->head = thread->queue_next;
|
2003-01-27 06:17:36 +03:00
|
|
|
else
|
2005-11-02 13:24:37 +03:00
|
|
|
last->queue_next = thread->queue_next;
|
2003-01-27 06:17:36 +03:00
|
|
|
|
2005-11-02 13:24:37 +03:00
|
|
|
if (q->tail == thread)
|
2003-01-27 06:17:36 +03:00
|
|
|
q->tail = last;
|
|
|
|
break;
|
|
|
|
}
|
2005-11-02 13:24:37 +03:00
|
|
|
last = thread;
|
|
|
|
thread = thread->queue_next;
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
2005-11-02 13:24:37 +03:00
|
|
|
return thread;
|
2003-01-27 06:17:36 +03:00
|
|
|
}
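/*	A minimal sketch of the simple FIFO queue API above (the names are
	illustrative, not taken from this file):
	\code
	struct thread_queue queue;
	memset(&queue, 0, sizeof(queue));

	thread_enqueue(someThread, &queue);
	struct thread *head = thread_lookat_queue(&queue);	// peek only
	struct thread *next = thread_dequeue(&queue);		// removes head
	\endcode
	The queue functions do no locking themselves; callers are expected to
	hold the thread lock when the queue is shared (cf. the dead_q usage).
*/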
|
|
|
|
|
|
|
|
|
2004-12-01 00:11:37 +03:00
|
|
|
thread_id
|
2006-01-31 05:29:02 +03:00
|
|
|
allocate_thread_id(void)
|
2005-03-08 21:16:16 +03:00
|
|
|
{
|
|
|
|
return atomic_add(&sNextThreadID, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
thread_id
|
2006-01-31 05:29:02 +03:00
|
|
|
peek_next_thread_id(void)
|
2004-12-01 00:11:37 +03:00
|
|
|
{
|
2005-03-08 21:16:16 +03:00
|
|
|
return atomic_get(&sNextThreadID);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-02-07 14:40:31 +03:00
|
|
|
/*! Yield the CPU to other threads.
|
|
|
|
If \a force is \c true, the thread will almost certainly be unscheduled.
|
|
|
|
If \c false, it will continue to run if there is no other thread in ready
|
|
|
|
state, and if it has a higher priority than the other ready threads, it
|
|
|
|
still has a good chance to continue.
|
|
|
|
*/
|
2006-01-31 05:29:02 +03:00
|
|
|
void
|
2008-02-07 14:40:31 +03:00
|
|
|
thread_yield(bool force)
|
2006-01-31 05:29:02 +03:00
|
|
|
{
|
2008-02-07 14:40:31 +03:00
|
|
|
if (force) {
|
|
|
|
// snooze for roughly 3 thread quantums
|
|
|
|
snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
|
2007-07-05 22:27:27 +04:00
|
|
|
#if 0
|
2008-02-07 14:40:31 +03:00
|
|
|
cpu_status state;
|
2006-01-31 05:29:02 +03:00
|
|
|
|
2008-02-07 14:40:31 +03:00
|
|
|
struct thread *thread = thread_get_current_thread();
|
|
|
|
if (thread == NULL)
|
|
|
|
return;
|
2006-01-31 05:29:02 +03:00
|
|
|
|
2008-02-07 14:40:31 +03:00
|
|
|
state = disable_interrupts();
|
|
|
|
GRAB_THREAD_LOCK();
|
2006-01-31 05:29:02 +03:00
|
|
|
|
2008-02-07 14:40:31 +03:00
|
|
|
// mark the thread as yielded, so it will not be scheduled next
|
|
|
|
//thread->was_yielded = true;
|
|
|
|
thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
|
|
|
|
scheduler_reschedule();
|
2006-01-31 05:29:02 +03:00
|
|
|
|
2008-02-07 14:40:31 +03:00
|
|
|
RELEASE_THREAD_LOCK();
|
|
|
|
restore_interrupts(state);
|
2007-07-05 22:27:27 +04:00
|
|
|
#endif
|
2008-02-07 14:40:31 +03:00
|
|
|
} else {
|
|
|
|
struct thread *thread = thread_get_current_thread();
|
|
|
|
if (thread == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Don't force the thread off the CPU, just reschedule.
|
|
|
|
InterruptsSpinLocker _(thread_spinlock);
|
|
|
|
scheduler_reschedule();
|
|
|
|
}
|
2006-01-31 05:29:02 +03:00
|
|
|
}
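/*	Hedged usage notes: with \a force the caller is put to sleep for a few
	quantums; without it the call is merely a reschedule point.
	\code
	thread_yield(true);		// really give up the CPU for a while
	thread_yield(false);	// just offer the scheduler a chance to switch
	\endcode
*/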
|
|
|
|
|
|
|
|
|
2007-06-01 02:05:57 +04:00
|
|
|
/*!
|
|
|
|
Kernel private thread creation function.
|
2005-03-09 04:59:44 +03:00
|
|
|
|
2007-06-01 02:05:57 +04:00
|
|
|
\param threadID The ID to be assigned to the new thread. If
|
|
|
|
\code < 0 \endcode a fresh one is allocated.
|
|
|
|
*/
|
2005-03-08 21:16:16 +03:00
|
|
|
thread_id
|
|
|
|
spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
|
|
|
|
void *arg, team_id team, thread_id threadID)
|
|
|
|
{
|
|
|
|
return create_thread(name, team, (thread_entry_func)function, arg, NULL,
|
|
|
|
priority, true, threadID);
|
2004-12-01 00:11:37 +03:00
|
|
|
}
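/*	A minimal sketch of spawning a kernel thread via this function
	("my_worker" and the chosen priority are illustrative assumptions):
	\code
	static status_t
	my_worker(void *data)
	{
		// ... do the actual work ...
		return B_OK;
	}

	thread_id worker = spawn_kernel_thread_etc(my_worker, "my worker",
		B_NORMAL_PRIORITY, NULL, team_get_kernel_team_id(), -1);
	if (worker >= B_OK)
		resume_thread(worker);
			// threads are spawned suspended and must be resumed
	\endcode
*/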
|
|
|
|
|
|
|
|
|
2006-03-08 19:41:03 +03:00
|
|
|
status_t
|
2007-06-01 02:05:57 +04:00
|
|
|
wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
|
|
|
|
status_t *_returnCode)
|
2006-03-08 19:41:03 +03:00
|
|
|
{
|
2007-01-13 21:37:50 +03:00
|
|
|
sem_id exitSem = B_BAD_THREAD_ID;
|
2007-08-28 07:29:14 +04:00
|
|
|
struct death_entry death;
|
|
|
|
job_control_entry* freeDeath = NULL;
|
2006-03-08 19:41:03 +03:00
|
|
|
struct thread *thread;
|
|
|
|
cpu_status state;
|
2007-01-10 02:58:59 +03:00
|
|
|
status_t status = B_OK;
|
|
|
|
|
|
|
|
if (id < B_OK)
|
|
|
|
return B_BAD_THREAD_ID;
|
2006-03-08 19:41:03 +03:00
|
|
|
|
|
|
|
// we need to resume the thread we're waiting for first
|
|
|
|
|
|
|
|
state = disable_interrupts();
|
|
|
|
GRAB_THREAD_LOCK();
|
|
|
|
|
|
|
|
thread = thread_get_thread_struct_locked(id);
|
|
|
|
if (thread != NULL) {
|
|
|
|
// remember the semaphore we have to wait on and place our death entry
|
2007-01-13 21:37:50 +03:00
|
|
|
exitSem = thread->exit.sem;
|
2006-03-08 19:41:03 +03:00
|
|
|
list_add_link_to_head(&thread->exit.waiters, &death);
|
|
|
|
}
|
|
|
|
|
2007-11-28 03:07:32 +03:00
|
|
|
death_entry* threadDeathEntry = NULL;
|
|
|
|
|
2006-03-08 19:41:03 +03:00
|
|
|
RELEASE_THREAD_LOCK();
|
2007-01-10 02:58:59 +03:00
|
|
|
|
|
|
|
if (thread == NULL) {
|
|
|
|
// we couldn't find this thread - maybe it's already gone, and we'll
|
2007-08-17 17:08:24 +04:00
|
|
|
// find its death entry in our team
|
2007-01-10 02:58:59 +03:00
|
|
|
GRAB_TEAM_LOCK();
|
|
|
|
|
2007-11-28 03:07:32 +03:00
|
|
|
struct team* team = thread_get_current_thread()->team;
|
|
|
|
|
|
|
|
// check the child death entries first (i.e. main threads of child
|
|
|
|
// teams)
|
2007-08-28 07:29:14 +04:00
|
|
|
bool deleteEntry;
|
2007-11-28 03:07:32 +03:00
|
|
|
freeDeath = team_get_death_entry(team, id, &deleteEntry);
|
2007-08-28 07:29:14 +04:00
|
|
|
if (freeDeath != NULL) {
|
|
|
|
death.status = freeDeath->status;
|
|
|
|
if (!deleteEntry)
|
|
|
|
freeDeath = NULL;
|
2007-11-28 03:07:32 +03:00
|
|
|
} else {
|
|
|
|
// check the thread death entries of the team (non-main threads)
|
|
|
|
while ((threadDeathEntry = (death_entry*)list_get_next_item(
|
|
|
|
&team->dead_threads, threadDeathEntry)) != NULL) {
|
|
|
|
if (threadDeathEntry->thread == id) {
|
|
|
|
list_remove_item(&team->dead_threads, threadDeathEntry);
|
|
|
|
team->dead_threads_count--;
|
|
|
|
death.status = threadDeathEntry->status;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (threadDeathEntry == NULL)
|
|
|
|
status = B_BAD_THREAD_ID;
|
|
|
|
}
|
2007-01-10 02:58:59 +03:00
|
|
|
|
|
|
|
RELEASE_TEAM_LOCK();
|
|
|
|
}
|
|
|
|
|
2006-03-08 19:41:03 +03:00
|
|
|
restore_interrupts(state);
|
|
|
|
|
2007-01-10 02:58:59 +03:00
|
|
|
if (thread == NULL && status == B_OK) {
|
|
|
|
// we found the thread's death entry in our team
|
|
|
|
if (_returnCode)
|
|
|
|
*_returnCode = death.status;
|
|
|
|
|
2007-08-28 07:29:14 +04:00
|
|
|
delete freeDeath;
|
2007-11-28 03:07:32 +03:00
|
|
|
free(threadDeathEntry);
|
2007-01-10 02:58:59 +03:00
|
|
|
return B_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
// we need to wait for the death of the thread
|
|
|
|
|
2007-01-13 21:37:50 +03:00
|
|
|
if (exitSem < B_OK)
|
2006-03-08 19:41:03 +03:00
|
|
|
return B_BAD_THREAD_ID;
|
|
|
|
|
2007-01-10 02:58:59 +03:00
|
|
|
resume_thread(id);
|
2007-01-13 21:37:50 +03:00
|
|
|
// make sure we don't wait forever on a suspended thread
|
2007-01-10 02:58:59 +03:00
|
|
|
|
2007-01-13 21:37:50 +03:00
|
|
|
status = acquire_sem_etc(exitSem, 1, flags, timeout);
|
2006-03-08 19:41:03 +03:00
|
|
|
|
|
|
|
if (status == B_OK) {
|
|
|
|
// this should never happen as the thread deletes the semaphore on exit
|
2007-10-11 11:46:55 +04:00
|
|
|
panic("could acquire exit_sem for thread %ld\n", id);
|
2006-03-08 19:41:03 +03:00
|
|
|
} else if (status == B_BAD_SEM_ID) {
|
|
|
|
// this is the way the thread normally exits
|
|
|
|
status = B_OK;
|
|
|
|
|
|
|
|
if (_returnCode)
|
|
|
|
*_returnCode = death.status;
|
|
|
|
} else {
|
|
|
|
// We were probably interrupted; we need to remove our death entry now.
|
|
|
|
state = disable_interrupts();
|
|
|
|
GRAB_THREAD_LOCK();
|
|
|
|
|
|
|
|
thread = thread_get_thread_struct_locked(id);
|
|
|
|
if (thread != NULL)
|
|
|
|
list_remove_link(&death);
|
|
|
|
|
|
|
|
RELEASE_THREAD_LOCK();
|
|
|
|
restore_interrupts(state);
|
2007-01-13 21:37:50 +03:00
|
|
|
|
|
|
|
// If the thread is already gone, we need to wait for its exit semaphore
|
|
|
|
// to make sure our death entry stays valid - it won't take long
|
|
|
|
if (thread == NULL)
|
|
|
|
acquire_sem(exitSem);
|
2006-03-08 19:41:03 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
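/*	A minimal sketch of waiting with a timeout (the thread ID and timeout
	are illustrative assumptions):
	\code
	status_t returnCode;
	status_t error = wait_for_thread_etc(worker, B_RELATIVE_TIMEOUT,
		1000000LL, &returnCode);
		// waits at most one second for "worker" to exit
	if (error == B_OK)
		dprintf("thread exited with %ld\n", (long)returnCode);
	\endcode
	Note that the function resumes the target first, so a suspended thread
	cannot make the caller wait forever.
*/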
|
|
|
|
|
|
|
|
|
2007-10-02 23:47:31 +04:00
|
|
|
status_t
|
|
|
|
select_thread(int32 id, struct select_info* info, bool kernel)
|
|
|
|
{
|
|
|
|
InterruptsSpinLocker locker(thread_spinlock);
|
|
|
|
|
|
|
|
// get thread
|
|
|
|
struct thread* thread = thread_get_thread_struct_locked(id);
|
|
|
|
if (thread == NULL)
|
|
|
|
return B_BAD_THREAD_ID;
|
|
|
|
|
|
|
|
// We support only B_EVENT_INVALID at the moment.
|
|
|
|
info->selected_events &= B_EVENT_INVALID;
|
|
|
|
|
|
|
|
// add info to list
|
|
|
|
if (info->selected_events != 0) {
|
|
|
|
info->next = thread->select_infos;
|
|
|
|
thread->select_infos = info;
|
|
|
|
|
|
|
|
// we need a sync reference
|
|
|
|
atomic_add(&info->sync->ref_count, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
return B_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
status_t
|
|
|
|
deselect_thread(int32 id, struct select_info* info, bool kernel)
|
|
|
|
{
|
|
|
|
InterruptsSpinLocker locker(thread_spinlock);
|
|
|
|
|
|
|
|
// get thread
|
|
|
|
struct thread* thread = thread_get_thread_struct_locked(id);
|
|
|
|
if (thread == NULL)
|
|
|
|
return B_BAD_THREAD_ID;
|
|
|
|
|
|
|
|
// remove info from list
|
|
|
|
select_info** infoLocation = &thread->select_infos;
|
|
|
|
while (*infoLocation != NULL && *infoLocation != info)
|
|
|
|
infoLocation = &(*infoLocation)->next;
|
|
|
|
|
|
|
|
if (*infoLocation != info)
|
|
|
|
return B_OK;
|
|
|
|
|
|
|
|
*infoLocation = info->next;
|
|
|
|
|
|
|
|
locker.Unlock();
|
|
|
|
|
|
|
|
// surrender sync reference
|
|
|
|
put_select_sync(info->sync);
|
|
|
|
|
|
|
|
return B_OK;
|
|
|
|
}
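/*	A hedged sketch of how the two hooks above pair up (the caller and its
	select_info are assumptions):
	\code
	if (select_thread(id, info, kernel) == B_OK) {
		// ... wait for events; B_EVENT_INVALID fires on thread death ...
		deselect_thread(id, info, kernel);
	}
	\endcode
	select_thread() takes an extra reference to info->sync; it is
	surrendered again either here in deselect_thread() or when the dying
	thread notifies its select infos in thread_exit().
*/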
|
|
|
|
|
|
|
|
|
2004-12-01 00:11:37 +03:00
|
|
|
int32
|
|
|
|
thread_max_threads(void)
|
|
|
|
{
|
|
|
|
return sMaxThreads;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int32
|
|
|
|
thread_used_threads(void)
|
|
|
|
{
|
|
|
|
return sUsedThreads;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-03-17 18:29:45 +03:00
|
|
|
status_t
|
2004-10-26 02:30:44 +04:00
|
|
|
thread_init(kernel_args *args)
|
2003-01-27 06:17:36 +03:00
|
|
|
{
|
2005-07-01 04:31:38 +04:00
|
|
|
uint32 i;
|
2003-01-27 06:17:36 +03:00
|
|
|
|
2004-03-16 06:14:48 +03:00
|
|
|
TRACE(("thread_init: entry\n"));
|
2003-01-27 06:17:36 +03:00
|
|
|
|
|
|
|
// create the thread hash table
|
2005-03-09 04:59:44 +03:00
|
|
|
sThreadHash = hash_init(15, offsetof(struct thread, all_next),
|
2003-01-27 06:17:36 +03:00
|
|
|
&thread_struct_compare, &thread_struct_hash);
|
|
|
|
|
|
|
|
// zero out the dead thread queue
|
|
|
|
memset(&dead_q, 0, sizeof(dead_q));
|
|
|
|
|
|
|
|
// allocate snooze sem
|
2004-03-17 18:29:45 +03:00
|
|
|
sSnoozeSem = create_sem(0, "snooze sem");
|
|
|
|
if (sSnoozeSem < 0) {
|
2003-01-27 06:17:36 +03:00
|
|
|
panic("error creating snooze sem\n");
|
2004-03-17 18:29:45 +03:00
|
|
|
return sSnoozeSem;
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
|
|
|
|
2004-10-26 02:30:44 +04:00
|
|
|
if (arch_thread_init(args) < B_OK)
|
|
|
|
panic("arch_thread_init() failed!\n");
|
|
|
|
|
2005-03-08 21:16:16 +03:00
|
|
|
// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
|
2005-03-09 04:59:44 +03:00
|
|
|
sNextThreadID = B_SYSTEM_TEAM + 1;
|
2005-03-08 21:16:16 +03:00
|
|
|
|
2003-01-27 06:17:36 +03:00
|
|
|
// create an idle thread for each cpu
|
2004-11-08 14:12:05 +03:00
|
|
|
|
2004-10-26 02:30:44 +04:00
|
|
|
for (i = 0; i < args->num_cpus; i++) {
|
2005-07-01 04:31:38 +04:00
|
|
|
struct thread *thread;
|
2004-11-08 14:12:05 +03:00
|
|
|
area_info info;
|
2005-07-01 04:31:38 +04:00
|
|
|
char name[64];
|
2003-01-27 06:17:36 +03:00
|
|
|
|
2005-07-01 04:31:38 +04:00
|
|
|
sprintf(name, "idle thread %lu", i + 1);
|
2007-02-19 03:11:24 +03:00
|
|
|
thread = create_thread_struct(&sIdleThreads[i], name,
|
2007-03-01 11:09:28 +03:00
|
|
|
i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
|
2005-07-01 04:31:38 +04:00
|
|
|
if (thread == NULL) {
|
2003-01-27 06:17:36 +03:00
|
|
|
panic("error creating idle thread struct\n");
|
2005-07-01 04:31:38 +04:00
|
|
|
return B_NO_MEMORY;
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
2005-07-01 04:31:38 +04:00
|
|
|
|
|
|
|
thread->team = team_get_kernel_team();
|
2006-01-31 04:58:49 +03:00
|
|
|
thread->priority = thread->next_priority = B_IDLE_PRIORITY;
|
2005-07-01 04:31:38 +04:00
|
|
|
thread->state = B_THREAD_RUNNING;
|
|
|
|
thread->next_state = B_THREAD_READY;
|
|
|
|
sprintf(name, "idle thread %lu kstack", i + 1);
|
|
|
|
thread->kernel_stack_area = find_area(name);
|
2006-02-01 23:03:55 +03:00
|
|
|
thread->entry = NULL;
|
2005-07-01 04:31:38 +04:00
|
|
|
|
|
|
|
if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
|
2004-11-08 16:52:11 +03:00
|
|
|
panic("error finding idle kstack area\n");
|
2003-01-27 06:17:36 +03:00
|
|
|
|
2005-07-01 04:31:38 +04:00
|
|
|
thread->kernel_stack_base = (addr_t)info.address;
|
|
|
|
|
|
|
|
hash_insert(sThreadHash, thread);
|
|
|
|
insert_thread_into_team(thread->team, thread);
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
2004-12-01 00:11:37 +03:00
|
|
|
sUsedThreads = args->num_cpus;
|
2003-01-27 06:17:36 +03:00
|
|
|
|
|
|
|
// create a set of death stacks
|
2004-11-08 14:12:05 +03:00
|
|
|
|
2004-03-17 18:29:45 +03:00
|
|
|
sNumDeathStacks = smp_get_num_cpus();
|
2004-11-08 14:12:05 +03:00
|
|
|
if (sNumDeathStacks > 8 * sizeof(sDeathStackBitmap)) {
|
|
|
|
// clamp values for really beefy machines
|
|
|
|
sNumDeathStacks = 8 * sizeof(sDeathStackBitmap);
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
2004-03-17 18:29:45 +03:00
|
|
|
sDeathStackBitmap = 0;
|
2007-06-01 02:05:57 +04:00
|
|
|
sDeathStacks = (struct death_stack *)malloc(sNumDeathStacks
|
|
|
|
* sizeof(struct death_stack));
|
2004-03-17 18:29:45 +03:00
|
|
|
if (sDeathStacks == NULL) {
|
2003-01-27 06:17:36 +03:00
|
|
|
panic("error creating death stacks\n");
|
2004-03-17 18:29:45 +03:00
|
|
|
return B_NO_MEMORY;
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
|
|
|
{
|
|
|
|
char temp[64];
|
|
|
|
|
2004-03-17 18:29:45 +03:00
|
|
|
for (i = 0; i < sNumDeathStacks; i++) {
|
2005-07-01 04:31:38 +04:00
|
|
|
sprintf(temp, "death stack %lu", i);
|
2007-06-01 02:05:57 +04:00
|
|
|
sDeathStacks[i].area = create_area(temp,
|
|
|
|
(void **)&sDeathStacks[i].address, B_ANY_KERNEL_ADDRESS,
|
|
|
|
KERNEL_STACK_SIZE, B_FULL_LOCK,
|
2004-11-18 21:15:39 +03:00
|
|
|
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
|
2004-03-17 18:29:45 +03:00
|
|
|
if (sDeathStacks[i].area < 0) {
|
2003-01-27 06:17:36 +03:00
|
|
|
panic("error creating death stacks\n");
|
2004-03-17 18:29:45 +03:00
|
|
|
return sDeathStacks[i].area;
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
2004-03-17 18:29:45 +03:00
|
|
|
sDeathStacks[i].in_use = false;
|
2003-01-27 06:17:36 +03:00
|
|
|
}
|
|
|
|
}
|
2005-03-09 04:59:44 +03:00
|
|
|
sDeathStackSem = create_sem(sNumDeathStacks, "death stack availability");
|
2003-01-27 06:17:36 +03:00
|
|
|
|
|
|
|
// set up some debugger commands
|
2008-01-26 21:45:35 +03:00
|
|
|
add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
|
2008-01-27 01:48:05 +03:00
|
|
|
"[ <team> ]\n"
|
|
|
|
"Prints a list of all existing threads, or, if a team ID is given,\n"
|
|
|
|
"all threads of the specified team.\n"
|
|
|
|
" <team> - The ID of the team whose threads shall be listed.\n", 0);
|
2008-01-26 21:45:35 +03:00
|
|
|
add_debugger_command_etc("ready", &dump_thread_list,
|
|
|
|
"List all ready threads",
|
|
|
|
"\n"
|
|
|
|
"Prints a list of all threads in ready state.\n", 0);
|
|
|
|
add_debugger_command_etc("running", &dump_thread_list,
|
|
|
|
"List all running threads",
|
|
|
|
"\n"
|
|
|
|
"Prints a list of all threads in running state.\n", 0);
|
|
|
|
add_debugger_command_etc("waiting", &dump_thread_list,
|
|
|
|
"List all waiting threads (optionally for a specific semaphore)",
|
|
|
|
"[ <sem> ]\n"
|
|
|
|
"Prints a list of all threads in waiting state. If a semaphore is\n"
|
|
|
|
"specified, only the threads waiting on that semaphore are listed.\n"
|
|
|
|
" <sem> - ID of the semaphore.\n", 0);
|
|
|
|
add_debugger_command_etc("realtime", &dump_thread_list,
|
|
|
|
"List all realtime threads",
|
|
|
|
"\n"
|
|
|
|
"Prints a list of all threads with realtime priority.\n", 0);
|
|
|
|
add_debugger_command_etc("thread", &dump_thread_info,
|
|
|
|
"Dump info about a particular thread",
|
|
|
|
"[ <id> | <address> | <name> ]\n"
|
|
|
|
"Prints information about the specified thread. If no argument is\n"
|
|
|
|
"given the current thread is selected.\n"
|
|
|
|
" <id> - The ID of the thread.\n"
|
|
|
|
" <address> - The address of the thread structure.\n"
|
|
|
|
" <name> - The thread's name.\n", 0);
|
|
|
|
add_debugger_command_etc("unreal", &make_thread_unreal,
|
|
|
|
"Set realtime priority threads to normal priority",
|
|
|
|
"[ <id> ]\n"
|
|
|
|
"Sets the priority of all realtime threads or, if given, the one\n"
|
|
|
|
"with the specified ID to \"normal\" priority.\n"
|
|
|
|
" <id> - The ID of the thread.\n", 0);
|
|
|
|
add_debugger_command_etc("suspend", &make_thread_suspended,
|
|
|
|
"Suspend a thread",
|
|
|
|
"[ <id> ]\n"
|
|
|
|
"Suspends the thread with the given ID. If no ID argument is given\n"
|
|
|
|
"the current thread is selected.\n"
|
|
|
|
" <id> - The ID of the thread.\n", 0);
|
|
|
|
add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
|
|
|
|
"<id>\n"
|
|
|
|
"Resumes the specified thread, if it is currently suspended.\n"
|
|
|
|
" <id> - The ID of the thread.\n", 0);
|
|
|
|
add_debugger_command_etc("drop", &drop_into_debugger,
|
|
|
|
"Drop a thread into the userland debugger",
|
|
|
|
"<id>\n"
|
|
|
|
"Drops the specified (userland) thread into the userland debugger\n"
|
|
|
|
"after leaving the kernel debugger.\n"
|
|
|
|
" <id> - The ID of the thread.\n", 0);
|
|
|
|
add_debugger_command_etc("priority", &set_thread_prio,
|
|
|
|
"Set a thread's priority",
|
|
|
|
"<priority> [ <id> ]\n"
|
|
|
|
"Sets the priority of the thread with the specified ID to the given\n"
|
|
|
|
"priority. If no thread ID is given, the current thread is selected.\n"
|
|
|
|
" <priority> - The thread's new priority (0 - 120)\n"
|
|
|
|
" <id> - The ID of the thread.\n", 0);
|
2003-01-27 06:17:36 +03:00
|
|
|
|
2004-03-17 18:29:45 +03:00
|
|
|
return B_OK;
|
2003-01-27 06:17:36 +03:00
|
|
|
}
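
/*	A minimal sketch of the handler shape that add_debugger_command_etc()
	expects: an int (*)(int argc, char **argv) that prints via kprintf() and
	returns 0. The command below is hypothetical and not registered above;
	the real handlers (dump_thread_list() etc.) are defined earlier in this
	file.

	static int
	dump_example(int argc, char **argv)
	{
		if (argc > 2) {
			kprintf("usage: %s [ <id> ]\n", argv[0]);
			return 0;
		}

		kprintf("example command, %d argument(s)\n", argc - 1);
		return 0;
	}

	// registered like the commands above:
	// add_debugger_command_etc("example", &dump_example, "Example command",
	//     "[ <id> ]\nPrints a demo line.\n", 0);
*/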


status_t
thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
{
	// set up the cpu pointer in the not yet initialized per-cpu idle thread
	// so that get_current_cpu() and friends will work, which is crucial for
	// a lot of low level routines
	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
	return B_OK;
}


// #pragma mark - public kernel API


void
exit_thread(status_t returnValue)
{
	struct thread *thread = thread_get_current_thread();

	thread->exit.status = returnValue;
	thread->exit.reason = THREAD_RETURN_EXIT;

	// if called from a kernel thread, we don't deliver the signal,
	// we just exit directly to keep the user space behaviour of
	// this function
	if (thread->team != team_get_kernel_team())
		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
	else
		thread_exit();
}


status_t
kill_thread(thread_id id)
{
	if (id <= 0)
		return B_BAD_VALUE;

	return send_signal(id, SIGKILLTHR);
}


status_t
send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
{
	return send_data_etc(thread, code, buffer, bufferSize, 0);
}


int32
receive_data(thread_id *sender, void *buffer, size_t bufferSize)
{
	return receive_data_etc(sender, buffer, bufferSize, 0);
}


bool
has_data(thread_id thread)
{
	int32 count;

	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
			&count) != B_OK)
		return false;

	return count != 0;
}
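
/*	Sketch of the send_data()/receive_data() mailbox in use (illustrative
	kernel-side code, not called anywhere in this file; the message layout is
	hypothetical). A receiver blocks in receive_data() until a sender
	delivers one message; has_data() lets it poll first.

	static void
	consume_one_message(void)
	{
		thread_id sender;
		char message[64];

		if (!has_data(find_thread(NULL)))
			return;

		int32 code = receive_data(&sender, message, sizeof(message));
		dprintf("got code %ld from thread %ld\n", code, sender);
	}

	// a sender would do:
	// send_data(receiverThread, 0x42, "hello", 6);
*/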


status_t
_get_thread_info(thread_id id, thread_info *info, size_t size)
{
	status_t status = B_OK;
	struct thread *thread;
	cpu_status state;

	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = thread_get_thread_struct_locked(id);
	if (thread == NULL) {
		status = B_BAD_VALUE;
		goto err;
	}

	fill_thread_info(thread, info, size);

err:
	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return status;
}


status_t
_get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
	size_t size)
{
	status_t status = B_BAD_VALUE;
	struct thread *thread = NULL;
	cpu_status state;
	int slot;
	thread_id lastThreadID;

	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
		return B_BAD_VALUE;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();
	else if (!team_is_valid(team))
		return B_BAD_VALUE;

	slot = *_cookie;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	lastThreadID = peek_next_thread_id();
	if (slot >= lastThreadID)
		goto err;

	while (slot < lastThreadID
		&& (!(thread = thread_get_thread_struct_locked(slot))
			|| thread->team->id != team))
		slot++;

	if (thread != NULL && thread->team->id == team) {
		fill_thread_info(thread, info, size);

		*_cookie = slot + 1;
		status = B_OK;
	}

err:
	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return status;
}
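
/*	Sketch of the cookie protocol: _get_next_thread_info() scans forward from
	*_cookie and writes back slot + 1, so starting with cookie 0 and calling
	until failure visits every thread of a team exactly once (illustrative
	only).

	static void
	count_team_threads(team_id team)
	{
		thread_info info;
		int32 cookie = 0;
		int32 count = 0;

		while (_get_next_thread_info(team, &cookie, &info,
				sizeof(thread_info)) == B_OK)
			count++;

		dprintf("team %ld has %ld threads\n", team, count);
	}
*/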


thread_id
find_thread(const char *name)
{
	struct hash_iterator iterator;
	struct thread *thread;
	cpu_status state;

	if (name == NULL)
		return thread_get_current_thread_id();

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	// ToDo: this might not be in the same order as find_thread() in BeOS
	// which could be theoretically problematic.
	// ToDo: scanning the whole list with the thread lock held isn't exactly
	// cheap either - although this function is probably used very rarely.

	hash_open(sThreadHash, &iterator);
	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
			!= NULL) {
		// Search through hash
		if (thread->name != NULL && !strcmp(thread->name, name)) {
			thread_id id = thread->id;

			RELEASE_THREAD_LOCK();
			restore_interrupts(state);
			return id;
		}
	}

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return B_NAME_NOT_FOUND;
}
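
/*	Usage sketch: find_thread(NULL) short-circuits to the calling thread's ID
	without taking the thread lock; only a lookup by name pays for the hash
	scan (illustrative only; the thread name is hypothetical).

	thread_id us = find_thread(NULL);
		// always succeeds
	thread_id worker = find_thread("example worker");
		// B_NAME_NOT_FOUND if no thread has that name
*/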


status_t
rename_thread(thread_id id, const char *name)
{
	struct thread *thread = thread_get_current_thread();
	status_t status = B_BAD_THREAD_ID;
	cpu_status state;

	if (name == NULL)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	if (thread->id != id)
		thread = thread_get_thread_struct_locked(id);

	if (thread != NULL) {
		if (thread->team == thread_get_current_thread()->team) {
			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
			status = B_OK;
		} else
			status = B_NOT_ALLOWED;
	}

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return status;
}


status_t
set_thread_priority(thread_id id, int32 priority)
{
	struct thread *thread;
	int32 oldPriority;

	// make sure the passed in priority is within bounds
	if (priority > B_MAX_PRIORITY)
		priority = B_MAX_PRIORITY;
	if (priority < B_MIN_PRIORITY)
		priority = B_MIN_PRIORITY;

	thread = thread_get_current_thread();
	if (thread->id == id) {
		// it's ourself, so we know we aren't in the run queue, and we can
		// manipulate our structure directly
		oldPriority = thread->priority;
			// note that this might not return the correct value if we are
			// preempted here, and another thread changes our priority before
			// the next line is executed
		thread->priority = thread->next_priority = priority;
	} else {
		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread) {
			oldPriority = thread->priority;
			thread->next_priority = priority;
			if (thread->state == B_THREAD_READY
				&& thread->priority != priority) {
				// if the thread is in the run queue, we reinsert it at a new
				// position
				scheduler_remove_from_run_queue(thread);
				thread->priority = priority;
				scheduler_enqueue_in_run_queue(thread);
			} else
				thread->priority = priority;
		} else
			oldPriority = B_BAD_THREAD_ID;

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);
	}

	return oldPriority;
}
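
/*	Usage sketch: set_thread_priority() returns the previous priority, so a
	caller can boost a thread temporarily and restore it afterwards
	(illustrative only; "thread" is a hypothetical worker).

	static void
	boost_temporarily(thread_id thread)
	{
		status_t oldPriority = set_thread_priority(thread,
			B_URGENT_DISPLAY_PRIORITY);
		if (oldPriority < B_OK)
			return;	// B_BAD_THREAD_ID

		// ... time-critical phase ...

		set_thread_priority(thread, oldPriority);
	}
*/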


status_t
snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
{
	status_t status;

	if (timebase != B_SYSTEM_TIMEBASE)
		return B_BAD_VALUE;

	status = acquire_sem_etc(sSnoozeSem, 1, flags, timeout);
	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
		return B_OK;

	return status;
}


/*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
status_t
snooze(bigtime_t timeout)
{
	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
}


/*!	snooze_until() for internal kernel use only; doesn't interrupt on
	signals.
*/
status_t
snooze_until(bigtime_t timeout, int timebase)
{
	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
}
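
/*	Usage sketch: because snooze_until() takes an absolute wake-up time, a
	loop can tick at a fixed rate without accumulating drift (illustrative
	only; the interval and iteration count are arbitrary).

	static void
	tick_loop(void)
	{
		bigtime_t nextWakeup = system_time();

		for (int32 i = 0; i < 10; i++) {
			nextWakeup += 100000;	// 100 ms
			snooze_until(nextWakeup, B_SYSTEM_TIMEBASE);
			// ... periodic work ...
		}
	}
*/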


status_t
wait_for_thread(thread_id thread, status_t *_returnCode)
{
	return wait_for_thread_etc(thread, 0, 0, _returnCode);
}


status_t
suspend_thread(thread_id id)
{
	if (id <= 0)
		return B_BAD_VALUE;

	return send_signal(id, SIGSTOP);
}


status_t
resume_thread(thread_id id)
{
	if (id <= 0)
		return B_BAD_VALUE;

	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
		// This retains compatibility to BeOS which documents the
		// combination of suspend_thread() and resume_thread() to
		// interrupt threads waiting on semaphores.
}
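
/*	Usage sketch of the BeOS-compatible side effect noted above: SIGCONT sent
	via resume_thread() knocks a target out of an interruptible semaphore
	wait, which then returns B_INTERRUPTED (illustrative only; "sWaitSem" is
	hypothetical).

	static status_t
	interruptible_wait(void)
	{
		status_t status = acquire_sem_etc(sWaitSem, 1, B_CAN_INTERRUPT, 0);
			// another thread calling resume_thread() on us makes this
			// return B_INTERRUPTED instead of blocking forever
		return status;
	}
*/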


thread_id
spawn_kernel_thread(thread_func function, const char *name, int32 priority,
	void *arg)
{
	return create_thread(name, team_get_kernel_team()->id,
		(thread_entry_func)function, arg, NULL, priority, true, -1);
}
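
/*	Sketch of the usual kernel-thread lifecycle built from the calls above:
	spawn_kernel_thread() creates the thread suspended, resume_thread()
	starts it, wait_for_thread() reaps its exit status (illustrative only;
	the worker function is hypothetical).

	static int32
	example_worker(void *data)
	{
		dprintf("worker running with data %p\n", data);
		return B_OK;
	}

	static status_t
	run_worker(void)
	{
		thread_id thread = spawn_kernel_thread(example_worker,
			"example worker", B_NORMAL_PRIORITY, NULL);
		if (thread < B_OK)
			return thread;

		resume_thread(thread);

		status_t returnCode;
		status_t status = wait_for_thread(thread, &returnCode);
		return status == B_OK ? returnCode : status;
	}
*/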


/* TODO: split this; have kernel version set kerrno */
int
getrlimit(int resource, struct rlimit * rlp)
{
	if (!rlp)
		return B_BAD_ADDRESS;

	switch (resource) {
		case RLIMIT_NOFILE:
		case RLIMIT_NOVMON:
			return vfs_getrlimit(resource, rlp);

		default:
			return EINVAL;
	}
}


/* TODO: split this; have kernel version set kerrno */
int
setrlimit(int resource, const struct rlimit * rlp)
{
	if (!rlp)
		return B_BAD_ADDRESS;

	switch (resource) {
		case RLIMIT_NOFILE:
		case RLIMIT_NOVMON:
			return vfs_setrlimit(resource, rlp);

		default:
			return EINVAL;
	}
}
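
/*	Usage sketch: only RLIMIT_NOFILE and RLIMIT_NOVMON are handled here, and
	both are forwarded to the VFS. Raising the soft file-descriptor limit up
	to the hard limit would look like this (illustrative only).

	static int
	raise_fd_limit(rlim_t wanted)
	{
		struct rlimit rl;

		if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
			return EINVAL;

		if (wanted > rl.rlim_max)
			wanted = rl.rlim_max;
		rl.rlim_cur = wanted;

		return setrlimit(RLIMIT_NOFILE, &rl);
	}
*/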


// #pragma mark - syscalls


void
_user_exit_thread(status_t returnValue)
{
	exit_thread(returnValue);
}


status_t
_user_kill_thread(thread_id thread)
{
	return kill_thread(thread);
}


status_t
_user_resume_thread(thread_id thread)
{
	return resume_thread(thread);
}


status_t
_user_suspend_thread(thread_id thread)
{
	return suspend_thread(thread);
}


status_t
_user_rename_thread(thread_id thread, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userName)
		|| userName == NULL
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return rename_thread(thread, name);
}


int32
_user_set_thread_priority(thread_id thread, int32 newPriority)
{
	return set_thread_priority(thread, newPriority);
}


thread_id
_user_spawn_thread(int32 (*entry)(thread_func, void *), const char *userName,
	int32 priority, void *data1, void *data2)
{
	char name[B_OS_NAME_LENGTH];
	thread_id threadID;

	if (!IS_USER_ADDRESS(entry) || entry == NULL
		|| (userName != NULL && (!IS_USER_ADDRESS(userName)
			|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)))
		return B_BAD_ADDRESS;

	threadID = create_thread(userName != NULL ? name : "user thread",
		thread_get_current_thread()->team->id, entry,
		data1, data2, priority, false, -1);

	user_debug_thread_created(threadID);

	return threadID;
}


status_t
_user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
{
	// NOTE: We only know the system timebase at the moment.
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);

	return syscall_restart_handle_timeout_post(error, timeout);
}
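
/*	The helpers above implement the timeout half of the syscall restart
	protocol: roughly, the _pre call recovers a previously stored timeout
	when this invocation is a restart, and the _post call stores the timeout
	and marks the thread for a restart when the wait was interrupted by a
	signal. A sketch of how another (hypothetical) timeout-based syscall
	would wrap its blocking call the same way:

	static status_t
	_user_example_wait(bigtime_t timeout, uint32 flags)
	{
		syscall_restart_handle_timeout_pre(flags, timeout);

		status_t error = example_wait_etc(timeout, flags | B_CAN_INTERRUPT);
			// example_wait_etc() is hypothetical

		return syscall_restart_handle_timeout_post(error, timeout);
	}
*/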


void
_user_thread_yield(void)
{
	thread_yield(true);
}


status_t
_user_get_thread_info(thread_id id, thread_info *userInfo)
{
	thread_info info;
	status_t status;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_thread_info(id, &info, sizeof(thread_info));

	if (status >= B_OK
		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_thread_info(team_id team, int32 *userCookie,
	thread_info *userInfo)
{
	status_t status;
	thread_info info;
	int32 cookie;

	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
	if (status < B_OK)
		return status;

	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


thread_id
_user_find_thread(const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return find_thread(NULL);

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
		return B_BAD_ADDRESS;

	return find_thread(name);
}


status_t
_user_wait_for_thread(thread_id id, status_t *userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);

	if (status == B_OK && userReturnCode != NULL
		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
		return B_BAD_ADDRESS;
	}

	return syscall_restart_handle_post(status);
}


bool
_user_has_data(thread_id thread)
{
	return has_data(thread);
}


status_t
_user_send_data(thread_id thread, int32 code, const void *buffer,
	size_t bufferSize)
{
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	return send_data_etc(thread, code, buffer, bufferSize,
		B_KILL_CAN_INTERRUPT);
		// supports userland buffers
}


status_t
_user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
{
	thread_id sender;
	status_t code;

	if (!IS_USER_ADDRESS(_userSender)
		|| !IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
		// supports userland buffers

	if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
		return B_BAD_ADDRESS;

	return code;
}


// ToDo: the following two functions don't belong here


int
_user_getrlimit(int resource, struct rlimit *urlp)
{
	struct rlimit rl;
	int ret;

	if (urlp == NULL)
		return EINVAL;

	if (!IS_USER_ADDRESS(urlp))
		return B_BAD_ADDRESS;

	ret = getrlimit(resource, &rl);

	if (ret == 0) {
		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
		if (ret < 0)
			return ret;

		return 0;
	}

	return ret;
}


int
_user_setrlimit(int resource, const struct rlimit *userResourceLimit)
{
	struct rlimit resourceLimit;

	if (userResourceLimit == NULL)
		return EINVAL;

	if (!IS_USER_ADDRESS(userResourceLimit)
		|| user_memcpy(&resourceLimit, userResourceLimit,
			sizeof(struct rlimit)) < B_OK)
		return B_BAD_ADDRESS;

	return setrlimit(resource, &resourceLimit);
}