/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Team functions */


#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>
#include <FindDirectory.h>

#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
#include <fs/KPath.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <port.h>
#include <posix/realtime_sem.h>
#include <posix/xsi_semaphore.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <team.h>
#include <tls.h>
#include <tracing.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
#include <vfs.h>
#include <vm.h>
#include <vm_address_space.h>
#include <util/AutoLock.h>
#include <util/khash.h>


//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct team_key {
    team_id id;
};
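

// Describes the program a new team shall run: the program path, the flattened
// argument/environment block (see copy_user_process_args() below), and an
// error port/token for reporting problems during the load back to the creator.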
struct team_arg {
    char *path;
    char **flat_args;
    size_t flat_args_size;
    uint32 arg_count;
    uint32 env_count;
    port_id error_port;
    uint32 error_token;
};
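

// Snapshot of the parent thread's user stack, TLS slot, and signal state that
// fork() hands to the child so its main thread can be set up to match;
// arch_info carries the architecture-specific register state.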
struct fork_arg {
    area_id user_stack_area;
    addr_t user_stack_base;
    size_t user_stack_size;
    addr_t user_local_storage;
    sigset_t sig_block_mask;
    struct sigaction sig_action[32];
    addr_t signal_stack_base;
    size_t signal_stack_size;
    bool signal_stack_enabled;

    struct user_thread* user_thread;

    struct arch_fork_arg arch_info;
};


class TeamNotificationService : public DefaultNotificationService {
public:
    TeamNotificationService();

    void Notify(uint32 eventCode, struct team* team);
};


static hash_table *sTeamHash = NULL;
static hash_table *sGroupHash = NULL;
static struct team *sKernelTeam = NULL;

// some arbitrarily chosen limits - should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;
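
// the "team lock": it guards the team hash table and hierarchy as well as the
// process group/session structures - the "must have team lock held" notes on
// the functions below refer to this spinlock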
spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;


// #pragma mark - Tracing


#if TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
public:
    TeamForked(thread_id forkedThread)
        :
        fForkedThread(forkedThread)
    {
        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        out.Print("team forked, new thread %ld", fForkedThread);
    }

private:
    thread_id fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
public:
    ExecTeam(const char* path, int32 argCount, const char* const* args,
        int32 envCount, const char* const* env)
        :
        fArgCount(argCount),
        fArgs(NULL)
    {
        fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
            false);

        // determine the buffer size we need for the args
        size_t argBufferSize = 0;
        for (int32 i = 0; i < argCount; i++)
            argBufferSize += strlen(args[i]) + 1;

        // allocate a buffer
        fArgs = (char*)alloc_tracing_buffer(argBufferSize);
        if (fArgs) {
            char* buffer = fArgs;
            for (int32 i = 0; i < argCount; i++) {
                size_t argSize = strlen(args[i]) + 1;
                memcpy(buffer, args[i], argSize);
                buffer += argSize;
            }
        }

        // ignore env for the time being
        (void)envCount;
        (void)env;

        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        // fPath is a string copied into the tracing buffer, so print it with
        // %s (%p would only show the pointer value)
        out.Print("team exec, \"%s\", args:", fPath);

        if (fArgs != NULL) {
            char* args = fArgs;
            for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
                out.Print(" \"%s\"", args);
                args += strlen(args) + 1;
            }
        } else
            out.Print(" <too long>");
    }

private:
    char* fPath;
    int32 fArgCount;
    char* fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
    switch (state) {
        case JOB_CONTROL_STATE_NONE:
            return "none";
        case JOB_CONTROL_STATE_STOPPED:
            return "stopped";
        case JOB_CONTROL_STATE_CONTINUED:
            return "continued";
        case JOB_CONTROL_STATE_DEAD:
            return "dead";
        default:
            return "invalid";
    }
}


class SetJobControlState : public AbstractTraceEntry {
public:
    SetJobControlState(team_id team, job_control_state newState, int signal)
        :
        fTeam(team),
        fNewState(newState),
        fSignal(signal)
    {
        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        out.Print("team set job control state, team %ld, "
            "new state: %s, signal: %d",
            fTeam, job_control_state_name(fNewState), fSignal);
    }

private:
    team_id fTeam;
    job_control_state fNewState;
    int fSignal;
};


class WaitForChild : public AbstractTraceEntry {
public:
    WaitForChild(pid_t child, uint32 flags)
        :
        fChild(child),
        fFlags(flags)
    {
        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        out.Print("team wait for child, child: %ld, "
            "flags: 0x%lx", fChild, fFlags);
    }

private:
    pid_t fChild;
    uint32 fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
public:
    WaitForChildDone(const job_control_entry& entry)
        :
        fState(entry.state),
        fTeam(entry.thread),
        fStatus(entry.status),
        fReason(entry.reason),
        fSignal(entry.signal)
    {
        Initialized();
    }

    WaitForChildDone(status_t error)
        :
        fTeam(error)
    {
        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        if (fTeam >= 0) {
            out.Print("team wait for child done, team: %ld, "
                "state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
                fTeam, job_control_state_name(fState), fStatus, fReason,
                fSignal);
        } else {
            out.Print("team wait for child failed, error: "
                "0x%lx, ", fTeam);
        }
    }

private:
    job_control_state fState;
    team_id fTeam;
    status_t fStatus;
    uint16 fReason;
    uint16 fSignal;
};

} // namespace TeamTracing

#	define T(x) new(std::nothrow) TeamTracing::x;
#else
#	define T(x) ;
#endif


// #pragma mark - TeamNotificationService


TeamNotificationService::TeamNotificationService()
    : DefaultNotificationService("teams")
{
}
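

/*! Broadcasts a team event to the listeners of the "teams" notification
    service. The message is assembled in a small buffer on the stack, so no
    heap allocation is needed to send it.
*/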
void
TeamNotificationService::Notify(uint32 eventCode, struct team* team)
{
    char eventBuffer[128];
    KMessage event;
    event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
    event.AddInt32("event", eventCode);
    event.AddInt32("team", team->id);
    event.AddPointer("teamStruct", team);

    DefaultNotificationService::Notify(event, eventCode);
}


// #pragma mark - Private functions
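

/*! Prints the fields of the given team to the kernel debugger output; used
    by dump_team_info() below.
*/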
static void
_dump_team_info(struct team *team)
{
    kprintf("TEAM: %p\n", team);
    kprintf("id: %ld (%#lx)\n", team->id, team->id);
    kprintf("name: '%s'\n", team->name);
    kprintf("args: '%s'\n", team->args);
    kprintf("next: %p\n", team->next);
    kprintf("parent: %p", team->parent);
    if (team->parent != NULL) {
        kprintf(" (id = %ld)\n", team->parent->id);
    } else
        kprintf("\n");

    kprintf("children: %p\n", team->children);
    kprintf("num_threads: %d\n", team->num_threads);
    kprintf("state: %d\n", team->state);
    kprintf("flags: 0x%lx\n", team->flags);
    kprintf("io_context: %p\n", team->io_context);
    if (team->address_space)
        kprintf("address_space: %p\n", team->address_space);
    kprintf("main_thread: %p\n", team->main_thread);
    kprintf("thread_list: %p\n", team->thread_list);
    kprintf("group_id: %ld\n", team->group_id);
    kprintf("session_id: %ld\n", team->session_id);
}


static int
dump_team_info(int argc, char **argv)
{
    struct hash_iterator iterator;
    struct team *team;
    team_id id = -1;
    bool found = false;

    if (argc < 2) {
        struct thread* thread = thread_get_current_thread();
        if (thread != NULL && thread->team != NULL)
            _dump_team_info(thread->team);
        else
            kprintf("No current team!\n");
        return 0;
    }

    id = strtoul(argv[1], NULL, 0);
    if (IS_KERNEL_ADDRESS(id)) {
        // semi-hack: the argument looks like a team pointer, dump it directly
        _dump_team_info((struct team *)id);
        return 0;
    }

    // walk through the team list, trying to match name or id
    hash_open(sTeamHash, &iterator);
    while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
        if ((team->name && strcmp(argv[1], team->name) == 0)
            || team->id == id) {
            _dump_team_info(team);
            found = true;
            break;
        }
    }
    hash_close(sTeamHash, &iterator, false);

    if (!found)
        kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
    return 0;
}


static int
dump_teams(int argc, char **argv)
{
    struct hash_iterator iterator;
    struct team *team;

    kprintf("team           id  parent      name\n");
    hash_open(sTeamHash, &iterator);

    while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
        kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
    }

    hash_close(sTeamHash, &iterator, false);
    return 0;
}
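

// Hash table callbacks for sTeamHash: teams are hashed and compared by their
// ID, where the key may be either an existing element or a bare team_key.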
static int
team_struct_compare(void *_p, const void *_key)
{
    struct team *p = (struct team*)_p;
    const struct team_key *key = (const struct team_key*)_key;

    if (p->id == key->id)
        return 0;

    return 1;
}


static uint32
team_struct_hash(void *_p, const void *_key, uint32 range)
{
    struct team *p = (struct team*)_p;
    const struct team_key *key = (const struct team_key*)_key;

    if (p != NULL)
        return p->id % range;

    return (uint32)key->id % range;
}


static int
process_group_compare(void *_group, const void *_key)
{
    struct process_group *group = (struct process_group*)_group;
    const struct team_key *key = (const struct team_key*)_key;

    if (group->id == key->id)
        return 0;

    return 1;
}


static uint32
process_group_hash(void *_group, const void *_key, uint32 range)
{
    struct process_group *group = (struct process_group*)_group;
    const struct team_key *key = (const struct team_key*)_key;

    if (group != NULL)
        return group->id % range;

    return (uint32)key->id % range;
}
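

/*! Adds \a team to the head of \a parent's children list; like
    remove_team_from_parent(), this must be called with the team lock held.
*/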
static void
insert_team_into_parent(struct team *parent, struct team *team)
{
    ASSERT(parent != NULL);

    team->siblings_next = parent->children;
    parent->children = team;
    team->parent = parent;
}


/*! Note: must have team lock held */
static void
remove_team_from_parent(struct team *parent, struct team *team)
{
    struct team *child, *last = NULL;

    for (child = parent->children; child != NULL;
            child = child->siblings_next) {
        if (child == team) {
            if (last == NULL)
                parent->children = child->siblings_next;
            else
                last->siblings_next = child->siblings_next;

            team->parent = NULL;
            break;
        }
        last = child;
    }
}


/*! Reparents each of our children to the kernel team.
    Note: must have team lock held
*/
static void
reparent_children(struct team *team)
{
    struct team *child;

    while ((child = team->children) != NULL) {
        // remove the child from the current team and add it to the kernel team
        remove_team_from_parent(team, child);
        insert_team_into_parent(sKernelTeam, child);
    }

    // move job control entries too
    sKernelTeam->stopped_children->entries.MoveFrom(
        &team->stopped_children->entries);
    sKernelTeam->continued_children->entries.MoveFrom(
        &team->continued_children->entries);

    // Note, we don't move the dead children entries. Those will be deleted
    // when the team structure is deleted.
}


static bool
is_session_leader(struct team *team)
{
    return team->session_id == team->id;
}


static bool
is_process_group_leader(struct team *team)
{
    return team->group_id == team->id;
}


static void
deferred_delete_process_group(struct process_group *group)
{
    if (group == NULL)
        return;

    // remove_group_from_session() keeps this pointer around
    // only if the session can be freed as well
    if (group->session) {
        TRACE(("deferred_delete_process_group(): frees session %ld\n",
            group->session->id));
        deferred_free(group->session);
    }

    deferred_free(group);
}


/*! Removes a group from a session, and puts the session object
    back into the session cache, if it's not used anymore.
    You must hold the team lock when calling this function.
*/
static void
remove_group_from_session(struct process_group *group)
{
    struct process_session *session = group->session;

    // the group must be in a session for this function to have any effect
    if (session == NULL)
        return;

    hash_remove(sGroupHash, group);

    // we cannot free the resource here, so we're keeping the group link
    // around - this way it'll be freed by free_process_group()
    if (--session->group_count > 0)
        group->session = NULL;
}


/*! Team lock must be held.
*/
static void
acquire_process_group_ref(pid_t groupID)
{
    process_group* group = team_get_process_group_locked(NULL, groupID);
    if (group == NULL) {
        panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
        return;
    }

    group->refs++;
}


/*! Team lock must be held.
*/
static void
release_process_group_ref(pid_t groupID)
{
    process_group* group = team_get_process_group_locked(NULL, groupID);
    if (group == NULL) {
        panic("release_process_group_ref(): unknown group ID: %ld", groupID);
        return;
    }

    if (group->refs <= 0) {
        panic("release_process_group_ref(%ld): ref count already 0", groupID);
        return;
    }

    if (--group->refs > 0)
        return;

    // group is no longer used

    remove_group_from_session(group);
    deferred_delete_process_group(group);
}


/*! You must hold the team lock when calling this function. */
static void
insert_group_into_session(struct process_session *session,
    struct process_group *group)
{
    if (group == NULL)
        return;

    group->session = session;
    hash_insert(sGroupHash, group);
    session->group_count++;
}


/*! You must hold the team lock when calling this function. */
static void
insert_team_into_group(struct process_group *group, struct team *team)
{
    team->group = group;
    team->group_id = group->id;
    team->session_id = group->session->id;

    team->group_next = group->teams;
    group->teams = team;
    acquire_process_group_ref(group->id);
}


/*! Removes the team from the group.

    \param team the team that'll be removed from its group
*/
static void
remove_team_from_group(struct team *team)
{
    struct process_group *group = team->group;
    struct team *current, *last = NULL;

    // the team must be in a group for this function to have any effect
    if (group == NULL)
        return;

    for (current = group->teams; current != NULL;
            current = current->group_next) {
        if (current == team) {
            if (last == NULL)
                group->teams = current->group_next;
            else
                last->group_next = current->group_next;

            team->group = NULL;
            break;
        }
        last = current;
    }

    team->group = NULL;
    team->group_next = NULL;

    release_process_group_ref(group->id);
}


static struct process_group *
create_process_group(pid_t id)
{
    struct process_group *group
        = (struct process_group *)malloc(sizeof(struct process_group));
    if (group == NULL)
        return NULL;

    group->id = id;
    group->refs = 0;
    group->session = NULL;
    group->teams = NULL;
    group->orphaned = true;
    return group;
}


static struct process_session *
create_process_session(pid_t id)
{
    struct process_session *session
        = (struct process_session *)malloc(sizeof(struct process_session));
    if (session == NULL)
        return NULL;

    session->id = id;
    session->group_count = 0;
    session->controlling_tty = -1;
    session->foreground_group = -1;

    return session;
}


static void
set_team_name(struct team* team, const char* name)
{
    if (const char* lastSlash = strrchr(name, '/'))
        name = lastSlash + 1;

    strlcpy(team->name, name, B_OS_NAME_LENGTH);
}
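

/*! Allocates and minimally initializes a new team structure, including its
    dead/stopped/continued children bookkeeping and its job control entry.
    On any allocation failure the stack-based deleters release everything
    created so far and NULL is returned.
*/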
static struct team *
create_team_struct(const char *name, bool kernel)
{
    struct team *team = (struct team *)malloc(sizeof(struct team));
    if (team == NULL)
        return NULL;
    MemoryDeleter teamDeleter(team);

    team->next = team->siblings_next = team->children = team->parent = NULL;
    team->id = allocate_thread_id();
    set_team_name(team, name);
    team->args[0] = '\0';
    team->num_threads = 0;
    team->io_context = NULL;
    team->address_space = NULL;
    team->realtime_sem_context = NULL;
    team->xsi_sem_context = NULL;
    team->thread_list = NULL;
    team->main_thread = NULL;
    team->loading_info = NULL;
    team->state = TEAM_STATE_BIRTH;
    team->flags = 0;
    team->death_sem = -1;
    team->user_data_area = -1;
    team->user_data = 0;
    team->used_user_data = 0;
    team->user_data_size = 0;
    team->free_user_threads = NULL;

    team->supplementary_groups = NULL;
    team->supplementary_group_count = 0;

    team->dead_threads_kernel_time = 0;
    team->dead_threads_user_time = 0;

    // dead threads
    list_init(&team->dead_threads);
    team->dead_threads_count = 0;

    // dead children
    team->dead_children = new(nothrow) team_dead_children;
    if (team->dead_children == NULL)
        return NULL;
    ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);

    team->dead_children->count = 0;
    team->dead_children->kernel_time = 0;
    team->dead_children->user_time = 0;

    // stopped children
    team->stopped_children = new(nothrow) team_job_control_children;
    if (team->stopped_children == NULL)
        return NULL;
    ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
        team->stopped_children);

    // continued children
    team->continued_children = new(nothrow) team_job_control_children;
    if (team->continued_children == NULL)
        return NULL;
    ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
        team->continued_children);

    // job control entry
    team->job_control_entry = new(nothrow) job_control_entry;
    if (team->job_control_entry == NULL)
        return NULL;
    ObjectDeleter<job_control_entry> jobControlEntryDeleter(
        team->job_control_entry);
    team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
    team->job_control_entry->thread = team->id;
    team->job_control_entry->team = team;

    list_init(&team->sem_list);
    list_init(&team->port_list);
    list_init(&team->image_list);
    list_init(&team->watcher_list);

    clear_team_debug_info(&team->debug_info, true);

    if (arch_team_init_team_struct(team, kernel) < 0)
        return NULL;

    // publish dead/stopped/continued children condition vars
    team->dead_children->condition_variable.Init(team->dead_children,
        "team children");

    // keep all allocated structures
    jobControlEntryDeleter.Detach();
    continuedChildrenDeleter.Detach();
    stoppedChildrenDeleter.Detach();
    deadChildrenDeleter.Detach();
    teamDeleter.Detach();

    return team;
}
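

/*! Counterpart to create_team_struct(): frees the thread death entries,
    pending job control entries, and cached free user_thread slots still
    attached to the team, then the team structure itself.
*/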
static void
delete_team_struct(struct team *team)
{
    while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
            &team->dead_threads)) {
        free(threadDeathEntry);
    }

    while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
        delete entry;

    while (free_user_thread* entry = team->free_user_threads) {
        team->free_user_threads = entry->next;
        free(entry);
    }

    malloc_referenced_release(team->supplementary_groups);

    delete team->job_control_entry;
        // usually already NULL and transferred to the parent
    delete team->continued_children;
    delete team->stopped_children;
    delete team->dead_children;
    free(team);
}
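

/*! Creates the per-team "user area", a small read/write area (four pages to
    start with) from which user_thread structures are handed out to the
    team's threads (see team_allocate_user_thread() usage below).
*/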
static status_t
create_team_user_data(struct team* team)
{
    void* address = (void*)KERNEL_USER_DATA_BASE;
    size_t size = 4 * B_PAGE_SIZE;
    team->user_data_area = create_area_etc(team->id, "user area", &address,
        B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0);
    if (team->user_data_area < 0)
        return team->user_data_area;

    team->user_data = (addr_t)address;
    team->used_user_data = 0;
    team->user_data_size = size;
    team->free_user_threads = NULL;

    return B_OK;
}


static void
delete_team_user_data(struct team* team)
{
    if (team->user_data_area >= 0) {
        vm_delete_area(team->id, team->user_data_area, true);
        team->user_data = 0;
        team->used_user_data = 0;
        team->user_data_size = 0;
        team->user_data_area = -1;
        while (free_user_thread* entry = team->free_user_threads) {
            team->free_user_threads = entry->next;
            free(entry);
        }
    }
}
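

/*! Copies the flattened argument/environment block from userland into kernel
    memory and validates it: the block starts with the NULL-terminated argv
    and env pointer arrays, followed by the strings they point to. All string
    pointers are checked and relocated so that they point into the kernel
    copy.
*/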
static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
    int32 argCount, int32 envCount, char**& _flatArgs)
{
    if (argCount < 0 || envCount < 0)
        return B_BAD_VALUE;

    if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
        return B_TOO_MANY_ARGS;
    if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
        return B_BAD_VALUE;

    if (!IS_USER_ADDRESS(userFlatArgs))
        return B_BAD_ADDRESS;

    // allocate kernel memory
    char** flatArgs = (char**)malloc(flatArgsSize);
    if (flatArgs == NULL)
        return B_NO_MEMORY;

    if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
        free(flatArgs);
        return B_BAD_ADDRESS;
    }

    // check and relocate the array
    status_t error = B_OK;
    const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
        // the strings start right after the two pointer arrays
    const char* stringEnd = (char*)flatArgs + flatArgsSize;
    for (int32 i = 0; i < argCount + envCount + 2; i++) {
        if (i == argCount || i == argCount + envCount + 1) {
            // check array null termination
            if (flatArgs[i] != NULL) {
                error = B_BAD_VALUE;
                break;
            }
        } else {
            // check string
            char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
            size_t maxLen = stringEnd - arg;
            if (arg < stringBase || arg >= stringEnd
                    || strnlen(arg, maxLen) == maxLen) {
                error = B_BAD_VALUE;
                break;
            }

            flatArgs[i] = arg;
        }
    }

    if (error == B_OK)
        _flatArgs = flatArgs;
    else
        free(flatArgs);

    return error;
}


static void
free_team_arg(struct team_arg *teamArg)
{
    if (teamArg != NULL) {
        free(teamArg->flat_args);
        free(teamArg->path);
        free(teamArg);
    }
}


static status_t
create_team_arg(struct team_arg **_teamArg, const char *path, char** flatArgs,
    size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
    uint32 token)
{
    struct team_arg *teamArg = (struct team_arg*)malloc(sizeof(team_arg));
    if (teamArg == NULL)
        return B_NO_MEMORY;

    teamArg->path = strdup(path);
    if (teamArg->path == NULL) {
        free(teamArg);
        return B_NO_MEMORY;
    }

    // copy the args over

    teamArg->flat_args = flatArgs;
    teamArg->flat_args_size = flatArgsSize;
    teamArg->arg_count = argCount;
    teamArg->env_count = envCount;
    teamArg->error_port = port;
    teamArg->error_token = token;

    *_teamArg = teamArg;
    return B_OK;
}
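

/*! Entry point of the main thread of a newly exec()ed team: it runs in the
    new team's context and builds the initial user stack area, which also
    holds the TLS block and the flattened program arguments for the runtime
    loader.
*/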


static int32
team_create_thread_start(void *args)
{
	status_t err;
	struct thread *t;
	struct team *team;
	struct team_arg *teamArgs = (struct team_arg*)args;
	const char *path;
	addr_t entry;
	char ustack_name[128];
	uint32 sizeLeft;
	char **userArgs;
	char **userEnv;
	struct user_space_program_args *programArgs;
	uint32 argCount, envCount, i;

	t = thread_get_current_thread();
	team = t->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));

	// get a user thread for the main thread
	t->user_thread = team_allocate_user_thread(team);

	// create an initial primary stack area

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size                             | usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE      | actual stack
	// TLS_SIZE                         | TLS data
	// sizeof(user_space_program_args)  | argument structure for the runtime
	//                                  | loader
	// flat arguments size              | flat process arguments and environment

	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
	// ToDo: we could reserve the whole USER_STACK_REGION upfront...

	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
		// the exact location at the end of the user stack area

	sprintf(ustack_name, "%s_main_stack", team->name);
	t->user_stack_area = create_area_etc(team->id, ustack_name,
		(void **)&t->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
	if (t->user_stack_area < 0) {
		dprintf("team_create_thread_start: could not create default user stack "
			"region: %s\n", strerror(t->user_stack_area));

		free_team_arg(teamArgs);
		return t->user_stack_area;
	}

	// now that the TLS area is allocated, initialize TLS
	arch_thread_init_tls(t);

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	programArgs = (struct user_space_program_args *)(t->user_stack_base
		+ t->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	if (user_strlcpy(programArgs->program_path, path,
			sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// add args to info member
	team->args[0] = '\0';
	strlcpy(team->args, path, sizeof(team->args));
	for (i = 1; i < argCount; i++) {
		strlcat(team->args, " ", sizeof(team->args));
		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
	}

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK)
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0, &entry);
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	team->state = TEAM_STATE_NORMAL;

	// jump to the entry point in user space
	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
		// only returns in case of error
}
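
// Worked example of the stack area math above, with assumed (not actual)
// constants: USER_MAIN_THREAD_STACK_SIZE = 0x1000000,
// USER_STACK_GUARD_PAGES = 4, B_PAGE_SIZE = 0x1000, TLS_SIZE = 0x800,
// sizeof(user_space_program_args) = 0x400, flat_args_size = 0x300:
//
//	sizeLeft        = PAGE_ALIGN(0x1000000 + 0x4000 + 0x800 + 0x400 + 0x300)
//	                = PAGE_ALIGN(0x1004F00) = 0x1005000
//	user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - 0x1005000
//	user_stack_size = 0x1000000 + 0x4000 = 0x1004000
//	programArgs     = user_stack_base + 0x1004000 + 0x800
//
// The program args structure and the flat arguments thus sit at the very
// top of the area, above the TLS block, matching the layout table above.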


static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	struct team *team;
	const char *threadName;
	thread_id thread;
	status_t status;
	cpu_status state;
	struct team_arg *teamArgs;
	struct team_loading_info loadingInfo;
	io_context* parentIOContext = NULL;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
		path, flatArgs, argCount));

	team = create_team_struct(path, false);
	if (team == NULL)
		return B_NO_MEMORY;

	if (flags & B_WAIT_TILL_LOADED) {
		loadingInfo.thread = thread_get_current_thread();
		loadingInfo.result = B_ERROR;
		loadingInfo.done = false;
		team->loading_info = &loadingInfo;
	}

	InterruptsSpinLocker teamLocker(gTeamSpinlock);

	// get the parent team
	struct team* parent;

	if (parentID == B_CURRENT_TEAM)
		parent = thread_get_current_thread()->team;
	else
		parent = team_get_team_struct_locked(parentID);

	if (parent == NULL) {
		teamLocker.Unlock();
		status = B_BAD_TEAM_ID;
		goto err0;
	}

	// inherit the parent's user/group
	inherit_parent_user_and_group_locked(team, parent);

	hash_insert(sTeamHash, team);
	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);
	sUsedTeams++;

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	teamLocker.Unlock();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, errorPort, errorToken);

	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// cut the path from the main thread name
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// Create a kernel thread, but under the context of the new team.
	// The new thread will take over ownership of teamArgs.
	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
	if (thread < 0) {
		status = thread;
		goto err5;
	}

	// wait for the loader of the new team to finish its work
	if (flags & B_WAIT_TILL_LOADED) {
		struct thread *mainThread;

		state = disable_interrupts();
		GRAB_THREAD_LOCK();

		mainThread = thread_get_thread_struct_locked(thread);
		if (mainThread) {
			// resume the team's main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(mainThread);

			// Now suspend ourselves until loading is finished.
			// We will be woken either by the thread, when it finished or
			// aborted loading, or when the team is going to die (e.g. is
			// killed). In either case the one setting `loadingInfo.done' is
			// responsible for removing the info from the team structure.
			while (!loadingInfo.done) {
				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
				scheduler_reschedule();
			}
		} else {
			// Impressive! Someone managed to kill the thread in this short
			// time.
		}

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(team->id);

	return thread;

err5:
	sNotificationService.Notify(TEAM_REMOVED, team);
	delete_team_user_data(team);
err4:
	vm_put_address_space(team->address_space);
err3:
	vfs_put_io_context(team->io_context);
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	// remove the team structure from the team hash table and delete the team structure
	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);
	hash_remove(sTeamHash, team);

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

err0:
	delete_team_struct(team);

	return status;
}
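
// The other half of the B_WAIT_TILL_LOADED handshake (sketch, inferred from
// the wait loop above; the actual wake-up site lives elsewhere in the
// kernel): whoever finishes or aborts loading is assumed to publish the
// result, detach the info from the team, and unsuspend the waiting thread:
//
//	loadingInfo->result = result;
//	loadingInfo->done = true;
//	team->loading_info = NULL;
//	if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
//		scheduler_enqueue_in_run_queue(loadingInfo->thread);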


/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will take over ownership
	of the arguments provided.
	This function may only be called from user space.
*/
static status_t
exec_team(const char *path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	struct team *team = thread_get_current_thread()->team;
	struct team_arg *teamArgs;
	const char *threadName;
	status_t status = B_OK;
	cpu_status state;
	struct thread *thread;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
		path, argCount, envCount, team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// ToDo: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	if (team->main_thread != thread_get_current_thread())
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	state = disable_interrupts();
	GRAB_TEAM_LOCK();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	for (thread = team->thread_list; thread; thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID) {
			status = B_NOT_ALLOWED;
			break;
		}
	}

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (status != B_OK)
		return status;

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, -1, 0);

	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// ToDo: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	delete_team_user_data(team);
	vm_delete_areas(team->address_space);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		// TODO: We should better keep the old user area in the first place.
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	set_team_name(team, path);

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	status = team_create_thread_start(teamArgs);
		// this one usually doesn't return...

	// sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that)
	exit_thread(status);

	// we return a status here since the signal that is sent by the
	// call above is not immediately handled
	return B_ERROR;
}
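
// User-space view (illustrative, not part of this file): exec_team() is the
// kernel-side backend reached from the execve() family in libroot. On
// success the image is replaced and the call never returns:
//
//	const char* argv[] = { "/bin/ls", "-l", NULL };
//	const char* envp[] = { "HOME=/boot/home", NULL };
//	execve(argv[0], (char* const*)argv, (char* const*)envp);
//	// only reached on error, e.g. when the team still has other threads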


/*!	This is the first function to be called from the newly created
	main child thread.
	It will fill in everything that's left to do from fork_arg, and
	return from the parent's fork() syscall to the child.
*/
static int32
fork_team_thread_start(void *_args)
{
	struct thread *thread = thread_get_current_thread();
	struct fork_arg *forkArgs = (struct fork_arg *)_args;

	struct arch_fork_arg archArgs = forkArgs->arch_info;
		// we need a local copy of the arch dependent part

	thread->user_stack_area = forkArgs->user_stack_area;
	thread->user_stack_base = forkArgs->user_stack_base;
	thread->user_stack_size = forkArgs->user_stack_size;
	thread->user_local_storage = forkArgs->user_local_storage;
	thread->sig_block_mask = forkArgs->sig_block_mask;
	thread->user_thread = forkArgs->user_thread;
	memcpy(thread->sig_action, forkArgs->sig_action,
		sizeof(forkArgs->sig_action));
	thread->signal_stack_base = forkArgs->signal_stack_base;
	thread->signal_stack_size = forkArgs->signal_stack_size;
	thread->signal_stack_enabled = forkArgs->signal_stack_enabled;

	arch_thread_init_tls(thread);

	free(forkArgs);

	// set frame of the parent thread to this one, too

	arch_restore_fork_frame(&archArgs);
		// This one won't return here

	return 0;
}
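
// Sketch of the fork frame round trip (illustrative): the parent captures
// its syscall register state in fork_team(), the child replays it above, so
// both threads return from fork() -- the parent with the child's thread ID,
// the child with 0 (the child's return value is assumed to be patched into
// the stored frame by the arch code):
//
//	// parent, in fork_team():
//	arch_store_fork_frame(&forkArgs->arch_info);
//	// child, in fork_team_thread_start():
//	arch_restore_fork_frame(&archArgs);	// "returns" from fork() as the child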


static thread_id
fork_team(void)
{
	struct thread *parentThread = thread_get_current_thread();
	struct team *parentTeam = parentThread->team, *team;
	struct fork_arg *forkArgs;
	struct area_info info;
	thread_id threadID;
	status_t status;
	int32 cookie;

	TRACE(("fork_team(): team %ld\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// TODO: this is very similar to load_image_internal() - maybe we can do
	// something about it :)

	team = create_team_struct(parentTeam->name, false);
	if (team == NULL)
		return B_NO_MEMORY;

	strlcpy(team->args, parentTeam->args, sizeof(team->args));

	InterruptsSpinLocker teamLocker(gTeamSpinlock);

	// Inherit the parent's user/group.
	inherit_parent_user_and_group_locked(team, parentTeam);

	hash_insert(sTeamHash, team);
	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);
	sUsedTeams++;

	teamLocker.Unlock();

	// inherit some team debug flags
	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
		& B_TEAM_DEBUG_INHERITED_FLAGS;

	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// duplicate the realtime sem context
	if (parentTeam->realtime_sem_context) {
		team->realtime_sem_context = clone_realtime_sem_context(
			parentTeam->realtime_sem_context);
		if (team->realtime_sem_context == NULL) {
			status = B_NO_MEMORY;
			goto err25;
		}
	}

	// create an address space for this team
	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
	// ToDo: all stacks of other threads than the current one could be left out

	forkArgs->user_thread = NULL;

	cookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
		if (info.area == parentTeam->user_data_area) {
			// don't clone the user area; just create a new one
			status = create_team_user_data(team);
			if (status != B_OK)
				break;

			forkArgs->user_thread = team_allocate_user_thread(team);
		} else {
			void *address;
			area_id area = vm_copy_area(team->address_space->id, info.name,
				&address, B_CLONE_ADDRESS, info.protection, info.area);
			if (area < B_OK) {
				status = area;
				break;
			}

			if (info.area == parentThread->user_stack_area)
				forkArgs->user_stack_area = area;
		}
	}

	if (status < B_OK)
		goto err4;

	if (forkArgs->user_thread == NULL) {
#if KDEBUG
		panic("user data area not found, parent area is %ld",
			parentTeam->user_data_area);
#endif
		status = B_ERROR;
		goto err4;
	}

	forkArgs->user_stack_base = parentThread->user_stack_base;
	forkArgs->user_stack_size = parentThread->user_stack_size;
	forkArgs->user_local_storage = parentThread->user_local_storage;
	forkArgs->sig_block_mask = parentThread->sig_block_mask;
	memcpy(forkArgs->sig_action, parentThread->sig_action,
		sizeof(forkArgs->sig_action));
	forkArgs->signal_stack_base = parentThread->signal_stack_base;
	forkArgs->signal_stack_size = parentThread->signal_stack_size;
	forkArgs->signal_stack_enabled = parentThread->signal_stack_enabled;

	arch_store_fork_frame(&forkArgs->arch_info);

	// copy image list
	image_info imageInfo;
	cookie = 0;
	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
		if (image < 0)
			goto err5;
	}

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// create a kernel thread under the context of the new team
	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
		parentThread->name, parentThread->priority, forkArgs,
		team->id, team->id);
	if (threadID < 0) {
		status = threadID;
		goto err5;
	}

	// notify the debugger
	user_debug_team_created(team->id);

	T(TeamForked(threadID));

	resume_thread(threadID);
	return threadID;

err5:
	sNotificationService.Notify(TEAM_REMOVED, team);
	remove_images(team);
err4:
	vm_delete_address_space(team->address_space);
err3:
	delete_realtime_sem_context(team->realtime_sem_context);
err25:
	vfs_put_io_context(team->io_context);
err2:
	free(forkArgs);
err1:
	// remove the team structure from the team hash table and delete the team structure
	teamLocker.Lock();

	remove_team_from_group(team);
	remove_team_from_parent(parentTeam, team);
	hash_remove(sTeamHash, team);

	teamLocker.Unlock();

	delete_team_struct(team);

	return status;
}
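
// Note on the area copy loop in fork_team() (sketch): vm_copy_area() maps
// the parent's pages into the child copy-on-write, which keeps fork cheap --
// a page is only duplicated once either side writes to it. Hypothetical
// timeline:
//
//	parent:	buffer[0] = 'a';	// before fork: page shared
//	child = fork();
//	child:	buffer[0] = 'b';	// write fault, page duplicated for child
//	parent:	buffer[0];			// still 'a'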


/*!	Returns whether the specified \a parent team has any children belonging
	to the specified \a group.
	Must be called with the team lock held.
*/
static bool
has_children_in_group(struct team *parent, pid_t groupID)
{
	struct team *team;

	struct process_group *group = team_get_process_group_locked(
		parent->group->session, groupID);
	if (group == NULL)
		return false;

	for (team = group->teams; team; team = team->group_next) {
		if (team->parent == parent)
			return true;
	}

	return false;
}


static job_control_entry*
get_job_control_entry(team_job_control_children* children, pid_t id)
{
	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
		job_control_entry* entry = it.Next();) {
		if (id > 0) {
			if (entry->thread == id)
				return entry;
		} else if (id == -1) {
			return entry;
		} else {
			pid_t processGroup
				= (entry->team ? entry->team->group_id : entry->group_id);
			if (processGroup == -id)
				return entry;
		}
	}

	return NULL;
}
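
// The `id` parameter follows the waitpid() pid convention (derived from the
// branches above):
//
//	id > 0   -> match the entry whose (main) thread ID equals id
//	id == -1 -> match any entry
//	id < -1  -> match any entry whose process group ID equals -id
//
// id == 0 never reaches this function; wait_for_child() translates it to
// -team->group_id beforehand.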


static job_control_entry*
get_job_control_entry(struct team* team, pid_t id, uint32 flags)
{
	job_control_entry* entry = get_job_control_entry(team->dead_children, id);

	if (entry == NULL && (flags & WCONTINUED) != 0)
		entry = get_job_control_entry(team->continued_children, id);

	if (entry == NULL && (flags & WUNTRACED) != 0)
		entry = get_job_control_entry(team->stopped_children, id);

	return entry;
}
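
// Lookup order example (illustrative): for waitpid(-1, &status, WUNTRACED)
// this overload first reports a dead child if one is queued; only when none
// is dead does it fall through to the stopped children. Continued children
// are considered only if the caller passed WCONTINUED as well.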


job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}


job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		InterruptsSpinLocker locker(gTeamSpinlock);
		release_process_group_ref(group_id);
	}
}


/*!	Team and thread lock must be held.
*/
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		struct thread* thread = team->main_thread;
		group_id = team->group_id;
		this->thread = thread->id;
		status = thread->exit.status;
		reason = thread->exit.reason;
		signal = thread->exit.signal;
		team = NULL;
		acquire_process_group_ref(group_id);
		has_group_ref = true;
	}
}
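
// Rationale (as far as the surrounding code shows): once `team` is cleared,
// the entry can no longer reach the process group through the team, but the
// pid < -1 matching in get_job_control_entry() still relies on group_id.
// The group reference taken here presumably keeps the group (and thus the
// ID) valid until the entry is reaped; it is released in the destructor
// above.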


job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	has_group_ref = false;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;
	signal = other.signal;

	return *this;
}
/*! This is the kernel backend for waitpid(). It is a bit more powerful when it
|
|
|
|
comes to the reason why a thread has died than waitpid() can be.
|
|
|
|
*/
|
2004-09-15 19:45:37 +04:00
|
|
|
static thread_id
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
wait_for_child(pid_t child, uint32 flags, int32 *_reason,
|
|
|
|
status_t *_returnCode)
|
2004-09-02 05:41:06 +04:00
|
|
|
{
|
2007-09-04 01:35:24 +04:00
|
|
|
struct thread* thread = thread_get_current_thread();
|
|
|
|
struct team* team = thread->team;
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
struct job_control_entry foundEntry;
|
|
|
|
struct job_control_entry* freeDeathEntry = NULL;
|
2004-10-14 18:46:12 +04:00
|
|
|
status_t status = B_OK;
|
2004-09-02 05:41:06 +04:00
|
|
|
|
2004-10-14 18:46:12 +04:00
|
|
|
TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
|
2004-09-15 19:45:37 +04:00
|
|
|
|
2008-01-18 03:01:32 +03:00
|
|
|
T(WaitForChild(child, flags));
|
|
|
|
|
2007-01-29 18:33:31 +03:00
|
|
|
if (child == 0) {
|
|
|
|
// wait for all children in the process group of the calling team
|
|
|
|
child = -team->group_id;
|
2004-09-02 05:41:06 +04:00
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2007-09-04 01:35:24 +04:00
|
|
|
bool ignoreFoundEntries = false;
|
|
|
|
bool ignoreFoundEntriesChecked = false;
|
|
|
|
|
2004-10-14 18:46:12 +04:00
|
|
|
while (true) {
|
2008-08-02 18:55:53 +04:00
|
|
|
InterruptsSpinLocker locker(gTeamSpinlock);
|
2006-08-18 01:40:55 +04:00
|
|
|
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
// check whether any condition holds
|
2007-09-04 01:35:24 +04:00
|
|
|
job_control_entry* entry = get_job_control_entry(team, child, flags);
|
2006-08-18 01:40:55 +04:00
|
|
|
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
// If we don't have an entry yet, check whether there are any children
|
|
|
|
// complying to the process group specification at all.
|
|
|
|
if (entry == NULL) {
|
|
|
|
// No success yet -- check whether there are any children we could
|
|
|
|
// wait for.
|
|
|
|
bool childrenExist = false;
|
|
|
|
if (child == -1) {
|
2007-01-29 18:33:31 +03:00
|
|
|
childrenExist = team->children != NULL;
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
} else if (child < -1) {
|
2007-01-29 18:33:31 +03:00
|
|
|
childrenExist = has_children_in_group(team, -child);
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
} else {
|
|
|
|
if (struct team* childTeam = team_get_team_struct_locked(child))
|
2007-08-30 04:57:12 +04:00
|
|
|
childrenExist = childTeam->parent == team;
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
}
|
2007-01-29 18:33:31 +03:00
|
|
|
|
|
|
|
if (!childrenExist) {
|
|
|
|
// there is no child we could wait for
|
2006-08-18 01:40:55 +04:00
|
|
|
status = ECHILD;
|
2006-08-21 01:27:12 +04:00
|
|
|
} else {
|
2007-01-29 18:33:31 +03:00
|
|
|
// the children we're waiting for are still running
|
2006-08-21 01:27:12 +04:00
|
|
|
status = B_WOULD_BLOCK;
|
2006-08-18 01:40:55 +04:00
|
|
|
}
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
} else {
|
|
|
|
// got something
|
|
|
|
foundEntry = *entry;
|
|
|
|
if (entry->state == JOB_CONTROL_STATE_DEAD) {
|
|
|
|
// The child is dead. Reap its death entry.
|
|
|
|
freeDeathEntry = entry;
|
|
|
|
team->dead_children->entries.Remove(entry);
|
|
|
|
team->dead_children->count--;
|
|
|
|
} else {
|
|
|
|
// The child is well. Reset its job control state.
|
|
|
|
team_set_job_control_state(entry->team,
|
|
|
|
JOB_CONTROL_STATE_NONE, 0, false);
|
|
|
|
}
|
2006-08-18 01:40:55 +04:00
|
|
|
}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children->condition_variable.Add(&deadWaitEntry);

		locker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// when we got here, we have a valid death entry, and
	// already got unregistered from the team or group
	int reason = 0;
	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			reason = foundEntry.reason;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			reason = THREAD_STOPPED;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			reason = THREAD_CONTINUED;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	*_returnCode = foundEntry.status;
	*_reason = (foundEntry.signal << 16) | reason;

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other
	// child status is available.
	if (is_signal_blocked(SIGCHLD)) {
		InterruptsSpinLocker locker(gTeamSpinlock);

		if (get_job_control_entry(team, child, flags) == NULL)
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
	}

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
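

// Illustrative sketch (not part of this file's build): how a userland caller
// exercises the wait_for_child() backend above through waitpid(). The
// WUNTRACED/WCONTINUED flags correspond to the stopped_children/
// continued_children lists, and the status word encodes the "reason" and
// "signal" values set above. Plain POSIX; no kernel internals assumed.
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
report_child_state(pid_t child)
{
	int status;
	pid_t result = waitpid(child, &status, WUNTRACED | WCONTINUED);
	if (result < 0) {
		// with no eligible children this fails with ECHILD, as returned above
		perror("waitpid");
		return;
	}

	if (WIFEXITED(status))
		printf("%d exited with %d\n", (int)result, WEXITSTATUS(status));
	else if (WIFSIGNALED(status))
		printf("%d was killed by signal %d\n", (int)result, WTERMSIG(status));
	else if (WIFSTOPPED(status))
		printf("%d was stopped by signal %d\n", (int)result, WSTOPSIG(status));
	else if (WIFCONTINUED(status))
		printf("%d was continued\n", (int)result);
}
#endif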


/*!	Fills the team_info structure with information from the specified
	team.
	The team lock must be held when called.
*/
static status_t
fill_team_info(struct team *team, team_info *info, size_t size)
{
	if (size != sizeof(team_info))
		return B_BAD_VALUE;

	// ToDo: Set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
	info->thread_count = team->num_threads;
	info->image_count = count_images(team);
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	//info->uid =
	//info->gid =

	strlcpy(info->args, team->args, sizeof(info->args));
	info->argc = 1;

	return B_OK;
}
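

// Illustrative sketch (not part of this file's build): fill_team_info() backs
// the public get_team_info()/get_next_team_info() calls from <OS.h>, so a
// userland client consumes the structure filled above like this.
#if 0
#include <stdio.h>
#include <OS.h>

static void
list_teams(void)
{
	int32 cookie = 0;
	team_info info;
	while (get_next_team_info(&cookie, &info) == B_OK) {
		printf("team %ld: %ld thread(s), args: %s\n", info.team,
			info.thread_count, info.args);
	}
}
#endif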


/*!	Updates the \c orphaned field of a process_group and returns its new
	value.
	Interrupts must be disabled and the team lock be held.
*/
static bool
update_orphaned_process_group(process_group* group, pid_t dyingProcess)
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 6)

	// once orphaned, things won't change (exception: cf. setpgid())
	if (group->orphaned)
		return true;

	struct team* team = group->teams;
	while (team != NULL) {
		struct team* parent = team->parent;
		if (team->id != dyingProcess && parent != NULL
			&& parent->id != dyingProcess
			&& parent->group_id != group->id
			&& parent->session_id == group->session->id) {
			return false;
		}

		team = team->group_next;
	}

	group->orphaned = true;
	return true;
}


/*!	Returns whether the process group contains stopped processes.
	Interrupts must be disabled and the team lock be held.
*/
static bool
process_group_has_stopped_processes(process_group* group)
{
	SpinLocker _(gThreadSpinlock);

	struct team* team = group->teams;
	while (team != NULL) {
		if (team->main_thread->state == B_THREAD_SUSPENDED)
			return true;

		team = team->group_next;
	}

	return false;
}


//	#pragma mark - Private kernel API


status_t
team_init(kernel_args *args)
{
	struct process_session *session;
	struct process_group *group;

	// create the team hash table
	sTeamHash = hash_init(16, offsetof(struct team, next),
		&team_struct_compare, &team_struct_hash);

	sGroupHash = hash_init(16, offsetof(struct process_group, next),
		&process_group_compare, &process_group_hash);

	// create initial session and process groups

	session = create_process_session(1);
	if (session == NULL)
		panic("Could not create initial session.\n");

	group = create_process_group(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");

	insert_group_into_session(session, group);

	// create the kernel team
	sKernelTeam = create_team_struct("kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");
	strcpy(sKernelTeam->args, sKernelTeam->name);
	sKernelTeam->state = TEAM_STATE_NORMAL;

	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	// stick it in the team hash
	hash_insert(sTeamHash, sKernelTeam);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	return B_OK;
}


int32
team_max_teams(void)
{
	return sMaxTeams;
}


int32
team_used_teams(void)
{
	return sUsedTeams;
}


/*!	Iterates through the list of teams. The team spinlock must be held.
*/
struct team*
team_iterate_through_teams(team_iterator_callback callback, void* cookie)
{
	struct hash_iterator iterator;
	hash_open(sTeamHash, &iterator);

	struct team* team;
	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
		if (callback(team, cookie))
			break;
	}

	hash_close(sTeamHash, &iterator, false);

	return team;
}
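

// Illustrative sketch (not part of this file's build): a caller of
// team_iterate_through_teams() above. The callback and cookie names are made
// up for the example; as the loop above shows, returning true stops the
// iteration and makes that team the return value. The team spinlock must be
// held around the call.
#if 0
struct count_cookie {
	int32 count;
};

static bool
count_team_callback(struct team* team, void* _cookie)
{
	struct count_cookie* cookie = (struct count_cookie*)_cookie;
	cookie->count++;
	// false: keep iterating; true would abort and return this team
	return false;
}

static int32
count_all_teams(void)
{
	struct count_cookie cookie = { 0 };
	team_iterate_through_teams(&count_team_callback, &cookie);
	return cookie.count;
}
#endif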


/*!	Fills the provided death entry if it's in the team.
	You need to have the team lock held when calling this function.
*/
job_control_entry*
team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
{
	if (child <= 0)
		return NULL;

	job_control_entry* entry = get_job_control_entry(team->dead_children,
		child);
	if (entry) {
		// remove the entry only, if the caller is the parent of the found team
		if (team_get_current_team_id() == entry->thread) {
			team->dead_children->entries.Remove(entry);
			team->dead_children->count--;
			*_deleteEntry = true;
		} else {
			*_deleteEntry = false;
		}
	}

	return entry;
}


/*!	Quick check to see if we have a valid team ID. */
bool
team_is_valid(team_id id)
{
	struct team *team;
	cpu_status state;

	if (id <= 0)
		return false;

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	team = team_get_team_struct_locked(id);

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	return team != NULL;
}


struct team *
team_get_team_struct_locked(team_id id)
{
	struct team_key key;
	key.id = id;

	return (struct team*)hash_lookup(sTeamHash, &key);
}


/*!	This searches the session of the team for the specified group ID.
	You must hold the team lock when you call this function.
*/
struct process_group *
team_get_process_group_locked(struct process_session *session, pid_t id)
{
	struct process_group *group;
	struct team_key key;
	key.id = id;

	group = (struct process_group *)hash_lookup(sGroupHash, &key);
	if (group != NULL && (session == NULL || session == group->session))
		return group;

	return NULL;
}


void
team_delete_process_group(struct process_group *group)
{
	if (group == NULL)
		return;

	TRACE(("team_delete_process_group(id = %ld)\n", group->id));

	// remove_group_from_session() keeps this pointer around
	// only if the session can be freed as well
	if (group->session) {
		TRACE(("team_delete_process_group(): frees session %ld\n",
			group->session->id));
		free(group->session);
	}

	free(group);
}


void
team_set_controlling_tty(int32 ttyIndex)
{
	struct team* team = thread_get_current_thread()->team;

	InterruptsSpinLocker _(gTeamSpinlock);

	team->group->session->controlling_tty = ttyIndex;
	team->group->session->foreground_group = -1;
}


int32
team_get_controlling_tty()
{
	struct team* team = thread_get_current_thread()->team;

	InterruptsSpinLocker _(gTeamSpinlock);

	return team->group->session->controlling_tty;
}


status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	struct thread* thread = thread_get_current_thread();
	struct team* team = thread->team;

	InterruptsSpinLocker locker(gTeamSpinlock);

	process_session* session = team->group->session;

	// must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check process group -- must belong to our session
	process_group* group = team_get_process_group_locked(session,
		processGroupID);
	if (group == NULL)
		return B_BAD_VALUE;

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
		&& !is_signal_blocked(SIGTTOU)) {
		pid_t groupID = team->group->id;
		locker.Unlock();
		send_signal(-groupID, SIGTTOU);
		return B_INTERRUPTED;
	}

	team->group->session->foreground_group = processGroupID;

	return B_OK;
}
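

// Illustrative sketch (not part of this file's build): the function above is
// what the tty layer presumably uses to implement the POSIX tcsetpgrp() call,
// so a userland shell drives it like this when handing the terminal to a job.
// Plain POSIX; the SIGTTOU behavior implemented above is what protects
// against background jobs grabbing the terminal.
#if 0
#include <unistd.h>

static int
give_terminal_to(pid_t pgid)
{
	// makes the given process group the foreground group of the terminal
	// attached to STDIN; may deliver SIGTTOU as coded above
	return tcsetpgrp(STDIN_FILENO, pgid);
}
#endif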


/*!	Removes the specified team from the global team hash, and from its parent.
	It also moves all of its children up to the parent.
	You must hold the team lock when you call this function.
*/
void
team_remove_team(struct team *team)
{
	struct team *parent = team->parent;

	// remember how long this team lasted
	parent->dead_children->kernel_time += team->dead_threads_kernel_time
		+ team->dead_children->kernel_time;
	parent->dead_children->user_time += team->dead_threads_user_time
		+ team->dead_children->user_time;

	// Also grab the thread spinlock while removing the team from the hash.
	// This makes the following sequence safe: grab teams lock, lookup team,
	// grab threads lock, unlock teams lock,
	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
	// lock another team's IO context.
	GRAB_THREAD_LOCK();
	hash_remove(sTeamHash, team);
	RELEASE_THREAD_LOCK();
	sUsedTeams--;

	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do.
	if (team->session_id == team->id
		&& team->group->session->controlling_tty >= 0) {
		process_session* session = team->group->session;

		session->controlling_tty = -1;

		// send SIGHUP to the foreground
		if (session->foreground_group >= 0) {
			send_signal_etc(-session->foreground_group, SIGHUP,
				SIGNAL_FLAG_TEAMS_LOCKED);
		}

		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
		// stopped processes
		struct team* child = team->children;
		while (child != NULL) {
			process_group* childGroup = child->group;
			if (!childGroup->orphaned
				&& update_orphaned_process_group(childGroup, team->id)
				&& process_group_has_stopped_processes(childGroup)) {
				send_signal_etc(-childGroup->id, SIGHUP,
					SIGNAL_FLAG_TEAMS_LOCKED);
				send_signal_etc(-childGroup->id, SIGCONT,
					SIGNAL_FLAG_TEAMS_LOCKED);
			}

			child = child->siblings_next;
		}
	} else {
		// update "orphaned" flags of all children's process groups
		struct team* child = team->children;
		while (child != NULL) {
			process_group* childGroup = child->group;
			if (!childGroup->orphaned)
				update_orphaned_process_group(childGroup, team->id);

			child = child->siblings_next;
		}

		// update "orphaned" flag of this team's process group
		update_orphaned_process_group(team->group, team->id);
	}

	// reparent each of the team's children
	reparent_children(team);

	// remove us from our process group
	remove_team_from_group(team);

	// remove us from our parent
	remove_team_from_parent(parent, team);
}


void
team_delete_team(struct team *team)
{
	team_id teamID = team->id;
	port_id debuggerPort = -1;
	cpu_status state;

	if (team->num_threads > 0) {
		// there are other threads still in this team,
		// cycle through and signal kill on each of the threads
		// ToDo: this can be optimized. There's got to be a better solution.
		struct thread *temp_thread;
		char death_sem_name[B_OS_NAME_LENGTH];
		sem_id deathSem;
		int32 threadCount;

		sprintf(death_sem_name, "team %ld death sem", teamID);
		deathSem = create_sem(0, death_sem_name);
		if (deathSem < 0)
			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);

		state = disable_interrupts();
		GRAB_TEAM_LOCK();

		team->death_sem = deathSem;
		threadCount = team->num_threads;

		// If the team was being debugged, that will stop with the termination
		// of the nub thread. The team structure has already been removed from
		// the team hash table at this point, so no one can install a debugger
		// anymore. We fetch the debugger's port to send it a message at the
		// bitter end.
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
			debuggerPort = team->debug_info.debugger_port;

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		// We can safely walk the list because of the lock. No new threads can
		// be created because of the TEAM_STATE_DEATH flag on the team.
		temp_thread = team->thread_list;
		while (temp_thread) {
			struct thread *next = temp_thread->team_next;

			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
			temp_thread = next;
		}

		RELEASE_TEAM_LOCK();
		restore_interrupts(state);

		// wait until all threads in team are dead.
		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
		delete_sem(team->death_sem);
	}

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info *loadingInfo = team->loading_info;
		team->loading_info = NULL;

		loadingInfo->result = B_ERROR;
		loadingInfo->done = true;

		GRAB_THREAD_LOCK();

		// wake up the waiting thread
		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(loadingInfo->thread);

		RELEASE_THREAD_LOCK();
	}

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher *watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	vfs_put_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vm_delete_address_space(team->address_space);

	delete_team_struct(team);

	// notify the debugger that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}


struct team *
team_get_kernel_team(void)
{
	return sKernelTeam;
}


team_id
team_get_kernel_team_id(void)
{
	if (!sKernelTeam)
		return 0;

	return sKernelTeam->id;
}


team_id
team_get_current_team_id(void)
{
	return thread_get_current_thread()->team->id;
}


status_t
team_get_address_space(team_id id, vm_address_space **_addressSpace)
{
	cpu_status state;
	struct team *team;
	status_t status;

	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
	if (id == 1) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = vm_get_kernel_address_space();
		return B_OK;
	}

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	team = team_get_team_struct_locked(id);
	if (team != NULL) {
		atomic_add(&team->address_space->ref_count, 1);
		*_addressSpace = team->address_space;
		status = B_OK;
	} else
		status = B_BAD_VALUE;

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	return status;
}
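

// Illustrative sketch (not part of this file's build): a kernel-side user of
// team_get_address_space() above. The function hands out the space with an
// extra reference (see the atomic_add above); the matching release call is
// assumed here to be vm_put_address_space() from the VM interface.
#if 0
static status_t
inspect_team_address_space(team_id id)
{
	vm_address_space* addressSpace;
	status_t status = team_get_address_space(id, &addressSpace);
	if (status != B_OK)
		return status;

	// ... use addressSpace here ...

	// drop the reference acquired above (assumed VM call)
	vm_put_address_space(addressSpace);
	return B_OK;
}
#endif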
|
|
|
|
|
|
|
|
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
/*! Sets the team's job control state.
|
2007-10-04 02:20:30 +04:00
|
|
|
Interrupts must be disabled and the team lock be held.
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
\a threadsLocked indicates whether the thread lock is being held, too.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
team_set_job_control_state(struct team* team, job_control_state newState,
|
|
|
|
int signal, bool threadsLocked)
|
|
|
|
{
|
|
|
|
if (team == NULL || team->job_control_entry == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// don't touch anything, if the state stays the same or the team is already
|
|
|
|
// dead
|
|
|
|
job_control_entry* entry = team->job_control_entry;
|
|
|
|
if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
|
|
|
|
return;
|
|
|
|
|
2008-01-18 03:01:32 +03:00
|
|
|
T(SetJobControlState(team->id, newState, signal));
|
|
|
|
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children->entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children->entries.Remove(entry);
			break;
	}

	entry->state = newState;
	entry->signal = signal;

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = team->parent->dead_children;
			team->parent->dead_children->count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		team->parent->dead_children->condition_variable.NotifyAll(
			threadsLocked);
	}
}
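

// Illustrative sketch (not part of the original source): how the signal
// and death paths might drive the transitions above. The exact signature
// of team_set_job_control_state() is an assumption inferred from the
// parameters (newState, signal, threadsLocked) used in this function.
#if 0
	// main thread was stopped: file the entry in parent->stopped_children
	team_set_job_control_state(team, JOB_CONTROL_STATE_STOPPED, SIGSTOP, true);

	// main thread was continued: move it to parent->continued_children
	team_set_job_control_state(team, JOB_CONTROL_STATE_CONTINUED, SIGCONT,
		true);

	// team died: the entry is detached and goes to parent->dead_children
	team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, false);
#endif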


/*!	Adds a hook to the team that is called as soon as this team goes away.
	This call might get public in the future.
*/
status_t
start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
{
	struct team_watcher *watcher;
	struct team *team;
	cpu_status state;

	if (hook == NULL || teamID < B_OK)
		return B_BAD_VALUE;

	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
	if (watcher == NULL)
		return B_NO_MEMORY;

	watcher->hook = hook;
	watcher->data = data;

	// find team and add watcher

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	team = team_get_team_struct_locked(teamID);
	if (team != NULL)
		list_add_item(&team->watcher_list, watcher);

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (team == NULL) {
		free(watcher);
		return B_BAD_TEAM_ID;
	}

	return B_OK;
}


status_t
stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
{
	struct team_watcher *watcher = NULL;
	struct team *team;
	cpu_status state;

	if (hook == NULL || teamID < B_OK)
		return B_BAD_VALUE;

	// find team and remove watcher (if present)

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	team = team_get_team_struct_locked(teamID);
	if (team != NULL) {
		// search for watcher
		while ((watcher = (struct team_watcher*)list_get_next_item(
				&team->watcher_list, watcher)) != NULL) {
			if (watcher->hook == hook && watcher->data == data) {
				// got it!
				list_remove_item(&team->watcher_list, watcher);
				break;
			}
		}
	}

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (watcher == NULL)
		return B_ENTRY_NOT_FOUND;

	free(watcher);
	return B_OK;
}
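

// Illustrative sketch (not part of the original source): pairing
// start_watching_team() with stop_watching_team(). The hook function and
// track_team() wrapper are hypothetical.
#if 0
static void
team_vanished(team_id id, void* data)
{
	dprintf("watched team %ld is gone (cookie %p)\n", id, data);
}

static void
track_team(team_id id)
{
	if (start_watching_team(id, &team_vanished, NULL) == B_OK) {
		// ... later, when no longer interested:
		stop_watching_team(id, &team_vanished, NULL);
	}
}
#endif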


/*!	The team lock must be held or the team must still be single threaded.
*/
struct user_thread*
team_allocate_user_thread(struct team* team)
{
	if (team->user_data == 0)
		return NULL;

	user_thread* thread = NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		thread = entry->thread;
		team->free_user_threads = entry->next;
		deferred_free(entry);
		return thread;
	} else {
		// enough space left?
		size_t needed = _ALIGN(sizeof(user_thread));
		if (team->user_data_size - team->used_user_data < needed)
			return NULL;
		// TODO: This imposes a per team thread limit! We should resize the
		// area, if necessary. That's problematic at this point, though, since
		// we've got the team lock.

		thread = (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;
	}

	thread->defer_signals = 0;
	thread->pending_signals = 0;
	thread->wait_status = B_OK;

	return thread;
}


/*!	The team lock must not be held. \a thread must be the current thread.
*/
void
team_free_user_thread(struct thread* thread)
{
	user_thread* userThread = thread->user_thread;
	if (userThread == NULL)
		return;

	// create a free list entry
	free_user_thread* entry
		= (free_user_thread*)malloc(sizeof(free_user_thread));
	if (entry == NULL) {
		// we have to leak the user thread :-/
		return;
	}

	InterruptsSpinLocker _(gTeamSpinlock);

	// detach from thread
	SpinLocker threadLocker(gThreadSpinlock);
	thread->user_thread = NULL;
	threadLocker.Unlock();

	entry->thread = userThread;
	entry->next = thread->team->free_user_threads;
	thread->team->free_user_threads = entry;
}
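

// Illustrative sketch (not part of the original source): the lifecycle of
// a user_thread slot. Allocation bump-allocates from the team's user data
// area (or reuses a free-list entry); freeing pushes the slot back onto
// team::free_user_threads for the next thread. The surrounding context and
// locking here are assumptions.
#if 0
	// at thread creation, with the team lock held:
	thread->user_thread = team_allocate_user_thread(team);

	// at thread exit, without the team lock:
	team_free_user_thread(thread);
		// the slot is now reusable by the next team_allocate_user_thread()
#endif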


// #pragma mark - Public kernel API


thread_id
load_image(int32 argCount, const char **args, const char **env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
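

// Illustrative sketch (not part of the original source): typical use of
// load_image() by calling code. The argument and environment arrays are
// hypothetical. With B_WAIT_TILL_LOADED the new team's main thread comes
// back suspended, so it has to be resumed explicitly.
#if 0
	const char* args[] = { "/bin/ls", "-l", NULL };
	const char* env[] = { "HOME=/boot/home", NULL };
	thread_id thread = load_image(2, args, env);
	if (thread >= B_OK) {
		resume_thread(thread);

		status_t returnCode;
		wait_for_thread(thread, &returnCode);
	}
#endif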


thread_id
load_image_etc(int32 argCount, const char* const* args,
	const char* const* env, int32 priority, team_id parentID, uint32 flags)
{
	// we need to flatten the args and environment

	if (args == NULL)
		return B_BAD_VALUE;

	// determine total needed size
	int32 argSize = 0;
	for (int32 i = 0; i < argCount; i++)
		argSize += strlen(args[i]) + 1;

	int32 envCount = 0;
	int32 envSize = 0;
	while (env != NULL && env[envCount] != NULL)
		envSize += strlen(env[envCount++]) + 1;

	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
	if (size > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;

	// allocate space
	char** flatArgs = (char**)malloc(size);
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	char** slot = flatArgs;
	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);

	// copy arguments and environment
	for (int32 i = 0; i < argCount; i++) {
		int32 argSize = strlen(args[i]) + 1;
		memcpy(stringSpace, args[i], argSize);
		*slot++ = stringSpace;
		stringSpace += argSize;
	}

	*slot++ = NULL;

	for (int32 i = 0; i < envCount; i++) {
		int32 envSize = strlen(env[i]) + 1;
		memcpy(stringSpace, env[i], envSize);
		*slot++ = stringSpace;
		stringSpace += envSize;
	}

	*slot++ = NULL;

	// pass the caller's priority and flags rather than hard-coded values
	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
		priority, parentID, flags, -1, 0);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
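

// Illustrative sketch (not part of the original source): the flattened
// buffer layout produced above for args {"ls", "-l"} and env {"HOME=/"}:
//
//   flatArgs[0] -> "ls"      \
//   flatArgs[1] -> "-l"       | pointer array: argCount + envCount + 2
//   flatArgs[2] =  NULL       | entries (each list is NULL-terminated)
//   flatArgs[3] -> "HOME=/"   |
//   flatArgs[4] =  NULL      /
//   "ls\0-l\0HOME=/\0"         string space directly after the pointers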


status_t
wait_for_team(team_id id, status_t *_returnCode)
{
	struct team *team;
	thread_id thread;
	cpu_status state;

	// find main thread and wait for that

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	team = team_get_team_struct_locked(id);
	if (team != NULL && team->main_thread != NULL)
		thread = team->main_thread->id;
	else
		thread = B_BAD_THREAD_ID;

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (thread < 0)
		return thread;

	return wait_for_thread(thread, _returnCode);
}


status_t
kill_team(team_id id)
{
	status_t status = B_OK;
	thread_id threadID = -1;
	struct team *team;
	cpu_status state;

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	team = team_get_team_struct_locked(id);
	if (team != NULL) {
		if (team != sKernelTeam) {
			threadID = team->id;
				// the team ID is the same as the ID of its main thread
		} else
			status = B_NOT_ALLOWED;
	} else
		status = B_BAD_THREAD_ID;

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (status < B_OK)
		return status;

	// just kill the main thread in the team. The cleanup code there will
	// take care of the team
	return kill_thread(threadID);
}


status_t
_get_team_info(team_id id, team_info *info, size_t size)
{
	cpu_status state;
	status_t status = B_OK;
	struct team *team;

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	if (id == B_CURRENT_TEAM)
		team = thread_get_current_thread()->team;
	else
		team = team_get_team_struct_locked(id);

	if (team == NULL) {
		status = B_BAD_TEAM_ID;
		goto err;
	}

	status = fill_team_info(team, info, size);

err:
	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	return status;
}


status_t
_get_next_team_info(int32 *cookie, team_info *info, size_t size)
{
	status_t status = B_BAD_TEAM_ID;
	struct team *team = NULL;
	int32 slot = *cookie;
	team_id lastTeamID;
	cpu_status state;

	if (slot < 1)
		slot = 1;

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	lastTeamID = peek_next_thread_id();
	if (slot >= lastTeamID)
		goto err;

	// get next valid team
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team) {
		status = fill_team_info(team, info, size);
		*cookie = ++slot;
	}

err:
	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	return status;
}
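

// Illustrative sketch (not part of the original source): enumerating all
// teams with the cookie-based iterator above. Starting the cookie at 0 is
// fine, since the function clamps it to the first valid slot.
#if 0
	int32 cookie = 0;
	team_info info;
	while (_get_next_team_info(&cookie, &info, sizeof(info)) == B_OK)
		dprintf("team %ld: %s\n", info.team, info.args);
#endif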


status_t
_get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
{
	bigtime_t kernelTime = 0, userTime = 0;
	status_t status = B_OK;
	struct team *team;
	cpu_status state;

	if (size != sizeof(team_usage_info)
		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	if (id == B_CURRENT_TEAM)
		team = thread_get_current_thread()->team;
	else
		team = team_get_team_struct_locked(id);

	if (team == NULL) {
		status = B_BAD_TEAM_ID;
		goto out;
	}

	switch (who) {
		case B_TEAM_USAGE_SELF:
		{
			struct thread *thread = team->thread_list;

			for (; thread != NULL; thread = thread->team_next) {
				kernelTime += thread->kernel_time;
				userTime += thread->user_time;
			}

			kernelTime += team->dead_threads_kernel_time;
			userTime += team->dead_threads_user_time;
			break;
		}

		case B_TEAM_USAGE_CHILDREN:
		{
			struct team *child = team->children;
			for (; child != NULL; child = child->siblings_next) {
				// sum up the child's threads, not the parent's
				struct thread *thread = child->thread_list;

				for (; thread != NULL; thread = thread->team_next) {
					kernelTime += thread->kernel_time;
					userTime += thread->user_time;
				}

				kernelTime += child->dead_threads_kernel_time;
				userTime += child->dead_threads_user_time;
			}

			kernelTime += team->dead_children->kernel_time;
			userTime += team->dead_children->user_time;
			break;
		}
	}

out:
	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (status == B_OK) {
		info->kernel_time = kernelTime;
		info->user_time = userTime;
	}

	return status;
}
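

// Illustrative sketch (not part of the original source): querying the
// calling team's own CPU usage with the function above.
#if 0
	team_usage_info usage;
	if (_get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage,
			sizeof(usage)) == B_OK) {
		dprintf("kernel: %lld us, user: %lld us\n", usage.kernel_time,
			usage.user_time);
	}
#endif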


pid_t
getpid(void)
{
	return thread_get_current_thread()->team->id;
}


pid_t
getppid(void)
{
	struct team *team = thread_get_current_thread()->team;
	cpu_status state;
	pid_t parent;

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	parent = team->parent->id;

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	return parent;
}


pid_t
getpgid(pid_t process)
{
	struct thread *thread;
	pid_t result = -1;
	cpu_status state;

	if (process == 0)
		process = thread_get_current_thread()->team->id;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = thread_get_thread_struct_locked(process);
	if (thread != NULL)
		result = thread->team->group_id;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return thread != NULL ? result : B_BAD_VALUE;
}


pid_t
getsid(pid_t process)
{
	struct thread *thread;
	pid_t result = -1;
	cpu_status state;

	if (process == 0)
		process = thread_get_current_thread()->team->id;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = thread_get_thread_struct_locked(process);
	if (thread != NULL)
		result = thread->team->session_id;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return thread != NULL ? result : B_BAD_VALUE;
}
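

// Illustrative sketch (not part of the original source): both calls treat
// a process ID of 0 as "the calling team", mirroring POSIX.
#if 0
	pid_t myGroup = getpgid(0);
		// process group of the current team
	pid_t mySession = getsid(0);
		// session of the current team
#endif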


// #pragma mark - User syscalls


status_t
_user_exec(const char *userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}


thread_id
_user_fork(void)
{
	return fork_team();
}


thread_id
_user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason,
	status_t *_userReturnCode)
{
	status_t returnCode;
	int32 reason;
	thread_id deadChild;

	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
		return B_BAD_ADDRESS;

	deadChild = wait_for_child(child, flags, &reason, &returnCode);

	if (deadChild >= B_OK) {
		// copy result data on successful completion
		if ((_userReason != NULL
				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
			|| (_userReturnCode != NULL
				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
					< B_OK)) {
			return B_BAD_ADDRESS;
		}

		return deadChild;
	}

	return syscall_restart_handle_post(deadChild);
}
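

// Illustrative sketch (not part of the original source): the userland
// waitpid() maps onto this syscall; WUNTRACED and WCONTINUED report
// stopped and continued children, as handled by wait_for_child().
#if 0
	int status;
	pid_t child = waitpid(-1, &status, WUNTRACED | WCONTINUED);
	if (child > 0) {
		if (WIFSTOPPED(status))
			printf("child %d stopped by signal %d\n", (int)child,
				WSTOPSIG(status));
		else if (WIFCONTINUED(status))
			printf("child %d continued\n", (int)child);
	}
#endif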


pid_t
_user_process_info(pid_t process, int32 which)
{
	// we only allow returning the parent of the current process
	if (which == PARENT_ID
		&& process != 0 && process != thread_get_current_thread()->team->id)
		return B_BAD_VALUE;

	switch (which) {
		case SESSION_ID:
			return getsid(process);
		case GROUP_ID:
			return getpgid(process);
		case PARENT_ID:
			return getppid();
	}

	return B_BAD_VALUE;
}


pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	struct thread *thread = thread_get_current_thread();
	struct team *currentTeam = thread->team;
	struct team *team;

	if (groupID < 0)
		return B_BAD_VALUE;

	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	if (processID == currentTeam->id) {
		// we set our own group

		// we must not change our process group ID if we're a session leader
		if (is_session_leader(currentTeam))
			return B_NOT_ALLOWED;
	} else {
		// another team is the target of the call -- check it out
		InterruptsSpinLocker _(gTeamSpinlock);

		team = team_get_team_struct_locked(processID);
		if (team == NULL)
			return ESRCH;

		// The team must be a child of the calling team and in the same
		// session. (If that's the case it isn't a session leader either.)
		if (team->parent != currentTeam
			|| team->session_id != currentTeam->session_id) {
			return B_NOT_ALLOWED;
		}

		if (team->group_id == groupID)
			return groupID;

		// The call is also supposed to fail on a child, when the child
		// already has executed exec*() [EACCES].
		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
			return EACCES;
	}

	struct process_group *group = NULL;
	if (groupID == processID) {
		// A new process group might be needed.
		group = create_process_group(groupID);
		if (group == NULL)
			return B_NO_MEMORY;

		// Assume orphaned. We consider the situation of the team's parent
		// below.
		group->orphaned = true;
	}

	status_t status = B_OK;
	struct process_group *freeGroup = NULL;

	InterruptsSpinLocker locker(gTeamSpinlock);

	team = team_get_team_struct_locked(processID);
	if (team != NULL) {
		// check the conditions again -- they might have changed in the
		// meantime
		if (is_session_leader(team)
			|| team->session_id != currentTeam->session_id) {
			status = B_NOT_ALLOWED;
		} else if (team != currentTeam
			&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
			status = EACCES;
		} else if (team->group_id == groupID) {
			// the team is already in the desired process group
			freeGroup = group;
		} else {
			// Check if a process group with the requested ID already exists.
			struct process_group *targetGroup
				= team_get_process_group_locked(team->group->session, groupID);
			if (targetGroup != NULL) {
				// In case of processID == groupID we have to free the
				// allocated group.
				freeGroup = group;
			} else if (processID == groupID) {
				// We created a new process group, let us insert it into the
				// team's session.
				insert_group_into_session(team->group->session, group);
				targetGroup = group;
			}

			if (targetGroup != NULL) {
				// we got a group, let's move the team there
				process_group* oldGroup = team->group;

				remove_team_from_group(team);
				insert_team_into_group(targetGroup, team);

				// Update the "orphaned" flag of all potentially affected
				// groups.

				// the team's old group
				if (oldGroup->teams != NULL) {
					oldGroup->orphaned = false;
					update_orphaned_process_group(oldGroup, -1);
				}

				// the team's new group
				struct team* parent = team->parent;
				targetGroup->orphaned &= parent == NULL
					|| parent->group == targetGroup
					|| team->parent->session_id != team->session_id;

				// children's groups
				struct team* child = team->children;
				while (child != NULL) {
					child->group->orphaned = false;
					update_orphaned_process_group(child->group, -1);

					child = child->siblings_next;
				}
			} else
				status = B_NOT_ALLOWED;
		}
	} else
		status = B_NOT_ALLOWED;

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	if (status == B_OK)
		team->parent->dead_children->condition_variable.NotifyAll(false);

	locker.Unlock();

	if (status != B_OK) {
		// in case of error, the group hasn't been added into the hash
		team_delete_process_group(group);
	}

	team_delete_process_group(freeGroup);

	return status == B_OK ? groupID : status;
}
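

// Illustrative sketch (not part of the original source): classic
// shell-style job control from userland, placing a forked child into its
// own process group via this syscall's POSIX wrapper.
#if 0
	pid_t child = fork();
	if (child == 0) {
		setpgid(0, 0);
			// child: become leader of a new process group
		execl("/bin/ls", "ls", (char*)NULL);
		_exit(1);
	} else if (child > 0) {
		setpgid(child, child);
			// parent: same call, so the group exists whichever runs first
	}
#endif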


pid_t
_user_setsid(void)
{
	struct team *team = thread_get_current_thread()->team;
	struct process_session *session;
	struct process_group *group;
	cpu_status state;
	bool failed = false;

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	group = create_process_group(team->id);
	if (group == NULL)
		return B_NO_MEMORY;

	session = create_process_session(group->id);
	if (session == NULL) {
		team_delete_process_group(group);
		return B_NO_MEMORY;
	}

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	// this may have changed since the check above
	if (!is_process_group_leader(team)) {
		remove_team_from_group(team);

		insert_group_into_session(session, group);
		insert_team_into_group(group, team);
	} else
		failed = true;

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (failed) {
		team_delete_process_group(group);
		free(session);
		return B_NOT_ALLOWED;
	}

	return team->group_id;
}
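

// Illustrative sketch (not part of the original source): the usual
// daemonization sequence built on setsid(). The fork() guarantees the
// child is not a process group leader, so the setsid() here won't fail
// with the "already a group leader" error above.
#if 0
	if (fork() > 0)
		_exit(0);
			// parent exits
	if (setsid() < 0)
		_exit(1);
			// child detaches into a new session and process group
#endif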


status_t
_user_wait_for_team(team_id id, status_t *_userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				< B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return syscall_restart_handle_post(status);
}


thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %ld\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}


void
_user_exit_team(status_t returnValue)
{
	struct thread *thread = thread_get_current_thread();

	thread->exit.status = returnValue;
	thread->exit.reason = THREAD_RETURN_EXIT;

	send_signal(thread->id, SIGKILL);
}


status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}


status_t
_user_get_team_info(team_id id, team_info *userInfo)
{
	status_t status;
	team_info info;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_team_info(id, &info, sizeof(team_info));
	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_get_next_team_info(int32 *userCookie, team_info *userInfo)
{
	status_t status;
	team_info info;
	int32 cookie;

	if (!IS_USER_ADDRESS(userCookie)
		|| !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
	if (status != B_OK)
		return status;

	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}


status_t
_user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo,
	size_t size)
{
	team_usage_info info;
	status_t status;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_team_usage_info(team, who, &info, size);
	if (status != B_OK)
		return status;

	if (user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}