/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Team functions */


#include <team.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>
#include <FindDirectory.h>

#include <extended_system_info_defs.h>

#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
#include <fs/KPath.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <port.h>
#include <posix/realtime_sem.h>
#include <posix/xsi_semaphore.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <tls.h>
#include <tracing.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <util/AutoLock.h>

#include "TeamThreadTables.h"


//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
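
// Editor's note: TRACE takes its argument list in double parentheses --
// e.g. TRACE(("team_create_thread_start: entry thread %ld\n", thread->id))
// -- so the whole variadic dprintf call can compile down to a single ';'
// when TRACE_TEAM is undefined.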


struct team_key {
	team_id id;
};

struct team_arg {
	char *path;
	char **flat_args;
	size_t flat_args_size;
	uint32 arg_count;
	uint32 env_count;
	mode_t umask;
	port_id error_port;
	uint32 error_token;
};


namespace {

class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


struct ProcessGroupHashDefinition {
	typedef pid_t		KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
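
// Editor's note: BOpenHashTable is an intrusive hash table -- the definition
// above supplies the key type, the hash and comparison functions, and the
// embedded link pointer (ProcessGroup::next), so the table itself never
// allocates per-element nodes.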


}	// unnamed namespace


// #pragma mark -


// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

static Team* sKernelTeam = NULL;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;


// #pragma mark - TeamListIterator


TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}


TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}


Team*
TeamListIterator::Next()
{
	// get the next team -- if there is one, get reference for it
	InterruptsSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.NextElement(&fEntry);
	if (team != NULL)
		team->AcquireReference();

	return team;
}
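
// Usage sketch (editor's illustration, not from the original file): the
// iterator entry registered with sTeamHash keeps the position valid even
// while teams are added or removed concurrently; the caller owns a reference
// to every returned team and must release it.
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... inspect *team ...
//		team->ReleaseReference();
//	}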


// #pragma mark - Tracing


#if TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %ld", fForkedThread);
	}

private:
	thread_id			fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
public:
	ExecTeam(const char* path, int32 argCount, const char* const* args,
		int32 envCount, const char* const* env)
		:
		fArgCount(argCount),
		fArgs(NULL)
	{
		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
			false);

		// determine the buffer size we need for the args
		size_t argBufferSize = 0;
		for (int32 i = 0; i < argCount; i++)
			argBufferSize += strlen(args[i]) + 1;

		// allocate a buffer
		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
		if (fArgs) {
			char* buffer = fArgs;
			for (int32 i = 0; i < argCount; i++) {
				size_t argSize = strlen(args[i]) + 1;
				memcpy(buffer, args[i], argSize);
				buffer += argSize;
			}
		}

		// ignore env for the time being
		(void)envCount;
		(void)env;

		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team exec, \"%p\", args:", fPath);

		if (fArgs != NULL) {
			char* args = fArgs;
			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
				out.Print(" \"%s\"", args);
				args += strlen(args) + 1;
			}
		} else
			out.Print(" <too long>");
	}

private:
	char*				fPath;
	int32				fArgCount;
	char*				fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}


class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %ld, "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};


class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %ld, "
			"flags: 0x%lx", fChild, fFlags);
	}

private:
	pid_t				fChild;
	uint32				fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %ld, "
				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"0x%lx, ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};

}	// namespace TeamTracing

#	define T(x) new(std::nothrow) TeamTracing::x;
#else
#	define T(x) ;
#endif


// #pragma mark - TeamNotificationService


TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}


void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}
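
// Editor's note: consumers interested in team events subscribe to this
// "teams" service through the kernel's Notifications framework and receive
// the KMessage built above; the small stack buffer keeps the common case
// allocation-free.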


// #pragma mark - Team


Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);
	dead_threads_count = 0;

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init(&port_list);
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;
}


Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}


/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);

	if (name != NULL)
		team->SetName(name);

	// check initialization
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	// everything went fine
	return teamDeleter.Detach();
}
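
// Editor's note: ObjectDeleter is the kernel's RAII guard -- every early
// return above deletes the partially initialized Team automatically, and
// Detach() relinquishes ownership only on the success path.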


/*!	\brief Returns the team with the given ID.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::Get(team_id id)
{
	if (id == B_CURRENT_TEAM) {
		Team* team = thread_get_current_thread()->team;
		team->AcquireReference();
		return team;
	}

	InterruptsSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.Lookup(id);
	if (team != NULL)
		team->AcquireReference();
	return team;
}
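
// Usage sketch (editor's illustration): the returned reference must always
// be released; a BReference adopting the acquired reference makes that
// automatic on every exit path.
//
//	Team* team = Team::Get(id);
//	if (team != NULL) {
//		BReference<Team> reference(team, true);
//			// 'true': adopt the reference Get() already acquired
//		// ... use team ...
//	}	// reference released when 'reference' goes out of scope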


/*!	\brief Returns the team with the given ID in a locked state.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::GetAndLock(team_id id)
{
	// get the team
	Team* team = Get(id);
	if (team == NULL)
		return NULL;

	// lock it
	team->Lock();

	// only return the team if it isn't already dying
	if (team->state >= TEAM_STATE_SHUTDOWN) {
		team->Unlock();
		team->ReleaseReference();
		return NULL;
	}

	return team;
}


/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
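
// Editor's note: the TryLock()-then-retry dance above is what prevents
// deadlock. Taking the parent's lock outright while holding the child's
// would invert the parent -> child lock order; instead the child's lock is
// dropped, both locks are taken in the correct order, and the loop
// re-validates that the parent is still the same before trusting the result.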


/*!	Unlocks the team and its parent team (if any).
*/
void
Team::UnlockTeamAndParent()
{
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job.
	Unlock();
	LockTeamAndParent(false);
}


/*!	Unlocks the team, its parent team (if any), and the team's process group.
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}


void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}


void
Team::SetName(const char* name)
{
	if (const char* lastSlash = strrchr(name, '/'))
		name = lastSlash + 1;

	strlcpy(fName, name, B_OS_NAME_LENGTH);
}


void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}


void
Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
{
	fArgs[0] = '\0';
	strlcpy(fArgs, path, sizeof(fArgs));
	for (int i = 0; i < otherArgCount; i++) {
		strlcat(fArgs, " ", sizeof(fArgs));
		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
	}
}


void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}


void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}


/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}


/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}


/*!	Deletes all (or all user-defined) user timers of the team.

	Timers belonging to the team's threads are not affected.
	The caller must hold the team's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
		otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}


/*!	If not at the limit yet, increments the team's user-defined timer count.
	\return \c true, if the limit wasn't reached yet, \c false otherwise.
*/
bool
Team::CheckAddUserDefinedTimer()
{
	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
		atomic_add(&fUserDefinedTimerCount, -1);
		return false;
	}

	return true;
}
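
// Editor's note: the limit check above is the lock-free "optimistic
// increment" pattern -- increment first, undo if the previous value was
// already at the limit. Concurrent callers may briefly push the counter
// past MAX_USER_TIMERS_PER_TEAM, but each such caller rolls its own
// increment back, so the number of timers actually admitted never exceeds
// the limit.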


/*!	Subtracts the given count from the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}


void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}


/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold the scheduler lock.

	\param ignoreCurrentRun If \c true and the current thread is one of the
		team's threads, don't add the time since the last time \c last_time
		was updated. Should be used in "thread unscheduled" scheduler
		callbacks, since although the thread is still running at that time,
		its time has already been stopped.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->kernel_time + thread->user_time;

		if (thread->IsRunning()) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}
	}

	return time;
}


/*!	Returns the team's current user CPU time.

	The caller must hold the scheduler lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		if (thread->IsRunning() && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}


// #pragma mark - ProcessGroup


ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %ld\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}


/*static*/ ProcessGroup*
ProcessGroup::Get(pid_t id)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	ProcessGroup* group = sGroupHash.Lookup(id);
	if (group != NULL)
		group->AcquireReference();
	return group;
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}


/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
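
// Editor's note (illustrative): the classic way a group becomes orphaned is
// a shell pipeline whose controlling shell exits -- the members' parent is
// then reassigned outside the session, so by the definition above no member
// has a parent left that could still job-control the group. POSIX requires
// that a newly orphaned group containing stopped processes receive SIGHUP
// followed by SIGCONT, which is what the orphaned-check machinery here
// exists to arrange.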


void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}


void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}


// #pragma mark - ProcessSession


ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(-1),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}


// #pragma mark - KDL functions


static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %ld (%#lx)\n", team->id, team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %ld)\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%lx\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %ld)\n", (void*)team->user_data,
		team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %ld\n", team->group_id);
	kprintf("session_id:       %ld\n", team->session_id);
}


static int
dump_team_info(int argc, char** argv)
{
	team_id id = -1;
	bool found = false;

	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	id = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(id)) {
		// semi-hack
		_dump_team_info((Team*)id);
		return 0;
	}

	// walk through the team list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
			Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == id) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
	return 0;
}


static int
dump_teams(int argc, char** argv)
{
	kprintf("team           id  parent      name\n");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
			Team* team = it.Next();) {
		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}


// #pragma mark - Private functions


/*!	Inserts team \a team into the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be inserted into \a parent's child list.
*/
static void
insert_team_into_parent(Team* parent, Team* team)
{
	ASSERT(parent != NULL);

	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}


/*!	Removes team \a team from the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be removed from \a parent's child list.
*/
static void
remove_team_from_parent(Team* parent, Team* team)
{
	Team* child;
	Team* last = NULL;

	for (child = parent->children; child != NULL;
			child = child->siblings_next) {
		if (child == team) {
			if (last == NULL)
				parent->children = child->siblings_next;
			else
				last->siblings_next = child->siblings_next;

			team->parent = NULL;
			break;
		}
		last = child;
	}
}
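
// Editor's note: a team's children form a singly linked list threaded
// through Team::siblings_next, with new children pushed at the head by
// insert_team_into_parent(); that is why removal needs the predecessor scan
// above instead of an O(1) unlink.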


/*!	Returns whether the given team is a session leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_session_leader(Team* team)
{
	return team->session_id == team->id;
}


/*!	Returns whether the given team is a process group leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_process_group_leader(Team* team)
{
	return team->group_id == team->id;
}


/*!	Inserts the given team into the given process group.
	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock.
*/
static void
insert_team_into_group(ProcessGroup* group, Team* team)
{
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->Session()->id;

	team->group_next = group->teams;
	group->teams = team;
	group->AcquireReference();
}


/*!	Removes the given team from its process group.

	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock. Interrupts must be enabled.

	\param team The team that'll be removed from its process group.
*/
static void
remove_team_from_group(Team* team)
{
	ProcessGroup* group = team->group;
	Team* current;
	Team* last = NULL;

	// the team must be in a process group to let this function have any effect
	if (group == NULL)
		return;

	for (current = group->teams; current != NULL;
			current = current->group_next) {
		if (current == team) {
			if (last == NULL)
				group->teams = current->group_next;
			else
				last->group_next = current->group_next;

			team->group = NULL;
			break;
		}
		last = current;
	}

	team->group = NULL;
	team->group_next = NULL;

	group->ReleaseReference();
}


static status_t
create_team_user_data(Team* team)
{
	void* address;
	size_t size = 4 * B_PAGE_SIZE;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
	virtualRestrictions.address_specification = B_BASE_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area", size,
		B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions,
		&physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = size;
	team->free_user_threads = NULL;

	return B_OK;
}
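
// Editor's note: this "user area" holds, among other things, the per-thread
// user_thread structures the kernel shares with userland; used_user_data and
// free_user_threads implement a simple bump-plus-free-list allocator over it
// (see the cleanup in delete_team_user_data() below).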


static void
delete_team_user_data(Team* team)
{
	if (team->user_data_area >= 0) {
		vm_delete_area(team->id, team->user_data_area, true);
		team->user_data = 0;
		team->used_user_data = 0;
		team->user_data_size = 0;
		team->user_data_area = -1;
		while (free_user_thread* entry = team->free_user_threads) {
			team->free_user_threads = entry->next;
			free(entry);
		}
	}
}


static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, char**& _flatArgs)
{
	if (argCount < 0 || envCount < 0)
		return B_BAD_VALUE;

	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;
	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
		return B_BAD_VALUE;

	if (!IS_USER_ADDRESS(userFlatArgs))
		return B_BAD_ADDRESS;

	// allocate kernel memory
	char** flatArgs = (char**)malloc(flatArgsSize);
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
		free(flatArgs);
		return B_BAD_ADDRESS;
	}

	// check and relocate the array
	status_t error = B_OK;
	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
	const char* stringEnd = (char*)flatArgs + flatArgsSize;
	for (int32 i = 0; i < argCount + envCount + 2; i++) {
		if (i == argCount || i == argCount + envCount + 1) {
			// check array null termination
			if (flatArgs[i] != NULL) {
				error = B_BAD_VALUE;
				break;
			}
		} else {
			// check string
			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
			size_t maxLen = stringEnd - arg;
			if (arg < stringBase || arg >= stringEnd
					|| strnlen(arg, maxLen) == maxLen) {
				error = B_BAD_VALUE;
				break;
			}

			flatArgs[i] = arg;
		}
	}

	if (error == B_OK)
		_flatArgs = flatArgs;
	else
		free(flatArgs);

	return error;
}
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-08-25 03:41:54 +04:00
|
|
|
static void
|
2009-11-27 21:10:03 +03:00
|
|
|
free_team_arg(struct team_arg* teamArg)
|
2006-08-25 03:41:54 +04:00
|
|
|
{
|
2008-06-24 07:37:07 +04:00
|
|
|
if (teamArg != NULL) {
|
|
|
|
free(teamArg->flat_args);
|
|
|
|
free(teamArg->path);
|
|
|
|
free(teamArg);
|
|
|
|
}
|
2004-03-16 05:46:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-08-25 03:41:54 +04:00
|
|
|
static status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
|
2011-01-02 22:12:19 +03:00
|
|
|
size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
|
|
|
|
port_id port, uint32 token)
|
2004-03-16 05:46:28 +03:00
|
|
|
{
|
2009-11-27 21:10:03 +03:00
|
|
|
struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
|
2004-10-07 19:34:17 +04:00
|
|
|
if (teamArg == NULL)
|
2005-10-05 19:32:48 +04:00
|
|
|
return B_NO_MEMORY;
|
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
teamArg->path = strdup(path);
|
|
|
|
if (teamArg->path == NULL) {
|
2008-05-24 20:17:56 +04:00
|
|
|
free(teamArg);
|
2008-06-24 07:37:07 +04:00
|
|
|
return B_NO_MEMORY;
|
2008-05-24 20:17:56 +04:00
|
|
|
}
|
2004-10-07 19:34:17 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
// copy the args over
|
2005-10-05 19:32:48 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
teamArg->flat_args = flatArgs;
|
|
|
|
teamArg->flat_args_size = flatArgsSize;
|
2005-10-05 19:32:48 +04:00
|
|
|
teamArg->arg_count = argCount;
|
2004-10-07 19:34:17 +04:00
|
|
|
teamArg->env_count = envCount;
|
2011-01-02 22:12:19 +03:00
|
|
|
teamArg->umask = umask;
|
2007-07-27 06:32:19 +04:00
|
|
|
teamArg->error_port = port;
|
|
|
|
teamArg->error_token = token;
|
2004-10-07 19:34:17 +04:00
|
|
|
|
2005-10-05 19:32:48 +04:00
|
|
|
*_teamArg = teamArg;
|
|
|
|
return B_OK;
|
2004-10-07 19:34:17 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
static status_t
|
|
|
|
team_create_thread_start_internal(void* args)
|
2002-08-04 03:39:50 +04:00
|
|
|
{
|
2005-09-13 19:44:30 +04:00
|
|
|
status_t err;
|
2011-01-11 00:54:38 +03:00
|
|
|
Thread* thread;
|
|
|
|
Team* team;
|
2009-11-27 21:10:03 +03:00
|
|
|
struct team_arg* teamArgs = (struct team_arg*)args;
|
|
|
|
const char* path;
|
2004-10-20 04:19:38 +04:00
|
|
|
addr_t entry;
|
2009-11-27 21:10:03 +03:00
|
|
|
char** userArgs;
|
|
|
|
char** userEnv;
|
|
|
|
struct user_space_program_args* programArgs;
|
2011-06-12 04:00:23 +04:00
|
|
|
uint32 argCount, envCount;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2009-11-27 21:10:03 +03:00
|
|
|
thread = thread_get_current_thread();
|
|
|
|
team = thread->team;
|
2008-06-24 07:37:07 +04:00
|
|
|
cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2009-11-27 21:10:03 +03:00
|
|
|
TRACE(("team_create_thread_start: entry thread %ld\n", thread->id));
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2004-11-18 21:15:39 +03:00
|
|
|
// Main stack area layout is currently as follows (starting from 0):
|
|
|
|
//
|
2008-06-24 07:37:07 +04:00
|
|
|
// size | usage
|
|
|
|
// ---------------------------------+--------------------------------
|
|
|
|
// USER_MAIN_THREAD_STACK_SIZE | actual stack
|
|
|
|
// TLS_SIZE | TLS data
|
|
|
|
// sizeof(user_space_program_args) | argument structure for the runtime
|
|
|
|
// | loader
|
|
|
|
// flat arguments size | flat process arguments and environment
|
2004-11-18 21:15:39 +03:00
|
|
|
|
2009-11-27 21:10:03 +03:00
|
|
|
// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
|
|
|
|
// the heap
|
|
|
|
// TODO: we could reserve the whole USER_STACK_REGION upfront...
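	// For illustration (made-up numbers, not actual constants): with
	// user_stack_base == 0x70000000, user_stack_size ==
	// USER_MAIN_THREAD_STACK_SIZE, and a hypothetical TLS_SIZE of 0x8000, the
	// user_space_program_args structure would start at
	// 0x70000000 + USER_MAIN_THREAD_STACK_SIZE + 0x8000, with the flat
	// arguments directly after it -- the same computation done for
	// programArgs and userArgs below.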
|
2003-01-07 12:40:59 +03:00
|
|
|
|
2004-10-07 19:34:17 +04:00
|
|
|
argCount = teamArgs->arg_count;
|
|
|
|
envCount = teamArgs->env_count;
|
|
|
|
|
2009-11-27 21:10:03 +03:00
|
|
|
programArgs = (struct user_space_program_args*)(thread->user_stack_base
|
|
|
|
+ thread->user_stack_size + TLS_SIZE);
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
userArgs = (char**)(programArgs + 1);
|
|
|
|
userEnv = userArgs + argCount + 1;
|
|
|
|
path = teamArgs->path;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
if (user_strlcpy(programArgs->program_path, path,
|
2007-07-27 06:32:19 +04:00
|
|
|
sizeof(programArgs->program_path)) < B_OK
|
|
|
|
|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
|
2009-11-27 21:10:03 +03:00
|
|
|
|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
|
2007-07-27 06:32:19 +04:00
|
|
|
|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
|
2009-11-27 21:10:03 +03:00
|
|
|
|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
|
2007-07-27 06:32:19 +04:00
|
|
|
|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
|
|
|
|
sizeof(port_id)) < B_OK
|
|
|
|
|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
|
2008-06-24 07:37:07 +04:00
|
|
|
sizeof(uint32)) < B_OK
|
2011-01-02 22:12:19 +03:00
|
|
|
|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
|
2008-06-24 07:37:07 +04:00
|
|
|
|| user_memcpy(userArgs, teamArgs->flat_args,
|
|
|
|
teamArgs->flat_args_size) < B_OK) {
|
2007-07-27 06:32:19 +04:00
|
|
|
// the team deletion process will clean this mess
|
2010-08-18 15:07:20 +04:00
|
|
|
free_team_arg(teamArgs);
|
2007-07-27 06:32:19 +04:00
|
|
|
return B_BAD_ADDRESS;
|
|
|
|
}
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2004-10-14 22:07:04 +04:00
|
|
|
TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// set team args and update state
|
|
|
|
team->Lock();
|
|
|
|
team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
|
|
|
|
team->state = TEAM_STATE_NORMAL;
|
|
|
|
team->Unlock();
|
2006-05-30 04:21:22 +04:00
|
|
|
|
2004-10-14 22:07:04 +04:00
|
|
|
free_team_arg(teamArgs);
|
2008-05-15 15:55:09 +04:00
|
|
|
// the arguments are already on the user stack, we no longer need
|
|
|
|
// them in this form
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2008-07-09 07:58:38 +04:00
|
|
|
	// NOTE: Normally arch_thread_enter_userspace() never returns, that is,
|
|
|
|
// automatic variables with function scope will never be destroyed.
|
|
|
|
{
|
|
|
|
// find runtime_loader path
|
|
|
|
KPath runtimeLoaderPath;
|
2011-06-16 11:00:06 +04:00
|
|
|
err = find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
|
2008-07-09 07:58:38 +04:00
|
|
|
runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
|
|
|
|
if (err < B_OK) {
|
|
|
|
TRACE(("team_create_thread_start: find_directory() failed: %s\n",
|
2008-07-17 02:55:17 +04:00
|
|
|
strerror(err)));
|
2008-07-09 07:58:38 +04:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
runtimeLoaderPath.UnlockBuffer();
|
|
|
|
err = runtimeLoaderPath.Append("runtime_loader");
|
|
|
|
|
2009-11-27 21:10:03 +03:00
|
|
|
if (err == B_OK) {
|
|
|
|
err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
|
|
|
|
&entry);
|
|
|
|
}
|
2008-05-15 15:55:09 +04:00
|
|
|
}
|
|
|
|
|
2005-03-09 04:43:56 +03:00
|
|
|
if (err < B_OK) {
|
2004-06-10 05:43:16 +04:00
|
|
|
// Luckily, we don't have to clean up the mess we created - that's
|
|
|
|
// done for us by the normal team deletion process
|
2008-05-15 15:55:09 +04:00
|
|
|
TRACE(("team_create_thread_start: elf_load_user_image() failed: "
|
2008-07-17 02:55:17 +04:00
|
|
|
"%s\n", strerror(err)));
|
2004-06-10 05:43:16 +04:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2007-10-11 12:30:18 +04:00
|
|
|
TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// enter userspace -- returns only in case of error
|
|
|
|
return thread_enter_userspace_new_team(thread, (addr_t)entry,
|
|
|
|
programArgs, NULL);
|
|
|
|
}
|
|
|
|
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
static status_t
|
|
|
|
team_create_thread_start(void* args)
|
|
|
|
{
|
|
|
|
team_create_thread_start_internal(args);
|
2011-06-14 17:04:31 +04:00
|
|
|
team_init_exit_info_on_error(thread_get_current_thread()->team);
|
2011-06-12 04:00:23 +04:00
|
|
|
thread_exit();
|
|
|
|
// does not return
|
|
|
|
return B_OK;
|
2002-08-04 03:39:50 +04:00
|
|
|
}
|
|
|
|
|
2002-08-05 09:26:52 +04:00
|
|
|
|
2004-10-14 22:07:04 +04:00
|
|
|
static thread_id
|
2009-03-02 03:26:22 +03:00
|
|
|
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
|
|
|
|
int32 envCount, int32 priority, team_id parentID, uint32 flags,
|
|
|
|
port_id errorPort, uint32 errorToken)
|
2002-08-04 03:39:50 +04:00
|
|
|
{
|
2008-06-24 07:37:07 +04:00
|
|
|
char** flatArgs = _flatArgs;
|
2004-10-14 22:07:04 +04:00
|
|
|
thread_id thread;
|
2005-10-05 19:32:48 +04:00
|
|
|
status_t status;
|
2009-11-27 21:10:03 +03:00
|
|
|
struct team_arg* teamArgs;
|
2005-03-12 18:13:51 +03:00
|
|
|
struct team_loading_info loadingInfo;
|
2009-03-02 03:26:22 +03:00
|
|
|
io_context* parentIOContext = NULL;
|
2011-06-12 04:00:23 +04:00
|
|
|
team_id teamID;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
if (flatArgs == NULL || argCount == 0)
|
2004-10-14 22:07:04 +04:00
|
|
|
return B_BAD_VALUE;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
const char* path = flatArgs[0];
|
|
|
|
|
2009-03-02 03:26:22 +03:00
|
|
|
TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
|
2008-06-24 07:37:07 +04:00
|
|
|
path, flatArgs, argCount));
|
2004-10-14 22:07:04 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// cut the path from the main thread name
|
|
|
|
const char* threadName = strrchr(path, '/');
|
|
|
|
if (threadName != NULL)
|
|
|
|
threadName++;
|
|
|
|
else
|
|
|
|
threadName = path;
|
|
|
|
|
|
|
|
// create the main thread object
|
|
|
|
Thread* mainThread;
|
|
|
|
status = Thread::Create(threadName, mainThread);
|
|
|
|
if (status != B_OK)
|
|
|
|
return status;
|
|
|
|
BReference<Thread> mainThreadReference(mainThread, true);
|
|
|
|
|
|
|
|
// create team object
|
|
|
|
Team* team = Team::Create(mainThread->id, path, false);
|
2003-01-07 12:40:59 +03:00
|
|
|
if (team == NULL)
|
2004-03-16 05:46:28 +03:00
|
|
|
return B_NO_MEMORY;
|
2011-06-12 04:00:23 +04:00
|
|
|
BReference<Team> teamReference(team, true);
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2005-03-12 18:13:51 +03:00
|
|
|
if (flags & B_WAIT_TILL_LOADED) {
|
|
|
|
loadingInfo.thread = thread_get_current_thread();
|
|
|
|
loadingInfo.result = B_ERROR;
|
|
|
|
loadingInfo.done = false;
|
|
|
|
team->loading_info = &loadingInfo;
|
|
|
|
}
|
|
|
|
|
2009-03-02 03:26:22 +03:00
|
|
|
// get the parent team
|
2011-06-12 04:00:23 +04:00
|
|
|
Team* parent = Team::Get(parentID);
|
|
|
|
if (parent == NULL)
|
|
|
|
return B_BAD_TEAM_ID;
|
|
|
|
BReference<Team> parentReference(parent, true);
|
|
|
|
|
|
|
|
parent->LockTeamAndProcessGroup();
|
|
|
|
team->Lock();
|
|
|
|
|
|
|
|
// inherit the parent's user/group
|
|
|
|
inherit_parent_user_and_group(team, parent);
|
2009-03-02 03:26:22 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
InterruptsSpinLocker teamsLocker(sTeamHashLock);
|
2009-03-02 03:26:22 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
sTeamHash.Insert(team);
|
|
|
|
sUsedTeams++;
|
2009-03-02 03:26:22 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamsLocker.Unlock();
|
2004-03-16 05:46:28 +03:00
|
|
|
|
|
|
|
insert_team_into_parent(parent, team);
|
2004-10-14 18:46:12 +04:00
|
|
|
insert_team_into_group(parent->group, team);
|
2004-03-16 05:46:28 +03:00
|
|
|
|
2009-03-02 03:26:22 +03:00
|
|
|
// get a reference to the parent's I/O context -- we need it to create ours
|
|
|
|
parentIOContext = parent->io_context;
|
|
|
|
vfs_get_io_context(parentIOContext);
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->Unlock();
|
|
|
|
parent->UnlockTeamAndProcessGroup();
|
|
|
|
|
|
|
|
// notify team listeners
|
|
|
|
sNotificationService.Notify(TEAM_ADDED, team);
|
2009-03-02 03:26:22 +03:00
|
|
|
|
|
|
|
// check the executable's set-user/group-id permission
|
|
|
|
update_set_id_user_and_group(team, path);
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
|
2011-01-02 22:12:19 +03:00
|
|
|
envCount, (mode_t)-1, errorPort, errorToken);
|
2005-10-05 19:32:48 +04:00
|
|
|
if (status != B_OK)
|
2002-08-04 03:39:50 +04:00
|
|
|
goto err1;
|
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
_flatArgs = NULL;
|
|
|
|
// args are owned by the team_arg structure now
|
|
|
|
|
2002-12-03 17:17:53 +03:00
|
|
|
// create a new io_context for this team
|
2009-11-25 19:16:22 +03:00
|
|
|
team->io_context = vfs_new_io_context(parentIOContext, true);
|
2003-01-07 12:40:59 +03:00
|
|
|
if (!team->io_context) {
|
2005-10-05 19:32:48 +04:00
|
|
|
status = B_NO_MEMORY;
|
2004-10-07 19:34:17 +04:00
|
|
|
goto err2;
|
2002-08-04 03:39:50 +04:00
|
|
|
}
|
|
|
|
|
2009-03-02 03:26:22 +03:00
|
|
|
// We don't need the parent's I/O context any longer.
|
|
|
|
vfs_put_io_context(parentIOContext);
|
|
|
|
parentIOContext = NULL;
|
|
|
|
|
2008-01-13 02:48:52 +03:00
|
|
|
// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
|
|
|
|
vfs_exec_io_context(team->io_context);
|
|
|
|
|
2002-08-04 03:39:50 +04:00
|
|
|
// create an address space for this team
|
2009-12-02 19:12:15 +03:00
|
|
|
status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
|
2005-12-20 16:29:11 +03:00
|
|
|
&team->address_space);
|
2009-11-27 21:10:03 +03:00
|
|
|
if (status != B_OK)
|
2004-10-07 19:34:17 +04:00
|
|
|
goto err3;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2008-05-11 20:25:35 +04:00
|
|
|
// create the user data area
|
|
|
|
status = create_team_user_data(team);
|
|
|
|
if (status != B_OK)
|
|
|
|
goto err4;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// In case we start the main thread, we shouldn't access the team object
|
|
|
|
// afterwards, so cache the team's ID.
|
|
|
|
teamID = team->id;
|
2009-04-12 01:45:25 +04:00
|
|
|
|
2005-10-05 19:32:48 +04:00
|
|
|
// Create a kernel thread, but under the context of the new team
|
2011-06-12 04:00:23 +04:00
|
|
|
// The new thread will take over ownership of teamArgs.
|
2011-06-14 17:39:54 +04:00
|
|
|
{
|
|
|
|
ThreadCreationAttributes threadAttributes(team_create_thread_start,
|
|
|
|
threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
|
|
|
|
threadAttributes.additional_stack_size = sizeof(user_space_program_args)
|
|
|
|
+ teamArgs->flat_args_size;
|
|
|
|
thread = thread_create_thread(threadAttributes, false);
|
|
|
|
if (thread < 0) {
|
|
|
|
status = thread;
|
|
|
|
goto err5;
|
|
|
|
}
|
2002-08-04 03:39:50 +04:00
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// The team has been created successfully, so we keep the reference. Or
|
|
|
|
	// more precisely: it's owned by the team's main thread now.
|
|
|
|
teamReference.Detach();
|
|
|
|
|
2005-03-12 18:13:51 +03:00
|
|
|
// wait for the loader of the new team to finish its work
|
2009-11-27 21:10:03 +03:00
|
|
|
if ((flags & B_WAIT_TILL_LOADED) != 0) {
|
2011-06-12 04:00:23 +04:00
|
|
|
InterruptsSpinLocker schedulerLocker(gSchedulerLock);
|
|
|
|
|
|
|
|
// resume the team's main thread
|
|
|
|
if (mainThread != NULL && mainThread->state == B_THREAD_SUSPENDED)
|
|
|
|
scheduler_enqueue_in_run_queue(mainThread);
|
|
|
|
|
|
|
|
// Now suspend ourselves until loading is finished. We will be woken
|
|
|
|
// either by the thread, when it finished or aborted loading, or when
|
|
|
|
// the team is going to die (e.g. is killed). In either case the one
|
|
|
|
// setting `loadingInfo.done' is responsible for removing the info from
|
|
|
|
// the team structure.
|
|
|
|
while (!loadingInfo.done) {
|
|
|
|
thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
|
|
|
|
scheduler_reschedule();
|
2005-03-12 18:13:51 +03:00
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
schedulerLocker.Unlock();
|
2005-03-12 18:13:51 +03:00
|
|
|
|
|
|
|
if (loadingInfo.result < B_OK)
|
|
|
|
return loadingInfo.result;
|
2008-07-17 02:55:17 +04:00
|
|
|
}
|
2005-03-12 18:13:51 +03:00
|
|
|
|
2005-02-24 19:11:25 +03:00
|
|
|
// notify the debugger
|
2011-06-12 04:00:23 +04:00
|
|
|
user_debug_team_created(teamID);
|
2005-02-24 19:11:25 +03:00
|
|
|
|
2004-10-14 22:07:04 +04:00
|
|
|
return thread;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2008-05-11 20:25:35 +04:00
|
|
|
err5:
|
|
|
|
delete_team_user_data(team);
|
2004-10-07 19:34:17 +04:00
|
|
|
err4:
|
2009-12-02 19:12:15 +03:00
|
|
|
team->address_space->Put();
|
2002-08-04 03:39:50 +04:00
|
|
|
err3:
|
2009-03-02 03:26:22 +03:00
|
|
|
vfs_put_io_context(team->io_context);
|
2002-08-04 03:39:50 +04:00
|
|
|
err2:
|
2004-10-07 19:34:17 +04:00
|
|
|
free_team_arg(teamArgs);
|
2002-08-04 03:39:50 +04:00
|
|
|
err1:
|
2009-03-02 03:26:22 +03:00
|
|
|
if (parentIOContext != NULL)
|
|
|
|
vfs_put_io_context(parentIOContext);
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// Remove the team structure from the process group, the parent team, and
|
|
|
|
// the team hash table and delete the team structure.
|
|
|
|
parent->LockTeamAndProcessGroup();
|
|
|
|
team->Lock();
|
2004-03-16 05:46:28 +03:00
|
|
|
|
2008-03-09 20:56:27 +03:00
|
|
|
remove_team_from_group(team);
|
2009-03-02 03:26:22 +03:00
|
|
|
remove_team_from_parent(team->parent, team);
|
2004-03-16 05:46:28 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->Unlock();
|
|
|
|
parent->UnlockTeamAndProcessGroup();
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamsLocker.Lock();
|
|
|
|
sTeamHash.Remove(team);
|
|
|
|
teamsLocker.Unlock();
|
|
|
|
|
|
|
|
sNotificationService.Notify(TEAM_REMOVED, team);
|
2004-10-07 19:34:17 +04:00
|
|
|
|
2005-10-05 19:32:48 +04:00
|
|
|
return status;
|
2002-08-04 03:39:50 +04:00
|
|
|
}
|
|
|
|
|
2002-08-05 09:26:52 +04:00
|
|
|
|
2007-10-11 12:30:18 +04:00
|
|
|
/*! Almost shuts down the current team and loads a new image into it.
|
|
|
|
	If successful, this function does not return and will take over ownership of
|
|
|
|
the arguments provided.
|
2011-06-12 04:00:23 +04:00
|
|
|
This function may only be called in a userland team (caused by one of the
|
|
|
|
exec*() syscalls).
|
2007-10-11 12:30:18 +04:00
|
|
|
*/
|
2004-10-07 19:34:17 +04:00
|
|
|
static status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
|
2011-01-02 22:12:19 +03:00
|
|
|
int32 argCount, int32 envCount, mode_t umask)
|
2004-10-07 19:34:17 +04:00
|
|
|
{
|
2008-07-09 07:58:38 +04:00
|
|
|
// NOTE: Since this function normally doesn't return, don't use automatic
|
|
|
|
// variables that need destruction in the function scope.
|
2008-06-24 07:37:07 +04:00
|
|
|
char** flatArgs = _flatArgs;
|
2011-01-11 00:54:38 +03:00
|
|
|
Team* team = thread_get_current_thread()->team;
|
2009-11-27 21:10:03 +03:00
|
|
|
struct team_arg* teamArgs;
|
|
|
|
const char* threadName;
|
2005-03-25 21:28:24 +03:00
|
|
|
thread_id nubThreadID = -1;
|
2004-10-07 19:34:17 +04:00
|
|
|
|
2007-10-11 12:30:18 +04:00
|
|
|
TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
|
2008-06-24 07:37:07 +04:00
|
|
|
path, argCount, envCount, team->id));
|
|
|
|
|
|
|
|
T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
|
2004-10-07 19:34:17 +04:00
|
|
|
|
|
|
|
// switching the kernel at run time is probably not a good idea :)
|
|
|
|
if (team == team_get_kernel_team())
|
|
|
|
return B_NOT_ALLOWED;
|
|
|
|
|
|
|
|
// we currently need to be single threaded here
|
2011-06-12 04:00:23 +04:00
|
|
|
// TODO: maybe we should just kill all other threads and
|
2004-10-07 19:34:17 +04:00
|
|
|
// make the current thread the team's main thread?
|
2011-06-12 04:00:23 +04:00
|
|
|
Thread* currentThread = thread_get_current_thread();
|
|
|
|
if (currentThread != team->main_thread)
|
2004-10-07 19:34:17 +04:00
|
|
|
return B_NOT_ALLOWED;
|
|
|
|
|
2005-03-25 21:28:24 +03:00
|
|
|
// The debug nub thread, a pure kernel thread, is allowed to survive.
|
|
|
|
// We iterate through the thread list to make sure that there's no other
|
|
|
|
// thread.
|
2011-06-12 04:00:23 +04:00
|
|
|
TeamLocker teamLocker(team);
|
|
|
|
InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
|
2005-03-25 21:28:24 +03:00
|
|
|
|
|
|
|
if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
|
|
|
|
nubThreadID = team->debug_info.nub_thread;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
debugInfoLocker.Unlock();
|
2005-03-25 21:28:24 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
for (Thread* thread = team->thread_list; thread != NULL;
|
|
|
|
thread = thread->team_next) {
|
|
|
|
if (thread != team->main_thread && thread->id != nubThreadID)
|
|
|
|
return B_NOT_ALLOWED;
|
2005-03-25 21:28:24 +03:00
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->DeleteUserTimers(true);
|
|
|
|
team->ResetSignalsOnExec();
|
2005-03-25 21:28:24 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Unlock();
|
2005-03-25 21:28:24 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
|
|
|
|
argCount, envCount, umask, -1, 0);
|
2005-10-05 19:32:48 +04:00
|
|
|
if (status != B_OK)
|
|
|
|
return status;
|
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
_flatArgs = NULL;
|
|
|
|
// args are owned by the team_arg structure now
|
2004-10-07 19:34:17 +04:00
|
|
|
|
2010-08-18 15:07:20 +04:00
|
|
|
// TODO: remove team resources if there are any left
|
2004-10-12 08:03:52 +04:00
|
|
|
// thread_atkernel_exit() might not be called at all
|
2004-10-07 19:34:17 +04:00
|
|
|
|
2007-08-16 22:01:47 +04:00
|
|
|
thread_reset_for_exec();
|
|
|
|
|
2005-03-25 21:28:24 +03:00
|
|
|
user_debug_prepare_for_exec();
|
|
|
|
|
2008-05-11 20:25:35 +04:00
|
|
|
delete_team_user_data(team);
|
2010-01-16 01:32:51 +03:00
|
|
|
vm_delete_areas(team->address_space, false);
|
2008-08-16 22:20:54 +04:00
|
|
|
xsi_sem_undo(team);
|
2009-10-26 16:34:43 +03:00
|
|
|
delete_owned_ports(team);
|
2009-10-23 06:06:51 +04:00
|
|
|
sem_delete_owned_sems(team);
|
2004-10-07 19:34:17 +04:00
|
|
|
remove_images(team);
|
|
|
|
vfs_exec_io_context(team->io_context);
|
2008-05-06 07:39:36 +04:00
|
|
|
delete_realtime_sem_context(team->realtime_sem_context);
|
|
|
|
team->realtime_sem_context = NULL;
|
2004-10-07 19:34:17 +04:00
|
|
|
|
2008-05-11 20:25:35 +04:00
|
|
|
status = create_team_user_data(team);
|
|
|
|
if (status != B_OK) {
|
|
|
|
// creating the user data failed -- we're toast
|
|
|
|
// TODO: We should better keep the old user area in the first place.
|
2010-08-18 15:07:20 +04:00
|
|
|
free_team_arg(teamArgs);
|
2008-05-11 20:25:35 +04:00
|
|
|
exit_thread(status);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2005-03-25 21:28:24 +03:00
|
|
|
user_debug_finish_after_exec();
|
|
|
|
|
2004-11-21 01:31:28 +03:00
|
|
|
// rename the team
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->Lock();
|
|
|
|
team->SetName(path);
|
|
|
|
team->Unlock();
|
2004-11-21 01:31:28 +03:00
|
|
|
|
|
|
|
// cut the path from the team name and rename the main thread, too
|
2005-10-05 19:32:48 +04:00
|
|
|
threadName = strrchr(path, '/');
|
2004-11-21 01:05:50 +03:00
|
|
|
if (threadName != NULL)
|
|
|
|
threadName++;
|
|
|
|
else
|
2005-10-05 19:32:48 +04:00
|
|
|
threadName = path;
|
2004-11-21 01:05:50 +03:00
|
|
|
rename_thread(thread_get_current_thread_id(), threadName);
|
|
|
|
|
2008-02-21 03:46:22 +03:00
|
|
|
atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
|
|
|
|
|
2008-03-11 20:12:02 +03:00
|
|
|
// Update user/group according to the executable's set-user/group-id
|
|
|
|
// permission.
|
|
|
|
update_set_id_user_and_group(team, path);
|
|
|
|
|
2008-09-20 17:59:41 +04:00
|
|
|
user_debug_team_exec();
|
|
|
|
|
2009-04-12 01:45:25 +04:00
|
|
|
// notify team listeners
|
|
|
|
sNotificationService.Notify(TEAM_EXEC, team);
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// get a user thread for the thread
|
|
|
|
user_thread* userThread = team_allocate_user_thread(team);
|
|
|
|
// cannot fail (the allocation for the team would have failed already)
|
|
|
|
ThreadLocker currentThreadLocker(currentThread);
|
|
|
|
currentThread->user_thread = userThread;
|
|
|
|
currentThreadLocker.Unlock();
|
|
|
|
|
|
|
|
// create the user stack for the thread
|
|
|
|
status = thread_create_user_stack(currentThread->team, currentThread, NULL,
|
|
|
|
0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
|
|
|
|
if (status == B_OK) {
|
|
|
|
// prepare the stack, load the runtime loader, and enter userspace
|
|
|
|
team_create_thread_start(teamArgs);
|
|
|
|
			// never returns
|
|
|
|
} else
|
|
|
|
free_team_arg(teamArgs);
|
2004-10-07 19:34:17 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// Sorry, we have to kill ourselves, there is no way out anymore
|
|
|
|
// (without any areas left and all that).
|
2004-10-07 19:34:17 +04:00
|
|
|
exit_thread(status);
|
2005-10-05 19:32:48 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// We return a status here since the signal that is sent by the
|
|
|
|
// call above is not immediately handled.
|
2004-10-07 21:17:04 +04:00
|
|
|
return B_ERROR;
|
2004-10-07 19:34:17 +04:00
|
|
|
}
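// Hedged sketch of the expected caller side (the helper name and signature
// are assumptions, not the verbatim syscall code): an exec*() syscall backend
// would first copy the userland argument vector into kernel space -- via a
// helper like the argument-copying function further above -- and then hand
// ownership to exec_team(), which only returns on error:
//
//	char** flatArgs;
//	size_t flatArgsSize;
//	status_t error = copy_user_process_args(userFlatArgs, userFlatArgsSize,
//		argCount, envCount, flatArgs, flatArgsSize);
//	if (error == B_OK) {
//		error = exec_team(path, flatArgs, flatArgsSize, argCount, envCount,
//			umask);
//			// does not return on success
//	}
//	return error;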
|
|
|
|
|
|
|
|
|
2004-10-10 21:30:42 +04:00
|
|
|
static thread_id
|
|
|
|
fork_team(void)
|
|
|
|
{
|
2011-01-11 00:54:38 +03:00
|
|
|
Thread* parentThread = thread_get_current_thread();
|
|
|
|
Team* parentTeam = parentThread->team;
|
|
|
|
Team* team;
|
2011-06-12 04:00:23 +04:00
|
|
|
arch_fork_arg* forkArgs;
|
2004-10-10 21:30:42 +04:00
|
|
|
struct area_info info;
|
2004-10-12 08:03:52 +04:00
|
|
|
thread_id threadID;
|
2004-10-10 21:30:42 +04:00
|
|
|
status_t status;
|
|
|
|
int32 cookie;
|
|
|
|
|
2007-10-11 12:30:18 +04:00
|
|
|
TRACE(("fork_team(): team %ld\n", parentTeam->id));
|
2004-10-10 21:30:42 +04:00
|
|
|
|
2004-10-12 08:03:52 +04:00
|
|
|
if (parentTeam == team_get_kernel_team())
|
2004-10-10 21:30:42 +04:00
|
|
|
return B_NOT_ALLOWED;
|
|
|
|
|
|
|
|
// create a new team
|
2009-03-02 03:26:22 +03:00
|
|
|
// TODO: this is very similar to load_image_internal() - maybe we can do
|
|
|
|
// something about it :)
|
2004-10-10 21:30:42 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// create the main thread object
|
|
|
|
Thread* thread;
|
|
|
|
status = Thread::Create(parentThread->name, thread);
|
|
|
|
if (status != B_OK)
|
|
|
|
return status;
|
|
|
|
BReference<Thread> threadReference(thread, true);
|
|
|
|
|
|
|
|
// create the team object
|
|
|
|
team = Team::Create(thread->id, NULL, false);
|
2004-10-10 21:30:42 +04:00
|
|
|
if (team == NULL)
|
|
|
|
return B_NO_MEMORY;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
parentTeam->LockTeamAndProcessGroup();
|
|
|
|
team->Lock();
|
2007-01-15 02:22:49 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->SetName(parentTeam->Name());
|
|
|
|
team->SetArgs(parentTeam->Args());
|
2008-03-11 20:12:02 +03:00
|
|
|
|
2009-08-22 07:07:11 +04:00
|
|
|
// Inherit the parent's user/group.
|
2011-06-12 04:00:23 +04:00
|
|
|
inherit_parent_user_and_group(team, parentTeam);
|
|
|
|
|
|
|
|
// inherit signal handlers
|
|
|
|
team->InheritSignalActions(parentTeam);
|
|
|
|
|
|
|
|
InterruptsSpinLocker teamsLocker(sTeamHashLock);
|
|
|
|
|
|
|
|
sTeamHash.Insert(team);
|
|
|
|
sUsedTeams++;
|
|
|
|
|
|
|
|
teamsLocker.Unlock();
|
2004-10-10 21:30:42 +04:00
|
|
|
|
2004-10-12 08:03:52 +04:00
|
|
|
insert_team_into_parent(parentTeam, team);
|
2004-10-15 20:14:51 +04:00
|
|
|
insert_team_into_group(parentTeam->group, team);
|
2004-10-10 21:30:42 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->Unlock();
|
|
|
|
parentTeam->UnlockTeamAndProcessGroup();
|
|
|
|
|
|
|
|
// notify team listeners
|
|
|
|
sNotificationService.Notify(TEAM_ADDED, team);
|
2004-10-10 21:30:42 +04:00
|
|
|
|
2009-09-28 06:54:38 +04:00
|
|
|
// inherit some team debug flags
|
|
|
|
team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
|
|
|
|
& B_TEAM_DEBUG_INHERITED_FLAGS;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
|
2004-10-12 08:03:52 +04:00
|
|
|
if (forkArgs == NULL) {
|
|
|
|
status = B_NO_MEMORY;
|
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
|
2004-10-10 21:30:42 +04:00
|
|
|
// create a new io_context for this team
|
2009-11-25 19:16:22 +03:00
|
|
|
team->io_context = vfs_new_io_context(parentTeam->io_context, false);
|
2004-10-10 21:30:42 +04:00
|
|
|
if (!team->io_context) {
|
|
|
|
status = B_NO_MEMORY;
|
|
|
|
goto err2;
|
|
|
|
}
|
|
|
|
|
2008-05-06 07:39:36 +04:00
|
|
|
// duplicate the realtime sem context
|
|
|
|
if (parentTeam->realtime_sem_context) {
|
|
|
|
team->realtime_sem_context = clone_realtime_sem_context(
|
|
|
|
parentTeam->realtime_sem_context);
|
|
|
|
if (team->realtime_sem_context == NULL) {
|
|
|
|
status = B_NO_MEMORY;
|
|
|
|
goto err25;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-10-10 21:30:42 +04:00
|
|
|
// create an address space for this team
|
2009-12-02 19:12:15 +03:00
|
|
|
status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
|
2005-12-20 16:29:11 +03:00
|
|
|
&team->address_space);
|
2004-10-10 21:30:42 +04:00
|
|
|
if (status < B_OK)
|
|
|
|
goto err3;
|
|
|
|
|
|
|
|
// copy all areas of the team
|
2009-11-27 21:10:03 +03:00
|
|
|
// TODO: should be able to handle stack areas differently (ie. don't have
|
|
|
|
// them copy-on-write)
|
2008-05-11 20:25:35 +04:00
|
|
|
|
2004-10-10 21:30:42 +04:00
|
|
|
cookie = 0;
|
|
|
|
while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
|
2008-07-25 03:03:59 +04:00
|
|
|
if (info.area == parentTeam->user_data_area) {
|
|
|
|
// don't clone the user area; just create a new one
|
|
|
|
status = create_team_user_data(team);
|
|
|
|
if (status != B_OK)
|
|
|
|
break;
|
2004-10-12 08:03:52 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
thread->user_thread = team_allocate_user_thread(team);
|
2008-07-25 03:03:59 +04:00
|
|
|
} else {
|
2009-11-27 21:10:03 +03:00
|
|
|
void* address;
|
2009-12-02 19:12:15 +03:00
|
|
|
area_id area = vm_copy_area(team->address_space->ID(), info.name,
|
2008-07-25 03:03:59 +04:00
|
|
|
&address, B_CLONE_ADDRESS, info.protection, info.area);
|
|
|
|
if (area < B_OK) {
|
|
|
|
status = area;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (info.area == parentThread->user_stack_area)
|
2011-06-12 04:00:23 +04:00
|
|
|
thread->user_stack_area = area;
|
2008-05-11 20:25:35 +04:00
|
|
|
}
|
2004-10-10 21:30:42 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (status < B_OK)
|
|
|
|
goto err4;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
if (thread->user_thread == NULL) {
|
2008-05-11 20:25:35 +04:00
|
|
|
#if KDEBUG
|
|
|
|
panic("user data area not found, parent area is %ld",
|
|
|
|
parentTeam->user_data_area);
|
|
|
|
#endif
|
|
|
|
status = B_ERROR;
|
|
|
|
goto err4;
|
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
thread->user_stack_base = parentThread->user_stack_base;
|
|
|
|
thread->user_stack_size = parentThread->user_stack_size;
|
|
|
|
thread->user_local_storage = parentThread->user_local_storage;
|
|
|
|
thread->sig_block_mask = parentThread->sig_block_mask;
|
|
|
|
thread->signal_stack_base = parentThread->signal_stack_base;
|
|
|
|
thread->signal_stack_size = parentThread->signal_stack_size;
|
|
|
|
thread->signal_stack_enabled = parentThread->signal_stack_enabled;
|
2009-05-21 03:10:13 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
arch_store_fork_frame(forkArgs);
|
2004-10-12 08:03:52 +04:00
|
|
|
|
2008-08-11 04:30:00 +04:00
|
|
|
// copy image list
|
|
|
|
image_info imageInfo;
|
|
|
|
cookie = 0;
|
|
|
|
while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
|
|
|
|
image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
|
|
|
|
if (image < 0)
|
|
|
|
goto err5;
|
|
|
|
}
|
2004-10-12 08:03:52 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// create the main thread
|
|
|
|
{
|
|
|
|
ThreadCreationAttributes threadCreationAttributes(NULL,
|
|
|
|
parentThread->name, parentThread->priority, NULL, team->id, thread);
|
|
|
|
threadCreationAttributes.forkArgs = forkArgs;
|
|
|
|
threadID = thread_create_thread(threadCreationAttributes, false);
|
|
|
|
if (threadID < 0) {
|
|
|
|
status = threadID;
|
|
|
|
goto err5;
|
|
|
|
}
|
2004-10-10 21:30:42 +04:00
|
|
|
}
|
|
|
|
|
2005-02-24 19:11:25 +03:00
|
|
|
// notify the debugger
|
|
|
|
user_debug_team_created(team->id);
|
|
|
|
|
2008-01-17 04:59:17 +03:00
|
|
|
T(TeamForked(threadID));
|
|
|
|
|
2004-10-12 08:03:52 +04:00
|
|
|
resume_thread(threadID);
|
|
|
|
return threadID;
|
2004-10-10 21:30:42 +04:00
|
|
|
|
2008-08-11 04:30:00 +04:00
|
|
|
err5:
|
|
|
|
remove_images(team);
|
2004-10-10 21:30:42 +04:00
|
|
|
err4:
|
2009-12-02 19:12:15 +03:00
|
|
|
team->address_space->RemoveAndPut();
|
2004-10-10 21:30:42 +04:00
|
|
|
err3:
|
2008-05-06 07:39:36 +04:00
|
|
|
delete_realtime_sem_context(team->realtime_sem_context);
|
|
|
|
err25:
|
2009-03-02 03:26:22 +03:00
|
|
|
vfs_put_io_context(team->io_context);
|
2004-10-10 21:30:42 +04:00
|
|
|
err2:
|
2004-10-12 08:03:52 +04:00
|
|
|
free(forkArgs);
|
|
|
|
err1:
|
2011-06-12 04:00:23 +04:00
|
|
|
// Remove the team structure from the process group, the parent team, and
|
|
|
|
// the team hash table and delete the team structure.
|
|
|
|
parentTeam->LockTeamAndProcessGroup();
|
|
|
|
team->Lock();
|
2004-10-10 21:30:42 +04:00
|
|
|
|
2008-03-09 20:56:27 +03:00
|
|
|
remove_team_from_group(team);
|
2011-06-12 04:00:23 +04:00
|
|
|
remove_team_from_parent(team->parent, team);
|
2004-10-10 21:30:42 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->Unlock();
|
|
|
|
parentTeam->UnlockTeamAndProcessGroup();
|
|
|
|
|
|
|
|
teamsLocker.Lock();
|
|
|
|
sTeamHash.Remove(team);
|
|
|
|
teamsLocker.Unlock();
|
2004-10-15 20:14:51 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
sNotificationService.Notify(TEAM_REMOVED, team);
|
|
|
|
|
|
|
|
team->ReleaseReference();
|
2004-10-10 21:30:42 +04:00
|
|
|
|
|
|
|
return status;
|
|
|
|
}
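// Usage note (a sketch under the assumption of a thin syscall wrapper; the
// actual wrapper is not part of this excerpt): the fork() path is expected to
// reduce to little more than
//
//	thread_id
//	_user_fork(void)
//	{
//		return fork_team();
//	}
//
// with fork_team() doing all the heavy lifting shown above.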
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Returns whether the specified team \a parent has any children belonging to the
|
|
|
|
process group with the specified ID \a groupID.
|
|
|
|
The caller must hold \a parent's lock.
|
2007-10-11 12:30:18 +04:00
|
|
|
*/
|
2007-01-29 18:33:31 +03:00
|
|
|
static bool
|
2011-01-11 00:54:38 +03:00
|
|
|
has_children_in_group(Team* parent, pid_t groupID)
|
2004-10-14 19:42:56 +04:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
for (Team* child = parent->children; child != NULL;
|
|
|
|
child = child->siblings_next) {
|
|
|
|
TeamLocker childLocker(child);
|
|
|
|
if (child->group_id == groupID)
|
2007-01-29 18:33:31 +03:00
|
|
|
return true;
|
2004-10-14 19:42:56 +04:00
|
|
|
}
|
|
|
|
|
2007-01-29 18:33:31 +03:00
|
|
|
return false;
|
2004-10-14 19:42:56 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Returns the first job control entry from \a children, which matches \a id.
|
|
|
|
\a id can be:
|
|
|
|
- \code > 0 \endcode: Matching an entry with that team ID.
|
|
|
|
- \code == -1 \endcode: Matching any entry.
|
|
|
|
- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
|
|
|
|
\c 0 is an invalid value for \a id.
|
|
|
|
|
|
|
|
The caller must hold the lock of the team that \a children belongs to.
|
|
|
|
|
|
|
|
\param children The job control entry list to check.
|
|
|
|
\param id The match criterion.
|
|
|
|
\return The first matching entry or \c NULL, if none matches.
|
|
|
|
*/
|
2007-08-28 07:29:14 +04:00
|
|
|
static job_control_entry*
|
2011-01-11 00:54:38 +03:00
|
|
|
get_job_control_entry(team_job_control_children& children, pid_t id)
|
2007-08-28 07:29:14 +04:00
|
|
|
{
|
2011-01-11 00:54:38 +03:00
|
|
|
for (JobControlEntryList::Iterator it = children.entries.GetIterator();
|
2007-08-28 07:29:14 +04:00
|
|
|
job_control_entry* entry = it.Next();) {
|
|
|
|
|
|
|
|
if (id > 0) {
|
|
|
|
if (entry->thread == id)
|
|
|
|
return entry;
|
|
|
|
} else if (id == -1) {
|
|
|
|
return entry;
|
|
|
|
} else {
|
|
|
|
pid_t processGroup
|
|
|
|
= (entry->team ? entry->team->group_id : entry->group_id);
|
|
|
|
if (processGroup == -id)
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
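// Example of the matching rules (hypothetical entries): assume the calling
// team's dead_children list holds entries for team 100 in process group 10
// and team 200 in process group 20. Then:
//
//	get_job_control_entry(team->dead_children, 200);
//		// -> the entry for team 200
//	get_job_control_entry(team->dead_children, -1);
//		// -> the first entry (any entry matches)
//	get_job_control_entry(team->dead_children, -10);
//		// -> the entry for team 100 (process group ID 10)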
|
|
|
|
|
2004-09-02 05:41:06 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Returns the first job control entry from one of team's dead, continued, or
|
|
|
|
stopped children which matches \a id.
|
|
|
|
\a id can be:
|
|
|
|
- \code > 0 \endcode: Matching an entry with that team ID.
|
|
|
|
- \code == -1 \endcode: Matching any entry.
|
|
|
|
- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
|
|
|
|
\c 0 is an invalid value for \a id.
|
|
|
|
|
|
|
|
The caller must hold \a team's lock.
|
|
|
|
|
|
|
|
\param team The team whose dead, stopped, and continued child lists shall be
|
|
|
|
checked.
|
|
|
|
\param id The match criterion.
|
|
|
|
\param flags Specifies which children shall be considered. Dead children
|
|
|
|
always are. Stopped children are considered when \a flags is ORed
|
|
|
|
bitwise with \c WUNTRACED, continued children when \a flags is ORed
|
|
|
|
bitwise with \c WCONTINUED.
|
|
|
|
\return The first matching entry or \c NULL, if none matches.
|
|
|
|
*/
|
2007-09-04 01:35:24 +04:00
|
|
|
static job_control_entry*
|
2011-01-11 00:54:38 +03:00
|
|
|
get_job_control_entry(Team* team, pid_t id, uint32 flags)
|
2007-09-04 01:35:24 +04:00
|
|
|
{
|
|
|
|
job_control_entry* entry = get_job_control_entry(team->dead_children, id);
|
|
|
|
|
|
|
|
if (entry == NULL && (flags & WCONTINUED) != 0)
|
|
|
|
entry = get_job_control_entry(team->continued_children, id);
|
|
|
|
|
|
|
|
if (entry == NULL && (flags & WUNTRACED) != 0)
|
|
|
|
entry = get_job_control_entry(team->stopped_children, id);
|
|
|
|
|
|
|
|
return entry;
|
|
|
|
}
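// Example (hypothetical calls): mirroring waitpid()/waitid() semantics, the
// flags widen the search beyond dead children:
//
//	get_job_control_entry(team, -1, 0);
//		// dead children only
//	get_job_control_entry(team, -1, WUNTRACED);
//		// dead or stopped children
//	get_job_control_entry(team, -1, WCONTINUED | WUNTRACED);
//		// dead, continued, or stopped children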
|
|
|
|
|
|
|
|
|
2008-03-09 20:56:27 +03:00
|
|
|
job_control_entry::job_control_entry()
|
|
|
|
:
|
|
|
|
has_group_ref(false)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
job_control_entry::~job_control_entry()
|
|
|
|
{
|
|
|
|
if (has_group_ref) {
|
2011-06-12 04:00:23 +04:00
|
|
|
InterruptsSpinLocker groupHashLocker(sGroupHashLock);
|
|
|
|
|
|
|
|
ProcessGroup* group = sGroupHash.Lookup(group_id);
|
|
|
|
if (group == NULL) {
|
|
|
|
panic("job_control_entry::~job_control_entry(): unknown group "
|
|
|
|
"ID: %ld", group_id);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
groupHashLocker.Unlock();
|
|
|
|
|
|
|
|
group->ReleaseReference();
|
2008-03-09 20:56:27 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Invoked when the owning team is dying, initializing the entry according to
|
|
|
|
the dead state.
|
|
|
|
|
|
|
|
The caller must hold the owning team's lock and the scheduler lock.
|
2008-03-09 20:56:27 +03:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
job_control_entry::InitDeadState()
|
|
|
|
{
|
|
|
|
if (team != NULL) {
|
2011-06-12 04:00:23 +04:00
|
|
|
ASSERT(team->exit.initialized);
|
|
|
|
|
2008-03-09 20:56:27 +03:00
|
|
|
group_id = team->group_id;
|
2011-06-12 04:00:23 +04:00
|
|
|
team->group->AcquireReference();
|
2008-03-09 20:56:27 +03:00
|
|
|
has_group_ref = true;
|
2011-06-12 04:00:23 +04:00
|
|
|
|
|
|
|
thread = team->id;
|
|
|
|
status = team->exit.status;
|
|
|
|
reason = team->exit.reason;
|
|
|
|
signal = team->exit.signal;
|
|
|
|
signaling_user = team->exit.signaling_user;
|
|
|
|
|
|
|
|
team = NULL;
|
2008-03-09 20:56:27 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
job_control_entry&
|
|
|
|
job_control_entry::operator=(const job_control_entry& other)
|
|
|
|
{
|
|
|
|
state = other.state;
|
|
|
|
thread = other.thread;
|
2011-06-12 04:00:23 +04:00
|
|
|
signal = other.signal;
|
2008-03-09 20:56:27 +03:00
|
|
|
has_group_ref = false;
|
2011-06-12 04:00:23 +04:00
|
|
|
signaling_user = other.signaling_user;
|
2008-03-09 20:56:27 +03:00
|
|
|
team = other.team;
|
|
|
|
group_id = other.group_id;
|
|
|
|
status = other.status;
|
|
|
|
reason = other.reason;
|
|
|
|
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! This is the kernel backend for waitid().
|
2007-08-28 07:29:14 +04:00
|
|
|
*/
|
2004-09-15 19:45:37 +04:00
|
|
|
static thread_id
|
2011-06-12 04:00:23 +04:00
|
|
|
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
|
2004-09-02 05:41:06 +04:00
|
|
|
{
|
2011-01-11 00:54:38 +03:00
|
|
|
Thread* thread = thread_get_current_thread();
|
|
|
|
Team* team = thread->team;
|
2007-08-28 07:29:14 +04:00
|
|
|
struct job_control_entry foundEntry;
|
|
|
|
struct job_control_entry* freeDeathEntry = NULL;
|
2004-10-14 18:46:12 +04:00
|
|
|
status_t status = B_OK;
|
2004-09-02 05:41:06 +04:00
|
|
|
|
2004-10-14 18:46:12 +04:00
|
|
|
TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
|
2004-09-15 19:45:37 +04:00
|
|
|
|
2008-01-18 03:01:32 +03:00
|
|
|
T(WaitForChild(child, flags));
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
pid_t originalChild = child;
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2007-09-04 01:35:24 +04:00
|
|
|
bool ignoreFoundEntries = false;
|
|
|
|
bool ignoreFoundEntriesChecked = false;
|
|
|
|
|
2004-10-14 18:46:12 +04:00
|
|
|
while (true) {
|
2011-06-12 04:00:23 +04:00
|
|
|
// lock the team
|
|
|
|
TeamLocker teamLocker(team);
|
|
|
|
|
|
|
|
// A 0 child argument means to wait for all children in the process
|
|
|
|
// group of the calling team.
|
|
|
|
child = originalChild == 0 ? -team->group_id : originalChild;
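		// After this normalization `child' follows the usual waitpid()
		// convention: > 0 waits for that team ID, -1 for any child, and
		// < -1 for any child in process group -child (a 0 argument was
		// mapped to -team->group_id above).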
|
2006-08-18 01:40:55 +04:00
|
|
|
|
2007-08-28 07:29:14 +04:00
|
|
|
// check whether any condition holds
|
2007-09-04 01:35:24 +04:00
|
|
|
job_control_entry* entry = get_job_control_entry(team, child, flags);
|
2006-08-18 01:40:55 +04:00
|
|
|
|
2007-08-28 07:29:14 +04:00
|
|
|
// If we don't have an entry yet, check whether there are any children
|
|
|
|
// complying to the process group specification at all.
|
|
|
|
if (entry == NULL) {
|
2011-06-12 04:00:23 +04:00
|
|
|
|
2007-08-28 07:29:14 +04:00
|
|
|
bool childrenExist = false;
|
|
|
|
if (child == -1) {
|
2007-01-29 18:33:31 +03:00
|
|
|
childrenExist = team->children != NULL;
|
2007-08-28 07:29:14 +04:00
|
|
|
} else if (child < -1) {
|
2007-01-29 18:33:31 +03:00
|
|
|
childrenExist = has_children_in_group(team, -child);
|
2007-08-28 07:29:14 +04:00
|
|
|
} else {
|
2011-06-12 04:00:23 +04:00
|
|
|
if (Team* childTeam = Team::Get(child)) {
|
|
|
|
BReference<Team> childTeamReference(childTeam, true);
|
|
|
|
TeamLocker childTeamLocker(childTeam);
|
2007-08-30 04:57:12 +04:00
|
|
|
childrenExist = childTeam->parent == team;
|
2011-06-12 04:00:23 +04:00
|
|
|
}
|
2007-08-28 07:29:14 +04:00
|
|
|
}
|
2007-01-29 18:33:31 +03:00
|
|
|
|
|
|
|
if (!childrenExist) {
|
|
|
|
// there is no child we could wait for
|
2006-08-18 01:40:55 +04:00
|
|
|
status = ECHILD;
|
2006-08-21 01:27:12 +04:00
|
|
|
} else {
|
2007-01-29 18:33:31 +03:00
|
|
|
// the children we're waiting for are still running
|
2006-08-21 01:27:12 +04:00
|
|
|
status = B_WOULD_BLOCK;
|
2006-08-18 01:40:55 +04:00
|
|
|
}
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
} else {
|
|
|
|
// got something
|
|
|
|
foundEntry = *entry;
|
2011-06-12 04:00:23 +04:00
|
|
|
|
|
|
|
// unless WNOWAIT has been specified, "consume" the wait state
|
|
|
|
if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
|
|
|
|
if (entry->state == JOB_CONTROL_STATE_DEAD) {
|
|
|
|
// The child is dead. Reap its death entry.
|
|
|
|
freeDeathEntry = entry;
|
|
|
|
team->dead_children.entries.Remove(entry);
|
|
|
|
team->dead_children.count--;
|
|
|
|
} else {
|
|
|
|
// The child is well. Reset its job control state.
|
|
|
|
team_set_job_control_state(entry->team,
|
|
|
|
JOB_CONTROL_STATE_NONE, NULL, false);
|
|
|
|
}
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
}
|
2006-08-18 01:40:55 +04:00
|
|
|
}
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
|
2008-04-22 22:32:15 +04:00
|
|
|
// If we haven't got anything yet, prepare for waiting for the
|
2008-04-20 19:15:58 +04:00
|
|
|
// condition variable.
|
2008-04-22 22:32:15 +04:00
|
|
|
ConditionVariableEntry deadWaitEntry;
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
|
2008-04-20 19:15:58 +04:00
|
|
|
if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
|
2011-01-11 00:54:38 +03:00
|
|
|
team->dead_children.condition_variable.Add(&deadWaitEntry);
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Unlock();
|
2006-08-21 01:27:12 +04:00
|
|
|
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
// we got our entry and can return to our caller
|
2007-09-04 01:35:24 +04:00
|
|
|
if (status == B_OK) {
|
|
|
|
if (ignoreFoundEntries) {
|
|
|
|
// ... unless we shall ignore found entries
|
|
|
|
delete freeDeathEntry;
|
|
|
|
freeDeathEntry = NULL;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2006-08-21 01:27:12 +04:00
|
|
|
break;
|
2007-09-04 01:35:24 +04:00
|
|
|
}
|
|
|
|
|
2008-01-18 03:01:32 +03:00
|
|
|
if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
|
|
|
|
T(WaitForChildDone(status));
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
return status;
|
2008-01-18 03:01:32 +03:00
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2008-05-17 14:21:37 +04:00
|
|
|
status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
|
2008-01-18 03:01:32 +03:00
|
|
|
if (status == B_INTERRUPTED) {
|
|
|
|
T(WaitForChildDone(status));
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analoguously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waidpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performes the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
return status;
|
2008-01-18 03:01:32 +03:00
|
|
|
}
|
2007-09-04 01:35:24 +04:00
|
|
|
|
|
|
|
// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
|
|
|
|
// all our children are dead and fail with ECHILD. We check the
|
|
|
|
// condition at this point.
|
|
|
|
if (!ignoreFoundEntriesChecked) {
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Lock();
|
|
|
|
|
|
|
|
struct sigaction& handler = team->SignalActionFor(SIGCHLD);
|
2007-09-04 01:35:24 +04:00
|
|
|
if ((handler.sa_flags & SA_NOCLDWAIT) != 0
|
|
|
|
|| handler.sa_handler == SIG_IGN) {
|
|
|
|
ignoreFoundEntries = true;
|
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Unlock();
|
|
|
|
|
2007-09-04 01:35:24 +04:00
|
|
|
ignoreFoundEntriesChecked = true;
|
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	schedulerLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
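

/*	Usage sketch (illustrative only; not part of the kernel build, needs
	<unistd.h>, <signal.h> and <sys/wait.h>): wait_for_child() above is the
	waitpid() backend, so the stopped/continued tracking can be exercised
	from userland like this:

		pid_t pid = fork();
		if (pid == 0) {
			raise(SIGSTOP);
				// child stops itself
			_exit(0);
		}
		int status;
		waitpid(pid, &status, WUNTRACED);
			// returns once the child has stopped
		kill(pid, SIGCONT);
		waitpid(pid, &status, WCONTINUED);
			// reports the continuation
		waitpid(pid, &status, 0);
			// finally reaps the death entry
*/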


/*!	Fills the team_info structure with information from the specified team.
	Interrupts must be enabled. The team must not be locked.
*/
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	if (size != sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable
	info->image_count = count_images(team);
		// protected by sImageMutex

	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	//info->uid =
	//info->gid =

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;

	return B_OK;
}
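

/*	Usage sketch (illustrative only): fill_team_info() is the backend for the
	get_team_info() family of syscalls. From userland (with <OS.h> and
	<stdio.h>) the same data is reached via the public API:

		team_info info;
		if (get_team_info(B_CURRENT_TEAM, &info) == B_OK) {
			printf("%ld threads, args: %s\n", (long)info.thread_count,
				info.args);
		}
*/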


/*!	Returns whether the process group contains stopped processes.
	The caller must hold the process group's lock.
*/
static bool
process_group_has_stopped_processes(ProcessGroup* group)
{
	Team* team = group->teams;
	while (team != NULL) {
		// the parent team's lock guards the job control entry -- acquire it
		team->LockTeamAndParent(false);

		if (team->job_control_entry != NULL
			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
			team->UnlockTeamAndParent();
			return true;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return false;
}
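

/*	Background: POSIX requires that when a process group becomes orphaned --
	that is, no member has a parent in a different process group of the same
	session -- and the group contains stopped processes, the group is sent
	SIGHUP followed by SIGCONT. The classic trigger is a login shell exiting
	while one of its jobs is stopped; the helper above and the check below
	implement the signalling half of that rule.
*/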


/*!	Iterates through all process groups queued in team_remove_team() and
	signals those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);

		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
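

/*!	Returns the accumulated CPU usage of the given team, or of its (dead and
	living) children, as the backend for get_team_usage_info().
	Interrupts must be enabled. The caller must not hold any locks.

	\param id The ID of the team in question.
	\param who \c B_TEAM_USAGE_SELF or \c B_TEAM_USAGE_CHILDREN.
	\param info Pointer to the structure the kernel and user times are
		written to.
	\param flags If \c B_CHECK_PERMISSION is set, only the team's effective
		owner and the root user may query the information.
	\return \c B_OK on success, another error code otherwise.
*/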
static status_t
common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
	uint32 flags)
{
	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
		return B_BAD_VALUE;

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);
	TeamLocker teamLocker(team, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		uid_t uid = geteuid();
		if (uid != 0 && uid != team->effective_uid)
			return B_NOT_ALLOWED;
	}

	bigtime_t kernelTime = 0;
	bigtime_t userTime = 0;

	switch (who) {
		case B_TEAM_USAGE_SELF:
		{
			Thread* thread = team->thread_list;

			for (; thread != NULL; thread = thread->team_next) {
				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
				kernelTime += thread->kernel_time;
				userTime += thread->user_time;
			}

			kernelTime += team->dead_threads_kernel_time;
			userTime += team->dead_threads_user_time;
			break;
		}

		case B_TEAM_USAGE_CHILDREN:
		{
			Team* child = team->children;
			for (; child != NULL; child = child->siblings_next) {
				TeamLocker childLocker(child);

				// sum up the child's live threads (not the caller's own)
				Thread* thread = child->thread_list;

				for (; thread != NULL; thread = thread->team_next) {
					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
					kernelTime += thread->kernel_time;
					userTime += thread->user_time;
				}

				kernelTime += child->dead_threads_kernel_time;
				userTime += child->dead_threads_user_time;
			}

			kernelTime += team->dead_children.kernel_time;
			userTime += team->dead_children.user_time;
			break;
		}
	}

	info->kernel_time = kernelTime;
	info->user_time = userTime;

	return B_OK;
}
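

/*	Usage sketch (illustrative only, with <OS.h> and <stdio.h>): the public
	get_team_usage_info() API maps onto this backend, roughly Haiku's
	equivalent of getrusage():

		team_usage_info usage;
		if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
				== B_OK) {
			printf("kernel: %lld us, user: %lld us\n",
				(long long)usage.kernel_time, (long long)usage.user_time);
		}
*/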


// #pragma mark - Private kernel API


status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups
	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	return B_OK;
}
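

/*!	Returns the maximum number of teams that may exist at any given time. */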
int32
team_max_teams(void)
{
	return sMaxTeams;
}
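

/*!	Returns the number of currently existing teams. */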
int32
team_used_teams(void)
{
	InterruptsSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}


/*!	Returns a death entry of a child team specified by ID (if any).
	The caller must hold the team's lock.

	\param team The team whose dead children list to check.
	\param child The ID of the child for whose death entry to look. Must be
		> 0.
	\param _deleteEntry Return variable, indicating whether the caller needs
		to delete the returned entry.
	\return The death entry of the matching team, or \c NULL, if no death
		entry for the team was found.
*/
job_control_entry*
team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
{
	if (child <= 0)
		return NULL;

	job_control_entry* entry = get_job_control_entry(team->dead_children,
		child);
	if (entry) {
		// remove the entry only if the caller is the parent of the found team
		if (team_get_current_team_id() == entry->thread) {
			team->dead_children.entries.Remove(entry);
			team->dead_children.count--;
			*_deleteEntry = true;
		} else {
			*_deleteEntry = false;
		}
	}

	return entry;
}
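

/*	Caller sketch (illustrative only): wait_for_thread() uses this to reap
	the death entry of a dead main thread; ownership of the entry follows
	the _deleteEntry flag:

		bool deleteEntry;
		job_control_entry* entry
			= team_get_death_entry(team, thread, &deleteEntry);
		if (entry != NULL) {
			// ... copy the exit information out of the entry ...
			if (deleteEntry)
				delete entry;
		}
*/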


/*!	Quick check to see if we have a valid team ID. */
bool
team_is_valid(team_id id)
{
	if (id <= 0)
		return false;

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	return team_get_team_struct_locked(id) != NULL;
}


Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}


void
team_set_controlling_tty(int32 ttyIndex)
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// set the session's fields
	session->controlling_tty = ttyIndex;
	session->foreground_group = -1;
}


int32
team_get_controlling_tty()
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// get the session's field
	return session->controlling_tty;
}
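

/*!	Sets the foreground process group of the session the calling team belongs
	to, serving as the tcsetpgrp() backend (presumably reached through the
	TTY driver).

	\param ttyIndex The TTY the calling process must have as its controlling
		terminal.
	\param processGroupID The group to move into the foreground; it must
		belong to the caller's session.
	\return \c B_OK on success, \c ENOTTY if \a ttyIndex is not the session's
		controlling TTY, \c B_BAD_VALUE if the group doesn't belong to the
		session, \c B_INTERRUPTED if the calling team was sent a SIGTTOU
		instead.
*/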
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			schedulerLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
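

/*	Usage sketch (illustrative only): this is the semantics a job-control
	shell relies on when moving a job into the foreground (<unistd.h>,
	<signal.h>):

		tcsetpgrp(ttyFD, jobPgid);
			// give the terminal to the job's process group
		kill(-jobPgid, SIGCONT);
			// wake the job, wait for it, then take the terminal back:
		tcsetpgrp(ttyFD, getpgrp());

	A background process calling tcsetpgrp() without ignoring or blocking
	SIGTTOU is sent that signal by the code above and stopped, and the
	in-kernel caller sees B_INTERRUPTED.
*/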
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Removes the specified team from the global team hash, from its process
|
|
|
|
group, and from its parent.
|
|
|
|
It also moves all of its children to the kernel team.
|
|
|
|
|
|
|
|
The caller must hold the following locks:
|
|
|
|
- \a team's process group's lock,
|
|
|
|
- the kernel team's lock,
|
|
|
|
- \a team's parent team's lock (might be the kernel team), and
|
|
|
|
- \a team's lock.
|
2007-09-06 06:16:25 +04:00
|
|
|
*/
|
2006-08-25 03:41:54 +04:00
|
|
|
void
|
2011-06-12 04:00:23 +04:00
|
|
|
team_remove_team(Team* team, pid_t& _signalGroup)
|
2006-08-25 03:41:54 +04:00
|
|
|
{
|
2011-01-11 00:54:38 +03:00
|
|
|
Team* parent = team->parent;
|
2006-08-25 03:41:54 +04:00
|
|
|
|
|
|
|
// remember how long this team lasted
|
2011-01-11 00:54:38 +03:00
|
|
|
parent->dead_children.kernel_time += team->dead_threads_kernel_time
|
|
|
|
+ team->dead_children.kernel_time;
|
|
|
|
parent->dead_children.user_time += team->dead_threads_user_time
|
|
|
|
+ team->dead_children.user_time;
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// remove the team from the hash table
|
|
|
|
InterruptsSpinLocker teamsLocker(sTeamHashLock);
|
|
|
|
sTeamHash.Remove(team);
|
2006-08-25 03:41:54 +04:00
|
|
|
sUsedTeams--;
|
2011-06-12 04:00:23 +04:00
|
|
|
teamsLocker.Unlock();
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// The team can no longer be accessed by ID. Navigation to it is still
|
|
|
|
// possible from its process group and its parent and children, but that
|
|
|
|
// will be rectified shortly.
|
2006-08-25 03:41:54 +04:00
|
|
|
team->state = TEAM_STATE_DEATH;
|
|
|
|
|
2007-09-06 06:16:25 +04:00
|
|
|
// If we're a controlling process (i.e. a session leader with controlling
|
2011-06-12 04:00:23 +04:00
|
|
|
// terminal), there's a bit of signalling we have to do. We can't do any of
|
|
|
|
// the signaling here due to the bunch of locks we're holding, but we need
|
|
|
|
// to determine whom to signal.
|
|
|
|
_signalGroup = -1;
|
|
|
|
bool isSessionLeader = false;
|
2007-09-06 06:16:25 +04:00
|
|
|
if (team->session_id == team->id
|
2011-06-12 04:00:23 +04:00
|
|
|
&& team->group->Session()->controlling_tty >= 0) {
|
|
|
|
isSessionLeader = true;
|
|
|
|
|
|
|
|
ProcessSession* session = team->group->Session();
|
|
|
|
|
|
|
|
AutoLocker<ProcessSession> sessionLocker(session);
|
2007-09-06 06:16:25 +04:00
|
|
|
|
|
|
|
session->controlling_tty = -1;
|
2011-06-12 04:00:23 +04:00
|
|
|
_signalGroup = session->foreground_group;
|
|
|
|
}
|
2007-09-06 06:16:25 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// remove us from our process group
|
|
|
|
remove_team_from_group(team);
|
2007-09-06 06:16:25 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// move the team's children to the kernel team
|
|
|
|
while (Team* child = team->children) {
|
|
|
|
// remove the child from the current team and add it to the kernel team
|
|
|
|
TeamLocker childLocker(child);
|
2008-07-17 02:55:17 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
remove_team_from_parent(team, child);
|
|
|
|
insert_team_into_parent(sKernelTeam, child);
|
|
|
|
|
|
|
|
// move job control entries too
|
|
|
|
sKernelTeam->stopped_children.entries.MoveFrom(
|
|
|
|
&team->stopped_children.entries);
|
|
|
|
sKernelTeam->continued_children.entries.MoveFrom(
|
|
|
|
&team->continued_children.entries);
|
|
|
|
|
|
|
|
// If the team was a session leader with controlling terminal,
|
|
|
|
// we need to send SIGHUP + SIGCONT to all newly-orphaned process
|
|
|
|
// groups with stopped processes. Due to locking complications we can't
|
|
|
|
// do that here, so we only check whether we were a reason for the
|
|
|
|
// child's process group not being an orphan and, if so, schedule a
|
|
|
|
// later check (cf. orphaned_process_group_check()).
|
|
|
|
if (isSessionLeader) {
|
|
|
|
ProcessGroup* childGroup = child->group;
|
|
|
|
if (childGroup->Session()->id == team->session_id
|
|
|
|
&& childGroup->id != team->group_id) {
|
|
|
|
childGroup->ScheduleOrphanedCheck();
|
|
|
|
}
|
2007-09-06 06:16:25 +04:00
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// Note, we don't move the dead children entries. Those will be deleted
|
|
|
|
// when the team structure is deleted.
|
2007-09-06 06:16:25 +04:00
|
|
|
}
|
|
|
|
|
2006-08-25 03:41:54 +04:00
|
|
|
// remove us from our parent
|
|
|
|
remove_team_from_parent(parent, team);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Kills all threads but the main thread of the team and shuts down user
|
|
|
|
debugging for it.
|
|
|
|
To be called on exit of the team's main thread. No locks must be held.
|
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
\param team The team in question.
|
|
|
|
\return The port of the debugger for the team, -1 if none. To be passed to
|
|
|
|
team_delete_team().
|
|
|
|
*/
|
|
|
|
port_id
|
2011-06-12 04:00:23 +04:00
|
|
|
team_shutdown_team(Team* team)
|
2006-08-25 03:41:54 +04:00
|
|
|
{
|
2010-01-20 12:34:53 +03:00
|
|
|
ASSERT(thread_get_current_thread() == team->main_thread);
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
TeamLocker teamLocker(team);
|
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
// Make sure debugging changes won't happen anymore.
|
2006-08-25 03:41:54 +04:00
|
|
|
port_id debuggerPort = -1;
|
2010-01-20 12:34:53 +03:00
|
|
|
while (true) {
|
|
|
|
// If a debugger change is in progress for the team, we'll have to
|
|
|
|
// wait until it is done.
|
|
|
|
ConditionVariableEntry waitForDebuggerEntry;
|
|
|
|
bool waitForDebugger = false;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
if (team->debug_info.debugger_changed_condition != NULL) {
|
|
|
|
team->debug_info.debugger_changed_condition->Add(
|
|
|
|
&waitForDebuggerEntry);
|
|
|
|
waitForDebugger = true;
|
|
|
|
} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
|
|
|
|
// The team is being debugged. That will stop with the termination
|
2011-06-12 04:00:23 +04:00
|
|
|
// of the nub thread. Since we set the team state to death, no one
|
|
|
|
// can install a debugger anymore. We fetch the debugger's port to
|
|
|
|
// send it a message at the bitter end.
|
2010-01-20 12:34:53 +03:00
|
|
|
debuggerPort = team->debug_info.debugger_port;
|
2009-11-27 21:10:03 +03:00
|
|
|
}
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
debugInfoLocker.Unlock();
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
if (!waitForDebugger)
|
|
|
|
break;
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
// wait for the debugger change to be finished
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Unlock();
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
waitForDebuggerEntry.Wait();
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Lock();
|
2010-01-20 12:34:53 +03:00
|
|
|
}
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// Mark the team as shutting down. That will prevent new threads from being
|
|
|
|
// created and debugger changes from taking place.
|
|
|
|
team->state = TEAM_STATE_SHUTDOWN;
|
|
|
|
|
|
|
|
// delete all timers
|
|
|
|
team->DeleteUserTimers(false);
|
|
|
|
|
|
|
|
// deactivate CPU time user timers for the team
|
|
|
|
InterruptsSpinLocker schedulerLocker(gSchedulerLock);
|
|
|
|
|
|
|
|
if (team->HasActiveCPUTimeUserTimers())
|
|
|
|
team->DeactivateCPUTimeUserTimers();
|
|
|
|
|
|
|
|
schedulerLocker.Unlock();
|
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
// kill all threads but the main thread
|
|
|
|
team_death_entry deathEntry;
|
|
|
|
deathEntry.condition.Init(team, "team death");
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
while (true) {
|
|
|
|
team->death_entry = &deathEntry;
|
|
|
|
deathEntry.remaining_threads = 0;
|
|
|
|
|
2011-01-11 00:54:38 +03:00
|
|
|
Thread* thread = team->thread_list;
|
2010-01-20 12:34:53 +03:00
|
|
|
while (thread != NULL) {
|
|
|
|
if (thread != team->main_thread) {
|
2011-06-12 04:00:23 +04:00
|
|
|
Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
|
|
|
|
send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
|
2010-01-20 12:34:53 +03:00
|
|
|
deathEntry.remaining_threads++;
|
|
|
|
}
|
|
|
|
|
|
|
|
thread = thread->team_next;
|
2006-08-25 03:41:54 +04:00
|
|
|
}
|
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
if (deathEntry.remaining_threads == 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
// there are threads to wait for
|
|
|
|
ConditionVariableEntry entry;
|
|
|
|
deathEntry.condition.Add(&entry);
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Unlock();
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
entry.Wait();
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Lock();
|
2006-08-25 03:41:54 +04:00
|
|
|
}
|
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
team->death_entry = NULL;
|
|
|
|
|
|
|
|
return debuggerPort;
|
|
|
|
}
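// Illustration only: the wait loop above is a generic "wait until all
// signalled threads have died" pattern -- count the threads, then block on a
// condition variable until the counter reaches zero. A minimal standalone
// C++ sketch of the same idea, using the standard library in place of the
// kernel's ConditionVariable:
#if 0
#include <condition_variable>
#include <mutex>

struct TeamDeathEntry {
	std::mutex lock;
	std::condition_variable condition;
	int remainingThreads = 0;

	void WaitForAll()
	{
		std::unique_lock<std::mutex> locker(lock);
		condition.wait(locker, [this] { return remainingThreads == 0; });
	}

	void ThreadDied()
	{
		std::lock_guard<std::mutex> locker(lock);
		if (--remainingThreads == 0)
			condition.notify_all();
	}
};
#endif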
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Called on team exit to notify threads waiting on the team and free most
|
|
|
|
resources associated with it.
|
|
|
|
The caller shouldn't hold any locks.
|
|
|
|
*/
|
2010-01-20 12:34:53 +03:00
|
|
|
void
|
2011-01-11 00:54:38 +03:00
|
|
|
team_delete_team(Team* team, port_id debuggerPort)
|
2010-01-20 12:34:53 +03:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
// Not quite in our job description, but work that has been left by
|
|
|
|
// team_remove_team() and that can be done now that we're not holding any
|
|
|
|
// locks.
|
|
|
|
orphaned_process_group_check();
|
|
|
|
|
2010-01-20 12:34:53 +03:00
|
|
|
team_id teamID = team->id;
|
|
|
|
|
|
|
|
ASSERT(team->num_threads == 0);
|
|
|
|
|
2006-08-25 03:41:54 +04:00
|
|
|
// If someone is waiting for this team to be loaded, but it dies
|
|
|
|
// unexpectedly before being done, we need to notify the waiting
|
|
|
|
// thread now.
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
TeamLocker teamLocker(team);
|
2006-08-25 03:41:54 +04:00
|
|
|
|
|
|
|
if (team->loading_info) {
|
|
|
|
// there's indeed someone waiting
|
2009-11-27 21:10:03 +03:00
|
|
|
struct team_loading_info* loadingInfo = team->loading_info;
|
2006-08-25 03:41:54 +04:00
|
|
|
team->loading_info = NULL;
|
|
|
|
|
|
|
|
loadingInfo->result = B_ERROR;
|
|
|
|
loadingInfo->done = true;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
InterruptsSpinLocker schedulerLocker(gSchedulerLock);
|
2006-08-25 03:41:54 +04:00
|
|
|
|
|
|
|
// wake up the waiting thread
|
2008-01-25 18:55:54 +03:00
|
|
|
if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
|
2006-08-25 03:41:54 +04:00
|
|
|
scheduler_enqueue_in_run_queue(loadingInfo->thread);
|
|
|
|
}
|
|
|
|
|
|
|
|
// notify team watchers
|
|
|
|
|
|
|
|
{
|
|
|
|
// we're not reachable from anyone anymore at this point, so we
|
|
|
|
// can safely access the list without any locking
|
2009-11-27 21:10:03 +03:00
|
|
|
struct team_watcher* watcher;
|
2007-08-27 00:37:54 +04:00
|
|
|
while ((watcher = (struct team_watcher*)list_remove_head_item(
|
|
|
|
&team->watcher_list)) != NULL) {
|
2006-08-25 03:41:54 +04:00
|
|
|
watcher->hook(teamID, watcher->data);
|
|
|
|
free(watcher);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamLocker.Unlock();
|
|
|
|
|
2009-04-12 01:45:25 +04:00
|
|
|
sNotificationService.Notify(TEAM_REMOVED, team);
|
2009-03-15 13:21:56 +03:00
|
|
|
|
2006-08-25 03:41:54 +04:00
|
|
|
// free team resources
|
|
|
|
|
2008-05-06 07:39:36 +04:00
|
|
|
delete_realtime_sem_context(team->realtime_sem_context);
|
2008-08-16 22:20:54 +04:00
|
|
|
xsi_sem_undo(team);
|
2006-08-25 03:41:54 +04:00
|
|
|
remove_images(team);
|
2009-12-02 19:12:15 +03:00
|
|
|
team->address_space->RemoveAndPut();
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->ReleaseReference();
|
2006-08-25 03:41:54 +04:00
|
|
|
|
|
|
|
// notify the debugger that the team is gone
|
|
|
|
user_debug_team_deleted(teamID, debuggerPort);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-01-11 00:54:38 +03:00
|
|
|
Team*
|
2006-08-25 03:41:54 +04:00
|
|
|
team_get_kernel_team(void)
|
|
|
|
{
|
|
|
|
return sKernelTeam;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
team_id
|
|
|
|
team_get_kernel_team_id(void)
|
|
|
|
{
|
|
|
|
if (!sKernelTeam)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return sKernelTeam->id;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
team_id
|
|
|
|
team_get_current_team_id(void)
|
|
|
|
{
|
|
|
|
return thread_get_current_thread()->team->id;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
status_t
|
2009-12-01 20:27:09 +03:00
|
|
|
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
|
2006-08-25 03:41:54 +04:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
if (id == sKernelTeam->id) {
|
2006-08-25 03:41:54 +04:00
|
|
|
// we're the kernel team, so we don't have to go through all
|
|
|
|
// the hassle (locking and hash lookup)
|
2009-12-02 19:12:15 +03:00
|
|
|
*_addressSpace = VMAddressSpace::GetKernel();
|
2006-08-25 03:41:54 +04:00
|
|
|
return B_OK;
|
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
InterruptsSpinLocker teamsLocker(sTeamHashLock);
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
Team* team = team_get_team_struct_locked(id);
|
|
|
|
if (team == NULL)
|
|
|
|
return B_BAD_VALUE;
|
2006-08-25 03:41:54 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->address_space->Get();
|
|
|
|
*_addressSpace = team->address_space;
|
|
|
|
return B_OK;
|
2006-08-25 03:41:54 +04:00
|
|
|
}
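// Caller-side sketch (illustration only; teamID is a placeholder, and
// VMAddressSpace::Put() is assumed as the counterpart of Get(), analogous to
// the RemoveAndPut() used above):
#if 0
VMAddressSpace* addressSpace;
if (team_get_address_space(teamID, &addressSpace) == B_OK) {
	// ... inspect or modify the address space ...
	addressSpace->Put();
		// balance the reference acquired by team_get_address_space()
}
#endif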
|
|
|
|
|
|
|
|
|
* Introduced new job_control_entry structure which, among other things,
is used instead of death_entry for team::dead_children.
* Added team::{stopped,continued}_children, which, analogously to
dead_children, are used to track the state of stopped/continued
children.
* A team does have a job_control_entry, which is allocated at team
creation time. It will be inserted into the parent's
{stopped,continued}_children lists as the team's main thread is
stopped/continued and removed when waitpid() retrieves the child
state. When the team dies the entry is detached from the team and goes
into the parent's dead_children list.
* Removed the wait_for_any field from team_dead_children. It was solely
used to avoid deletion of the contained entries in certain situations.
wait_for_child() (the waitpid() backend) always deletes an entry now,
regardless of whether other threads are waiting; that's in
accordance with the waitpid() specification. wait_for_thread() removes
the entry only, if the caller is the parent of the respective team.
* Introduced team_set_job_control_state() which performs the job
control entry transitions between the respective lists and wakes up
threads waiting in wait_for_child(). It is invoked on team death and
when the team's main thread receives job control signals.
* Reorganized wait_for_child(). It handles WCONTINUED and WUNTRACED now,
too. Removed a block that interpreted the supplied ID as thread ID.
* Added missing parts in waitpid().
Job control starts to work, though it seems to have some glitches.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22088 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-08-28 07:29:14 +04:00
|
|
|
/*! Sets the team's job control state.
|
2011-06-12 04:00:23 +04:00
|
|
|
The caller must hold the parent team's lock. Interrupts are allowed to be
|
|
|
|
enabled or disabled. In the latter case the scheduler lock may be held as
|
|
|
|
well.
|
|
|
|
\param team The team whose job control state shall be set.
|
|
|
|
\param newState The new state to be set.
|
|
|
|
\param signal The signal the new state was caused by. Can be \c NULL, if none. Then
|
|
|
|
the caller is responsible for filling in the following fields of the
|
|
|
|
entry before releasing the parent team's lock, unless the new state is
|
|
|
|
\c JOB_CONTROL_STATE_NONE:
|
|
|
|
- \c signal: The number of the signal causing the state change.
|
|
|
|
- \c signaling_user: The real UID of the user sending the signal.
|
|
|
|
\param schedulerLocked Indicates whether the scheduler lock is being held, too.
|
2007-08-28 07:29:14 +04:00
|
|
|
*/
|
|
|
|
void
|
2011-01-11 00:54:38 +03:00
|
|
|
team_set_job_control_state(Team* team, job_control_state newState,
|
2011-06-12 04:00:23 +04:00
|
|
|
Signal* signal, bool schedulerLocked)
|
2007-08-28 07:29:14 +04:00
|
|
|
{
|
|
|
|
if (team == NULL || team->job_control_entry == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// don't touch anything, if the state stays the same or the team is already
|
|
|
|
// dead
|
|
|
|
job_control_entry* entry = team->job_control_entry;
|
|
|
|
if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
|
|
|
|
return;
|
|
|
|
|
2008-01-18 03:01:32 +03:00
|
|
|
T(SetJobControlState(team->id, newState, signal));
|
|
|
|
|
2007-08-28 07:29:14 +04:00
|
|
|
// remove from the old list
|
|
|
|
switch (entry->state) {
|
|
|
|
case JOB_CONTROL_STATE_NONE:
|
|
|
|
// entry is in no list ATM
|
|
|
|
break;
|
|
|
|
case JOB_CONTROL_STATE_DEAD:
|
|
|
|
// can't get here
|
|
|
|
break;
|
|
|
|
case JOB_CONTROL_STATE_STOPPED:
|
2011-01-11 00:54:38 +03:00
|
|
|
team->parent->stopped_children.entries.Remove(entry);
|
2007-08-28 07:29:14 +04:00
|
|
|
break;
|
|
|
|
case JOB_CONTROL_STATE_CONTINUED:
|
2011-01-11 00:54:38 +03:00
|
|
|
team->parent->continued_children.entries.Remove(entry);
|
2007-08-28 07:29:14 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
entry->state = newState;
|
2011-06-12 04:00:23 +04:00
|
|
|
|
|
|
|
if (signal != NULL) {
|
|
|
|
entry->signal = signal->Number();
|
|
|
|
entry->signaling_user = signal->SendingUser();
|
|
|
|
}
|
2007-08-28 07:29:14 +04:00
|
|
|
|
|
|
|
// add to new list
|
|
|
|
team_job_control_children* childList = NULL;
|
|
|
|
switch (entry->state) {
|
|
|
|
case JOB_CONTROL_STATE_NONE:
|
|
|
|
// entry doesn't get into any list
|
|
|
|
break;
|
|
|
|
case JOB_CONTROL_STATE_DEAD:
|
2011-01-11 00:54:38 +03:00
|
|
|
childList = &team->parent->dead_children;
|
|
|
|
team->parent->dead_children.count++;
|
2007-08-28 07:29:14 +04:00
|
|
|
break;
|
|
|
|
case JOB_CONTROL_STATE_STOPPED:
|
2011-01-11 00:54:38 +03:00
|
|
|
childList = &team->parent->stopped_children;
|
2007-08-28 07:29:14 +04:00
|
|
|
break;
|
|
|
|
case JOB_CONTROL_STATE_CONTINUED:
|
2011-01-11 00:54:38 +03:00
|
|
|
childList = &team->parent->continued_children;
|
2007-08-28 07:29:14 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (childList != NULL) {
|
|
|
|
childList->entries.Add(entry);
|
2011-01-11 00:54:38 +03:00
|
|
|
team->parent->dead_children.condition_variable.NotifyAll(
|
2011-06-12 04:00:23 +04:00
|
|
|
schedulerLocked);
|
2007-08-28 07:29:14 +04:00
|
|
|
}
|
|
|
|
}
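// The function above is essentially a three-list state machine: a team's
// job_control_entry lives in at most one of its parent's stopped, continued,
// or dead lists. A compact standalone sketch of the transition logic
// (illustration only; std::list stands in for the kernel's lists):
#if 0
#include <list>

enum job_state { NONE, STOPPED, CONTINUED, DEAD };

struct Entry { job_state state = NONE; };

struct Parent {
	std::list<Entry*> stopped, continued, dead;

	void SetState(Entry* entry, job_state newState)
	{
		if (entry->state == newState || entry->state == DEAD)
			return;

		// remove from the old list (NONE: in no list; DEAD is final)
		if (entry->state == STOPPED)
			stopped.remove(entry);
		else if (entry->state == CONTINUED)
			continued.remove(entry);

		entry->state = newState;

		// add to the new list
		if (newState == STOPPED)
			stopped.push_back(entry);
		else if (newState == CONTINUED)
			continued.push_back(entry);
		else if (newState == DEAD)
			dead.push_back(entry);
	}
};
#endif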
|
|
|
|
|
|
|
|
|
2011-06-14 17:04:31 +04:00
|
|
|
/*! Inits the given team's exit information, if not yet initialized, to some
|
|
|
|
generic "killed" status.
|
|
|
|
The caller must not hold the team's lock. Interrupts must be enabled.
|
|
|
|
|
|
|
|
\param team The team whose exit info shall be initialized.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
team_init_exit_info_on_error(Team* team)
|
|
|
|
{
|
|
|
|
TeamLocker teamLocker(team);
|
|
|
|
|
|
|
|
if (!team->exit.initialized) {
|
|
|
|
team->exit.reason = CLD_KILLED;
|
|
|
|
team->exit.signal = SIGKILL;
|
|
|
|
team->exit.signaling_user = geteuid();
|
|
|
|
team->exit.status = 0;
|
|
|
|
team->exit.initialized = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Adds a hook to the team that is called as soon as this team goes away.
|
2007-10-11 12:30:18 +04:00
|
|
|
This call might get public in the future.
|
|
|
|
*/
|
2005-08-03 16:00:42 +04:00
|
|
|
status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
|
2005-08-03 16:00:42 +04:00
|
|
|
{
|
|
|
|
if (hook == NULL || teamID < B_OK)
|
|
|
|
return B_BAD_VALUE;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// create the watcher object
|
|
|
|
team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
|
2005-08-03 16:00:42 +04:00
|
|
|
if (watcher == NULL)
|
|
|
|
return B_NO_MEMORY;
|
|
|
|
|
2005-08-03 20:57:40 +04:00
|
|
|
watcher->hook = hook;
|
|
|
|
watcher->data = data;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// add watcher, if the team isn't already dying
|
|
|
|
// get the team
|
|
|
|
Team* team = Team::GetAndLock(teamID);
|
2005-08-03 16:00:42 +04:00
|
|
|
if (team == NULL) {
|
|
|
|
free(watcher);
|
|
|
|
return B_BAD_TEAM_ID;
|
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
list_add_item(&team->watcher_list, watcher);
|
|
|
|
|
|
|
|
team->UnlockAndReleaseReference();
|
|
|
|
|
2005-08-03 16:00:42 +04:00
|
|
|
return B_OK;
|
|
|
|
}
|
2008-07-17 02:55:17 +04:00
|
|
|
|
|
|
|
|
2005-08-03 16:00:42 +04:00
|
|
|
status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
|
2005-08-03 16:00:42 +04:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
if (hook == NULL || teamID < 0)
|
2005-08-03 16:00:42 +04:00
|
|
|
return B_BAD_VALUE;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// get team and remove watcher (if present)
|
|
|
|
Team* team = Team::GetAndLock(teamID);
|
|
|
|
if (team == NULL)
|
|
|
|
return B_BAD_TEAM_ID;
|
2005-08-03 16:00:42 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// search for watcher
|
|
|
|
team_watcher* watcher = NULL;
|
|
|
|
while ((watcher = (team_watcher*)list_get_next_item(
|
|
|
|
&team->watcher_list, watcher)) != NULL) {
|
|
|
|
if (watcher->hook == hook && watcher->data == data) {
|
|
|
|
// got it!
|
|
|
|
list_remove_item(&team->watcher_list, watcher);
|
|
|
|
break;
|
2005-08-03 16:00:42 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->UnlockAndReleaseReference();
|
2005-08-03 16:00:42 +04:00
|
|
|
|
|
|
|
if (watcher == NULL)
|
|
|
|
return B_ENTRY_NOT_FOUND;
|
|
|
|
|
|
|
|
free(watcher);
|
|
|
|
return B_OK;
|
|
|
|
}
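// Usage sketch for the watcher pair above (hook name and team ID are
// placeholders; illustration only):
#if 0
static void
my_team_gone(team_id team, void* data)
{
	// called while the team is being deleted and no longer reachable
	dprintf("team %" B_PRId32 " is gone\n", team);
}

static void
watch_example(team_id someTeamID)
{
	if (start_watching_team(someTeamID, &my_team_gone, NULL) != B_OK)
		return;

	// ... later, if the team is still alive and we lose interest:
	stop_watching_team(someTeamID, &my_team_gone, NULL);
}
#endif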
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Allocates a user_thread structure from the team.
|
|
|
|
The team lock must be held, unless the function is called for the team's
|
|
|
|
main thread. Interrupts must be enabled.
|
2008-05-11 20:25:35 +04:00
|
|
|
*/
|
|
|
|
struct user_thread*
|
2011-01-11 00:54:38 +03:00
|
|
|
team_allocate_user_thread(Team* team)
|
2008-05-11 20:25:35 +04:00
|
|
|
{
|
|
|
|
if (team->user_data == 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
// take an entry from the free list, if any
|
|
|
|
if (struct free_user_thread* entry = team->free_user_threads) {
|
2011-06-12 04:00:23 +04:00
|
|
|
user_thread* thread = entry->thread;
|
2008-05-11 20:25:35 +04:00
|
|
|
team->free_user_threads = entry->next;
|
2011-06-12 04:00:23 +04:00
|
|
|
free(entry);
|
2008-05-11 20:25:35 +04:00
|
|
|
return thread;
|
2011-06-12 04:00:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
while (true) {
|
2008-05-11 20:25:35 +04:00
|
|
|
// enough space left?
|
2011-06-12 04:00:23 +04:00
|
|
|
size_t needed = ROUNDUP(sizeof(user_thread), 8);
|
|
|
|
if (team->user_data_size - team->used_user_data < needed) {
|
|
|
|
// try to resize the area
|
|
|
|
if (resize_area(team->user_data_area,
|
|
|
|
team->user_data_size + B_PAGE_SIZE) != B_OK) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2008-05-11 20:25:35 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// resized user area successfully -- try to allocate the user_thread
|
|
|
|
// again
|
|
|
|
team->user_data_size += B_PAGE_SIZE;
|
|
|
|
continue;
|
|
|
|
}
|
2008-05-11 20:25:35 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// allocate the user_thread
|
|
|
|
user_thread* thread
|
|
|
|
= (user_thread*)(team->user_data + team->used_user_data);
|
|
|
|
team->used_user_data += needed;
|
2008-05-11 20:25:35 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
return thread;
|
|
|
|
}
|
2008-05-11 20:25:35 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
/*! Frees the given user_thread structure.
|
|
|
|
The team's lock must not be held. Interrupts must be enabled.
|
|
|
|
\param team The team the user thread was allocated from.
|
|
|
|
\param userThread The user thread to free.
|
2008-05-11 20:25:35 +04:00
|
|
|
*/
|
|
|
|
void
|
2011-06-12 04:00:23 +04:00
|
|
|
team_free_user_thread(Team* team, struct user_thread* userThread)
|
2008-05-11 20:25:35 +04:00
|
|
|
{
|
|
|
|
if (userThread == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// create a free list entry
|
|
|
|
free_user_thread* entry
|
|
|
|
= (free_user_thread*)malloc(sizeof(free_user_thread));
|
|
|
|
if (entry == NULL) {
|
|
|
|
// we have to leak the user thread :-/
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// add to free list
|
|
|
|
TeamLocker teamLocker(team);
|
2009-03-18 04:46:29 +03:00
|
|
|
|
2008-05-11 20:25:35 +04:00
|
|
|
entry->thread = userThread;
|
2011-06-12 04:00:23 +04:00
|
|
|
entry->next = team->free_user_threads;
|
|
|
|
team->free_user_threads = entry;
|
2008-05-11 20:25:35 +04:00
|
|
|
}
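// The pair above implements a simple free-list allocator: slots are carved
// out of the team's user data area and, once freed, chained for reuse. The
// kernel keeps the list nodes in separate kernel allocations, since the
// slots themselves live in the userland-visible area. Generic sketch of the
// underlying pattern (bump_allocate() is hypothetical; illustration only):
#if 0
struct FreeEntry { FreeEntry* next; };

static FreeEntry* sFreeList = NULL;

static void*
allocate_slot(size_t size)
{
	if (FreeEntry* entry = sFreeList) {
		// reuse a previously freed slot first
		sFreeList = entry->next;
		return entry;
	}
	return bump_allocate(size);
		// otherwise take fresh space from the backing area
}

static void
release_slot(void* slot)
{
	// thread the slot itself onto the free list
	FreeEntry* entry = (FreeEntry*)slot;
	entry->next = sFreeList;
	sFreeList = entry;
}
#endif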
|
|
|
|
|
|
|
|
|
2010-12-16 04:15:35 +03:00
|
|
|
// #pragma mark - Associated data interface
|
|
|
|
|
|
|
|
|
|
|
|
AssociatedData::AssociatedData()
|
|
|
|
:
|
|
|
|
fOwner(NULL)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
AssociatedData::~AssociatedData()
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
AssociatedDataOwner::AssociatedDataOwner()
|
|
|
|
{
|
|
|
|
mutex_init(&fLock, "associated data owner");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
AssociatedDataOwner::~AssociatedDataOwner()
|
|
|
|
{
|
|
|
|
mutex_destroy(&fLock);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool
|
|
|
|
AssociatedDataOwner::AddData(AssociatedData* data)
|
|
|
|
{
|
|
|
|
MutexLocker locker(fLock);
|
|
|
|
|
|
|
|
if (data->Owner() != NULL)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
data->AcquireReference();
|
|
|
|
fList.Add(data);
|
|
|
|
data->SetOwner(this);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool
|
|
|
|
AssociatedDataOwner::RemoveData(AssociatedData* data)
|
|
|
|
{
|
|
|
|
MutexLocker locker(fLock);
|
|
|
|
|
|
|
|
if (data->Owner() != this)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
data->SetOwner(NULL);
|
|
|
|
fList.Remove(data);
|
|
|
|
|
|
|
|
locker.Unlock();
|
|
|
|
|
|
|
|
data->ReleaseReference();
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
AssociatedDataOwner::PrepareForDeletion()
|
|
|
|
{
|
|
|
|
MutexLocker locker(fLock);
|
|
|
|
|
|
|
|
// move all data to a temporary list and unset the owner
|
|
|
|
DataList list;
|
|
|
|
list.MoveFrom(&fList);
|
|
|
|
|
|
|
|
for (DataList::Iterator it = list.GetIterator();
|
|
|
|
AssociatedData* data = it.Next();) {
|
|
|
|
data->SetOwner(NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
locker.Unlock();
|
|
|
|
|
|
|
|
// call the notification hooks and release our references
|
|
|
|
while (AssociatedData* data = list.RemoveHead()) {
|
|
|
|
data->OwnerDeleted(this);
|
|
|
|
data->ReleaseReference();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*! Associates data with the current team.
|
|
|
|
When the team is deleted, the data object is notified.
|
|
|
|
The team acquires a reference to the object.
|
|
|
|
|
|
|
|
\param data The data object.
|
|
|
|
\return \c true on success, \c false otherwise. Fails only when the supplied
|
|
|
|
data object is already associated with another owner.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
team_associate_data(AssociatedData* data)
|
|
|
|
{
|
|
|
|
return thread_get_current_thread()->team->AddData(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*! Dissociates data from the current team.
|
|
|
|
Balances an earlier call to team_associate_data().
|
|
|
|
|
|
|
|
\param data The data object.
|
|
|
|
\return \c true on success, \c false otherwise. Fails only when the data
|
|
|
|
object is not associated with the current team.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
team_dissociate_data(AssociatedData* data)
|
|
|
|
{
|
|
|
|
return thread_get_current_thread()->team->RemoveData(data);
|
|
|
|
}
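// Client-side sketch of the associated data interface (class name is a
// placeholder; assumes the object starts with one reference, as usual for
// reference-counted kernel objects; illustration only):
#if 0
struct MyTeamData : AssociatedData {
	virtual void OwnerDeleted(AssociatedDataOwner* owner)
	{
		// the team died without team_dissociate_data() having been called;
		// release whatever this object tracks
	}
};

static void
associate_example()
{
	MyTeamData* data = new(std::nothrow) MyTeamData;
	if (data == NULL)
		return;

	team_associate_data(data);
		// on success the team holds its own reference now
	data->ReleaseReference();
		// drop the creation reference; the team keeps the object alive
}
#endif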
|
|
|
|
|
|
|
|
|
2006-08-25 03:41:54 +04:00
|
|
|
// #pragma mark - Public kernel API
|
2004-09-02 05:41:06 +04:00
|
|
|
|
|
|
|
|
2004-10-14 22:07:04 +04:00
|
|
|
thread_id
|
2009-11-27 21:10:03 +03:00
|
|
|
load_image(int32 argCount, const char** args, const char** env)
|
2009-03-02 03:26:22 +03:00
|
|
|
{
|
|
|
|
return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
|
|
|
|
B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
thread_id
|
|
|
|
load_image_etc(int32 argCount, const char* const* args,
|
|
|
|
const char* const* env, int32 priority, team_id parentID, uint32 flags)
|
2004-10-14 22:07:04 +04:00
|
|
|
{
|
2008-06-24 07:37:07 +04:00
|
|
|
// we need to flatten the args and environment
|
|
|
|
|
|
|
|
if (args == NULL)
|
|
|
|
return B_BAD_VALUE;
|
|
|
|
|
|
|
|
// determine total needed size
|
|
|
|
int32 argSize = 0;
|
|
|
|
for (int32 i = 0; i < argCount; i++)
|
|
|
|
argSize += strlen(args[i]) + 1;
|
|
|
|
|
2004-10-14 22:07:04 +04:00
|
|
|
int32 envCount = 0;
|
2008-06-24 07:37:07 +04:00
|
|
|
int32 envSize = 0;
|
|
|
|
while (env != NULL && env[envCount] != NULL)
|
|
|
|
envSize += strlen(env[envCount++]) + 1;
|
|
|
|
|
|
|
|
int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
|
|
|
|
if (size > MAX_PROCESS_ARGS_SIZE)
|
|
|
|
return B_TOO_MANY_ARGS;
|
|
|
|
|
|
|
|
// allocate space
|
|
|
|
char** flatArgs = (char**)malloc(size);
|
|
|
|
if (flatArgs == NULL)
|
|
|
|
return B_NO_MEMORY;
|
|
|
|
|
|
|
|
char** slot = flatArgs;
|
|
|
|
char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
|
|
|
|
|
|
|
|
// copy arguments and environment
|
|
|
|
for (int32 i = 0; i < argCount; i++) {
|
|
|
|
int32 argSize = strlen(args[i]) + 1;
|
|
|
|
memcpy(stringSpace, args[i], argSize);
|
|
|
|
*slot++ = stringSpace;
|
|
|
|
stringSpace += argSize;
|
|
|
|
}
|
2004-10-15 05:52:07 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
*slot++ = NULL;
|
2004-10-14 22:07:04 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
for (int32 i = 0; i < envCount; i++) {
|
|
|
|
int32 envSize = strlen(env[i]) + 1;
|
|
|
|
memcpy(stringSpace, env[i], envSize);
|
|
|
|
*slot++ = stringSpace;
|
|
|
|
stringSpace += envSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
*slot++ = NULL;
|
|
|
|
|
2009-03-02 03:26:22 +03:00
|
|
|
thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
|
|
|
|
priority, parentID, flags, -1, 0);
|
2008-06-24 07:37:07 +04:00
|
|
|
|
|
|
|
free(flatArgs);
|
2009-03-02 03:26:22 +03:00
|
|
|
// load_image_internal() unset our variable if it took over ownership
|
2008-06-24 07:37:07 +04:00
|
|
|
|
|
|
|
return thread;
|
2004-10-14 22:07:04 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-03-03 03:57:00 +03:00
|
|
|
status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
wait_for_team(team_id id, status_t* _returnCode)
|
2004-03-03 03:57:00 +03:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
// check whether the team exists
|
|
|
|
InterruptsSpinLocker teamsLocker(sTeamHashLock);
|
2004-03-03 03:57:00 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
Team* team = team_get_team_struct_locked(id);
|
|
|
|
if (team == NULL)
|
|
|
|
return B_BAD_TEAM_ID;
|
2004-03-03 03:57:00 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
id = team->id;
|
2004-03-03 03:57:00 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamsLocker.Unlock();
|
2004-03-03 03:57:00 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// wait for the main thread (it has the same ID as the team)
|
|
|
|
return wait_for_thread(id, _returnCode);
|
2004-03-03 03:57:00 +03:00
|
|
|
}
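// Combined usage sketch (illustration only): load_image() typically leaves
// the new team's main thread suspended, so it is resumed before waiting for
// the team to finish. The team ID equals the main thread's ID.
#if 0
static status_t
run_and_wait(void)
{
	const char* args[] = { "/bin/ls", "-l", NULL };
	thread_id thread = load_image(2, args, NULL);
	if (thread < 0)
		return thread;

	resume_thread(thread);

	status_t returnCode;
	return wait_for_team(thread, &returnCode);
}
#endif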
|
|
|
|
|
|
|
|
|
|
|
|
status_t
|
|
|
|
kill_team(team_id id)
|
2002-08-04 03:39:50 +04:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
InterruptsSpinLocker teamsLocker(sTeamHashLock);
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
Team* team = team_get_team_struct_locked(id);
|
|
|
|
if (team == NULL)
|
|
|
|
return B_BAD_TEAM_ID;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
id = team->id;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
teamsLocker.Unlock();
|
2003-01-07 12:40:59 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
if (team == sKernelTeam)
|
|
|
|
return B_NOT_ALLOWED;
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// Just kill the team's main thread (it has the same ID as the team). The
|
|
|
|
// cleanup code there will take care of the team.
|
|
|
|
return kill_thread(id);
|
2002-08-04 03:39:50 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
_get_team_info(team_id id, team_info* info, size_t size)
|
2002-08-04 03:39:50 +04:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
// get the team
|
|
|
|
Team* team = Team::Get(id);
|
|
|
|
if (team == NULL)
|
|
|
|
return B_BAD_TEAM_ID;
|
|
|
|
BReference<Team> teamReference(team, true);
|
2007-01-13 01:54:21 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// fill in the info
|
|
|
|
return fill_team_info(team, info, size);
|
2002-08-04 03:39:50 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
_get_next_team_info(int32* cookie, team_info* info, size_t size)
|
2002-08-04 03:39:50 +04:00
|
|
|
{
|
2002-12-03 17:17:53 +03:00
|
|
|
int32 slot = *cookie;
|
2006-05-30 04:21:22 +04:00
|
|
|
if (slot < 1)
|
|
|
|
slot = 1;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
InterruptsSpinLocker locker(sTeamHashLock);
|
2002-12-03 17:17:53 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team_id lastTeamID = peek_next_thread_id();
|
|
|
|
// TODO: This is broken, since the id can wrap around!
|
2002-12-03 17:17:53 +03:00
|
|
|
|
|
|
|
// get next valid team
|
2011-06-12 04:00:23 +04:00
|
|
|
Team* team = NULL;
|
2005-03-09 04:43:56 +03:00
|
|
|
while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
|
2002-08-04 03:39:50 +04:00
|
|
|
slot++;
|
2002-12-03 17:17:53 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
if (team == NULL)
|
|
|
|
return B_BAD_TEAM_ID;
|
2002-12-03 17:17:53 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// get a reference to the team and unlock
|
|
|
|
BReference<Team> teamReference(team);
|
|
|
|
locker.Unlock();
|
2002-08-04 03:39:50 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// fill in the info
|
|
|
|
*cookie = ++slot;
|
|
|
|
return fill_team_info(team, info, size);
|
2002-08-04 03:39:50 +04:00
|
|
|
}
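// Matching public-API sketch: iterating all teams with the cookie-based
// interface that backs _get_next_team_info() (illustration only):
#if 0
static void
list_teams(void)
{
	int32 cookie = 0;
	team_info info;
	while (get_next_team_info(&cookie, &info) == B_OK)
		dprintf("team %" B_PRId32 ": %s\n", info.team, info.args);
}
#endif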
|
|
|
|
|
2002-08-05 09:26:52 +04:00
|
|
|
|
2004-11-26 00:20:17 +03:00
|
|
|
status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
_get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
|
2004-11-26 00:20:17 +03:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
if (size != sizeof(team_usage_info))
|
2004-11-26 00:20:17 +03:00
|
|
|
return B_BAD_VALUE;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
return common_get_team_usage_info(id, who, info, 0);
|
2004-11-26 00:20:17 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-10-14 18:46:12 +04:00
|
|
|
pid_t
|
|
|
|
getpid(void)
|
|
|
|
{
|
2007-01-13 01:54:21 +03:00
|
|
|
return thread_get_current_thread()->team->id;
|
2004-10-14 18:46:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
pid_t
|
|
|
|
getppid(void)
|
|
|
|
{
|
2011-01-11 00:54:38 +03:00
|
|
|
Team* team = thread_get_current_thread()->team;
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
TeamLocker teamLocker(team);
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
return team->parent->id;
|
2004-10-14 18:46:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
pid_t
|
2011-06-12 04:00:23 +04:00
|
|
|
getpgid(pid_t id)
|
2004-10-14 18:46:12 +04:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
if (id < 0) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
if (id == 0) {
|
|
|
|
// get process group of the calling process
|
|
|
|
Team* team = thread_get_current_thread()->team;
|
|
|
|
TeamLocker teamLocker(team);
|
|
|
|
return team->group_id;
|
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// get the team
|
|
|
|
Team* team = Team::GetAndLock(id);
|
|
|
|
if (team == NULL) {
|
|
|
|
errno = ESRCH;
|
|
|
|
return -1;
|
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// get the team's process group ID
|
|
|
|
pid_t groupID = team->group_id;
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->UnlockAndReleaseReference();
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
return groupID;
|
2004-10-14 18:46:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
pid_t
|
2011-06-12 04:00:23 +04:00
|
|
|
getsid(pid_t id)
|
2004-10-14 18:46:12 +04:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
if (id < 0) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
if (id == 0) {
|
|
|
|
// get session of the calling process
|
|
|
|
Team* team = thread_get_current_thread()->team;
|
|
|
|
TeamLocker teamLocker(team);
|
|
|
|
return team->session_id;
|
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// get the team
|
|
|
|
Team* team = Team::GetAndLock(id);
|
|
|
|
if (team == NULL) {
|
|
|
|
errno = ESRCH;
|
|
|
|
return -1;
|
|
|
|
}
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// get the team's session ID
|
|
|
|
pid_t sessionID = team->session_id;
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
team->UnlockAndReleaseReference();
|
2004-10-14 18:46:12 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
return sessionID;
|
2004-10-14 18:46:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-08-25 03:41:54 +04:00
|
|
|
// #pragma mark - User syscalls
|
2002-12-03 17:17:53 +03:00
|
|
|
|
|
|
|
|
2004-09-15 19:45:37 +04:00
|
|
|
status_t
|
2009-11-27 21:10:03 +03:00
|
|
|
_user_exec(const char* userPath, const char* const* userFlatArgs,
|
2011-01-02 22:12:19 +03:00
|
|
|
size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
|
2004-09-15 19:45:37 +04:00
|
|
|
{
|
2008-07-09 07:58:38 +04:00
|
|
|
// NOTE: Since this function normally doesn't return, don't use automatic
|
|
|
|
// variables that need destruction in the function scope.
|
2004-10-07 19:34:17 +04:00
|
|
|
char path[B_PATH_NAME_LENGTH];
|
2004-09-15 19:45:37 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
|
2004-10-07 19:34:17 +04:00
|
|
|
|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
|
|
|
|
return B_BAD_ADDRESS;
|
2004-09-15 19:45:37 +04:00
|
|
|
|
2008-06-24 07:37:07 +04:00
|
|
|
// copy and relocate the flat arguments
|
|
|
|
char** flatArgs;
|
|
|
|
status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
|
|
|
|
argCount, envCount, flatArgs);
|
|
|
|
|
|
|
|
if (error == B_OK) {
|
|
|
|
error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
|
2011-01-02 22:12:19 +03:00
|
|
|
envCount, umask);
|
2008-06-24 07:37:07 +04:00
|
|
|
// this one only returns in case of error
|
|
|
|
}
|
|
|
|
|
|
|
|
free(flatArgs);
|
|
|
|
return error;
|
2004-09-15 19:45:37 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
thread_id
|
|
|
|
_user_fork(void)
|
|
|
|
{
|
2004-10-10 21:30:42 +04:00
|
|
|
return fork_team();
|
2004-09-15 19:45:37 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
pid_t
|
|
|
|
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
|
2004-09-15 19:45:37 +04:00
|
|
|
{
|
2011-06-12 04:00:23 +04:00
|
|
|
if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
|
2004-09-15 19:45:37 +04:00
|
|
|
return B_BAD_ADDRESS;
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
siginfo_t info;
|
|
|
|
pid_t foundChild = wait_for_child(child, flags, info);
|
|
|
|
if (foundChild < 0)
|
|
|
|
return syscall_restart_handle_post(foundChild);
|
2008-02-17 18:48:30 +03:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// copy info back to userland
|
|
|
|
if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
|
|
|
|
return B_BAD_ADDRESS;
|
2004-09-15 19:45:37 +04:00
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
return foundChild;
|
2004-09-15 19:45:37 +04:00
|
|
|
}
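
// Usage sketch (illustrative only): this syscall backs the POSIX wait family;
// waitid() also receives the siginfo_t copied back above. Given some
// previously forked child, and assuming <sys/wait.h> and <stdio.h>:
//
//	int status;
//	pid_t who = waitpid(child, &status, 0);
//	if (who >= 0 && WIFEXITED(status))
//		printf("%d exited with %d\n", (int)who, WEXITSTATUS(status));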


pid_t
_user_process_info(pid_t process, int32 which)
{
	// we only allow returning the parent of the current process
	if (which == PARENT_ID
		&& process != 0 && process != thread_get_current_thread()->team->id)
		return B_BAD_VALUE;

	pid_t result;
	switch (which) {
		case SESSION_ID:
			result = getsid(process);
			break;
		case GROUP_ID:
			result = getpgid(process);
			break;
		case PARENT_ID:
			result = getppid();
			break;
		default:
			return B_BAD_VALUE;
	}

	return result >= 0 ? result : errno;
}
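
// Usage sketch (illustrative only): the POSIX wrappers map onto the cases
// above -- getsid()/getpgid() may name any process, getppid() only the
// caller. Assuming <unistd.h>:
//
//	pid_t session = getsid(0);		// session ID of the calling process
//	pid_t group = getpgid(0);		// process group of the calling process
//	pid_t parent = getppid();		// parent of the calling process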


pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the
	//   target process as group leader.
	// * Set the target process' process group to an already existing one in
	//   the same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock
			// in the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session
			// leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child
			// of the calling team and in the same session. (If that's the
			// case it isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail for a child that has already
			// executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different
			// session. That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll(false);

		return group->id;
	}
}
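
// Usage sketch (illustrative only): a shell doing job control typically puts
// a new child into its own process group from both sides, so it doesn't
// matter which process runs first. Assuming <unistd.h>:
//
//	pid_t child = fork();
//	if (child == 0) {
//		setpgid(0, 0);			// child: create group with its own ID
//		// ... exec the job here ...
//	} else if (child > 0)
//		setpgid(child, child);	// parent: same request, one of them wins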


pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll(false);

	return group->id;
}
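
// Usage sketch (illustrative only): the classic daemonization sequence forks
// first, so the surviving child is guaranteed not to be a process group
// leader and the check above passes. Assuming <unistd.h> and <stdlib.h>:
//
//	if (fork() > 0)
//		exit(0);			// parent exits
//	if (setsid() < 0)		// child becomes session + group leader
//		exit(1);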


status_t
_user_wait_for_team(team_id id, status_t* _userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				!= B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return syscall_restart_handle_post(status);
}


thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %ld\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
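
// Usage sketch (illustrative only): userland reaches this syscall through the
// public load_image() API, which flattens argv/environ into the buffer this
// function copies in. Assuming <image.h> and <OS.h>:
//
//	extern char** environ;
//	const char* argv[] = { "/bin/ls", "-l", NULL };
//	thread_id team = load_image(2, argv, (const char**)environ);
//	if (team >= 0) {
//		status_t result;
//		resume_thread(team);		// the main thread starts suspended
//		wait_for_thread(team, &result);
//	}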


void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return
	// to userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}


status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}


status_t
_user_get_team_info(team_id id, team_info* userInfo)
{
	status_t status;
	team_info info;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_team_info(id, &info, sizeof(team_info));
	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}
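
// Usage sketch (illustrative only): the public get_team_info() wrapper feeds
// this syscall. Assuming <OS.h> and <stdio.h>:
//
//	team_info info;
//	if (get_team_info(B_CURRENT_TEAM, &info) == B_OK)
//		printf("%s: %d threads\n", info.args, (int)info.thread_count);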


status_t
_user_get_next_team_info(int32* userCookie, team_info* userInfo)
{
	status_t status;
	team_info info;
	int32 cookie;

	if (!IS_USER_ADDRESS(userCookie)
		|| !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
	if (status != B_OK)
		return status;

	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}
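
// Usage sketch (illustrative only): the cookie protocol above is what lets
// get_next_team_info() enumerate all running teams. Assuming <OS.h> and
// <stdio.h>:
//
//	int32 cookie = 0;
//	team_info info;
//	while (get_next_team_info(&cookie, &info) == B_OK)
//		printf("team %d: %s\n", (int)info.team, info.args);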


team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}


status_t
_user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
	size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	team_usage_info info;
	status_t status = common_get_team_usage_info(team, who, &info,
		B_CHECK_PERMISSION);

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(userInfo, &info, size) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return status;
}
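
// Usage sketch (illustrative only): the public wrapper always passes
// sizeof(team_usage_info), which is why the size check above is strict.
// Assuming <OS.h> and <stdio.h>:
//
//	team_usage_info usage;
//	if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
//			== B_OK) {
//		printf("user: %lld us, kernel: %lld us\n",
//			(long long)usage.user_time, (long long)usage.kernel_time);
//	}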


status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		};

		ExtendedTeamData* teamClone
			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
			// It would be nicer to use new, but then we'd have to use
			// ObjectDeleter and declare the structure outside of the function
			// due to template parameter restrictions.
		if (teamClone == NULL)
			return B_NO_MEMORY;
		MemoryDeleter teamCloneDeleter(teamClone);

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone->id = team->id;
			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
			teamClone->group_id = team->group_id;
			teamClone->session_id = team->session_id;
			teamClone->real_uid = team->real_uid;
			teamClone->real_gid = team->real_gid;
			teamClone->effective_uid = team->effective_uid;
			teamClone->effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		CObjectDeleter<io_context> ioContextPutter(ioContext,
			&vfs_put_io_context);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone->id) != B_OK
			|| info.AddString("name", teamClone->name) != B_OK
			|| info.AddInt32("process group", teamClone->group_id) != B_OK
			|| info.AddInt32("session", teamClone->session_id) != B_OK
			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
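
// Usage sketch (illustrative only, and hypothetical -- this syscall has no
// public wrapper): a caller would use the usual two-step size negotiation,
// querying first and retrying with a buffer large enough for the flattened
// KMessage. The _kern_get_extended_team_info() stub name follows Haiku's
// syscall naming convention and is an assumption here:
//
//	size_t sizeNeeded = 0;
//	status_t status = _kern_get_extended_team_info(teamID, B_TEAM_INFO_BASIC,
//		NULL, 0, &sizeNeeded);
//	if (status == B_BUFFER_OVERFLOW) {
//		void* buffer = malloc(sizeNeeded);
//		if (buffer != NULL) {
//			status = _kern_get_extended_team_info(teamID, B_TEAM_INFO_BASIC,
//				buffer, sizeNeeded, &sizeNeeded);
//			// on B_OK, "buffer" holds a flattened KMessage with the fields
//			// added above ("id", "name", "process group", ...)
//		}
//	}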