Changed the team shutdown process a bit:

* All threads besides the main thread are now killed earlier (in the new
  team_shutdown_team()), before the team is removed from the team hash and
  from its process group. This fixes #5296.
* Use a condition variable instead of a semaphore to wait for the non-main
  threads to die. The condition is notified right after a thread has left
  the team, whereas the semaphore was only released later, by the
  undertaker. A sketch of the new pattern follows below.
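
The new scheme, roughly: the exiting main thread publishes a death entry
holding a count of the remaining threads and a condition variable, and each
dying thread decrements the count and notifies the condition as soon as it
has left the team. A minimal user-space sketch of that pattern, assuming
standard C++ threads (std::condition_variable stands in for the kernel's
ConditionVariable; TeamDeathEntry and both function names are invented for
illustration):

#include <condition_variable>
#include <mutex>

struct TeamDeathEntry {
	int remaining_threads = 0;          // non-main threads still alive
	std::condition_variable condition;  // notified as each one leaves
};

static std::mutex sTeamLock;            // stands in for the team spinlock
static TeamDeathEntry sDeathEntry;

// Waiter side: the main thread records how many threads it signalled,
// then sleeps until the last of them has left the team.
void
wait_for_team_threads(int threadCount)
{
	std::unique_lock<std::mutex> lock(sTeamLock);
	sDeathEntry.remaining_threads = threadCount;
	sDeathEntry.condition.wait(lock,
		[] { return sDeathEntry.remaining_threads == 0; });
}

// Notifier side: run by each dying thread right after it is removed from
// its team -- earlier than the old semaphore, which the undertaker only
// released once the thread was already being cleaned up.
void
on_thread_left_team()
{
	std::lock_guard<std::mutex> lock(sTeamLock);
	if (--sDeathEntry.remaining_threads == 0)
		sDeathEntry.condition.notify_one();
}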


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35196 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold, 2010-01-20 09:34:53 +00:00
parent 15779f70b9
commit 5662ae4585
5 changed files with 124 additions and 84 deletions

headers/private/kernel/team.h

@@ -23,7 +23,8 @@ extern "C" {
status_t team_init(struct kernel_args *args);
status_t wait_for_team(team_id id, status_t *returnCode);
void team_remove_team(struct team *team);
void team_delete_team(struct team *team);
port_id team_shutdown_team(struct team *team, cpu_status& state);
void team_delete_team(struct team *team, port_id debuggerPort);
struct process_group *team_get_process_group_locked(
struct process_session *session, pid_t id);
void team_delete_process_group(struct process_group *group);

headers/private/kernel/thread_types.h

@@ -10,13 +10,14 @@
#ifndef _ASSEMBLER
#include <smp.h>
#include <arch/thread_types.h>
#include <condition_variable.h>
#include <signal.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <user_debugger.h>
#include <util/list.h>
#include <arch/thread_types.h>
extern spinlock gThreadSpinlock;
@@ -98,6 +99,7 @@ struct team_watcher {
void *data;
};
#define MAX_DEAD_CHILDREN 32
// this is a soft limit for the number of child death entries in a team
#define MAX_DEAD_THREADS 32
@@ -150,6 +152,12 @@ struct team_dead_children : team_job_control_children {
};
struct team_death_entry {
int32 remaining_threads;
ConditionVariable condition;
};
#endif // __cplusplus
@@ -178,7 +186,7 @@ struct team {
struct io_context *io_context;
struct realtime_sem_context *realtime_sem_context;
struct xsi_sem_context *xsi_sem_context;
sem_id death_sem; // semaphore to wait on for dying threads
struct team_death_entry *death_entry;
struct list dead_threads;
int dead_threads_count;

src/system/kernel/debug/user_debugger.cpp

@@ -1,5 +1,5 @@
/*
* Copyright 2005-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2005-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
@@ -378,7 +378,7 @@ prepare_debugger_change(team_id teamID, ConditionVariable& condition,
InterruptsSpinLocker teamLocker(gTeamSpinlock);
team = team_get_team_struct_locked(teamID);
if (team == NULL)
if (team == NULL || team->death_entry != NULL)
return B_BAD_TEAM_ID;
// don't allow messing with the kernel team

src/system/kernel/team.cpp

@@ -1,5 +1,5 @@
/*
* Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@@ -759,7 +759,7 @@ create_team_struct(const char* name, bool kernel)
team->loading_info = NULL;
team->state = TEAM_STATE_BIRTH;
team->flags = 0;
team->death_sem = -1;
team->death_entry = NULL;
team->user_data_area = -1;
team->user_data = 0;
team->used_user_data = 0;
@@ -2382,70 +2382,115 @@ team_remove_team(struct team* team)
}
void
team_delete_team(struct team* team)
/*! Kills all threads but the main thread of the team.
To be called on exit of the team's main thread. The team spinlock must be
held. The function may temporarily drop the spinlock, but will reacquire it
before it returns.
\param team The team in question.
\param state The CPU state as returned by disable_interrupts(). Will be
adjusted if the function needs to unlock and relock.
\return The port of the debugger for the team, or -1 if none. To be passed to
team_delete_team().
*/
port_id
team_shutdown_team(struct team* team, cpu_status& state)
{
team_id teamID = team->id;
ASSERT(thread_get_current_thread() == team->main_thread);
// Make sure debugging changes won't happen anymore.
port_id debuggerPort = -1;
cpu_status state;
while (true) {
// If a debugger change is in progress for the team, we'll have to
// wait until it is done.
ConditionVariableEntry waitForDebuggerEntry;
bool waitForDebugger = false;
if (team->num_threads > 0) {
// there are other threads still in this team,
// cycle through and signal kill on each of the threads
// ToDo: this can be optimized. There's got to be a better solution.
struct thread* temp_thread;
char deathSemName[B_OS_NAME_LENGTH];
sem_id deathSem;
int32 threadCount;
sprintf(deathSemName, "team %ld death sem", teamID);
deathSem = create_sem(0, deathSemName);
if (deathSem < 0) {
panic("team_delete_team: cannot init death sem for team %ld\n",
teamID);
}
state = disable_interrupts();
GRAB_TEAM_LOCK();
team->death_sem = deathSem;
threadCount = team->num_threads;
// If the team was being debugged, that will stop with the termination
// of the nub thread. The team structure has already been removed from
the team hash table at this point, so no one can install a debugger
// anymore. We fetch the debugger's port to send it a message at the
// bitter end.
GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
if (team->debug_info.debugger_changed_condition != NULL) {
team->debug_info.debugger_changed_condition->Add(
&waitForDebuggerEntry);
waitForDebugger = true;
} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
// The team is being debugged. That will stop with the termination
// of the nub thread. Since we won't let go of the team lock before we
// have set team::death_entry or removed the team from the team hash,
// no one can install a debugger anymore. We fetch
// the debugger's port to send it a message at the bitter end.
debuggerPort = team->debug_info.debugger_port;
}
RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
// We can safely walk the list because of the lock. No new threads can
// be created because of the TEAM_STATE_DEATH flag on the team.
temp_thread = team->thread_list;
while (temp_thread) {
struct thread* next = temp_thread->team_next;
if (!waitForDebugger)
break;
send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
temp_thread = next;
// wait for the debugger change to be finished
RELEASE_TEAM_LOCK();
restore_interrupts(state);
waitForDebuggerEntry.Wait();
state = disable_interrupts();
GRAB_TEAM_LOCK();
}
// kill all threads but the main thread
team_death_entry deathEntry;
deathEntry.condition.Init(team, "team death");
while (true) {
team->death_entry = &deathEntry;
deathEntry.remaining_threads = 0;
struct thread* thread = team->thread_list;
while (thread != NULL) {
if (thread != team->main_thread) {
send_signal_etc(thread->id, SIGKILLTHR,
B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED);
deathEntry.remaining_threads++;
}
thread = thread->team_next;
}
if (deathEntry.remaining_threads == 0)
break;
// there are threads to wait for
ConditionVariableEntry entry;
deathEntry.condition.Add(&entry);
RELEASE_TEAM_LOCK();
restore_interrupts(state);
// wait until all threads in the team are dead.
acquire_sem_etc(team->death_sem, threadCount, 0, 0);
delete_sem(team->death_sem);
entry.Wait();
state = disable_interrupts();
GRAB_TEAM_LOCK();
}
team->death_entry = NULL;
// That makes the team "undead" again, but we have the team spinlock
// and our caller won't drop it until after removing the team from the
// team hash table.
return debuggerPort;
}
void
team_delete_team(struct team* team, port_id debuggerPort)
{
team_id teamID = team->id;
ASSERT(team->num_threads == 0);
// If someone is waiting for this team to be loaded, but it dies
// unexpectedly before being done, we need to notify the waiting
// thread now.
state = disable_interrupts();
cpu_status state = disable_interrupts();
GRAB_TEAM_LOCK();
if (team->loading_info) {

src/system/kernel/thread.cpp

@@ -1,5 +1,5 @@
/*
* Copyright 2005-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2005-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@@ -78,13 +78,11 @@ static int32 sUsedThreads = 0;
struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
struct thread* thread;
team_id teamID;
sem_id deathSem;
UndertakerEntry(struct thread* thread, team_id teamID, sem_id deathSem)
UndertakerEntry(struct thread* thread, team_id teamID)
:
thread(thread),
teamID(teamID),
deathSem(deathSem)
teamID(teamID)
{
}
};
@@ -486,8 +484,10 @@ create_thread(thread_creation_attributes& attributes, bool kernel)
// look at the team, make sure it's not being deleted
team = team_get_team_struct_locked(attributes.team);
if (team == NULL || team->state == TEAM_STATE_DEATH)
if (team == NULL || team->state == TEAM_STATE_DEATH
|| team->death_entry != NULL) {
abort = true;
}
if (!abort && !kernel) {
thread->user_thread = team_allocate_user_thread(team);
@@ -622,9 +622,6 @@ undertaker(void* /*args*/)
enable_interrupts();
// needed for the debugger notification below
if (entry.deathSem >= 0)
release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);
// free the thread structure
locker.Lock();
thread_enqueue(thread, &dead_q);
@@ -1368,8 +1365,6 @@ thread_exit(void)
struct thread *thread = thread_get_current_thread();
struct team *team = thread->team;
thread_id parentID = -1;
bool deleteTeam = false;
sem_id cachedDeathSem = -1;
status_t status;
struct thread_debug_info debugInfo;
team_id teamID = team->id;
@@ -1396,14 +1391,14 @@ thread_exit(void)
struct job_control_entry *death = NULL;
struct death_entry* threadDeathEntry = NULL;
ConditionVariableEntry waitForDebuggerEntry;
bool waitForDebugger = false;
bool deleteTeam = false;
port_id debuggerPort = -1;
if (team != team_get_kernel_team()) {
user_debug_thread_exiting(thread);
if (team->main_thread == thread) {
// this was the main thread in this team, so we will delete that as well
// The main thread is exiting. Shut down the whole team.
deleteTeam = true;
} else {
threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
@@ -1414,6 +1409,10 @@ thread_exit(void)
// put the thread into the kernel team until it dies
state = disable_interrupts();
GRAB_TEAM_LOCK();
if (deleteTeam)
debuggerPort = team_shutdown_team(team, state);
GRAB_THREAD_LOCK();
// removing the thread and putting its death entry to the parent
// team needs to be an atomic operation
@@ -1425,19 +1424,12 @@ thread_exit(void)
remove_thread_from_team(team, thread);
insert_thread_into_team(team_get_kernel_team(), thread);
cachedDeathSem = team->death_sem;
if (team->death_entry != NULL) {
if (--team->death_entry->remaining_threads == 0)
team->death_entry->condition.NotifyOne(true, B_OK);
}
if (deleteTeam) {
// If a debugger change is in progress for the team, we'll have to
// wait until it is done later.
GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
if (team->debug_info.debugger_changed_condition != NULL) {
team->debug_info.debugger_changed_condition->Add(
&waitForDebuggerEntry);
waitForDebugger = true;
}
RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
struct team *parent = team->parent;
// remember who our parent was so we can send a signal
@@ -1508,17 +1500,11 @@ thread_exit(void)
// delete the team if we're its main thread
if (deleteTeam) {
// wait for a debugger change to be finished first
if (waitForDebugger)
waitForDebuggerEntry.Wait();
team_delete_team(team);
team_delete_team(team, debuggerPort);
// we need to delete any death entry that made it to here
if (death != NULL)
delete death;
cachedDeathSem = -1;
}
state = disable_interrupts();
@@ -1602,7 +1588,7 @@ thread_exit(void)
user_debug_thread_deleted(teamID, thread->id);
// enqueue in the undertaker list and reschedule for the last time
UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);
UndertakerEntry undertakerEntry(thread, teamID);
disable_interrupts();
GRAB_THREAD_LOCK();