* Introduced a set of functions (thread_prepare_to_block(),
  thread_block(), thread_unblock(), ...) that allow a thread to wait
  for something without needing a semaphore or condition variable: it
  can simply block, and another thread can unblock it. The mechanism
  supports timeouts and interruption. Both semaphores and condition
  variables now use this common mechanism (see the sketch below).
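  A minimal usage sketch (thread_prepare_to_block(), thread_block(),
  and thread_unblock() are the functions introduced in the diff below;
  "object" and "waiterID" are illustrative placeholders):

      // waiter -- runs in the thread that wants to wait
      struct thread* thread = thread_get_current_thread();
      thread_prepare_to_block(thread, B_CAN_INTERRUPT,
          THREAD_BLOCK_TYPE_USER_BASE, object);
          // announce what we are waiting for; sets wait.status to 1
      status_t status = thread_block();
          // B_INTERRUPTED, or the status the waker passed

      // waker -- runs in any other thread
      thread_unblock(waiterID, B_OK);
          // returns false (and does nothing) if the thread is not,
          // or no longer, blocked

  Since thread_prepare_to_block() sets thread->wait.status atomically
  and thread_unblock() uses atomic_test_and_set(), a wakeup arriving
  between the waiter's two steps is not lost: thread_block() simply
  returns immediately.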
* Semaphores:
  - Some simplifications due to the thread blocking mechanism.
  - Changed the locking order to sem -> thread. It was the other way
    around before, and when introducing the wait_for_objects() support
    I had also introduced a situation where the locks were taken in
    the reverse order, which could potentially cause a deadlock on SMP
    systems.
  - Instead of queueing thread structures, a semaphore now queues
    queued_thread entries, which are created on the waiting thread's
    stack. The thread::sem structure could thus be removed.
  - Added sem_entry::net_count, which is sem_entry::count plus the
    acquisition counts of all waiting threads. This number is needed
    in remove_thread_from_sem(), and instead of recomputing it there
    we maintain it (see the worked example after this list).
  - Fixed remove_thread_from_sem(): it would not unblock threads if
    the sem count was <= 0.
  - Made sem_entry::last_acquirer unconditional, i.e. no longer guarded
    by DEBUG_LAST_ACQUIRER; it is actually needed for
    sem_info::latest_holder. Fixed fill_sem_info() accordingly.
  - Added some optional tracing output, though only via ktrace_printf().
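  To illustrate the count/net_count bookkeeping, a constructed
  scenario (values invented; the unblock condition is the one used in
  release_sem_etc() below):

      create_sem(1, "demo")            count:  1   net_count: 1
      A: acquire_sem_etc(sem, 2, ..)   blocks; count: -1, net_count: 1
      B: acquire_sem_etc(sem, 3, ..)   blocks; count: -4, net_count: 1
      release_sem_etc(sem, 4, 0)       unblocks A (2 <= 1 + 4), then
                                       B (3 <= 1 + 2);
                                       count:  0   net_count: 0

  The invariant is net_count == count + the acquisition counts of all
  still-blocked waiters. With B_RELEASE_ALL the count to release is
  then simply net_count - count (here 1 - (-4) = 5), i.e. exactly what
  the queued waiters still need.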
* Condition variables:
  - Could be simplified significantly through the use of the thread
    blocking mechanism. Removed a good deal of unnecessary code.
  - Moved the ConditionVariableEntry "flags" parameter from Wait() to
    Add(), and adjusted all places where condition variables are used
    accordingly (the call-site change is illustrated below).
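  The call-site change, using the pipefs write path from the diff
  below as an example:

      // before:
      ConditionVariableEntry<> entry;
      entry.Add(this);
      benaphore_unlock(&fRequestLock);
      status_t status = entry.Wait(B_CAN_INTERRUPT);

      // after -- Add() now calls thread_prepare_to_block() itself,
      // so the interruption flags must already be known when the
      // entry is queued:
      ConditionVariableEntry<> entry;
      entry.Add(this, B_CAN_INTERRUPT);
      benaphore_unlock(&fRequestLock);
      status_t status = entry.Wait();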
* snooze() uses thread_block_with_timeout() instead of a semaphore
  (see the timed-wait sketch after this list).
* Simplified thread interrupting in the signal and user debugger code.
  Instead of separate functions for threads waiting on a semaphore or
  condition variable, there is now only a single thread_interrupt().
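  For reference, a timed wait now looks like this (a sketch mirroring
  the new snooze_etc() implementation in the diff below; the
  one-second timeout is illustrative):

      struct thread* thread = thread_get_current_thread();
      InterruptsSpinLocker locker(thread_spinlock);

      thread_prepare_to_block(thread, B_CAN_INTERRUPT,
          THREAD_BLOCK_TYPE_SNOOZE, NULL);
      status_t status = thread_block_with_timeout_locked(
          B_RELATIVE_TIMEOUT, 1000000);
          // B_TIMED_OUT after at most one second, B_INTERRUPTED if a
          // signal was allowed to interrupt us, otherwise the status
          // passed by the unblocking thread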



git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25099 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2008-04-22 16:22:42 +00:00
parent e01cebeb0a
commit b95f6d4710
12 changed files with 492 additions and 551 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright 2007, Ingo Weinhold, bonefish@cs.tu-berlin.de.
* Copyright 2007-2008, Ingo Weinhold, bonefish@cs.tu-berlin.de.
* Distributed under the terms of the MIT License.
*/
#ifndef _KERNEL_CONDITION_VARIABLE_H
@@ -40,24 +40,16 @@ public:
inline PrivateConditionVariable* Variable() const
{ return fVariable; }
class Private;
protected:
bool Add(const void* object);
status_t Wait(uint32 flags);
bool Add(const void* object, uint32 flags);
status_t Wait();
status_t Wait(const void* object, uint32 flags);
private:
void _Remove();
protected:
PrivateConditionVariable* fVariable;
struct thread* fThread;
uint32 fFlags;
status_t fResult;
friend class PrivateConditionVariable;
friend class Private;
};
@@ -103,8 +95,8 @@ public:
template<typename Type = void>
class ConditionVariableEntry : public PrivateConditionVariableEntry {
public:
inline bool Add(const Type* object);
inline status_t Wait(uint32 flags = 0);
inline bool Add(const Type* object, uint32 flags = 0);
inline status_t Wait();
inline status_t Wait(const Type* object, uint32 flags = 0);
};
@@ -143,17 +135,17 @@ ConditionVariable<Type>::NotifyAll(bool threadsLocked)
template<typename Type>
inline bool
ConditionVariableEntry<Type>::Add(const Type* object)
ConditionVariableEntry<Type>::Add(const Type* object, uint32 flags)
{
return PrivateConditionVariableEntry::Add(object);
return PrivateConditionVariableEntry::Add(object, flags);
}
template<typename Type>
inline status_t
ConditionVariableEntry<Type>::Wait(uint32 flags)
ConditionVariableEntry<Type>::Wait()
{
return PrivateConditionVariableEntry::Wait(flags);
return PrivateConditionVariableEntry::Wait();
}
@@ -170,8 +162,6 @@ extern "C" {
struct thread;
extern status_t condition_variable_interrupt_thread(struct thread* thread);
extern void condition_variable_init();
#ifdef __cplusplus

View File

@@ -22,7 +22,6 @@ extern "C" {
extern status_t sem_init(struct kernel_args *args);
extern int sem_delete_owned_sems(team_id owner);
extern status_t sem_interrupt_thread(struct thread *t);
extern int32 sem_used_sems(void);
extern int32 sem_max_sems(void);

View File

@@ -13,6 +13,11 @@
#include <thread_types.h>
#include <arch/thread.h>
// For the thread blocking inline functions only.
#include <kscheduler.h>
#include <ksignal.h>
struct kernel_args;
struct select_info;
@@ -72,6 +77,12 @@ status_t deselect_thread(int32 object, struct select_info *info, bool kernel);
#define syscall_64_bit_return_value() arch_syscall_64_bit_return_value()
status_t thread_block();
status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
status_t thread_block_with_timeout_locked(uint32 timeoutFlags,
bigtime_t timeout);
bool thread_unblock(status_t threadID, status_t status);
// used in syscalls.c
status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
status_t _user_rename_thread(thread_id thread, const char *name);
@@ -100,4 +111,87 @@ int _user_setrlimit(int resource, const struct rlimit * rlp);
}
#endif
/*!
\a thread must be the current thread.
Thread lock can be, but doesn't need to be held.
*/
static inline bool
thread_is_interrupted(struct thread* thread, uint32 flags)
{
return ((flags & B_CAN_INTERRUPT)
&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
|| ((flags & B_KILL_CAN_INTERRUPT)
&& (thread->sig_pending & KILL_SIGNALS));
}
static inline bool
thread_is_blocked(struct thread* thread)
{
return thread->wait.status == 1;
}
/*!
\a thread must be the current thread.
Thread lock can be, but doesn't need to be locked.
*/
static inline void
thread_prepare_to_block(struct thread* thread, uint32 flags, uint32 type,
void* object)
{
thread->wait.flags = flags;
thread->wait.type = type;
thread->wait.object = object;
atomic_set(&thread->wait.status, 1);
// Set status last to guarantee that the other fields are initialized
// when a thread is waiting.
}
static inline status_t
thread_block_locked(struct thread* thread)
{
if (thread->wait.status == 1) {
// check for signals, if interruptable
if (thread_is_interrupted(thread, thread->wait.flags)) {
thread->wait.status = B_INTERRUPTED;
} else {
thread->next_state = B_THREAD_WAITING;
scheduler_reschedule();
}
}
return thread->wait.status;
}
static inline bool
thread_unblock_locked(struct thread* thread, status_t status)
{
if (atomic_test_and_set(&thread->wait.status, status, 1) != 1)
return false;
// wake up the thread, if it is sleeping
if (thread->state == B_THREAD_WAITING)
scheduler_enqueue_in_run_queue(thread);
return true;
}
static inline status_t
thread_interrupt(struct thread* thread, bool kill)
{
if ((thread->wait.flags & B_CAN_INTERRUPT) != 0
|| (kill && (thread->wait.flags & B_KILL_CAN_INTERRUPT) != 0)) {
thread_unblock_locked(thread, B_INTERRUPTED);
return B_OK;
}
return B_NOT_ALLOWED;
}
#endif /* _THREAD_H */

View File

@@ -55,6 +55,15 @@ typedef enum job_control_state {
#define THREAD_STOPPED 0x3
#define THREAD_CONTINUED 0x4
// The type of object a thread blocks on (thread::wait::type, set by
// thread_prepare_to_block()).
enum {
THREAD_BLOCK_TYPE_SEMAPHORE = 0,
THREAD_BLOCK_TYPE_CONDITION_VARIABLE = 1,
THREAD_BLOCK_TYPE_SNOOZE = 2,
THREAD_BLOCK_TYPE_USER_BASE = 10000
};
struct image;
// defined in image.c
struct select_info;
@@ -233,12 +242,12 @@ struct thread {
bool was_yielded;
struct {
sem_id blocking;
int32 count;
int32 acquire_count;
status_t acquire_status;
int32 flags;
} sem;
status_t status; // current wait status
uint32 flags; // interrupable flags
uint32 type; // type of the object waited on
void* object; // pointer to the object waited on
timer unblock_timer; // timer for block with timeout
} wait;
struct PrivateConditionVariableEntry *condition_variable_entry;

View File

@@ -408,14 +408,14 @@ RequestOwner::Wait(bool interruptable)
// add an entry to wait on
ConditionVariableEntry<> entry;
entry.Add(this);
entry.Add(this, interruptable ? B_CAN_INTERRUPT : 0);
locker.Unlock();
// wait
TRACE(("%p->RequestOwner::Wait(): waiting for condition...\n", this));
error = entry.Wait(interruptable ? B_CAN_INTERRUPT : 0);
error = entry.Wait();
TRACE(("%p->RequestOwner::Wait(): condition occurred: %lx\n", this,
error));

View File

@@ -1,5 +1,5 @@
/*
* Copyright 2007, Ingo Weinhold, bonefish@cs.tu-berlin.de.
* Copyright 2007-2008, Ingo Weinhold, bonefish@cs.tu-berlin.de.
* Distributed under the terms of the MIT License.
*/
@@ -92,30 +92,36 @@ dump_condition_variable(int argc, char** argv)
bool
PrivateConditionVariableEntry::Add(const void* object)
PrivateConditionVariableEntry::Add(const void* object, uint32 flags)
{
ASSERT(object != NULL);
fThread = thread_get_current_thread();
fFlags = 0;
fResult = B_OK;
InterruptsLocker _;
SpinLocker locker(sConditionVariablesLock);
// add to the queue for the variable
fVariable = sConditionVariableHash.Lookup(object);
if (fVariable)
fVariable->fEntries.Add(this);
else
fResult = B_ENTRY_NOT_FOUND;
return (fVariable != NULL);
struct thread* thread = thread_get_current_thread();
thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_CONDITION_VARIABLE,
fVariable);
if (fVariable == NULL) {
SpinLocker threadLocker(thread_spinlock);
thread_unblock_locked(thread, B_ENTRY_NOT_FOUND);
return false;
}
// add to the queue for the variable
fVariable->fEntries.Add(this);
return true;
}
status_t
PrivateConditionVariableEntry::Wait(uint32 flags)
PrivateConditionVariableEntry::Wait()
{
if (!are_interrupts_enabled()) {
panic("wait_for_condition_variable_entry() called with interrupts "
@@ -124,77 +130,32 @@ PrivateConditionVariableEntry::Wait(uint32 flags)
}
InterruptsLocker _;
SpinLocker threadLocker(thread_spinlock);
SpinLocker locker(sConditionVariablesLock);
// check whether this entry has already been notified
// (set the flags while at it)
if (fVariable == NULL)
return fResult;
fFlags = flags;
// When interruptable, check pending signals first
struct thread* thread = thread_get_current_thread();
if (((flags & B_CAN_INTERRUPT)
&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
|| ((flags & B_KILL_CAN_INTERRUPT)
&& (thread->sig_pending & KILL_SIGNALS))) {
// remove entry from the variables
_Remove();
return B_INTERRUPTED;
}
// wait
thread->next_state = B_THREAD_WAITING;
thread->condition_variable_entry = this;
thread->sem.blocking = -1;
locker.Unlock();
scheduler_reschedule();
status_t error = thread_block_locked(thread_get_current_thread());
threadLocker.Unlock();
return fResult;
SpinLocker locker(sConditionVariablesLock);
// remove entry from variable, if not done yet
if (fVariable != NULL) {
fVariable->fEntries.Remove(this);
fVariable = NULL;
}
return error;
}
status_t
PrivateConditionVariableEntry::Wait(const void* object, uint32 flags)
{
if (Add(object))
return Wait(flags);
if (Add(object, flags))
return Wait();
return B_ENTRY_NOT_FOUND;
}
/*! Removes the entry from its variable.
Interrupts must be disabled, sConditionVariablesLock must be held.
*/
void
PrivateConditionVariableEntry::_Remove()
{
if (fVariable) {
fVariable->fEntries.Remove(this);
fVariable = NULL;
}
}
class PrivateConditionVariableEntry::Private {
public:
inline Private(PrivateConditionVariableEntry& entry)
: fEntry(entry)
{
}
inline uint32 Flags() const { return fEntry.fFlags; }
inline void Remove() const { fEntry._Remove(); }
inline void SetResult(status_t result) { fEntry.fResult = result; }
private:
PrivateConditionVariableEntry& fEntry;
};
// #pragma mark - PrivateConditionVariable
@@ -306,15 +267,7 @@ PrivateConditionVariable::_Notify(bool all, status_t result)
while (PrivateConditionVariableEntry* entry = fEntries.RemoveHead()) {
entry->fVariable = NULL;
struct thread* thread = entry->fThread;
if (thread->condition_variable_entry != NULL)
thread->condition_variable_entry->fResult = result;
// wake up the thread
thread->condition_variable_entry = NULL;
if (thread->state == B_THREAD_WAITING)
scheduler_enqueue_in_run_queue(thread);
thread_unblock_locked(entry->fThread, B_OK);
if (!all)
break;
@@ -325,41 +278,6 @@ PrivateConditionVariable::_Notify(bool all, status_t result)
// #pragma mark -
/*! Interrupts must be disabled, thread lock must be held.
*/
status_t
condition_variable_interrupt_thread(struct thread* thread)
{
SpinLocker locker(sConditionVariablesLock);
if (thread == NULL || thread->state != B_THREAD_WAITING
|| thread->condition_variable_entry == NULL) {
return B_BAD_VALUE;
}
PrivateConditionVariableEntry* entry = thread->condition_variable_entry;
uint32 flags = PrivateConditionVariableEntry::Private(*entry).Flags();
// interruptable?
if ((flags & B_CAN_INTERRUPT) == 0
&& ((flags & B_KILL_CAN_INTERRUPT) == 0
|| (thread->sig_pending & KILL_SIGNALS) == 0)) {
return B_NOT_ALLOWED;
}
PrivateConditionVariableEntry::Private(*entry).SetResult(B_INTERRUPTED);
// remove entry from its variable
PrivateConditionVariableEntry::Private(*entry).Remove();
// wake up the thread
thread->condition_variable_entry = NULL;
scheduler_enqueue_in_run_queue(thread);
return B_OK;
}
void
condition_variable_init()
{
@@ -383,4 +301,3 @@ condition_variable_init()
"\n"
"Lists all existing condition variables\n", 0);
}

View File

@@ -2366,12 +2366,13 @@ _user_debug_thread(thread_id threadID)
scheduler_enqueue_in_run_queue(thread);
break;
case B_THREAD_WAITING:
// thread waiting: interrupt it
if (thread->sem.blocking >= 0)
sem_interrupt_thread(thread);
else if (thread->condition_variable_entry)
condition_variable_interrupt_thread(thread);
default:
// thread may be waiting: interrupt it
thread_interrupt(thread, false);
// TODO: If the thread is already in the kernel and e.g.
// about to acquire a semaphore (before
// thread_prepare_to_block()), we won't interrupt it.
// Maybe we should rather send a signal (SIGTRAP).
break;
}
}

View File

@@ -359,13 +359,13 @@ Inode::WriteDataToBuffer(const void *_data, size_t *_length, bool nonBlocking)
return B_WOULD_BLOCK;
ConditionVariableEntry<> entry;
entry.Add(this);
entry.Add(this, B_CAN_INTERRUPT);
WriteRequest request(minToWrite);
fWriteRequests.Add(&request);
benaphore_unlock(&fRequestLock);
status_t status = entry.Wait(B_CAN_INTERRUPT);
status_t status = entry.Wait();
benaphore_lock(&fRequestLock);
fWriteRequests.Remove(&request);
@@ -473,11 +473,11 @@ Inode::WaitForReadRequest(ReadRequest &request)
// add the entry to wait on
ConditionVariableEntry<> entry;
entry.Add(&request);
entry.Add(&request, B_CAN_INTERRUPT);
// wait
benaphore_unlock(&fRequestLock);
status_t status = entry.Wait(B_CAN_INTERRUPT);
status_t status = entry.Wait();
benaphore_lock(&fRequestLock);
// unpublish the condition variable

View File

@@ -1,4 +1,5 @@
/*
* Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
@@ -21,6 +22,8 @@
#include <debug.h>
#include <thread.h>
#include <team.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vfs.h>
#include <vm_low_memory.h>
#include <vm_page.h>
@@ -34,13 +37,36 @@
//#define TRACE_SEM
#ifdef TRACE_SEM
# define TRACE(x) dprintf x
# define TRACE(x) dprintf_no_syslog x
#else
# define TRACE(x) ;
#endif
//#define KTRACE_SEM
#ifdef KTRACE_SEM
# define KTRACE(x...) ktrace_printf(x)
#else
# define KTRACE(x...) do {} while (false)
#endif
#define DEBUG_LAST_ACQUIRER
struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
queued_thread(struct thread *thread, int32 count)
:
thread(thread),
count(count),
queued(false)
{
}
struct thread *thread;
int32 count;
bool queued;
};
typedef DoublyLinkedList<queued_thread> ThreadQueue;
struct sem_entry {
sem_id id;
spinlock lock; // protects only the id field when unused
@@ -48,12 +74,14 @@ struct sem_entry {
// when slot in use
struct {
int32 count;
struct thread_queue queue;
int32 net_count;
// count + acquisition count of all blocked
// threads
char *name;
team_id owner; // if set to -1, means owned by a port
select_info *select_infos;
#ifdef DEBUG_LAST_ACQUIRER
thread_id last_acquirer;
#ifdef DEBUG_LAST_ACQUIRER
int32 last_acquire_count;
thread_id last_releaser;
int32 last_release_count;
@ -66,6 +94,8 @@ struct sem_entry {
struct sem_entry *next;
} unused;
} u;
ThreadQueue queue; // should be in u.used, but has a constructor
};
static const int32 kMaxSemaphores = 131072;
@@ -84,24 +114,13 @@ static spinlock sem_spinlock = 0;
#define GRAB_SEM_LOCK(s) acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s) release_spinlock(&(s).lock)
static int remove_thread_from_sem(struct thread *thread, struct sem_entry *sem,
struct thread_queue *queue, status_t acquireStatus, bool hasThreadLock);
struct sem_timeout_args {
thread_id blocked_thread;
sem_id blocked_sem_id;
int32 sem_count;
};
static int
dump_sem_list(int argc, char **argv)
{
const char *name = NULL;
team_id owner = -1;
#ifdef DEBUG_LAST_ACQUIRER
thread_id last = -1;
#endif
int32 i;
if (argc > 2) {
@@ -109,38 +128,26 @@ dump_sem_list(int argc, char **argv)
owner = strtoul(argv[2], NULL, 0);
else if (!strcmp(argv[1], "name"))
name = argv[2];
#ifdef DEBUG_LAST_ACQUIRER
else if (!strcmp(argv[1], "last"))
last = strtoul(argv[2], NULL, 0);
#endif
} else if (argc > 1)
owner = strtoul(argv[1], NULL, 0);
kprintf("sem id count team"
#ifdef DEBUG_LAST_ACQUIRER
" last"
#endif
" name\n");
kprintf("sem id count team last name\n");
for (i = 0; i < sMaxSems; i++) {
struct sem_entry *sem = &sSems[i];
if (sem->id < 0
#ifdef DEBUG_LAST_ACQUIRER
|| (last != -1 && sem->u.used.last_acquirer != last)
#endif
|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
|| (owner != -1 && sem->u.used.owner != owner))
continue;
kprintf("%p %6ld %5ld %6ld "
#ifdef DEBUG_LAST_ACQUIRER
"%6ld "
#endif
" %s\n", sem, sem->id, sem->u.used.count,
sem->u.used.owner,
#ifdef DEBUG_LAST_ACQUIRER
sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
#endif
sem->u.used.name);
}
@@ -158,12 +165,10 @@ dump_sem(struct sem_entry *sem)
kprintf("owner: %ld\n", sem->u.used.owner);
kprintf("count: %ld\n", sem->u.used.count);
kprintf("queue: ");
if (sem->u.used.queue.head != NULL) {
struct thread *thread = sem->u.used.queue.head;
while (thread != NULL) {
kprintf(" %ld", thread->id);
thread = thread->queue_next;
}
if (!sem->queue.IsEmpty()) {
ThreadQueue::Iterator it = sem->queue.GetIterator();
while (queued_thread* entry = it.Next())
kprintf(" %ld", entry->thread->id);
kprintf("\n");
} else
kprintf(" -\n");
@@ -178,16 +183,19 @@ dump_sem(struct sem_entry *sem)
kprintf("last released by: %ld, count: %ld\n", sem->u.used.last_releaser,
sem->u.used.last_release_count);
if (sem->u.used.last_releaser != 0)
set_debug_variable("_releaser", sem->u.used.last_releaser);
else
unset_debug_variable("_releaser");
#else
kprintf("last acquired by: %ld\n", sem->u.used.last_acquirer);
#endif
if (sem->u.used.last_acquirer != 0)
set_debug_variable("_acquirer", sem->u.used.last_acquirer);
else
unset_debug_variable("_acquirer");
if (sem->u.used.last_releaser != 0)
set_debug_variable("_releaser", sem->u.used.last_releaser);
else
unset_debug_variable("_releaser");
#endif
} else {
kprintf("next: %p\n", sem->u.unused.next);
kprintf("next_id: %ld\n", sem->u.unused.next_id);
@@ -238,13 +246,6 @@ dump_sem_info(int argc, char **argv)
}
static inline void
clear_thread_queue(struct thread_queue *queue)
{
queue->head = queue->tail = NULL;
}
/*! \brief Appends a semaphore slot to the free list.
The semaphore list must be locked.
@@ -281,56 +282,6 @@ notify_sem_select_events(struct sem_entry* sem, uint16 events)
}
/*! Called from a timer handler. Wakes up a semaphore */
static int32
sem_timeout(timer *data)
{
struct sem_timeout_args *args = (struct sem_timeout_args *)data->user_data;
struct thread *thread;
int slot;
int state;
struct thread_queue wakeupQueue;
thread = thread_get_thread_struct(args->blocked_thread);
if (thread == NULL)
return B_HANDLED_INTERRUPT;
slot = args->blocked_sem_id % sMaxSems;
state = disable_interrupts();
GRAB_SEM_LOCK(sSems[slot]);
TRACE(("sem_timeout: called on %p sem %ld, thread %ld\n",
data, args->blocked_sem_id, args->blocked_thread));
if (sSems[slot].id != args->blocked_sem_id) {
// this thread was not waiting on this semaphore
panic("sem_timeout: thread %ld was trying to wait on sem %ld which "
"doesn't exist!\n", args->blocked_thread, args->blocked_sem_id);
RELEASE_SEM_LOCK(sSems[slot]);
restore_interrupts(state);
return B_HANDLED_INTERRUPT;
}
clear_thread_queue(&wakeupQueue);
remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue, B_TIMED_OUT,
false);
RELEASE_SEM_LOCK(sSems[slot]);
GRAB_THREAD_LOCK();
// put the threads in the run q here to make sure we dont deadlock in sem_interrupt_thread
while ((thread = thread_dequeue(&wakeupQueue)) != NULL)
scheduler_enqueue_in_run_queue(thread);
RELEASE_THREAD_LOCK();
restore_interrupts(state);
return B_INVOKE_SCHEDULER;
}
/*! Fills the thread_info structure with information from the specified
thread.
The thread lock must be held when called.
@@ -342,13 +293,7 @@ fill_sem_info(struct sem_entry *sem, sem_info *info, size_t size)
info->team = sem->u.used.owner;
strlcpy(info->name, sem->u.used.name, sizeof(info->name));
info->count = sem->u.used.count;
// ToDo: not sure if this is the latest holder, or the next
// holder...
if (sem->u.used.queue.head != NULL)
info->latest_holder = sem->u.used.queue.head->id;
else
info->latest_holder = -1;
info->latest_holder = sem->u.used.last_acquirer;
}
@@ -389,17 +334,12 @@ sem_init(kernel_args *args)
add_debugger_command_etc("sems", &dump_sem_list,
"Dump a list of all active semaphores (for team, with name, etc.)",
"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
#ifdef DEBUG_LAST_ACQUIRER
" | (\"last\" <last acquirer>)"
#endif
"\n"
" | (\"last\" <last acquirer>)\n"
"Prints a list of all active semaphores meeting the given\n"
"requirement. If no argument is given, all sems are listed.\n"
" <team> - The team owning the semaphores.\n"
" <name> - Part of the name of the semaphores.\n"
#ifdef DEBUG_LAST_ACQUIRER
" <last acquirer> - The thread that last acquired the semaphore.\n"
#endif
, 0);
add_debugger_command_etc("sem", &dump_sem_info,
"Dump info about a particular semaphore",
@@ -478,7 +418,8 @@ create_sem_etc(int32 count, const char *name, team_id owner)
GRAB_SEM_LOCK(*sem);
sem->id = sem->u.unused.next_id;
sem->u.used.count = count;
clear_thread_queue(&sem->u.used.queue);
sem->u.used.net_count = count;
new(&sem->queue) ThreadQueue;
sem->u.used.name = tempName;
sem->u.used.owner = owner;
sem->u.used.select_infos = NULL;
@@ -486,6 +427,9 @@ create_sem_etc(int32 count, const char *name, team_id owner)
RELEASE_SEM_LOCK(*sem);
atomic_add(&sUsedSems, 1);
KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
count, name, owner, id);
}
RELEASE_SEM_LIST_LOCK();
@@ -572,95 +516,52 @@ deselect_sem(int32 id, struct select_info* info, bool kernel)
}
/*! Wake up a thread that's blocked on a semaphore
this function must be entered with interrupts disabled and THREADLOCK held
*/
status_t
sem_interrupt_thread(struct thread *thread)
{
struct thread_queue wakeupQueue;
int32 slot;
TRACE(("sem_interrupt_thread: called on thread %p (%ld), blocked on sem %ld\n",
thread, thread->id, thread->sem.blocking));
if (thread->state != B_THREAD_WAITING || thread->sem.blocking < 0)
return B_BAD_VALUE;
if ((thread->sem.flags & B_CAN_INTERRUPT) == 0
&& ((thread->sem.flags & B_KILL_CAN_INTERRUPT) == 0
|| (thread->sig_pending & KILL_SIGNALS) == 0)) {
return B_NOT_ALLOWED;
}
slot = thread->sem.blocking % sMaxSems;
GRAB_SEM_LOCK(sSems[slot]);
if (sSems[slot].id != thread->sem.blocking) {
panic("sem_interrupt_thread: thread %ld blocks on sem %ld, but that "
"sem doesn't exist!\n", thread->id, thread->sem.blocking);
}
clear_thread_queue(&wakeupQueue);
status_t result = remove_thread_from_sem(thread, &sSems[slot],
&wakeupQueue, B_INTERRUPTED, true);
RELEASE_SEM_LOCK(sSems[slot]);
if (result != B_OK) {
// The thread is not in the wait queue anymore. Probably it just timed
// out before we locked the sem.
return result;
}
while ((thread = thread_dequeue(&wakeupQueue)) != NULL)
scheduler_enqueue_in_run_queue(thread);
return B_OK;
}
/*! Forcibly removes a thread from a semaphores wait queue. May have to wake up
other threads in the process. All threads that need to be woken up are added
to the passed in thread_queue.
Must be called with semaphore lock held.
other threads in the process.
Must be called with semaphore lock held. The thread lock must not be held.
*/
static int
remove_thread_from_sem(struct thread *thread, struct sem_entry *sem,
struct thread_queue *queue, status_t acquireStatus, bool hasThreadLock)
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
// remove the thread from the queue and place it in the supplied queue
if (thread_dequeue_id(&sem->u.used.queue, thread->id) != thread)
return B_ENTRY_NOT_FOUND;
if (!entry->queued)
return;
sem->u.used.count += thread->sem.acquire_count;
thread->sem.acquire_status = acquireStatus;
thread_enqueue(thread, queue);
sem->queue.Remove(entry);
entry->queued = false;
sem->u.used.count += entry->count;
// now see if more threads need to be woken up
while (sem->u.used.count > 0
&& thread_lookat_queue(&sem->u.used.queue) != NULL) {
int32 delta = min_c(thread->sem.count, sem->u.used.count);
// We're done with this entry. We only have to check, if other threads
// need unblocking, too.
thread->sem.count -= delta;
if (thread->sem.count <= 0) {
thread = thread_dequeue(&sem->u.used.queue);
thread_enqueue(thread, queue);
// Now see if more threads need to be woken up. We get the thread lock for
// that time, so the blocking state of threads won't change. We need that
// lock anyway when unblocking a thread.
GRAB_THREAD_LOCK();
while ((entry = sem->queue.Head()) != NULL) {
if (thread_is_blocked(entry->thread)) {
// The thread is still waiting. If its count is satisfied, unblock
// it. Otherwise we can't unblock any other thread.
if (entry->count > sem->u.used.net_count)
break;
thread_unblock_locked(entry->thread, B_OK);
sem->u.used.net_count -= entry->count;
} else {
// The thread is no longer waiting, but still queued, which means
// acquiration failed and we can just remove it.
sem->u.used.count += entry->count;
}
sem->u.used.count -= delta;
sem->queue.Remove(entry);
entry->queued = false;
}
if (sem->u.used.count > 0 && sem->u.used.select_infos != NULL) {
if (hasThreadLock)
RELEASE_THREAD_LOCK();
RELEASE_THREAD_LOCK();
// select notification, if the semaphore is now acquirable
if (sem->u.used.count > 0)
notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
if (hasThreadLock)
GRAB_THREAD_LOCK();
}
return B_OK;
}
@@ -732,21 +633,14 @@ create_sem(int32 count, const char *name)
status_t
delete_sem(sem_id id)
{
struct thread_queue releaseQueue;
int32 releasedThreads;
struct thread *thread;
cpu_status state;
int32 slot;
char *name;
if (sSemsActive == false)
return B_NO_MORE_SEMS;
if (id < 0)
return B_BAD_SEM_ID;
slot = id % sMaxSems;
int32 slot = id % sMaxSems;
state = disable_interrupts();
cpu_status state = disable_interrupts();
GRAB_SEM_LOCK(sSems[slot]);
if (sSems[slot].id != id) {
@@ -756,22 +650,21 @@ delete_sem(sem_id id)
return B_BAD_SEM_ID;
}
KTRACE("delete_sem(sem: %ld)", id);
notify_sem_select_events(&sSems[slot], B_EVENT_INVALID);
sSems[slot].u.used.select_infos = NULL;
releasedThreads = 0;
clear_thread_queue(&releaseQueue);
// free any threads waiting for this semaphore
while ((thread = thread_dequeue(&sSems[slot].u.used.queue)) != NULL) {
thread->sem.acquire_status = B_BAD_SEM_ID;
thread->sem.count = 0;
thread_enqueue(thread, &releaseQueue);
releasedThreads++;
GRAB_THREAD_LOCK();
while (queued_thread* entry = sSems[slot].queue.RemoveHead()) {
entry->queued = false;
thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
}
RELEASE_THREAD_LOCK();
sSems[slot].id = -1;
name = sSems[slot].u.used.name;
char *name = sSems[slot].u.used.name;
sSems[slot].u.used.name = NULL;
RELEASE_SEM_LOCK(sSems[slot]);
@@ -782,15 +675,6 @@ delete_sem(sem_id id)
atomic_add(&sUsedSems, -1);
RELEASE_SEM_LIST_LOCK();
if (releasedThreads > 0) {
GRAB_THREAD_LOCK();
while ((thread = thread_dequeue(&releaseQueue)) != NULL)
scheduler_enqueue_in_run_queue(thread);
scheduler_reschedule();
RELEASE_THREAD_LOCK();
}
restore_interrupts(state);
free(name);
@@ -876,21 +760,20 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
}
}
KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
timeout);
if ((sSems[slot].u.used.count -= count) < 0) {
// we need to block
struct thread *thread = thread_get_current_thread();
timer timeoutTimer;
// stick it on the stack, since we may be blocking here
struct sem_timeout_args args;
TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));
// do a quick check to see if the thread has any pending signals
// this should catch most of the cases where the thread had a signal
if (((flags & B_CAN_INTERRUPT) && (thread->sig_pending & ~thread->sig_block_mask) != 0)
|| ((flags & B_KILL_CAN_INTERRUPT)
&& (thread->sig_pending & KILL_SIGNALS))) {
if (thread_is_interrupted(thread, flags)) {
sSems[slot].u.used.count += count;
status = B_INTERRUPTED;
// the other semaphore will be released later
@@ -900,90 +783,55 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
timeout = B_INFINITE_TIMEOUT;
thread->next_state = B_THREAD_WAITING;
thread->sem.flags = flags;
thread->sem.blocking = id;
thread->sem.acquire_count = count;
thread->sem.count = min_c(-sSems[slot].u.used.count, count);
// store the count we need to restore upon release
thread->sem.acquire_status = B_NO_ERROR;
thread_enqueue(thread, &sSems[slot].u.used.queue);
// enqueue in the semaphore queue and get ready to wait
queued_thread queueEntry(thread, count);
sSems[slot].queue.Add(&queueEntry);
queueEntry.queued = true;
if (timeout != B_INFINITE_TIMEOUT) {
TRACE(("switch_sem_etc: setting timeout sem for %Ld usecs, sem %ld, thread %ld\n",
timeout, id, thread->id));
// set up an event to go off with the thread struct as the data
args.blocked_sem_id = id;
args.blocked_thread = thread->id;
args.sem_count = count;
timeoutTimer.user_data = &args;
add_timer(&timeoutTimer, &sem_timeout, timeout,
flags & B_RELATIVE_TIMEOUT ?
B_ONE_SHOT_RELATIVE_TIMER : B_ONE_SHOT_ABSOLUTE_TIMER);
}
thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
(void*)(addr_t)id);
RELEASE_SEM_LOCK(sSems[slot]);
if (semToBeReleased >= B_OK) {
// release the other semaphore, if any
if (semToBeReleased >= 0) {
release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
semToBeReleased = -1;
}
GRAB_THREAD_LOCK();
// check again to see if a signal is pending.
// it may have been delivered while setting up the sem, though it's pretty unlikely
if (((flags & B_CAN_INTERRUPT)
&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
|| ((flags & B_KILL_CAN_INTERRUPT)
&& (thread->sig_pending & KILL_SIGNALS))) {
struct thread_queue wakeupQueue;
// ok, so a tiny race happened where a signal was delivered to this thread while
// it was setting up the sem. We can only be sure a signal wasn't delivered
// here, since the threadlock is held. The previous check would have found most
// instances, but there was a race, so we have to handle it. It'll be more messy...
clear_thread_queue(&wakeupQueue);
GRAB_SEM_LOCK(sSems[slot]);
if (sSems[slot].id == id) {
remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue,
B_INTERRUPTED, true);
}
RELEASE_SEM_LOCK(sSems[slot]);
struct thread *wakeupThread;
while ((wakeupThread = thread_dequeue(&wakeupQueue)) != NULL)
scheduler_enqueue_in_run_queue(wakeupThread);
status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
? thread_block_locked(thread)
: thread_block_with_timeout_locked(flags, timeout);
// fall through and reschedule since another thread with a higher priority may have been woken up
}
scheduler_reschedule();
RELEASE_THREAD_LOCK();
GRAB_SEM_LOCK(sSems[slot]);
if (timeout != B_INFINITE_TIMEOUT) {
if (thread->sem.acquire_status != B_TIMED_OUT) {
// cancel the timer event, the sem may have been deleted or interrupted
// with the timer still active
cancel_timer(&timeoutTimer);
}
}
// If we're still queued, this means the acquiration failed, and we
// need to remove our entry and (potentially) wake up other threads.
if (queueEntry.queued)
remove_thread_from_sem(&queueEntry, &sSems[slot]);
#ifdef DEBUG_LAST_ACQUIRER
if (thread->sem.acquire_status >= B_OK) {
if (acquireStatus >= B_OK) {
sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#ifdef DEBUG_LAST_ACQUIRER
sSems[slot].u.used.last_acquire_count = count;
}
#endif
}
RELEASE_SEM_LOCK(sSems[slot]);
restore_interrupts(state);
TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
thread->name));
return thread->sem.acquire_status;
KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
return acquireStatus;
} else {
#ifdef DEBUG_LAST_ACQUIRER
sSems[slot].u.used.net_count -= count;
sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#ifdef DEBUG_LAST_ACQUIRER
sSems[slot].u.used.last_acquire_count = count;
#endif
}
@@ -1004,6 +852,8 @@ err:
_user_debugger("Thread tried to acquire kernel semaphore.");
#endif
KTRACE("switch_sem_etc() done: 0x%lx", status);
return status;
}
@@ -1018,10 +868,7 @@ release_sem(sem_id id)
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
struct thread_queue releaseQueue;
int32 slot = id % sMaxSems;
cpu_status state;
status_t status = B_OK;
if (kernel_startup)
return B_OK;
@@ -1032,13 +879,12 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
return B_BAD_VALUE;
state = disable_interrupts();
GRAB_SEM_LOCK(sSems[slot]);
InterruptsLocker _;
SpinLocker semLocker(sSems[slot].lock);
if (sSems[slot].id != id) {
TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
status = B_BAD_SEM_ID;
goto err;
return B_BAD_SEM_ID;
}
// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
@@ -1047,81 +893,80 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
dprintf("thread %ld tried to release kernel semaphore.\n",
thread_get_current_thread_id());
status = B_NOT_ALLOWED;
goto err;
return B_NOT_ALLOWED;
}
#ifdef DEBUG_LAST_ACQUIRER
KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
flags);
sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#ifdef DEBUG_LAST_ACQUIRER
sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
sSems[slot].u.used.last_release_count = count;
#endif
// clear out a queue we will use to hold all of the threads that we will have to
// put back into the run list. This is done so the thread lock wont be held
// while this sems lock is held since the two locks are grabbed in the other
// order in sem_interrupt_thread.
clear_thread_queue(&releaseQueue);
if (flags & B_RELEASE_ALL) {
count = -sSems[slot].u.used.count;
count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;
// is there anything to do for us at all?
if (count == 0)
goto err;
return B_OK;
// Don't release more than necessary -- there might be interrupted/
// timed out threads in the queue.
flags |= B_RELEASE_IF_WAITING_ONLY;
}
SpinLocker threadLocker(thread_spinlock);
while (count > 0) {
int delta = count;
if (sSems[slot].u.used.count < 0) {
struct thread *thread = thread_lookat_queue(&sSems[slot].u.used.queue);
delta = min_c(count, thread->sem.count);
thread->sem.count -= delta;
if (thread->sem.count <= 0) {
// release this thread
thread = thread_dequeue(&sSems[slot].u.used.queue);
thread_enqueue(thread, &releaseQueue);
thread->sem.count = 0;
queued_thread* entry = sSems[slot].queue.Head();
if (entry == NULL) {
if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
sSems[slot].u.used.count += count;
sSems[slot].u.used.net_count += count;
}
} else if (flags & B_RELEASE_IF_WAITING_ONLY)
break;
}
sSems[slot].u.used.count += delta;
count -= delta;
if (thread_is_blocked(entry->thread)) {
// The thread is still waiting. If its count is satisfied,
// unblock it. Otherwise we can't unblock any other thread.
if (entry->count > sSems[slot].u.used.net_count + count) {
sSems[slot].u.used.count += count;
sSems[slot].u.used.net_count += count;
break;
}
thread_unblock_locked(entry->thread, B_OK);
int delta = min_c(count, entry->count);
sSems[slot].u.used.count += delta;
sSems[slot].u.used.net_count += delta - entry->count;
count -= delta;
} else {
// The thread is no longer waiting, but still queued, which
// means acquiration failed and we can just remove it.
sSems[slot].u.used.count += entry->count;
}
sSems[slot].queue.Remove(entry);
entry->queued = false;
}
threadLocker.Unlock();
if (sSems[slot].u.used.count > 0)
notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);
RELEASE_SEM_LOCK(sSems[slot]);
// pull off any items in the release queue and put them in the run queue
if (releaseQueue.head != NULL) {
struct thread *thread;
GRAB_THREAD_LOCK();
while ((thread = thread_dequeue(&releaseQueue)) != NULL) {
#if 0
// temporarily place thread in a run queue with a higher priority to boost it up
thread->next_priority = thread->priority >= B_FIRST_REAL_TIME_PRIORITY ?
thread->priority : thread->priority + 1;
#endif
scheduler_enqueue_in_run_queue(thread);
}
if ((flags & B_DO_NOT_RESCHEDULE) == 0)
scheduler_reschedule();
RELEASE_THREAD_LOCK();
// reschedule, if we've not explicitly been told not to
if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
semLocker.Unlock();
threadLocker.Lock();
scheduler_reschedule();
}
goto outnolock;
err:
RELEASE_SEM_LOCK(sSems[slot]);
outnolock:
restore_interrupts(state);
return status;
return B_OK;
}

View File

@@ -459,20 +459,6 @@ is_signal_blocked(int signal)
}
/*! Tries to interrupt a thread waiting for a semaphore or a condition variable.
Interrupts must be disabled, the thread lock be held.
*/
static status_t
signal_interrupt_thread(struct thread* thread)
{
if (thread->sem.blocking >= 0)
return sem_interrupt_thread(thread);
else if (thread->condition_variable_entry)
return condition_variable_interrupt_thread(thread);
return B_BAD_VALUE;
}
/*! Delivers the \a signal to the \a thread, but doesn't handle the signal -
it just makes sure the thread gets the signal, ie. unblocks it if needed.
This function must be called with interrupts disabled and the
@@ -505,19 +491,13 @@ deliver_signal(struct thread *thread, uint signal, uint32 flags)
mainThread->sig_pending |= SIGNAL_TO_MASK(SIGKILLTHR);
// Wake up main thread
if (mainThread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(mainThread);
else if (mainThread->state == B_THREAD_WAITING)
signal_interrupt_thread(mainThread);
thread_interrupt(mainThread, true);
// Supposed to fall through
}
case SIGKILLTHR:
// Wake up suspended threads and interrupt waiting ones
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
else if (thread->state == B_THREAD_WAITING)
signal_interrupt_thread(thread);
thread_interrupt(thread, true);
break;
case SIGCONT:
@@ -536,8 +516,7 @@ deliver_signal(struct thread *thread, uint signal, uint32 flags)
if (thread->sig_pending
& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
// Interrupt thread if it was waiting
if (thread->state == B_THREAD_WAITING)
signal_interrupt_thread(thread);
thread_interrupt(thread, false);
}
break;
}

View File

@@ -1745,7 +1745,7 @@ wait_for_child(pid_t child, uint32 flags, int32 *_reason,
ConditionVariableEntry<team_dead_children> deadWaitEntry;
if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
deadWaitEntry.Add(team->dead_children);
deadWaitEntry.Add(team->dead_children, B_CAN_INTERRUPT);
locker.Unlock();
@@ -1766,7 +1766,7 @@ wait_for_child(pid_t child, uint32 flags, int32 *_reason,
return status;
}
status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
status = deadWaitEntry.Wait();
if (status == B_INTERRUPTED) {
T(WaitForChildDone(status));
return status;

View File

@@ -78,8 +78,6 @@ static thread_id sNextThreadID = 1;
static int32 sMaxThreads = 4096;
static int32 sUsedThreads = 0;
static sem_id sSnoozeSem = -1;
// death stacks - used temporarily as a thread cleans itself up
struct death_stack {
area_id area;
@@ -226,8 +224,6 @@ create_thread_struct(struct thread *inthread, const char *name,
thread->id = threadID >= 0 ? threadID : allocate_thread_id();
thread->team = NULL;
thread->cpu = cpu;
thread->sem.blocking = -1;
thread->condition_variable_entry = NULL;
thread->fault_handler = 0;
thread->page_faults_allowed = 1;
thread->kernel_stack_area = -1;
@@ -659,6 +655,28 @@ thread_exit2(void *_args)
}
static sem_id
get_thread_wait_sem(struct thread* thread)
{
if (thread->state == B_THREAD_WAITING
&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
return (sem_id)(addr_t)thread->wait.object;
}
return -1;
}
static PrivateConditionVariable*
get_thread_wait_cvar(struct thread* thread)
{
if (thread->state == B_THREAD_WAITING
&& thread->wait.type == THREAD_BLOCK_TYPE_CONDITION_VARIABLE) {
return (PrivateConditionVariable*)thread->wait.object;
}
return NULL;
}
/*!
Fills the thread_info structure with information from the specified
thread.
@@ -673,25 +691,37 @@ fill_thread_info(struct thread *thread, thread_info *info, size_t size)
strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
if (thread->state == B_THREAD_WAITING) {
if (thread->sem.blocking == sSnoozeSem)
info->state = B_THREAD_ASLEEP;
else if (thread->sem.blocking == thread->msg.read_sem)
info->state = B_THREAD_RECEIVING;
else
info->state = B_THREAD_WAITING;
info->state = B_THREAD_WAITING;
switch (thread->wait.type) {
case THREAD_BLOCK_TYPE_SNOOZE:
info->state = B_THREAD_ASLEEP;
break;
case THREAD_BLOCK_TYPE_SEMAPHORE:
{
sem_id sem = (sem_id)(addr_t)thread->wait.object;
if (sem == thread->msg.read_sem)
info->state = B_THREAD_RECEIVING;
break;
}
case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
default:
break;
}
} else
info->state = (thread_state)thread->state;
info->priority = thread->priority;
info->sem = thread->sem.blocking;
info->user_time = thread->user_time;
info->kernel_time = thread->kernel_time;
info->stack_base = (void *)thread->user_stack_base;
info->stack_end = (void *)(thread->user_stack_base
+ thread->user_stack_size);
info->sem = get_thread_wait_sem(thread);
}
static status_t
send_data_etc(thread_id id, int32 code, const void *buffer,
size_t bufferSize, int32 flags)
@@ -989,12 +1019,26 @@ state_to_text(struct thread *thread, int32 state)
return "running";
case B_THREAD_WAITING:
if (thread->sem.blocking == sSnoozeSem)
return "zzz";
if (thread->sem.blocking == thread->msg.read_sem)
return "receive";
{
switch (thread->wait.type) {
case THREAD_BLOCK_TYPE_SNOOZE:
return "zzz";
case THREAD_BLOCK_TYPE_SEMAPHORE:
{
sem_id sem = (sem_id)(addr_t)thread->wait.object;
if (sem == thread->msg.read_sem)
return "receive";
break;
}
case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
default:
break;
}
return "waiting";
}
case B_THREAD_SUSPENDED:
return "suspended";
@@ -1029,14 +1073,8 @@ _dump_thread_info(struct thread *thread)
kprintf("sig_pending: %#lx (blocked: %#lx)\n", thread->sig_pending,
thread->sig_block_mask);
kprintf("in_kernel: %d\n", thread->in_kernel);
kprintf(" sem.blocking: %ld\n", thread->sem.blocking);
kprintf(" sem.count: %ld\n", thread->sem.count);
kprintf(" sem.acquire_status: %#lx\n", thread->sem.acquire_status);
kprintf(" sem.flags: %#lx\n", thread->sem.flags);
PrivateConditionVariableEntry* entry = thread->condition_variable_entry;
kprintf("condition variable: %p\n", entry ? entry->Variable() : NULL);
kprintf("sem blocking: %ld\n", get_thread_wait_sem(thread));
kprintf("condition variable: %p\n", get_thread_wait_cvar(thread));
kprintf("fault_handler: %p\n", (void *)thread->fault_handler);
kprintf("args: %p %p\n", thread->args1, thread->args2);
kprintf("entry: %p\n", (void *)thread->entry);
@@ -1165,7 +1203,7 @@ dump_thread_list(int argc, char **argv)
if ((requiredState && thread->state != requiredState)
|| (calling && !arch_debug_contains_call(thread, callSymbol,
callStart, callEnd))
|| (sem > 0 && thread->sem.blocking != sem)
|| (sem > 0 && get_thread_wait_sem(thread) != sem)
|| (team > 0 && thread->team->id != team)
|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
continue;
@@ -1175,10 +1213,10 @@ dump_thread_list(int argc, char **argv)
// does it block on a semaphore or a condition variable?
if (thread->state == B_THREAD_WAITING) {
if (thread->condition_variable_entry)
kprintf("%p ", thread->condition_variable_entry->Variable());
if (get_thread_wait_cvar(thread))
kprintf("%p ", get_thread_wait_cvar(thread));
else
kprintf("%10ld ", thread->sem.blocking);
kprintf("%10ld ", get_thread_wait_sem(thread));
} else
kprintf(" - ");
@@ -1881,13 +1919,6 @@ thread_init(kernel_args *args)
// zero out the dead thread structure q
memset(&dead_q, 0, sizeof(dead_q));
// allocate snooze sem
sSnoozeSem = create_sem(0, "snooze sem");
if (sSnoozeSem < 0) {
panic("error creating snooze sem\n");
return sSnoozeSem;
}
if (arch_thread_init(args) < B_OK)
panic("arch_thread_init() failed!\n");
@@ -2039,6 +2070,77 @@ thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
return B_OK;
}
// #pragma mark - thread blocking API
static status_t
thread_block_timeout(timer* timer)
{
// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
// we're holding the thread lock already. This makes things comfortably
// easy.
struct thread* thread = (struct thread*)timer->user_data;
if (thread_unblock_locked(thread, B_TIMED_OUT))
return B_INVOKE_SCHEDULER;
return B_HANDLED_INTERRUPT;
}
status_t
thread_block()
{
InterruptsSpinLocker _(thread_spinlock);
return thread_block_locked(thread_get_current_thread());
}
bool
thread_unblock(status_t threadID, status_t status)
{
InterruptsSpinLocker _(thread_spinlock);
struct thread* thread = thread_get_thread_struct_locked(threadID);
if (thread == NULL)
return false;
return thread_unblock_locked(thread, status);
}
status_t
thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
{
struct thread* thread = thread_get_current_thread();
if (thread->wait.status != 1)
return thread->wait.status;
// Timer flags: absolute/relative + "acquire thread lock". The latter
// avoids nasty race conditions and deadlock problems that could otherwise
// occur between our cancel_timer() and a concurrently executing
// thread_block_timeout().
uint32 timerFlags = (timeoutFlags & B_RELATIVE_TIMEOUT)
? B_ONE_SHOT_RELATIVE_TIMER : B_ONE_SHOT_ABSOLUTE_TIMER;
timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
// install the timer
thread->wait.unblock_timer.user_data = thread;
add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
timerFlags);
// block
status_t error = thread_block_locked(thread);
// cancel timer, if it didn't fire
if (error != B_TIMED_OUT)
cancel_timer(&thread->wait.unblock_timer);
return error;
}
// #pragma mark - public kernel API
@@ -2297,7 +2399,12 @@ snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
if (timebase != B_SYSTEM_TIMEBASE)
return B_BAD_VALUE;
status = acquire_sem_etc(sSnoozeSem, 1, flags, timeout);
InterruptsSpinLocker _(thread_spinlock);
struct thread* thread = thread_get_current_thread();
thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
status = thread_block_with_timeout_locked(flags, timeout);
if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
return B_OK;