Cleaned up the source, made it compatible with our style guide (i.e. global
variables now have the 'g' prefix).
Fixed a very unlikely race condition in create_sem_etc() (can only happen if
that call isn't used properly).
Fixed some wrong return values (e.g. B_BAD_TEAM_ID wasn't returned anywhere).
Optimized _get_next_sem_info(): it now only grabs the semaphore lock
if it has good reason to do so.
Added some comments.


git-svn-id: file:///srv/svn/repos/haiku/trunk/current@1308 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2002-09-30 03:43:32 +00:00
parent 79f4cb3e2c
commit 3fa1fa66f1
1 changed files with 295 additions and 198 deletions

View File

@ -22,21 +22,31 @@
#include <string.h>
#include <stdlib.h>
#define TRACE_SEM 0
#if TRACE_SEM
# define TRACE(x) dprintf x
# define TRACE_BLOCK(x) dprintf x
#else
# define TRACE(x) ;
# define TRACE_BLOCK(x) ;
#endif
struct sem_entry {
sem_id id;
int count;
sem_id id;
int count;
struct thread_queue q;
char *name;
int lock;
team_id owner; // if set to -1, means owned by a port
char *name;
int lock;
team_id owner; // if set to -1, means owned by a port
};
#define MAX_SEMS 4096
static struct sem_entry *sems = NULL;
static region_id sem_region = 0;
static bool sems_active = false;
static sem_id next_sem = 0;
static struct sem_entry *gSems = NULL;
static region_id gSemRegion = 0;
static bool gSemsActive = false;
static sem_id gNextSemID = 0;
static int sem_spinlock = 0;
#define GRAB_SEM_LIST_LOCK() acquire_spinlock(&sem_spinlock)
@ -61,16 +71,16 @@ dump_sem_list(int argc, char **argv)
{
int i;
for (i=0; i<MAX_SEMS; i++) {
if (sems[i].id >= 0)
dprintf("%p\tid: 0x%lx\t\tname: '%s'\n", &sems[i], sems[i].id, sems[i].name);
for (i = 0; i < MAX_SEMS; i++) {
if (gSems[i].id >= 0)
dprintf("%p\tid: 0x%lx\t\tname: '%s'\n", &gSems[i], gSems[i].id, gSems[i].name);
}
return 0;
}
static void
_dump_sem_info(struct sem_entry *sem)
dump_sem(struct sem_entry *sem)
{
dprintf("SEM: %p\n", sem);
dprintf("name: '%s'\n", sem->name);
@ -96,27 +106,26 @@ dump_sem_info(int argc, char **argv)
if (num > KERNEL_BASE && num <= (KERNEL_BASE + (KERNEL_SIZE - 1))) {
// XXX semi-hack
_dump_sem_info((struct sem_entry *)num);
dump_sem((struct sem_entry *)num);
return 0;
}
else {
} else {
unsigned slot = num % MAX_SEMS;
if (sems[slot].id != (int)num) {
if (gSems[slot].id != (int)num) {
dprintf("sem 0x%lx doesn't exist!\n", num);
return 0;
}
_dump_sem_info(&sems[slot]);
dump_sem(&gSems[slot]);
return 0;
}
}
// walk through the sem list, trying to match name
for (i=0; i<MAX_SEMS; i++) {
if (sems[i].name != NULL)
if (strcmp(argv[1], sems[i].name) == 0) {
_dump_sem_info(&sems[i]);
return 0;
}
for (i = 0; i < MAX_SEMS; i++) {
if (gSems[i].name != NULL
&& strcmp(argv[1], gSems[i].name) == 0) {
dump_sem(&gSems[i]);
return 0;
}
}
return 0;
}
@ -127,31 +136,41 @@ sem_init(kernel_args *ka)
{
int i;
dprintf("sem_init: entry\n");
TRACE(("sem_init: entry\n"));
// create and initialize semaphore table
sem_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "sem_table", (void **)&sems,
REGION_ADDR_ANY_ADDRESS, sizeof(struct sem_entry) * MAX_SEMS, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
if (sem_region < 0) {
gSemRegion = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "sem_table",
(void **)&gSems, REGION_ADDR_ANY_ADDRESS, sizeof(struct sem_entry) * MAX_SEMS,
REGION_WIRING_WIRED, LOCK_RW | LOCK_KERNEL);
if (gSemRegion < 0)
panic("unable to allocate semaphore table!\n");
}
memset(sems, 0, sizeof(struct sem_entry) * MAX_SEMS);
for (i=0; i<MAX_SEMS; i++)
sems[i].id = -1;
memset(gSems, 0, sizeof(struct sem_entry) * MAX_SEMS);
for (i = 0; i < MAX_SEMS; i++)
gSems[i].id = -1;
// add debugger commands
add_debugger_command("sems", &dump_sem_list, "Dump a list of all active semaphores");
add_debugger_command("sem", &dump_sem_info, "Dump info about a particular semaphore");
dprintf("sem_init: exit\n");
TRACE(("sem_init: exit\n"));
sems_active = true;
gSemsActive = true;
return 0;
}
sem_id create_sem_etc(int32 count, const char *name, team_id owner)
/** Creates a semaphore with the given parameters.
* Note, the team_id is not checked, it must be correct, or else
* that semaphore might not be deleted.
* This function is only available from within the kernel, and
* should not be made public - if possible, we should remove it
* completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char *name, team_id owner)
{
int i;
int state;
@ -159,9 +178,9 @@ sem_id create_sem_etc(int32 count, const char *name, team_id owner)
char *temp_name;
int name_len;
if (sems_active == false)
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (name == NULL)
name = "unnamed semaphore";
@ -169,34 +188,39 @@ sem_id create_sem_etc(int32 count, const char *name, team_id owner)
name_len = min(name_len, SYS_MAX_OS_NAME_LEN);
temp_name = (char *)kmalloc(name_len);
if (temp_name == NULL)
return ENOMEM;
return B_NO_MEMORY;
strlcpy(temp_name, name, name_len);
state = disable_interrupts();
GRAB_SEM_LIST_LOCK();
// find the first empty spot
for (i=0; i<MAX_SEMS; i++) {
if (sems[i].id == -1) {
for (i = 0; i < MAX_SEMS; i++) {
if (gSems[i].id == -1) {
// make the sem id be a multiple of the slot it's in
if (i >= next_sem % MAX_SEMS)
next_sem += i - next_sem % MAX_SEMS;
if (i >= gNextSemID % MAX_SEMS)
gNextSemID += i - gNextSemID % MAX_SEMS;
else
next_sem += MAX_SEMS - (next_sem % MAX_SEMS - i);
sems[i].id = next_sem++;
gNextSemID += MAX_SEMS - (gNextSemID % MAX_SEMS - i);
gSems[i].id = gNextSemID++;
sems[i].lock = 0;
GRAB_SEM_LOCK(sems[i]);
// Set the owner while the sem list lock is held, or else it
// might get lost if we have a sem_delete_owned_sems() running.
// (Although this should be impossible if create_sem_etc() is
// used properly, I just feel better with it.)
gSems[i].owner = owner;
gSems[i].lock = 0;
GRAB_SEM_LOCK(gSems[i]);
RELEASE_SEM_LIST_LOCK();
sems[i].q.tail = NULL;
sems[i].q.head = NULL;
sems[i].count = count;
sems[i].name = temp_name;
sems[i].owner = owner;
retval = sems[i].id;
gSems[i].q.tail = NULL;
gSems[i].q.head = NULL;
gSems[i].count = count;
gSems[i].name = temp_name;
retval = gSems[i].id;
RELEASE_SEM_LOCK(sems[i]);
RELEASE_SEM_LOCK(gSems[i]);
goto out;
}
}
@ -210,27 +234,32 @@ out:
return retval;
}
sem_id create_sem(int32 count, const char *name)
sem_id
create_sem(int32 count, const char *name)
{
return create_sem_etc(count, name, team_get_kernel_team_id());
}
status_t delete_sem(sem_id id)
status_t
delete_sem(sem_id id)
{
return delete_sem_etc(id, 0);
}
status_t delete_sem_etc(sem_id id, status_t return_code)
status_t
delete_sem_etc(sem_id id, status_t return_code)
{
int slot;
int state;
status_t err = B_NO_ERROR;
struct thread *t;
int released_threads;
char *old_name;
struct thread_queue release_queue;
if (sems_active == false)
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (id < 0)
return B_BAD_SEM_ID;
@ -238,12 +267,12 @@ status_t delete_sem_etc(sem_id id, status_t return_code)
slot = id % MAX_SEMS;
state = disable_interrupts();
GRAB_SEM_LOCK(sems[slot]);
GRAB_SEM_LOCK(gSems[slot]);
if (sems[slot].id != id) {
RELEASE_SEM_LOCK(sems[slot]);
if (gSems[slot].id != id) {
RELEASE_SEM_LOCK(gSems[slot]);
restore_interrupts(state);
dprintf("delete_sem: invalid sem_id %ld\n", id);
TRACE(("delete_sem: invalid sem_id %ld\n", id));
return B_BAD_SEM_ID;
}
@ -251,7 +280,7 @@ status_t delete_sem_etc(sem_id id, status_t return_code)
release_queue.head = release_queue.tail = NULL;
// free any threads waiting for this semaphore
while ((t = thread_dequeue(&sems[slot].q)) != NULL) {
while ((t = thread_dequeue(&gSems[slot].q)) != NULL) {
t->state = B_THREAD_READY;
t->sem_errcode = B_BAD_SEM_ID;
t->sem_deleted_retcode = return_code;
@ -260,11 +289,11 @@ status_t delete_sem_etc(sem_id id, status_t return_code)
released_threads++;
}
sems[slot].id = -1;
old_name = sems[slot].name;
sems[slot].name = NULL;
gSems[slot].id = -1;
old_name = gSems[slot].name;
gSems[slot].name = NULL;
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
if (released_threads > 0) {
GRAB_THREAD_LOCK();
@ -279,11 +308,14 @@ status_t delete_sem_etc(sem_id id, status_t return_code)
kfree(old_name);
return err;
return B_OK;
}
// Called from a timer handler. Wakes up a semaphore
static int32 sem_timeout(timer *data)
/** Called from a timer handler. Wakes up a semaphore */
static int32
sem_timeout(timer *data)
{
struct sem_timeout_args *args = (struct sem_timeout_args *)data->entry.prev;
struct thread *t;
@ -297,20 +329,20 @@ static int32 sem_timeout(timer *data)
slot = args->blocked_sem_id % MAX_SEMS;
state = disable_interrupts();
GRAB_SEM_LOCK(sems[slot]);
GRAB_SEM_LOCK(gSems[slot]);
// dprintf("sem_timeout: called on 0x%x sem %d, tid %d\n", to, to->sem_id, to->thread_id);
TRACE(("sem_timeout: called on 0x%x sem %d, tid %d\n", to, to->sem_id, to->thread_id));
if (sems[slot].id != args->blocked_sem_id) {
if (gSems[slot].id != args->blocked_sem_id) {
// this thread was not waiting on this semaphore
panic("sem_timeout: thid %ld was trying to wait on sem %ld which doesn't exist!\n",
args->blocked_thread, args->blocked_sem_id);
}
wakeup_queue.head = wakeup_queue.tail = NULL;
remove_thread_from_sem(t, &sems[slot], &wakeup_queue, B_TIMED_OUT);
remove_thread_from_sem(t, &gSems[slot], &wakeup_queue, B_TIMED_OUT);
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
GRAB_THREAD_LOCK();
// put the threads in the run q here to make sure we dont deadlock in sem_interrupt_thread
@ -325,52 +357,55 @@ static int32 sem_timeout(timer *data)
}
status_t acquire_sem(sem_id id)
status_t
acquire_sem(sem_id id)
{
return acquire_sem_etc(id, 1, 0, 0);
}
status_t acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
int slot = id % MAX_SEMS;
int state;
status_t err = 0;
status_t status = B_OK;
if (sems_active == false)
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (id < 0) {
dprintf("acquire_sem_etc: invalid sem handle %ld\n", id);
if (id < 0)
return B_BAD_SEM_ID;
}
if (count <= 0)
return EINVAL;
return B_BAD_VALUE;
state = disable_interrupts();
GRAB_SEM_LOCK(sems[slot]);
GRAB_SEM_LOCK(gSems[slot]);
if (sems[slot].id != id) {
dprintf("acquire_sem_etc: bad sem_id %ld\n", id);
err = B_BAD_SEM_ID;
if (gSems[slot].id != id) {
TRACE(("acquire_sem_etc: bad sem_id %ld\n", id));
status = B_BAD_SEM_ID;
goto err;
}
if (sems[slot].count - count < 0 && (flags & B_TIMEOUT) != 0 && timeout <= 0) {
if (gSems[slot].count - count < 0 && (flags & B_TIMEOUT) != 0 && timeout <= 0) {
// immediate timeout
err = B_TIMED_OUT;
status = B_TIMED_OUT;
goto err;
}
if ((sems[slot].count -= count) < 0) {
if ((gSems[slot].count -= count) < 0) {
// we need to block
struct thread *t = thread_get_current_thread();
timer timeout_timer; // stick it on the stack, since we may be blocking here
struct sem_timeout_args args;
TRACE_BLOCK(("acquire_sem_etc(id = %ld): block name = %s, thread = %p, name = %s\n", id, gSems[slot].name, t, t->name));
// do a quick check to see if the thread has any pending kill signals
// this should catch most of the cases where the thread had a signal
if ((flags & B_CAN_INTERRUPT) && (t->pending_signals & SIG_KILL)) {
sems[slot].count += count;
err = EINTR;
gSems[slot].count += count;
status = B_INTERRUPTED;
goto err;
}
@ -378,14 +413,15 @@ status_t acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout
t->sem_flags = flags;
t->sem_blocking = id;
t->sem_acquire_count = count;
t->sem_count = min(-sems[slot].count, count); // store the count we need to restore upon release
t->sem_count = min(-gSems[slot].count, count); // store the count we need to restore upon release
t->sem_deleted_retcode = 0;
t->sem_errcode = B_NO_ERROR;
thread_enqueue(t, &sems[slot].q);
thread_enqueue(t, &gSems[slot].q);
if ((flags & (B_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0) {
// dprintf("sem_acquire_etc: setting timeout sem for %d %d usecs, semid %d, tid %d\n",
// timeout, sem_id, t->id);
TRACE(("sem_acquire_etc: setting timeout sem for %d %d usecs, semid %d, tid %d\n",
timeout, sem_id, t->id));
// set up an event to go off with the thread struct as the data
args.blocked_sem_id = id;
args.blocked_thread = t->id;
@ -398,7 +434,7 @@ status_t acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout
B_ONE_SHOT_RELATIVE_TIMER : B_ONE_SHOT_ABSOLUTE_TIMER);
}
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
GRAB_THREAD_LOCK();
// check again to see if a kill signal is pending.
// it may have been delivered while setting up the sem, though it's pretty unlikely
@ -409,11 +445,11 @@ status_t acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout
// here, since the threadlock is held. The previous check would have found most
// instances, but there was a race, so we have to handle it. It'll be more messy...
wakeup_queue.head = wakeup_queue.tail = NULL;
GRAB_SEM_LOCK(sems[slot]);
if (sems[slot].id == id) {
remove_thread_from_sem(t, &sems[slot], &wakeup_queue, EINTR);
GRAB_SEM_LOCK(gSems[slot]);
if (gSems[slot].id == id) {
remove_thread_from_sem(t, &gSems[slot], &wakeup_queue, EINTR);
}
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
while ((t = thread_dequeue(&wakeup_queue)) != NULL) {
thread_enqueue_run_q(t);
}
@ -432,43 +468,47 @@ status_t acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout
restore_interrupts(state);
TRACE_BLOCK(("acquire_sem_etc(id = %ld): exit block name = %s, thread = %p (%s)\n", id, gSems[slot].name, t, t->name));
return t->sem_errcode;
}
err:
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
restore_interrupts(state);
return err;
return status;
}
status_t release_sem(sem_id id)
status_t
release_sem(sem_id id)
{
return release_sem_etc(id, 1, 0);
}
status_t release_sem_etc(sem_id id, int32 count, uint32 flags)
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
int slot = id % MAX_SEMS;
int state;
int released_threads = 0;
status_t err = 0;
struct thread_queue release_queue;
status_t status = B_OK;
if (sems_active == false)
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (id < 0)
return B_BAD_SEM_ID;
if (count <= 0)
return EINVAL;
return B_BAD_VALUE;
state = disable_interrupts();
GRAB_SEM_LOCK(sems[slot]);
GRAB_SEM_LOCK(gSems[slot]);
if (sems[slot].id != id) {
dprintf("sem_release_etc: invalid sem_id %ld\n", id);
err = B_BAD_SEM_ID;
if (gSems[slot].id != id) {
TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
status = B_BAD_SEM_ID;
goto err;
}
@ -480,14 +520,14 @@ status_t release_sem_etc(sem_id id, int32 count, uint32 flags)
while (count > 0) {
int delta = count;
if (sems[slot].count < 0) {
struct thread *t = thread_lookat_queue(&sems[slot].q);
if (gSems[slot].count < 0) {
struct thread *t = thread_lookat_queue(&gSems[slot].q);
delta = min(count, t->sem_count);
t->sem_count -= delta;
if (t->sem_count <= 0) {
// release this thread
t = thread_dequeue(&sems[slot].q);
t = thread_dequeue(&gSems[slot].q);
thread_enqueue(t, &release_queue);
t->state = B_THREAD_READY;
released_threads++;
@ -496,10 +536,10 @@ status_t release_sem_etc(sem_id id, int32 count, uint32 flags)
}
}
sems[slot].count += delta;
gSems[slot].count += delta;
count -= delta;
}
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
// pull off any items in the release queue and put them in the run queue
if (released_threads > 0) {
@ -521,19 +561,21 @@ status_t release_sem_etc(sem_id id, int32 count, uint32 flags)
goto outnolock;
err:
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
outnolock:
restore_interrupts(state);
return err;
return status;
}
status_t get_sem_count(sem_id id, int32* thread_count)
status_t
get_sem_count(sem_id id, int32 *thread_count)
{
int slot;
int state;
if (sems_active == false)
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (id < 0)
return B_BAD_SEM_ID;
@ -543,29 +585,37 @@ status_t get_sem_count(sem_id id, int32* thread_count)
slot = id % MAX_SEMS;
state = disable_interrupts();
GRAB_SEM_LOCK(sems[slot]);
GRAB_SEM_LOCK(gSems[slot]);
if (sems[slot].id != id) {
RELEASE_SEM_LOCK(sems[slot]);
if (gSems[slot].id != id) {
RELEASE_SEM_LOCK(gSems[slot]);
restore_interrupts(state);
dprintf("sem_get_count: invalid sem_id %ld\n", id);
TRACE(("sem_get_count: invalid sem_id %ld\n", id));
return B_BAD_SEM_ID;
}
*thread_count = sems[slot].count;
*thread_count = gSems[slot].count;
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
restore_interrupts(state);
return B_NO_ERROR;
}
status_t _get_sem_info(sem_id id, struct sem_info *info, size_t sz)
/** The underscore is needed for binary compatibility with BeOS.
* OS.h contains the following macro:
* #define get_sem_info(sem, info) \
* _get_sem_info((sem), (info), sizeof(*(info)))
*/
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t sz)
{
int state;
int slot;
if (sems_active == false)
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (id < 0)
return B_BAD_SEM_ID;
@ -575,39 +625,47 @@ status_t _get_sem_info(sem_id id, struct sem_info *info, size_t sz)
slot = id % MAX_SEMS;
state = disable_interrupts();
GRAB_SEM_LOCK(sems[slot]);
GRAB_SEM_LOCK(gSems[slot]);
if (sems[slot].id != id) {
RELEASE_SEM_LOCK(sems[slot]);
if (gSems[slot].id != id) {
RELEASE_SEM_LOCK(gSems[slot]);
restore_interrupts(state);
dprintf("get_sem_info: invalid sem_id %ld\n", id);
TRACE(("get_sem_info: invalid sem_id %ld\n", id));
return B_BAD_SEM_ID;
}
info->sem = sems[slot].id;
info->team = sems[slot].owner;
strncpy(info->name, sems[slot].name, SYS_MAX_OS_NAME_LEN-1);
info->count = sems[slot].count;
info->latest_holder = sems[slot].q.head->id; // XXX not sure if this is correct
info->sem = gSems[slot].id;
info->team = gSems[slot].owner;
strncpy(info->name, gSems[slot].name, SYS_MAX_OS_NAME_LEN-1);
info->count = gSems[slot].count;
info->latest_holder = gSems[slot].q.head->id; // XXX not sure if this is correct
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
restore_interrupts(state);
return B_NO_ERROR;
}
status_t _get_next_sem_info(team_id team, int32 *cookie, struct sem_info *info, size_t sz)
/** The underscore is needed for binary compatibility with BeOS.
* OS.h contains the following macro:
* #define get_next_sem_info(team, cookie, info) \
* _get_next_sem_info((team), (cookie), (info), sizeof(*(info)))
*/
status_t
_get_next_sem_info(team_id team, int32 *cookie, struct sem_info *info, size_t sz)
{
int state;
int slot;
if (sems_active == false)
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (cookie == NULL)
return EINVAL;
/* prevents sems[].owner == -1 >= means owned by a port */
/* prevents gSems[].owner == -1 >= means owned by a port */
if (team < 0)
return EINVAL;
return B_BAD_TEAM_ID;
if (*cookie == NULL) {
// return first found
@ -624,21 +682,22 @@ status_t _get_next_sem_info(team_id team, int32 *cookie, struct sem_info *info,
GRAB_SEM_LIST_LOCK();
while (slot < MAX_SEMS) {
GRAB_SEM_LOCK(sems[slot]);
if (sems[slot].id != -1)
if (sems[slot].owner == team) {
if (gSems[slot].id != -1 && gSems[slot].owner == team) {
GRAB_SEM_LOCK(gSems[slot]);
if (gSems[slot].id != -1 && gSems[slot].owner == team) {
// found one!
info->sem = sems[slot].id;
info->team = sems[slot].owner;
strncpy(info->name, sems[slot].name, SYS_MAX_OS_NAME_LEN-1);
info->count = sems[slot].count;
info->latest_holder = sems[slot].q.head->id; // XXX not sure if this is the latest holder, or the next holder...
info->sem = gSems[slot].id;
info->team = gSems[slot].owner;
strncpy(info->name, gSems[slot].name, SYS_MAX_OS_NAME_LEN-1);
info->count = gSems[slot].count;
info->latest_holder = gSems[slot].q.head->id; // XXX not sure if this is the latest holder, or the next holder...
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
slot++;
break;
}
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
}
slot++;
}
RELEASE_SEM_LIST_LOCK();
@ -650,50 +709,56 @@ status_t _get_next_sem_info(team_id team, int32 *cookie, struct sem_info *info,
return B_NO_ERROR;
}
status_t set_sem_owner(sem_id id, team_id team)
status_t
set_sem_owner(sem_id id, team_id team)
{
int state;
int slot;
if (sems_active == false)
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (id < 0)
return B_BAD_SEM_ID;
if (team < 0)
return EINVAL;
return B_BAD_TEAM_ID;
// XXX: todo check if team exists
// if (team_get_team_struct(team) == NULL)
// return B_BAD_SEM_ID; // team_id doesn't exist right now
// check if the team ID is valid
if (team_get_team_struct(team) == NULL)
return B_BAD_TEAM_ID;
slot = id % MAX_SEMS;
state = disable_interrupts();
GRAB_SEM_LOCK(sems[slot]);
GRAB_SEM_LOCK(gSems[slot]);
if (sems[slot].id != id) {
RELEASE_SEM_LOCK(sems[slot]);
if (gSems[slot].id != id) {
RELEASE_SEM_LOCK(gSems[slot]);
restore_interrupts(state);
dprintf("set_sem_owner: invalid sem_id %ld\n", id);
TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
return B_BAD_SEM_ID;
}
sems[slot].owner = team;
gSems[slot].owner = team;
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
restore_interrupts(state);
return B_NO_ERROR;
}
// Wake up a thread that's blocked on a semaphore
// this function must be entered with interrupts disabled and THREADLOCK held
status_t sem_interrupt_thread(struct thread *t)
/** Wake up a thread that's blocked on a semaphore
* this function must be entered with interrupts disabled and THREADLOCK held
*/
status_t
sem_interrupt_thread(struct thread *t)
{
int slot;
struct thread_queue wakeup_queue;
// dprintf("sem_interrupt_thread: called on thread %p (%d), blocked on sem 0x%x\n", t, t->id, t->sem_blocking);
TRACE(("sem_interrupt_thread: called on thread %p (%d), blocked on sem 0x%x\n", t, t->id, t->sem_blocking));
if (t->state != B_THREAD_WAITING || t->sem_blocking < 0)
return EINVAL;
@ -702,17 +767,17 @@ status_t sem_interrupt_thread(struct thread *t)
slot = t->sem_blocking % MAX_SEMS;
GRAB_SEM_LOCK(sems[slot]);
GRAB_SEM_LOCK(gSems[slot]);
if (sems[slot].id != t->sem_blocking) {
if (gSems[slot].id != t->sem_blocking) {
panic("sem_interrupt_thread: thread 0x%lx sez it's blocking on sem 0x%lx, but that sem doesn't exist!\n", t->id, t->sem_blocking);
}
wakeup_queue.head = wakeup_queue.tail = NULL;
if (remove_thread_from_sem(t, &sems[slot], &wakeup_queue, EINTR) == ERR_NOT_FOUND)
if (remove_thread_from_sem(t, &gSems[slot], &wakeup_queue, EINTR) == ERR_NOT_FOUND)
panic("sem_interrupt_thread: thread 0x%lx not found in sem 0x%lx's wait queue\n", t->id, t->sem_blocking);
RELEASE_SEM_LOCK(sems[slot]);
RELEASE_SEM_LOCK(gSems[slot]);
while ((t = thread_dequeue(&wakeup_queue)) != NULL) {
thread_enqueue_run_q(t);
@ -721,10 +786,14 @@ status_t sem_interrupt_thread(struct thread *t)
return B_NO_ERROR;
}
// forcibly removes a thread from a semaphores wait q. May have to wake up other threads in the
// process. All threads that need to be woken up are added to the passed in thread_queue.
// must be called with sem lock held
static int remove_thread_from_sem(struct thread *t, struct sem_entry *sem, struct thread_queue *queue, int sem_errcode)
/** forcibly removes a thread from a semaphores wait q. May have to wake up other threads in the
* process. All threads that need to be woken up are added to the passed in thread_queue.
* must be called with sem lock held
*/
static int
remove_thread_from_sem(struct thread *t, struct sem_entry *sem, struct thread_queue *queue, int sem_errcode)
{
struct thread *t1;
@ -752,23 +821,27 @@ static int remove_thread_from_sem(struct thread *t, struct sem_entry *sem, struc
return B_NO_ERROR;
}
/* this function cycles through the sem table, deleting all the sems that are owned by
the passed team_id */
int sem_delete_owned_sems(team_id owner)
/** this function cycles through the sem table, deleting all the sems that are owned by
* the passed team_id
*/
int
sem_delete_owned_sems(team_id owner)
{
int state;
int i;
int count = 0;
if (owner < 0)
return B_BAD_SEM_ID;
return B_BAD_TEAM_ID;
state = disable_interrupts();
GRAB_SEM_LIST_LOCK();
for (i=0; i<MAX_SEMS; i++) {
if(sems[i].id != -1 && sems[i].owner == owner) {
sem_id id = sems[i].id;
for (i = 0; i < MAX_SEMS; i++) {
if (gSems[i].id != -1 && gSems[i].owner == owner) {
sem_id id = gSems[i].id;
RELEASE_SEM_LIST_LOCK();
restore_interrupts(state);
@ -787,7 +860,9 @@ int sem_delete_owned_sems(team_id owner)
return count;
}
sem_id user_create_sem(int32 count, const char *uname)
sem_id
user_create_sem(int32 count, const char *uname)
{
if (uname != NULL) {
char name[SYS_MAX_OS_NAME_LEN];
@ -803,44 +878,57 @@ sem_id user_create_sem(int32 count, const char *uname)
return create_sem_etc(count, name, team_get_current_team_id());
}
else {
return create_sem_etc(count, NULL, team_get_current_team_id());
}
return create_sem_etc(count, NULL, team_get_current_team_id());
}
status_t user_delete_sem(sem_id id)
status_t
user_delete_sem(sem_id id)
{
return delete_sem(id);
}
status_t user_delete_sem_etc(sem_id id, status_t return_code)
status_t
user_delete_sem_etc(sem_id id, status_t return_code)
{
return delete_sem_etc(id, return_code);
}
status_t user_acquire_sem(sem_id id)
status_t
user_acquire_sem(sem_id id)
{
return user_acquire_sem_etc(id, 1, 0, 0);
}
status_t user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
status_t
user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
flags = flags | B_CAN_INTERRUPT;
return acquire_sem_etc(id, count, flags, timeout);
}
status_t user_release_sem(sem_id id)
status_t
user_release_sem(sem_id id)
{
return release_sem_etc(id, 1, 0);
}
status_t user_release_sem_etc(sem_id id, int32 count, uint32 flags)
status_t
user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
return release_sem_etc(id, count, flags);
}
status_t user_get_sem_count(sem_id uid, int32* uthread_count)
status_t
user_get_sem_count(sem_id uid, int32* uthread_count)
{
int32 thread_count;
status_t rc;
@ -849,10 +937,13 @@ status_t user_get_sem_count(sem_id uid, int32* uthread_count)
rc2 = user_memcpy(uthread_count, &thread_count, sizeof(int32));
if (rc2 < 0)
return rc2;
return rc;
}
status_t user_get_sem_info(sem_id uid, struct sem_info *uinfo, size_t sz)
status_t
user_get_sem_info(sem_id uid, struct sem_info *uinfo, size_t sz)
{
struct sem_info info;
status_t rc;
@ -865,10 +956,13 @@ status_t user_get_sem_info(sem_id uid, struct sem_info *uinfo, size_t sz)
rc2 = user_memcpy(uinfo, &info, sz);
if (rc2 < 0)
return rc2;
return rc;
}
status_t user_get_next_sem_info(team_id uteam, int32 *ucookie, struct sem_info *uinfo, size_t sz)
status_t
user_get_next_sem_info(team_id uteam, int32 *ucookie, struct sem_info *uinfo, size_t sz)
{
struct sem_info info;
int32 cookie;
@ -888,10 +982,13 @@ status_t user_get_next_sem_info(team_id uteam, int32 *ucookie, struct sem_info *
rc2 = user_memcpy(ucookie, &cookie, sizeof(int32));
if (rc2 < 0)
return rc2;
return rc;
}
status_t user_set_sem_owner(sem_id uid, team_id uteam)
status_t
user_set_sem_owner(sem_id uid, team_id uteam)
{
return set_sem_owner(uid, uteam);
}