BeOS-compatible timer routines and style cleanups in timer and sem code

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@353 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
lillo 2002-07-20 22:52:04 +00:00
parent 4918394f73
commit 01fb96c4c3
3 changed files with 199 additions and 224 deletions

View File

@ -34,16 +34,15 @@ struct sem_entry {
#define MAX_SEMS 4096 #define MAX_SEMS 4096
static struct sem_entry *sems = NULL; static struct sem_entry *sems = NULL;
static region_id sem_region = 0; static region_id sem_region = 0;
static bool sems_active = false; static bool sems_active = false;
static sem_id next_sem = 0;
static sem_id next_sem = 0;
static int sem_spinlock = 0; static int sem_spinlock = 0;
#define GRAB_SEM_LIST_LOCK() acquire_spinlock(&sem_spinlock) #define GRAB_SEM_LIST_LOCK() acquire_spinlock(&sem_spinlock)
#define RELEASE_SEM_LIST_LOCK() release_spinlock(&sem_spinlock) #define RELEASE_SEM_LIST_LOCK() release_spinlock(&sem_spinlock)
#define GRAB_SEM_LOCK(s) acquire_spinlock(&(s).lock) #define GRAB_SEM_LOCK(s) acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s) release_spinlock(&(s).lock) #define RELEASE_SEM_LOCK(s) release_spinlock(&(s).lock)
// used in functions that may put a bunch of threads in the run q at once // used in functions that may put a bunch of threads in the run q at once
#define READY_THREAD_CACHE_SIZE 16 #define READY_THREAD_CACHE_SIZE 16
@ -60,10 +59,9 @@ static int dump_sem_list(int argc, char **argv)
{ {
int i; int i;
for(i=0; i<MAX_SEMS; i++) { for (i=0; i<MAX_SEMS; i++) {
if(sems[i].id >= 0) { if (sems[i].id >= 0)
dprintf("%p\tid: 0x%x\t\tname: '%s'\n", &sems[i], sems[i].id, sems[i].name); dprintf("%p\tid: 0x%x\t\tname: '%s'\n", &sems[i], sems[i].id, sems[i].name);
}
} }
return 0; return 0;
} }
@ -81,22 +79,23 @@ static int dump_sem_info(int argc, char **argv)
{ {
int i; int i;
if(argc < 2) { if (argc < 2) {
dprintf("sem: not enough arguments\n"); dprintf("sem: not enough arguments\n");
return 0; return 0;
} }
// if the argument looks like a hex number, treat it as such // if the argument looks like a hex number, treat it as such
if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') { if (strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
unsigned long num = atoul(argv[1]); unsigned long num = atoul(argv[1]);
if(num > KERNEL_BASE && num <= (KERNEL_BASE + (KERNEL_SIZE - 1))) { if (num > KERNEL_BASE && num <= (KERNEL_BASE + (KERNEL_SIZE - 1))) {
// XXX semi-hack // XXX semi-hack
_dump_sem_info((struct sem_entry *)num); _dump_sem_info((struct sem_entry *)num);
return 0; return 0;
} else { }
else {
unsigned slot = num % MAX_SEMS; unsigned slot = num % MAX_SEMS;
if(sems[slot].id != (int)num) { if (sems[slot].id != (int)num) {
dprintf("sem 0x%lx doesn't exist!\n", num); dprintf("sem 0x%lx doesn't exist!\n", num);
return 0; return 0;
} }
@ -106,9 +105,9 @@ static int dump_sem_info(int argc, char **argv)
} }
// walk through the sem list, trying to match name // walk through the sem list, trying to match name
for(i=0; i<MAX_SEMS; i++) { for (i=0; i<MAX_SEMS; i++) {
if (sems[i].name != NULL) if (sems[i].name != NULL)
if(strcmp(argv[1], sems[i].name) == 0) { if (strcmp(argv[1], sems[i].name) == 0) {
_dump_sem_info(&sems[i]); _dump_sem_info(&sems[i]);
return 0; return 0;
} }
@ -124,12 +123,12 @@ int sem_init(kernel_args *ka)
// create and initialize semaphore table // create and initialize semaphore table
sem_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "sem_table", (void **)&sems, sem_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "sem_table", (void **)&sems,
REGION_ADDR_ANY_ADDRESS, sizeof(struct sem_entry) * MAX_SEMS, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL); REGION_ADDR_ANY_ADDRESS, sizeof(struct sem_entry) * MAX_SEMS, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
if(sem_region < 0) { if (sem_region < 0) {
panic("unable to allocate semaphore table!\n"); panic("unable to allocate semaphore table!\n");
} }
memset(sems, 0, sizeof(struct sem_entry) * MAX_SEMS); memset(sems, 0, sizeof(struct sem_entry) * MAX_SEMS);
for(i=0; i<MAX_SEMS; i++) for (i=0; i<MAX_SEMS; i++)
sems[i].id = -1; sems[i].id = -1;
// add debugger commands // add debugger commands
@ -150,20 +149,21 @@ sem_id create_sem_etc(int count, const char *name, proc_id owner)
sem_id retval = B_NO_MORE_SEMS; sem_id retval = B_NO_MORE_SEMS;
char *temp_name; char *temp_name;
if(sems_active == false) if (sems_active == false)
return B_NO_MORE_SEMS; return B_NO_MORE_SEMS;
if(name) { if (name) {
int name_len = strlen(name); int name_len = strlen(name);
temp_name = (char *)kmalloc(min(name_len + 1, SYS_MAX_OS_NAME_LEN)); temp_name = (char *)kmalloc(min(name_len + 1, SYS_MAX_OS_NAME_LEN));
if(temp_name == NULL) if (temp_name == NULL)
return ENOMEM; return ENOMEM;
strncpy(temp_name, name, SYS_MAX_OS_NAME_LEN-1); strncpy(temp_name, name, SYS_MAX_OS_NAME_LEN-1);
temp_name[SYS_MAX_OS_NAME_LEN-1] = 0; temp_name[SYS_MAX_OS_NAME_LEN-1] = 0;
} else { }
else {
temp_name = (char *)kmalloc(sizeof("default_sem_name")+1); temp_name = (char *)kmalloc(sizeof("default_sem_name")+1);
if(temp_name == NULL) if (temp_name == NULL)
return ENOMEM; return ENOMEM;
strcpy(temp_name, "default_sem_name"); strcpy(temp_name, "default_sem_name");
} }
@ -172,14 +172,13 @@ sem_id create_sem_etc(int count, const char *name, proc_id owner)
GRAB_SEM_LIST_LOCK(); GRAB_SEM_LIST_LOCK();
// find the first empty spot // find the first empty spot
for(i=0; i<MAX_SEMS; i++) { for (i=0; i<MAX_SEMS; i++) {
if(sems[i].id == -1) { if (sems[i].id == -1) {
// make the sem id be a multiple of the slot it's in // make the sem id be a multiple of the slot it's in
if(i >= next_sem % MAX_SEMS) { if (i >= next_sem % MAX_SEMS)
next_sem += i - next_sem % MAX_SEMS; next_sem += i - next_sem % MAX_SEMS;
} else { else
next_sem += MAX_SEMS - (next_sem % MAX_SEMS - i); next_sem += MAX_SEMS - (next_sem % MAX_SEMS - i);
}
sems[i].id = next_sem++; sems[i].id = next_sem++;
sems[i].lock = 0; sems[i].lock = 0;
@ -198,7 +197,6 @@ sem_id create_sem_etc(int count, const char *name, proc_id owner)
} }
} }
//err:
RELEASE_SEM_LIST_LOCK(); RELEASE_SEM_LIST_LOCK();
kfree(temp_name); kfree(temp_name);
@ -228,9 +226,9 @@ int delete_sem_etc(sem_id id, int return_code)
char *old_name; char *old_name;
struct thread_queue release_queue; struct thread_queue release_queue;
if(sems_active == false) if (sems_active == false)
return B_NO_MORE_SEMS; return B_NO_MORE_SEMS;
if(id < 0) if (id < 0)
return B_BAD_SEM_ID; return B_BAD_SEM_ID;
slot = id % MAX_SEMS; slot = id % MAX_SEMS;
@ -238,7 +236,7 @@ int delete_sem_etc(sem_id id, int return_code)
state = int_disable_interrupts(); state = int_disable_interrupts();
GRAB_SEM_LOCK(sems[slot]); GRAB_SEM_LOCK(sems[slot]);
if(sems[slot].id != id) { if (sems[slot].id != id) {
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
int_restore_interrupts(state); int_restore_interrupts(state);
dprintf("delete_sem: invalid sem_id %d\n", id); dprintf("delete_sem: invalid sem_id %d\n", id);
@ -249,7 +247,7 @@ int delete_sem_etc(sem_id id, int return_code)
release_queue.head = release_queue.tail = NULL; release_queue.head = release_queue.tail = NULL;
// free any threads waiting for this semaphore // free any threads waiting for this semaphore
while((t = thread_dequeue(&sems[slot].q)) != NULL) { while ((t = thread_dequeue(&sems[slot].q)) != NULL) {
t->state = THREAD_STATE_READY; t->state = THREAD_STATE_READY;
t->sem_errcode = B_BAD_SEM_ID; t->sem_errcode = B_BAD_SEM_ID;
t->sem_deleted_retcode = return_code; t->sem_deleted_retcode = return_code;
@ -264,9 +262,9 @@ int delete_sem_etc(sem_id id, int return_code)
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
if(released_threads > 0) { if (released_threads > 0) {
GRAB_THREAD_LOCK(); GRAB_THREAD_LOCK();
while((t = thread_dequeue(&release_queue)) != NULL) { while ((t = thread_dequeue(&release_queue)) != NULL) {
thread_enqueue_run_q(t); thread_enqueue_run_q(t);
} }
thread_resched(); thread_resched();
@ -281,16 +279,16 @@ int delete_sem_etc(sem_id id, int return_code)
} }
// Called from a timer handler. Wakes up a semaphore // Called from a timer handler. Wakes up a semaphore
static int sem_timeout(void *data) static int sem_timeout(timer *data)
{ {
struct sem_timeout_args *args = (struct sem_timeout_args *)data; struct sem_timeout_args *args = (struct sem_timeout_args *)data->entry.prev;
struct thread *t; struct thread *t;
int slot; int slot;
int state; int state;
struct thread_queue wakeup_queue; struct thread_queue wakeup_queue;
t = thread_get_thread_struct(args->blocked_thread); t = thread_get_thread_struct(args->blocked_thread);
if(t == NULL) if (t == NULL)
return B_HANDLED_INTERRUPT; return B_HANDLED_INTERRUPT;
slot = args->blocked_sem_id % MAX_SEMS; slot = args->blocked_sem_id % MAX_SEMS;
@ -299,7 +297,7 @@ static int sem_timeout(void *data)
// dprintf("sem_timeout: called on 0x%x sem %d, tid %d\n", to, to->sem_id, to->thread_id); // dprintf("sem_timeout: called on 0x%x sem %d, tid %d\n", to, to->sem_id, to->thread_id);
if(sems[slot].id != args->blocked_sem_id) { if (sems[slot].id != args->blocked_sem_id) {
// this thread was not waiting on this semaphore // this thread was not waiting on this semaphore
panic("sem_timeout: thid %d was trying to wait on sem %d which doesn't exist!\n", panic("sem_timeout: thid %d was trying to wait on sem %d which doesn't exist!\n",
args->blocked_thread, args->blocked_sem_id); args->blocked_thread, args->blocked_sem_id);
@ -312,7 +310,7 @@ static int sem_timeout(void *data)
GRAB_THREAD_LOCK(); GRAB_THREAD_LOCK();
// put the threads in the run q here to make sure we dont deadlock in sem_interrupt_thread // put the threads in the run q here to make sure we dont deadlock in sem_interrupt_thread
while((t = thread_dequeue(&wakeup_queue)) != NULL) { while ((t = thread_dequeue(&wakeup_queue)) != NULL) {
thread_enqueue_run_q(t); thread_enqueue_run_q(t);
} }
RELEASE_THREAD_LOCK(); RELEASE_THREAD_LOCK();
@ -334,41 +332,39 @@ int acquire_sem_etc(sem_id id, int count, int flags, bigtime_t timeout)
int state; int state;
int err = 0; int err = 0;
if(sems_active == false) if (sems_active == false)
return B_NO_MORE_SEMS; return B_NO_MORE_SEMS;
if (id < 0) {
if(id < 0) {
dprintf("acquire_sem_etc: invalid sem handle %d\n", id); dprintf("acquire_sem_etc: invalid sem handle %d\n", id);
return B_BAD_SEM_ID; return B_BAD_SEM_ID;
} }
if (count <= 0)
if(count <= 0)
return EINVAL; return EINVAL;
state = int_disable_interrupts(); state = int_disable_interrupts();
GRAB_SEM_LOCK(sems[slot]); GRAB_SEM_LOCK(sems[slot]);
if(sems[slot].id != id) { if (sems[slot].id != id) {
dprintf("acquire_sem_etc: bad sem_id %d\n", id); dprintf("acquire_sem_etc: bad sem_id %d\n", id);
err = B_BAD_SEM_ID; err = B_BAD_SEM_ID;
goto err; goto err;
} }
if(sems[slot].count - count < 0 && (flags & B_TIMEOUT) != 0 && timeout <= 0) { if (sems[slot].count - count < 0 && (flags & B_TIMEOUT) != 0 && timeout <= 0) {
// immediate timeout // immediate timeout
err = B_TIMED_OUT; err = B_TIMED_OUT;
goto err; goto err;
} }
if((sems[slot].count -= count) < 0) { if ((sems[slot].count -= count) < 0) {
// we need to block // we need to block
struct thread *t = thread_get_current_thread(); struct thread *t = thread_get_current_thread();
struct timer_event timer; // stick it on the stack, since we may be blocking here timer timeout_timer; // stick it on the stack, since we may be blocking here
struct sem_timeout_args args; struct sem_timeout_args args;
// do a quick check to see if the thread has any pending kill signals // do a quick check to see if the thread has any pending kill signals
// this should catch most of the cases where the thread had a signal // this should catch most of the cases where the thread had a signal
if((flags & B_CAN_INTERRUPT) && (t->pending_signals & SIG_KILL)) { if ((flags & B_CAN_INTERRUPT) && (t->pending_signals & SIG_KILL)) {
sems[slot].count += count; sems[slot].count += count;
err = EINTR; err = EINTR;
goto err; goto err;
@ -383,27 +379,26 @@ int acquire_sem_etc(sem_id id, int count, int flags, bigtime_t timeout)
t->sem_errcode = B_NO_ERROR; t->sem_errcode = B_NO_ERROR;
thread_enqueue(t, &sems[slot].q); thread_enqueue(t, &sems[slot].q);
if((flags & (B_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0) { if ((flags & (B_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0) {
int the_timeout = timeout;
// dprintf("sem_acquire_etc: setting timeout sem for %d %d usecs, semid %d, tid %d\n", // dprintf("sem_acquire_etc: setting timeout sem for %d %d usecs, semid %d, tid %d\n",
// timeout, sem_id, t->id); // timeout, sem_id, t->id);
// set up an event to go off with the thread struct as the data // set up an event to go off with the thread struct as the data
if (flags & B_ABSOLUTE_TIMEOUT)
the_timeout -= system_time();
args.blocked_sem_id = id; args.blocked_sem_id = id;
args.blocked_thread = t->id; args.blocked_thread = t->id;
args.sem_count = count; args.sem_count = count;
timer_setup_timer(&sem_timeout, &args, &timer); // another evil hack: pass the args into timer->entry.prev
timer_set_event(the_timeout, TIMER_MODE_ONESHOT, &timer); timeout_timer.entry.prev = (qent *)&args;
add_timer(&timeout_timer, &sem_timeout, timeout,
flags & B_RELATIVE_TIMEOUT ?
B_ONE_SHOT_RELATIVE_TIMER : B_ONE_SHOT_ABSOLUTE_TIMER);
} }
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
GRAB_THREAD_LOCK(); GRAB_THREAD_LOCK();
// check again to see if a kill signal is pending. // check again to see if a kill signal is pending.
// it may have been delivered while setting up the sem, though it's pretty unlikely // it may have been delivered while setting up the sem, though it's pretty unlikely
if((flags & B_CAN_INTERRUPT) && (t->pending_signals & SIG_KILL)) { if ((flags & B_CAN_INTERRUPT) && (t->pending_signals & SIG_KILL)) {
struct thread_queue wakeup_queue; struct thread_queue wakeup_queue;
// ok, so a tiny race happened where a signal was delivered to this thread while // ok, so a tiny race happened where a signal was delivered to this thread while
// it was setting up the sem. We can only be sure a signal wasn't delivered // it was setting up the sem. We can only be sure a signal wasn't delivered
@ -411,11 +406,11 @@ int acquire_sem_etc(sem_id id, int count, int flags, bigtime_t timeout)
// instances, but there was a race, so we have to handle it. It'll be more messy... // instances, but there was a race, so we have to handle it. It'll be more messy...
wakeup_queue.head = wakeup_queue.tail = NULL; wakeup_queue.head = wakeup_queue.tail = NULL;
GRAB_SEM_LOCK(sems[slot]); GRAB_SEM_LOCK(sems[slot]);
if(sems[slot].id == id) { if (sems[slot].id == id) {
remove_thread_from_sem(t, &sems[slot], &wakeup_queue, EINTR); remove_thread_from_sem(t, &sems[slot], &wakeup_queue, EINTR);
} }
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
while((t = thread_dequeue(&wakeup_queue)) != NULL) { while ((t = thread_dequeue(&wakeup_queue)) != NULL) {
thread_enqueue_run_q(t); thread_enqueue_run_q(t);
} }
// fall through and reschedule since another thread with a higher priority may have been woken up // fall through and reschedule since another thread with a higher priority may have been woken up
@ -423,11 +418,11 @@ int acquire_sem_etc(sem_id id, int count, int flags, bigtime_t timeout)
thread_resched(); thread_resched();
RELEASE_THREAD_LOCK(); RELEASE_THREAD_LOCK();
if((flags & B_TIMEOUT) != 0) { if ((flags & (B_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0) {
if(t->sem_errcode != B_TIMED_OUT) { if (t->sem_errcode != B_TIMED_OUT) {
// cancel the timer event, the sem may have been deleted or interrupted // cancel the timer event, the sem may have been deleted or interrupted
// with the timer still active // with the timer still active
timer_cancel_event(&timer); cancel_timer(&timeout_timer);
} }
} }
@ -457,19 +452,17 @@ int release_sem_etc(sem_id id, int count, int flags)
int err = 0; int err = 0;
struct thread_queue release_queue; struct thread_queue release_queue;
if(sems_active == false) if (sems_active == false)
return B_NO_MORE_SEMS; return B_NO_MORE_SEMS;
if (id < 0)
if(id < 0)
return B_BAD_SEM_ID; return B_BAD_SEM_ID;
if (count <= 0)
if(count <= 0)
return EINVAL; return EINVAL;
state = int_disable_interrupts(); state = int_disable_interrupts();
GRAB_SEM_LOCK(sems[slot]); GRAB_SEM_LOCK(sems[slot]);
if(sems[slot].id != id) { if (sems[slot].id != id) {
dprintf("sem_release_etc: invalid sem_id %d\n", id); dprintf("sem_release_etc: invalid sem_id %d\n", id);
err = B_BAD_SEM_ID; err = B_BAD_SEM_ID;
goto err; goto err;
@ -481,14 +474,14 @@ int release_sem_etc(sem_id id, int count, int flags)
// order in sem_interrupt_thread. // order in sem_interrupt_thread.
release_queue.head = release_queue.tail = NULL; release_queue.head = release_queue.tail = NULL;
while(count > 0) { while (count > 0) {
int delta = count; int delta = count;
if(sems[slot].count < 0) { if (sems[slot].count < 0) {
struct thread *t = thread_lookat_queue(&sems[slot].q); struct thread *t = thread_lookat_queue(&sems[slot].q);
delta = min(count, t->sem_count); delta = min(count, t->sem_count);
t->sem_count -= delta; t->sem_count -= delta;
if(t->sem_count <= 0) { if (t->sem_count <= 0) {
// release this thread // release this thread
t = thread_dequeue(&sems[slot].q); t = thread_dequeue(&sems[slot].q);
thread_enqueue(t, &release_queue); thread_enqueue(t, &release_queue);
@ -505,13 +498,13 @@ int release_sem_etc(sem_id id, int count, int flags)
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
// pull off any items in the release queue and put them in the run queue // pull off any items in the release queue and put them in the run queue
if(released_threads > 0) { if (released_threads > 0) {
struct thread *t; struct thread *t;
GRAB_THREAD_LOCK(); GRAB_THREAD_LOCK();
while((t = thread_dequeue(&release_queue)) != NULL) { while ((t = thread_dequeue(&release_queue)) != NULL) {
thread_enqueue_run_q(t); thread_enqueue_run_q(t);
} }
if((flags & B_DO_NOT_RESCHEDULE) == 0) { if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
thread_resched(); thread_resched();
} }
RELEASE_THREAD_LOCK(); RELEASE_THREAD_LOCK();
@ -530,11 +523,10 @@ int get_sem_count(sem_id id, int32* thread_count)
{ {
int slot; int slot;
int state; int state;
// int count;
if(sems_active == false) if (sems_active == false)
return B_NO_MORE_SEMS; return B_NO_MORE_SEMS;
if(id < 0) if (id < 0)
return B_BAD_SEM_ID; return B_BAD_SEM_ID;
if (thread_count == NULL) if (thread_count == NULL)
return EINVAL; return EINVAL;
@ -544,7 +536,7 @@ int get_sem_count(sem_id id, int32* thread_count)
state = int_disable_interrupts(); state = int_disable_interrupts();
GRAB_SEM_LOCK(sems[slot]); GRAB_SEM_LOCK(sems[slot]);
if(sems[slot].id != id) { if (sems[slot].id != id) {
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
int_restore_interrupts(state); int_restore_interrupts(state);
dprintf("sem_get_count: invalid sem_id %d\n", id); dprintf("sem_get_count: invalid sem_id %d\n", id);
@ -564,9 +556,9 @@ int _get_sem_info(sem_id id, struct sem_info *info, size_t sz)
int state; int state;
int slot; int slot;
if(sems_active == false) if (sems_active == false)
return B_NO_MORE_SEMS; return B_NO_MORE_SEMS;
if(id < 0) if (id < 0)
return B_BAD_SEM_ID; return B_BAD_SEM_ID;
if (info == NULL) if (info == NULL)
return EINVAL; return EINVAL;
@ -576,7 +568,7 @@ int _get_sem_info(sem_id id, struct sem_info *info, size_t sz)
state = int_disable_interrupts(); state = int_disable_interrupts();
GRAB_SEM_LOCK(sems[slot]); GRAB_SEM_LOCK(sems[slot]);
if(sems[slot].id != id) { if (sems[slot].id != id) {
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
int_restore_interrupts(state); int_restore_interrupts(state);
dprintf("get_sem_info: invalid sem_id %d\n", id); dprintf("get_sem_info: invalid sem_id %d\n", id);
@ -600,9 +592,8 @@ int _get_next_sem_info(proc_id proc, uint32 *cookie, struct sem_info *info, size
int state; int state;
int slot; int slot;
if(sems_active == false) if (sems_active == false)
return B_NO_MORE_SEMS; return B_NO_MORE_SEMS;
if (cookie == NULL) if (cookie == NULL)
return EINVAL; return EINVAL;
/* prevents sems[].owner == -1 >= means owned by a port */ /* prevents sems[].owner == -1 >= means owned by a port */
@ -612,7 +603,8 @@ int _get_next_sem_info(proc_id proc, uint32 *cookie, struct sem_info *info, size
if (*cookie == NULL) { if (*cookie == NULL) {
// return first found // return first found
slot = 0; slot = 0;
} else { }
else {
// start at index cookie, but check cookie against MAX_PORTS // start at index cookie, but check cookie against MAX_PORTS
slot = *cookie; slot = *cookie;
if (slot >= MAX_SEMS) if (slot >= MAX_SEMS)
@ -654,11 +646,11 @@ int set_sem_owner(sem_id id, proc_id proc)
int state; int state;
int slot; int slot;
if(sems_active == false) if (sems_active == false)
return B_NO_MORE_SEMS; return B_NO_MORE_SEMS;
if(id < 0) if (id < 0)
return B_BAD_SEM_ID; return B_BAD_SEM_ID;
if (proc < NULL) if (proc < 0)
return EINVAL; return EINVAL;
// XXX: todo check if proc exists // XXX: todo check if proc exists
@ -670,7 +662,7 @@ int set_sem_owner(sem_id id, proc_id proc)
state = int_disable_interrupts(); state = int_disable_interrupts();
GRAB_SEM_LOCK(sems[slot]); GRAB_SEM_LOCK(sems[slot]);
if(sems[slot].id != id) { if (sems[slot].id != id) {
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
int_restore_interrupts(state); int_restore_interrupts(state);
dprintf("set_sem_owner: invalid sem_id %d\n", id); dprintf("set_sem_owner: invalid sem_id %d\n", id);
@ -694,26 +686,26 @@ int sem_interrupt_thread(struct thread *t)
// dprintf("sem_interrupt_thread: called on thread %p (%d), blocked on sem 0x%x\n", t, t->id, t->sem_blocking); // dprintf("sem_interrupt_thread: called on thread %p (%d), blocked on sem 0x%x\n", t, t->id, t->sem_blocking);
if(t->state != THREAD_STATE_WAITING || t->sem_blocking < 0) if (t->state != THREAD_STATE_WAITING || t->sem_blocking < 0)
return EINVAL; return EINVAL;
if((t->sem_flags & B_CAN_INTERRUPT) == 0) if ((t->sem_flags & B_CAN_INTERRUPT) == 0)
return ERR_SEM_NOT_INTERRUPTABLE; return ERR_SEM_NOT_INTERRUPTABLE;
slot = t->sem_blocking % MAX_SEMS; slot = t->sem_blocking % MAX_SEMS;
GRAB_SEM_LOCK(sems[slot]); GRAB_SEM_LOCK(sems[slot]);
if(sems[slot].id != t->sem_blocking) { if (sems[slot].id != t->sem_blocking) {
panic("sem_interrupt_thread: thread 0x%x sez it's blocking on sem 0x%x, but that sem doesn't exist!\n", t->id, t->sem_blocking); panic("sem_interrupt_thread: thread 0x%x sez it's blocking on sem 0x%x, but that sem doesn't exist!\n", t->id, t->sem_blocking);
} }
wakeup_queue.head = wakeup_queue.tail = NULL; wakeup_queue.head = wakeup_queue.tail = NULL;
if(remove_thread_from_sem(t, &sems[slot], &wakeup_queue, EINTR) == ERR_NOT_FOUND) if (remove_thread_from_sem(t, &sems[slot], &wakeup_queue, EINTR) == ERR_NOT_FOUND)
panic("sem_interrupt_thread: thread 0x%x not found in sem 0x%x's wait queue\n", t->id, t->sem_blocking); panic("sem_interrupt_thread: thread 0x%x not found in sem 0x%x's wait queue\n", t->id, t->sem_blocking);
RELEASE_SEM_LOCK(sems[slot]); RELEASE_SEM_LOCK(sems[slot]);
while((t = thread_dequeue(&wakeup_queue)) != NULL) { while ((t = thread_dequeue(&wakeup_queue)) != NULL) {
thread_enqueue_run_q(t); thread_enqueue_run_q(t);
} }
@ -737,7 +729,7 @@ static int remove_thread_from_sem(struct thread *t, struct sem_entry *sem, struc
thread_enqueue(t, queue); thread_enqueue(t, queue);
// now see if more threads need to be woken up // now see if more threads need to be woken up
while(sem->count > 0 && (t1 = thread_lookat_queue(&sem->q))) { while (sem->count > 0 && (t1 = thread_lookat_queue(&sem->q))) {
int delta = min(t->sem_count, sem->count); int delta = min(t->sem_count, sem->count);
t->sem_count -= delta; t->sem_count -= delta;
@ -765,7 +757,7 @@ int sem_delete_owned_sems(proc_id owner)
state = int_disable_interrupts(); state = int_disable_interrupts();
GRAB_SEM_LIST_LOCK(); GRAB_SEM_LIST_LOCK();
for(i=0; i<MAX_SEMS; i++) { for (i=0; i<MAX_SEMS; i++) {
if(sems[i].id != -1 && sems[i].owner == owner) { if(sems[i].id != -1 && sems[i].owner == owner) {
sem_id id = sems[i].id; sem_id id = sems[i].id;
@ -788,20 +780,21 @@ int sem_delete_owned_sems(proc_id owner)
sem_id user_create_sem(int count, const char *uname) sem_id user_create_sem(int count, const char *uname)
{ {
if(uname != NULL) { if (uname != NULL) {
char name[SYS_MAX_OS_NAME_LEN]; char name[SYS_MAX_OS_NAME_LEN];
int rc; int rc;
if((addr)uname >= KERNEL_BASE && (addr)uname <= KERNEL_TOP) if ((addr)uname >= KERNEL_BASE && (addr)uname <= KERNEL_TOP)
return ERR_VM_BAD_USER_MEMORY; return ERR_VM_BAD_USER_MEMORY;
rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1); rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
if(rc < 0) if (rc < 0)
return rc; return rc;
name[SYS_MAX_OS_NAME_LEN-1] = 0; name[SYS_MAX_OS_NAME_LEN-1] = 0;
return create_sem_etc(count, name, proc_get_current_proc_id()); return create_sem_etc(count, name, proc_get_current_proc_id());
} else { }
else {
return create_sem_etc(count, NULL, proc_get_current_proc_id()); return create_sem_etc(count, NULL, proc_get_current_proc_id());
} }
} }
@ -844,7 +837,7 @@ int user_get_sem_count(sem_id uid, int32* uthread_count)
int rc, rc2; int rc, rc2;
rc = get_sem_count(uid, &thread_count); rc = get_sem_count(uid, &thread_count);
rc2 = user_memcpy(uthread_count, &thread_count, sizeof(int32)); rc2 = user_memcpy(uthread_count, &thread_count, sizeof(int32));
if(rc2 < 0) if (rc2 < 0)
return rc2; return rc2;
return rc; return rc;
} }
@ -854,12 +847,12 @@ int user_get_sem_info(sem_id uid, struct sem_info *uinfo, size_t sz)
struct sem_info info; struct sem_info info;
int rc, rc2; int rc, rc2;
if((addr)uinfo >= KERNEL_BASE && (addr)uinfo <= KERNEL_TOP) if ((addr)uinfo >= KERNEL_BASE && (addr)uinfo <= KERNEL_TOP)
return ERR_VM_BAD_USER_MEMORY; return ERR_VM_BAD_USER_MEMORY;
rc = _get_sem_info(uid, &info, sz); rc = _get_sem_info(uid, &info, sz);
rc2 = user_memcpy(uinfo, &info, sz); rc2 = user_memcpy(uinfo, &info, sz);
if(rc2 < 0) if (rc2 < 0)
return rc2; return rc2;
return rc; return rc;
} }
@ -870,18 +863,18 @@ int user_get_next_sem_info(proc_id uproc, uint32 *ucookie, struct sem_info *uinf
uint32 cookie; uint32 cookie;
int rc, rc2; int rc, rc2;
if((addr)uinfo >= KERNEL_BASE && (addr)uinfo <= KERNEL_TOP) if ((addr)uinfo >= KERNEL_BASE && (addr)uinfo <= KERNEL_TOP)
return ERR_VM_BAD_USER_MEMORY; return ERR_VM_BAD_USER_MEMORY;
rc2 = user_memcpy(&cookie, ucookie, sizeof(uint32)); rc2 = user_memcpy(&cookie, ucookie, sizeof(uint32));
if(rc2 < 0) if (rc2 < 0)
return rc2; return rc2;
rc = _get_next_sem_info(uproc, &cookie, &info, sz); rc = _get_next_sem_info(uproc, &cookie, &info, sz);
rc2 = user_memcpy(uinfo, &info, sz); rc2 = user_memcpy(uinfo, &info, sz);
if(rc2 < 0) if (rc2 < 0)
return rc2; return rc2;
rc2 = user_memcpy(ucookie, &cookie, sizeof(uint32)); rc2 = user_memcpy(ucookie, &cookie, sizeof(uint32));
if(rc2 < 0) if (rc2 < 0)
return rc2; return rc2;
return rc; return rc;
} }

View File

@ -1475,7 +1475,7 @@ static int _rand(void)
return((next >> 16) & 0x7FFF); return((next >> 16) & 0x7FFF);
} }
static int reschedule_event(void *unused) static int reschedule_event(timer *unused)
{ {
// this function is called as a result of the timer event set by the scheduler // this function is called as a result of the timer event set by the scheduler
// returning this causes a reschedule on the timer event // returning this causes a reschedule on the timer event
@ -1491,7 +1491,7 @@ void thread_resched(void)
struct thread *old_thread = thread_get_current_thread(); struct thread *old_thread = thread_get_current_thread();
int i; int i;
bigtime_t quantum; bigtime_t quantum;
struct timer_event *quantum_timer; timer *quantum_timer;
// dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), thread_get_current_thread()); // dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), thread_get_current_thread());
@ -1560,9 +1560,8 @@ found_thread:
if(!old_thread->cpu->info.preempted) { if(!old_thread->cpu->info.preempted) {
_local_timer_cancel_event(old_thread->cpu->info.cpu_num, quantum_timer); _local_timer_cancel_event(old_thread->cpu->info.cpu_num, quantum_timer);
} }
old_thread->cpu->info.preempted= 0; old_thread->cpu->info.preempted = 0;
timer_setup_timer(&reschedule_event, NULL, quantum_timer); add_timer(quantum_timer, &reschedule_event, quantum, B_ONE_SHOT_RELATIVE_TIMER);
timer_set_event(quantum, TIMER_MODE_ONESHOT, quantum_timer);
if(next_thread != old_thread) { if(next_thread != old_thread) {
// dprintf("thread_resched: cpu %d switching from thread %d to %d\n", // dprintf("thread_resched: cpu %d switching from thread %d to %d\n",

View File

@ -14,12 +14,13 @@
#include <timer.h> #include <timer.h>
#include <Errors.h> #include <Errors.h>
#include <stage2.h> #include <stage2.h>
#include <OS.h>
#include <arch/cpu.h> #include <arch/cpu.h>
#include <arch/timer.h> #include <arch/timer.h>
#include <arch/smp.h> #include <arch/smp.h>
static struct timer_event * volatile events[SMP_MAX_CPUS] = { NULL, }; static timer * volatile events[SMP_MAX_CPUS] = { NULL, };
static spinlock_t timer_spinlock[SMP_MAX_CPUS] = { 0, }; static spinlock_t timer_spinlock[SMP_MAX_CPUS] = { 0, };
int timer_init(kernel_args *ka) int timer_init(kernel_args *ka)
@ -30,31 +31,31 @@ int timer_init(kernel_args *ka)
} }
// NOTE: expects interrupts to be off // NOTE: expects interrupts to be off
static void add_event_to_list(struct timer_event *event, struct timer_event * volatile *list) static void add_event_to_list(timer *event, timer * volatile *list)
{ {
struct timer_event *next; timer *next;
struct timer_event *last = NULL; timer *last = NULL;
// stick it in the event list // stick it in the event list
next = *list; for (next = *list; next; last = next, next = (timer *)next->entry.next) {
while(next != NULL && next->sched_time < event->sched_time) { if ((bigtime_t)next->entry.key >= (bigtime_t)event->entry.key)
last = next; break;
next = next->next;
} }
if(last != NULL) { if (last != NULL) {
event->next = last->next; (timer *)event->entry.next = (timer *)last->entry.next;
last->next = event; (timer *)last->entry.next = event;
} else { }
event->next = next; else {
(timer *)event->entry.next = next;
*list = event; *list = event;
} }
} }
int timer_interrupt() int timer_interrupt()
{ {
bigtime_t curr_time = system_time(); bigtime_t sched_time;
struct timer_event *event; timer *event;
spinlock_t *spinlock; spinlock_t *spinlock;
int curr_cpu = smp_get_current_cpu(); int curr_cpu = smp_get_current_cpu();
int rc = B_HANDLED_INTERRUPT; int rc = B_HANDLED_INTERRUPT;
@ -67,33 +68,34 @@ int timer_interrupt()
restart_scan: restart_scan:
event = events[curr_cpu]; event = events[curr_cpu];
if(event != NULL && event->sched_time < curr_time) { if ((event) && ((bigtime_t)event->entry.key < system_time())) {
// this event needs to happen // this event needs to happen
int mode = event->mode; int mode = event->flags;
events[curr_cpu] = event->next; events[curr_cpu] = (timer *)event->entry.next;
event->sched_time = 0; event->entry.key = 0;
release_spinlock(spinlock); release_spinlock(spinlock);
// call the callback // call the callback
// note: if the event is not periodic, it is ok // note: if the event is not periodic, it is ok
// to delete the event structure inside the callback // to delete the event structure inside the callback
if(event->func != NULL) { if (event->hook) {
rc = event->func(event->data); rc = event->hook(event);
// if (event->func(event->data) == INT_RESCHEDULE) // if (event->func(event->data) == INT_RESCHEDULE)
// rc = INT_RESCHEDULE; // rc = INT_RESCHEDULE;
} }
acquire_spinlock(spinlock); acquire_spinlock(spinlock);
if(mode == TIMER_MODE_PERIODIC) { if (mode == B_PERIODIC_TIMER) {
// we need to adjust it and add it back to the list // we need to adjust it and add it back to the list
event->sched_time = system_time() + event->periodic_time; sched_time = system_time() + event->period;
if(event->sched_time == 0) if (sched_time == 0)
event->sched_time = 1; // if we wrapped around and happen sched_time = 1; // if we wrapped around and happen
// to hit zero, set it to one, since // to hit zero, set it to one, since
// zero represents not scheduled // zero represents not scheduled
event->entry.key = (int64)sched_time;
add_event_to_list(event, &events[curr_cpu]); add_event_to_list(event, &events[curr_cpu]);
} }
@ -101,122 +103,103 @@ restart_scan:
} }
// setup the next hardware timer // setup the next hardware timer
if(events[curr_cpu] != NULL) if (events[curr_cpu] != NULL)
arch_timer_set_hardware_timer(events[curr_cpu]->sched_time - system_time()); arch_timer_set_hardware_timer((bigtime_t)events[curr_cpu]->entry.key - system_time());
release_spinlock(spinlock); release_spinlock(spinlock);
return rc; return rc;
} }
void timer_setup_timer(timer_callback func, void *data, struct timer_event *event) status_t add_timer(timer *t, timer_hook hook, bigtime_t period, int32 flags)
{
event->func = func;
event->data = data;
event->sched_time = 0;
}
int timer_set_event(bigtime_t relative_time, timer_mode mode, struct timer_event *event)
{ {
bigtime_t sched_time;
bigtime_t curr_time = system_time();
int state; int state;
int curr_cpu; int curr_cpu;
if(event == NULL) if ((!t) || (!hook) || (period < 0))
return EINVAL; return B_BAD_VALUE;
if(relative_time < 0) sched_time = period;
relative_time = 0; if (flags != B_ONE_SHOT_ABSOLUTE_TIMER)
sched_time += curr_time;
if(event->sched_time != 0) if (sched_time == 0)
panic("timer_set_event: event %p in list already!\n", event); sched_time = 1;
event->sched_time = system_time() + relative_time; t->entry.key = (int64)sched_time;
if(event->sched_time == 0) t->period = period;
event->sched_time = 1; // if we wrapped around and happen t->hook = hook;
// to hit zero, set it to one, since t->flags = flags;
// zero represents not scheduled
event->mode = mode;
if(event->mode == TIMER_MODE_PERIODIC)
event->periodic_time = relative_time;
state = int_disable_interrupts(); state = int_disable_interrupts();
curr_cpu = smp_get_current_cpu(); curr_cpu = smp_get_current_cpu();
acquire_spinlock(&timer_spinlock[curr_cpu]); acquire_spinlock(&timer_spinlock[curr_cpu]);
add_event_to_list(event, &events[curr_cpu]); add_event_to_list(t, &events[curr_cpu]);
t->cpu = curr_cpu;
// if we were stuck at the head of the list, set the hardware timer // if we were stuck at the head of the list, set the hardware timer
if(event == events[curr_cpu]) { if (t == events[curr_cpu])
arch_timer_set_hardware_timer(relative_time); arch_timer_set_hardware_timer(sched_time - curr_time);
}
release_spinlock(&timer_spinlock[curr_cpu]); release_spinlock(&timer_spinlock[curr_cpu]);
int_restore_interrupts(state); int_restore_interrupts(state);
return 0; return B_OK;
} }
/* this is a fast path to be called from reschedule and from timer_cancel_event */ /* this is a fast path to be called from reschedule and from timer_cancel_event */
/* must always be invoked with interrupts disabled */ /* must always be invoked with interrupts disabled */
int _local_timer_cancel_event(int curr_cpu, struct timer_event *event) int _local_timer_cancel_event(int curr_cpu, timer *event)
{ {
struct timer_event *last = NULL; timer *last = NULL;
struct timer_event *e; timer *e;
bool foundit = false;
acquire_spinlock(&timer_spinlock[curr_cpu]); acquire_spinlock(&timer_spinlock[curr_cpu]);
e = events[curr_cpu]; e = events[curr_cpu];
while(e != NULL) { while (e != NULL) {
if(e == event) { if (e == event) {
// we found it // we found it
foundit = true; if (e == events[curr_cpu])
if(e == events[curr_cpu]) { events[curr_cpu] = (timer *)e->entry.next;
events[curr_cpu] = e->next; else
} else { (timer *)last->entry.next = (timer *)e->entry.next;
last->next = e->next; e->entry.next = NULL;
}
e->next = NULL;
// break out of the whole thing // break out of the whole thing
goto done; break;
} }
last = e; last = e;
e = e->next; e = (timer *)e->entry.next;
} }
release_spinlock(&timer_spinlock[curr_cpu]);
done:
if(events[curr_cpu] == NULL) { if (events[curr_cpu] == NULL)
arch_timer_clear_hardware_timer(); arch_timer_clear_hardware_timer();
} else { else
arch_timer_set_hardware_timer(events[curr_cpu]->sched_time - system_time()); arch_timer_set_hardware_timer((bigtime_t)events[curr_cpu]->entry.key - system_time());
}
release_spinlock(&timer_spinlock[curr_cpu]);
if(foundit) { return (e == event ? 0 : B_ERROR);
release_spinlock(&timer_spinlock[curr_cpu]);
}
return (foundit ? 0 : B_ERROR);
} }
int local_timer_cancel_event(struct timer_event *event) int local_timer_cancel_event(timer *event)
{ {
return _local_timer_cancel_event(smp_get_current_cpu(), event); return _local_timer_cancel_event(smp_get_current_cpu(), event);
} }
int timer_cancel_event(struct timer_event *event) bool cancel_timer(timer *event)
{ {
int state; int state;
struct timer_event *last = NULL; timer *last = NULL;
struct timer_event *e; timer *e;
bool foundit = false; bool foundit = false;
int num_cpus = smp_get_num_cpus(); int num_cpus = smp_get_num_cpus();
int cpu= 0; int cpu= 0;
int curr_cpu; int curr_cpu;
if(event->sched_time == 0) // if (event->sched_time == 0)
return 0; // it's not scheduled // return 0; // it's not scheduled
state = int_disable_interrupts(); state = int_disable_interrupts();
curr_cpu = smp_get_current_cpu(); curr_cpu = smp_get_current_cpu();
@ -227,38 +210,38 @@ int timer_cancel_event(struct timer_event *event)
// a cheap match. If this fails, we start harassing // a cheap match. If this fails, we start harassing
// other cpus. // other cpus.
// //
if(_local_timer_cancel_event(curr_cpu, event) < 0) { if (_local_timer_cancel_event(curr_cpu, event) < 0) {
for(cpu = 0; cpu < num_cpus; cpu++) { for (cpu = 0; cpu < num_cpus; cpu++) {
if(cpu== curr_cpu) continue; if (cpu== curr_cpu) continue;
acquire_spinlock(&timer_spinlock[cpu]); acquire_spinlock(&timer_spinlock[cpu]);
e = events[cpu]; e = events[cpu];
while(e != NULL) { while (e != NULL) {
if(e == event) { if (e == event) {
// we found it // we found it
foundit = true; foundit = true;
if(e == events[cpu]) { if(e == events[cpu])
events[cpu] = e->next; events[cpu] = (timer *)e->entry.next;
} else { else
last->next = e->next; (timer *)last->entry.next = (timer *)e->entry.next;
} e->entry.next = NULL;
e->next = NULL;
// break out of the whole thing // break out of the whole thing
goto done; goto done;
} }
last = e; last = e;
e = e->next; e = (timer *)e->entry.next;
} }
release_spinlock(&timer_spinlock[cpu]); release_spinlock(&timer_spinlock[cpu]);
} }
} }
done: done:
if(foundit) { if (foundit)
release_spinlock(&timer_spinlock[cpu]); release_spinlock(&timer_spinlock[cpu]);
}
int_restore_interrupts(state); int_restore_interrupts(state);
return (foundit ? 0 : B_ERROR); if (foundit && ((bigtime_t)event->entry.key < system_time()))
return true;
return false;
} }
void spin(bigtime_t microseconds) void spin(bigtime_t microseconds)