Simplify pthread__upcall() a bit by moving lock resolution before the big
switch statement, and moving upcall-type-specific code into that switch.
Beneficial side effect: a thread's statelock is no longer manipulated before
lock resolution occurs.
nathanw 2003-01-30 01:04:50 +00:00
parent 7d8ac1f6c2
commit a8773f45f8
1 changed file with 72 additions and 77 deletions

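For orientation, a minimal before/after sketch of the control-flow change (hypothetical, condensed C: the helper names are placeholders, not the functions in this file):

/* Stand-ins for the real routines, stubbed so the sketch compiles. */
static void type_specific_work(int type) { (void)type; /* the big switch */ }
static void resolve_locks(void) { /* pthread__resolve_locks() goes here */ }
static void handle_signals(int type) { (void)type; }

/* Before: upcall-type-specific work (including taking a thread's
 * pt_statelock in the SA_UPCALL_BLOCKED case) ran ahead of lock
 * resolution, i.e. before the upcall could safely take spinlocks. */
static void upcall_old(int type)
{
	type_specific_work(type);	/* touches pt_statelock too early */
	resolve_locks();		/* "We can take spinlocks now" */
	handle_signals(type);		/* trailing if/else chain on type */
}

/* After: lock resolution runs first; all type-specific work, signal
 * handling included, lives in the one switch statement. */
static void upcall_new(int type)
{
	resolve_locks();		/* spinlocks safe from here on */
	type_specific_work(type);	/* one switch, now lock-safe */
}

int main(void)
{
	upcall_old(0);
	upcall_new(0);
	return 0;
}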

@@ -1,4 +1,4 @@
/* $NetBSD: pthread_sa.c,v 1.4 2003/01/25 00:43:38 nathanw Exp $ */
/* $NetBSD: pthread_sa.c,v 1.5 2003/01/30 01:04:50 nathanw Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@@ -90,7 +90,6 @@ pthread__upcall(int type, struct sa_t *sas[], int ev, int intr, void *arg)
{
pthread_t t, self, next, intqueue;
int first = 1;
int runalarms = 0;
siginfo_t *si;
PTHREADD_ADD(PTHREADD_UPCALLS);
@@ -100,55 +99,13 @@ pthread__upcall(int type, struct sa_t *sas[], int ev, int intr, void *arg)
if (sas[0]->sa_id > pthread__maxlwps)
pthread__maxlwps = sas[0]->sa_id;
SDPRINTF(("(up %p) type %d LWP %d ev %d intr %d\n", self,
type, sas[0]->sa_id, ev ? sas[1]->sa_id : 0,
intr ? sas[ev+intr]->sa_id : 0));
switch (type) {
case SA_UPCALL_BLOCKED:
t = pthread__sa_id(sas[1]);
pthread_spinlock(self, &t->pt_statelock);
t->pt_state = PT_STATE_BLOCKED_SYS;
pthread_spinunlock(self, &t->pt_statelock);
#ifdef PTHREAD__DEBUG
t->blocks++;
#endif
t->pt_blockedlwp = sas[1]->sa_id;
if (t->pt_cancel)
_lwp_wakeup(t->pt_blockedlwp);
t->pt_uc = sas[1]->sa_context;
if (type == SA_UPCALL_BLOCKED)
first++; /* Don't handle this SA in the usual processing. */
PTHREADD_ADD(PTHREADD_UP_BLOCK);
break;
case SA_UPCALL_NEWPROC:
PTHREADD_ADD(PTHREADD_UP_NEW);
break;
case SA_UPCALL_PREEMPTED:
PTHREADD_ADD(PTHREADD_UP_PREEMPT);
break;
case SA_UPCALL_UNBLOCKED:
PTHREADD_ADD(PTHREADD_UP_UNBLOCK);
break;
case SA_UPCALL_SIGNAL:
PTHREADD_ADD(PTHREADD_UP_SIGNAL);
break;
case SA_UPCALL_SIGEV:
PTHREADD_ADD(PTHREADD_UP_SIGEV);
si = arg;
if (si->si_sigval.sival_int == PT_ALARMTIMER_MAGIC)
runalarms = 1;
/*
* PT_RRTIMER_MAGIC doesn't need explicit handling;
* the per-thread work below will put the interrupted
* thread on the back of the run queue, and
* pthread_next() will get one from the front.
*/
break;
case SA_UPCALL_USER:
/* We don't send ourselves one of these. */
default:
/*CONSTCOND*/
assert(0);
}
/*
* Do per-thread work, including saving the context.
@@ -162,40 +119,35 @@ pthread__upcall(int type, struct sa_t *sas[], int ev, int intr, void *arg)
&intqueue, self) > 0)
pthread__resolve_locks(self, &intqueue);
}
/* We can take spinlocks now */
pthread__sched_idle2(self);
if (intqueue)
pthread__sched_bulk(self, intqueue);
/*
* Run the alarm queue (handled after lock resolution since
* alarm handling requires locks).
*/
if (runalarms)
pthread__alarm_process(self, arg);
/*
* Note that we handle signals after handling
* spinlock preemption. This is because spinlocks are only
* used internally to the thread library and we don't want to
* expose the middle of them to a signal. While this means
* that synchronous instruction traps that occur inside
* critical sections in this library (SIGFPE, SIGILL, SIGBUS,
* SIGSEGV) won't be handled at the precise location where
* they occurred, that's okay, because (1) we don't use any FP
* and (2) SIGILL/SIGBUS/SIGSEGV should really just core dump.
*
* This also means that a thread that was interrupted to take
* a signal will be on a run queue, and not in upcall limbo.
*/
if (type == SA_UPCALL_SIGNAL) {
si = arg;
if (ev)
pthread__signal(self, pthread__sa_id(sas[1]),
si->si_signo, si->si_code);
else
pthread__signal(self, NULL, si->si_signo, si->si_code);
} else if (type == SA_UPCALL_UNBLOCKED) {
switch (type) {
case SA_UPCALL_BLOCKED:
t = pthread__sa_id(sas[1]);
pthread_spinlock(self, &t->pt_statelock);
t->pt_state = PT_STATE_BLOCKED_SYS;
t->pt_blockedlwp = sas[1]->sa_id;
if (t->pt_cancel)
_lwp_wakeup(t->pt_blockedlwp);
pthread_spinunlock(self, &t->pt_statelock);
#ifdef PTHREAD__DEBUG
t->blocks++;
#endif
t->pt_uc = sas[1]->sa_context;
PTHREADD_ADD(PTHREADD_UP_BLOCK);
break;
case SA_UPCALL_NEWPROC:
PTHREADD_ADD(PTHREADD_UP_NEW);
break;
case SA_UPCALL_PREEMPTED:
PTHREADD_ADD(PTHREADD_UP_PREEMPT);
break;
case SA_UPCALL_UNBLOCKED:
PTHREADD_ADD(PTHREADD_UP_UNBLOCK);
/*
* A signal may have been presented to this thread while
* it was in the kernel.
@@ -203,6 +155,49 @@ pthread__upcall(int type, struct sa_t *sas[], int ev, int intr, void *arg)
t = pthread__sa_id(sas[1]);
if (t->pt_flags & PT_FLAG_SIGDEFERRED)
pthread__signal_deferred(self, t);
break;
case SA_UPCALL_SIGNAL:
PTHREADD_ADD(PTHREADD_UP_SIGNAL);
/*
* Note that we handle signals after handling
* spinlock preemption. This is because spinlocks are only
* used internally to the thread library and we don't want to
* expose the middle of them to a signal. While this means
* that synchronous instruction traps that occur inside
* critical sections in this library (SIGFPE, SIGILL, SIGBUS,
* SIGSEGV) won't be handled at the precise location where
* they occurred, that's okay, because (1) we don't use any FP
* and (2) SIGILL/SIGBUS/SIGSEGV should really just core dump.
*
* This also means that a thread that was interrupted to take
* a signal will be on a run queue, and not in upcall limbo.
*/
si = arg;
if (ev)
pthread__signal(self, pthread__sa_id(sas[1]),
si->si_signo, si->si_code);
else
pthread__signal(self, NULL, si->si_signo, si->si_code);
break;
case SA_UPCALL_SIGEV:
PTHREADD_ADD(PTHREADD_UP_SIGEV);
si = arg;
SDPRINTF(("(up %p) sigev val %x\n", self,
si->si_sigval.sival_int));
if (si->si_sigval.sival_int == PT_ALARMTIMER_MAGIC)
pthread__alarm_process(self, arg);
/*
* PT_RRTIMER_MAGIC doesn't need explicit handling;
* the per-thread work below will put the interrupted
* thread on the back of the run queue, and
* pthread_next() will get one from the front.
*/
break;
case SA_UPCALL_USER:
/* We don't send ourselves one of these. */
default:
/*CONSTCOND*/
assert(0);
}
/*
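The side effect named in the commit message can be read as an invariant: a thread's pt_statelock is only taken once pthread__resolve_locks() has run and spinlocks are safe again. A toy, hypothetical illustration of that ordering (stand-in types and functions, not the pthread internals):

#include <assert.h>

struct thread { int state; int lock_held; };

static int locks_resolved;		/* set once lock resolution is done */

static void resolve_locks(void) { locks_resolved = 1; }

static void statelock(struct thread *t)
{
	assert(locks_resolved);		/* the property this commit buys */
	t->lock_held = 1;
}

static void stateunlock(struct thread *t) { t->lock_held = 0; }

int main(void)
{
	struct thread t = { 0, 0 };
	resolve_locks();	/* settle preempted spinlocks first */
	statelock(&t);		/* only now touch the statelock */
	t.state = 1;		/* analogue of PT_STATE_BLOCKED_SYS */
	stateunlock(&t);
	return 0;
}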