Fix SA/pthread pagefault failure:
- prevent BLOCKED upcalls on double page faults and during upcalls
- make libpthread handle blocked threads which hold locks
- prevent UNBLOCKED upcalls from overtaking their BLOCKED upcall

This adds a new syscall, sa_unblockyield.

See also: http://mail-index.netbsd.org/tech-kern/2003/09/15/0020.html
This commit is contained in:
parent
d973127f38
commit
5c40d56471
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: sa.h,v 1.2 2003/01/18 10:32:11 thorpej Exp $ */
|
||||
/* $NetBSD: sa.h,v 1.3 2003/09/16 13:51:31 cl Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2000 The NetBSD Foundation, Inc.
|
||||
|
@ -53,6 +53,7 @@ int sa_enable __P((void));
|
|||
int sa_setconcurrency __P((int));
|
||||
int sa_yield __P((void));
|
||||
int sa_preempt __P((int));
|
||||
int sa_unblockyield __P((int, void *, stack_t *));
|
||||
__END_DECLS
|
||||
|
||||
#endif /* !_SA_H_ */
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# $NetBSD: Makefile.inc,v 1.143 2003/09/13 22:50:21 christos Exp $
|
||||
# $NetBSD: Makefile.inc,v 1.144 2003/09/16 13:51:34 cl Exp $
|
||||
# @(#)Makefile.inc 8.3 (Berkeley) 10/24/94
|
||||
|
||||
# sys sources
|
||||
|
@ -79,7 +79,7 @@ ASM= access.S acct.S bind.S chdir.S chflags.S \
|
|||
_lwp_suspend.S _lwp_continue.S _lwp_wakeup.S \
|
||||
_lwp_getprivate.S _lwp_setprivate.S \
|
||||
sa_register.S sa_stacks.S sa_enable.S sa_setconcurrency.S sa_yield.S \
|
||||
sa_preempt.S \
|
||||
sa_preempt.S sa_unblockyield.S \
|
||||
timer_create.S timer_delete.S timer_gettime.S timer_getoverrun.S \
|
||||
timer_settime.S
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: pthread_run.c,v 1.11 2003/06/26 01:26:39 nathanw Exp $ */
|
||||
/* $NetBSD: pthread_run.c,v 1.12 2003/09/16 13:51:35 cl Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2001 The NetBSD Foundation, Inc.
|
||||
|
@ -37,7 +37,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__RCSID("$NetBSD: pthread_run.c,v 1.11 2003/06/26 01:26:39 nathanw Exp $");
|
||||
__RCSID("$NetBSD: pthread_run.c,v 1.12 2003/09/16 13:51:35 cl Exp $");
|
||||
|
||||
#include <ucontext.h>
|
||||
|
||||
|
@ -193,6 +193,8 @@ pthread__sched_idle2(pthread_t self)
|
|||
{
|
||||
pthread_t idlethread, qhead, next;
|
||||
|
||||
/* XXXconcurrency: only reidle threads on same vp */
|
||||
|
||||
qhead = NULL;
|
||||
pthread_spinlock(self, &pthread__deadqueue_lock);
|
||||
idlethread = PTQ_FIRST(&pthread__reidlequeue);
|
||||
|
@ -227,6 +229,7 @@ pthread__sched_bulk(pthread_t self, pthread_t qhead)
|
|||
for ( ; qhead && (qhead != self) ; qhead = next) {
|
||||
next = qhead->pt_next;
|
||||
pthread__assert(qhead->pt_spinlocks == 0);
|
||||
pthread__assert(qhead->pt_type != PT_THREAD_UPCALL);
|
||||
if (qhead->pt_type == PT_THREAD_NORMAL) {
|
||||
qhead->pt_state = PT_STATE_RUNNABLE;
|
||||
qhead->pt_next = NULL;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: pthread_sa.c,v 1.15 2003/09/12 00:37:17 christos Exp $ */
|
||||
/* $NetBSD: pthread_sa.c,v 1.16 2003/09/16 13:51:35 cl Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2001 The NetBSD Foundation, Inc.
|
||||
|
@ -37,7 +37,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__RCSID("$NetBSD: pthread_sa.c,v 1.15 2003/09/12 00:37:17 christos Exp $");
|
||||
__RCSID("$NetBSD: pthread_sa.c,v 1.16 2003/09/16 13:51:35 cl Exp $");
|
||||
|
||||
#include <err.h>
|
||||
#include <errno.h>
|
||||
|
@ -89,7 +89,7 @@ void
|
|||
pthread__upcall(int type, struct sa_t *sas[], int ev, int intr, void *arg)
|
||||
{
|
||||
pthread_t t, self, next, intqueue, schedqueue;
|
||||
int flags, first = 1;
|
||||
int flags;
|
||||
siginfo_t *si;
|
||||
|
||||
PTHREADD_ADD(PTHREADD_UPCALLS);
|
||||
|
@ -104,17 +104,52 @@ pthread__upcall(int type, struct sa_t *sas[], int ev, int intr, void *arg)
|
|||
type, sas[0]->sa_id, ev ? sas[1]->sa_id : 0,
|
||||
intr ? sas[ev+intr]->sa_id : 0));
|
||||
|
||||
if (type == SA_UPCALL_BLOCKED)
|
||||
first++; /* Don't handle this SA in the usual processing. */
|
||||
|
||||
/*
|
||||
* Do per-thread work, including saving the context.
|
||||
* Briefly run any threads that were in a critical section.
|
||||
* This includes any upcalls that have been interupted, so
|
||||
* they can do their own version of this dance.
|
||||
*/
|
||||
if ((ev + intr) >= first) {
|
||||
pthread__find_interrupted(sas + first, ev + intr,
|
||||
if (type == SA_UPCALL_BLOCKED) {
|
||||
/* Don't handle this SA in the usual processing. */
|
||||
t = pthread__sa_id(sas[1]);
|
||||
pthread__assert(t->pt_type != PT_THREAD_UPCALL);
|
||||
pthread__assert(self->pt_spinlocks == 0);
|
||||
if ((t->pt_spinlocks > 0) || (t->pt_next)) {
|
||||
SDPRINTF(("(up %p) unblocking %p "
|
||||
"(uc: T %p pc: %lx sp: %lx) "
|
||||
"spinlocks: %d, pt_next: %p\n",
|
||||
self, t, sas[1]->sa_context,
|
||||
pthread__uc_pc(sas[1]->sa_context),
|
||||
pthread__uc_sp(sas[1]->sa_context),
|
||||
t->pt_spinlocks, t->pt_next));
|
||||
if (sa_unblockyield(sas[1]->sa_id, &self->pt_next,
|
||||
&self->pt_stack) != 0)
|
||||
pthread__abort();
|
||||
SDPRINTF(("(up %p) unblocking switchto to %p "
|
||||
"(uc: T %p pc: %lx) chain %p\n",
|
||||
self, t, sas[1]->sa_context,
|
||||
pthread__uc_pc(sas[1]->sa_context),
|
||||
self->pt_next));
|
||||
self->pt_switchtouc = sas[1]->sa_context;
|
||||
self->pt_switchto = t;
|
||||
pthread__switch(self, self->pt_next);
|
||||
/*NOTREACHED*/
|
||||
pthread__abort();
|
||||
}
|
||||
/* We can take spinlocks now */
|
||||
if (t->pt_type == PT_THREAD_IDLE) {
|
||||
SDPRINTF(("(up %p) unblocking idle %p (uc: %c %p)\n",
|
||||
self, t, PUC(t)));
|
||||
if (sa_unblockyield(sas[1]->sa_id, NULL,
|
||||
&self->pt_stack) != 0)
|
||||
pthread__abort();
|
||||
/* XXX need flaglock? */
|
||||
if ((t->pt_flags & PT_FLAG_IDLED) == 0)
|
||||
pthread__sched_bulk(self, t);
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* Do per-thread work, including saving the context.
|
||||
* Briefly run any threads that were in a critical section.
|
||||
* This includes any upcalls that have been interupted, so
|
||||
* they can do their own version of this dance.
|
||||
*/
|
||||
pthread__find_interrupted(sas + 1, ev + intr,
|
||||
&intqueue, &schedqueue, self);
|
||||
if (intqueue != self)
|
||||
pthread__resolve_locks(self, &intqueue);
|
||||
|
@ -129,7 +164,11 @@ pthread__upcall(int type, struct sa_t *sas[], int ev, int intr, void *arg)
|
|||
|
||||
switch (type) {
|
||||
case SA_UPCALL_BLOCKED:
|
||||
PTHREADD_ADD(PTHREADD_UP_BLOCK);
|
||||
t = pthread__sa_id(sas[1]);
|
||||
pthread__assert(t->pt_type != PT_THREAD_UPCALL);
|
||||
if (t->pt_type == PT_THREAD_IDLE)
|
||||
break;
|
||||
pthread_spinlock(self, &t->pt_statelock);
|
||||
t->pt_state = PT_STATE_BLOCKED_SYS;
|
||||
t->pt_blockedlwp = sas[1]->sa_id;
|
||||
|
@ -142,8 +181,6 @@ pthread__upcall(int type, struct sa_t *sas[], int ev, int intr, void *arg)
|
|||
t->pt_trapuc = sas[1]->sa_context;
|
||||
SDPRINTF(("(up %p) blocker %d %p(%d)\n", self, 1, t,
|
||||
t->pt_type));
|
||||
|
||||
PTHREADD_ADD(PTHREADD_UP_BLOCK);
|
||||
break;
|
||||
case SA_UPCALL_NEWPROC:
|
||||
PTHREADD_ADD(PTHREADD_UP_NEW);
|
||||
|
@ -317,10 +354,14 @@ pthread__find_interrupted(struct sa_t *sas[], int nsas, pthread_t *intqhead,
|
|||
}
|
||||
pthread__assert(victim != self);
|
||||
if (resume) {
|
||||
pthread__assert(victim->pt_parent == NULL);
|
||||
victim->pt_parent = self;
|
||||
pthread__assert(victim->pt_next == NULL);
|
||||
victim->pt_next = nextint;
|
||||
nextint = victim;
|
||||
} else {
|
||||
pthread__assert(victim->pt_parent == NULL);
|
||||
pthread__assert(victim->pt_next == NULL);
|
||||
victim->pt_next = nextsched;
|
||||
nextsched = victim;
|
||||
}
|
||||
|
@ -420,6 +461,11 @@ pthread__resolve_locks(pthread_t self, pthread_t *intqueuep)
|
|||
if (victim->pt_switchto) {
|
||||
/* We're done with you. */
|
||||
SDPRINTF((" recyclable"));
|
||||
/*
|
||||
* Clear trap context, which is
|
||||
* no longer useful.
|
||||
*/
|
||||
victim->pt_trapuc = NULL;
|
||||
if (prev)
|
||||
prev->pt_next = next;
|
||||
else
|
||||
|
@ -474,14 +520,14 @@ pthread__resolve_locks(pthread_t self, pthread_t *intqueuep)
|
|||
SDPRINTF((" switchto: %p (uc %p)", switchto,
|
||||
switchto->pt_uc));
|
||||
|
||||
pthread__assert(switchto->pt_spinlocks == 0);
|
||||
/*
|
||||
* Threads can have switchto set to themselves
|
||||
* if they hit new_preempt. Don't put them
|
||||
* on the run queue twice.
|
||||
*/
|
||||
if (switchto != victim) {
|
||||
if (switchto->pt_next) {
|
||||
if ((switchto->pt_next) ||
|
||||
(switchto->pt_spinlocks != 0)) {
|
||||
/*
|
||||
* The thread being switched
|
||||
* to was preempted and
|
||||
|
@ -494,7 +540,9 @@ pthread__resolve_locks(pthread_t self, pthread_t *intqueuep)
|
|||
tmp->pt_parent != NULL;
|
||||
tmp = tmp->pt_parent)
|
||||
SDPRINTF((" parent: %p", tmp));
|
||||
pthread__assert(tmp->pt_parent == NULL);
|
||||
tmp->pt_parent = self;
|
||||
pthread__assert(tmp->pt_next == NULL);
|
||||
tmp->pt_next = intqueue;
|
||||
intqueue = tmp;
|
||||
} else {
|
||||
|
@ -519,6 +567,7 @@ pthread__resolve_locks(pthread_t self, pthread_t *intqueuep)
|
|||
self, intqueue, PUC(intqueue),
|
||||
pthread__uc_pc(UC(intqueue)),
|
||||
pthread__uc_sp(UC(intqueue))));
|
||||
pthread__assert(intqueue->pt_state != PT_STATE_BLOCKED_SYS);
|
||||
pthread__switch(self, intqueue);
|
||||
SDPRINTF(("(rl %p) returned from chain\n",
|
||||
self));
|
||||
|
@ -536,6 +585,7 @@ pthread__resolve_locks(pthread_t self, pthread_t *intqueuep)
|
|||
PUC(self->pt_next),
|
||||
pthread__uc_pc(UC(self->pt_next)),
|
||||
pthread__uc_sp(UC(self->pt_next))));
|
||||
pthread__assert(self->pt_next->pt_state != PT_STATE_BLOCKED_SYS);
|
||||
pthread__switch(self, self->pt_next);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue