diff --git a/sys/kern/kern_sleepq.c b/sys/kern/kern_sleepq.c
index 618c2720b059..78e55abfa14c 100644
--- a/sys/kern/kern_sleepq.c
+++ b/sys/kern/kern_sleepq.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_sleepq.c,v 1.10 2007/07/09 21:10:53 ad Exp $	*/
+/*	$NetBSD: kern_sleepq.c,v 1.11 2007/08/01 23:30:54 ad Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.10 2007/07/09 21:10:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.11 2007/08/01 23:30:54 ad Exp $");
 
 #include "opt_ktrace.h"
 
@@ -240,6 +240,7 @@ sleepq_block(int timo, bool catch)
 	int error = 0, sig;
 	struct proc *p;
 	lwp_t *l = curlwp;
+	bool early = false;
 
 #ifdef KTRACE
 	if (KTRPOINT(l->l_proc, KTR_CSW))
@@ -255,37 +256,33 @@ sleepq_block(int timo, bool catch)
 		if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
 			/* lwp_unsleep() will release the lock */
 			lwp_unsleep(l);
-			error = EINTR;
-			goto catchit;
+			early = true;
 		}
 		if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
 			l->l_flag &= ~LW_CANCELLED;
 			/* lwp_unsleep() will release the lock */
 			lwp_unsleep(l);
-			error = EINTR;
-			goto catchit;
+			early = true;
 		}
 	}
 
-	if (timo)
-		callout_reset(&l->l_tsleep_ch, timo, sleepq_timeout, l);
+	if (!early) {
+		if (timo)
+			callout_reset(&l->l_tsleep_ch, timo, sleepq_timeout, l);
+		mi_switch(l);
 
-	mi_switch(l);
-
-	/*
-	 * When we reach this point, the LWP and sleep queue are unlocked.
-	 */
-	if (timo) {
-		/*
-		 * Even if the callout appears to have fired, we need to
-		 * stop it in order to synchronise with other CPUs.
-		 */
-		if (callout_stop(&l->l_tsleep_ch))
-			error = EWOULDBLOCK;
+		/* The LWP and sleep queue are now unlocked. */
+		if (timo) {
+			/*
+			 * Even if the callout appears to have fired, we need to
+			 * stop it in order to synchronise with other CPUs.
+			 */
+			if (callout_stop(&l->l_tsleep_ch))
+				error = EWOULDBLOCK;
+		}
 	}
 
 	if (catch && error == 0) {
- catchit:
 		p = l->l_proc;
 		if ((l->l_flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
 			error = EINTR;