Give callout_halt() an additional 'kmutex_t *interlock' argument. If there
is a need to block and wait for the callout to complete, and there is an
interlock, it will be dropped while waiting and reacquired before return.
This commit is contained in:
ad 2008-04-22 12:04:22 +00:00
parent c7566c2cde
commit 43d8bae932
7 changed files with 54 additions and 36 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: test_callout1.c,v 1.1 2008/03/28 20:44:57 ad Exp $ */
/* $NetBSD: test_callout1.c,v 1.2 2008/04/22 12:04:22 ad Exp $ */
/*-
* Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: test_callout1.c,v 1.1 2008/03/28 20:44:57 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: test_callout1.c,v 1.2 2008/04/22 12:04:22 ad Exp $");
#include <sys/param.h>
#include <sys/ioctl.h>
@ -79,7 +79,7 @@ test_softint(void *cookie)
{
printf("l_ncsw = %d\n", (int)curlwp->l_ncsw);
callout_halt(&test_ch);
callout_halt(&test_ch, NULL);
printf("l_ncsw = %d\n", (int)curlwp->l_ncsw);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_event.c,v 1.54 2008/04/22 11:44:24 ad Exp $ */
/* $NetBSD: kern_event.c,v 1.55 2008/04/22 12:04:22 ad Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.54 2008/04/22 11:44:24 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.55 2008/04/22 12:04:22 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -619,7 +619,7 @@ filt_timerdetach(struct knote *kn)
callout_t *calloutp;
calloutp = (callout_t *)kn->kn_hook;
callout_halt(calloutp);
callout_halt(calloutp, NULL);
callout_destroy(calloutp);
kmem_free(calloutp, sizeof(*calloutp));
atomic_dec_uint(&kq_ncallouts);

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_sleepq.c,v 1.25 2008/04/12 17:02:08 ad Exp $ */
/* $NetBSD: kern_sleepq.c,v 1.26 2008/04/22 12:04:22 ad Exp $ */
/*-
* Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.25 2008/04/12 17:02:08 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.26 2008/04/22 12:04:22 ad Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@ -275,7 +275,7 @@ sleepq_block(int timo, bool catch)
* Even if the callout appears to have fired, we need to
* stop it in order to synchronise with other CPUs.
*/
if (callout_halt(&l->l_timeout_ch))
if (callout_halt(&l->l_timeout_ch, NULL))
error = EWOULDBLOCK;
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_time.c,v 1.143 2008/04/21 12:56:31 ad Exp $ */
/* $NetBSD: kern_time.c,v 1.144 2008/04/22 12:04:22 ad Exp $ */
/*-
* Copyright (c) 2000, 2004, 2005, 2007, 2008 The NetBSD Foundation, Inc.
@ -68,7 +68,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.143 2008/04/21 12:56:31 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.144 2008/04/22 12:04:22 ad Exp $");
#include <sys/param.h>
#include <sys/resourcevar.h>
@ -1207,14 +1207,11 @@ itimerfree(struct ptimers *pts, int index)
pt = pts->pts_timers[index];
pts->pts_timers[index] = NULL;
if (pt->pt_type == CLOCK_REALTIME) {
mutex_spin_exit(&timer_lock);
callout_halt(&pt->pt_ch);
} else if (pt->pt_queued) {
if (pt->pt_type == CLOCK_REALTIME)
callout_halt(&pt->pt_ch, &timer_lock);
else if (pt->pt_queued)
TAILQ_REMOVE(&timer_queue, pt, pt_chain);
mutex_spin_exit(&timer_lock);
} else
mutex_spin_exit(&timer_lock);
mutex_spin_exit(&timer_lock);
callout_destroy(&pt->pt_ch);
pool_put(&ptimer_pool, pt);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_timeout.c,v 1.36 2008/04/22 11:45:28 ad Exp $ */
/* $NetBSD: kern_timeout.c,v 1.37 2008/04/22 12:04:22 ad Exp $ */
/*-
* Copyright (c) 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.36 2008/04/22 11:45:28 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.37 2008/04/22 12:04:22 ad Exp $");
/*
* Timeouts are kept in a hierarchical timing wheel. The c_time is the
@ -457,21 +457,24 @@ callout_stop(callout_t *cs)
* Cancel a pending callout. If in-flight, block until it completes.
* May not be called from a hard interrupt handler. If the callout
* can take locks, the caller of callout_halt() must not hold any of
* those locks, otherwise the two could deadlock.
* those locks, otherwise the two could deadlock. If 'interlock' is
* non-NULL and we must wait for the callout to complete, it will be
* released and re-acquired before returning.
*/
bool
callout_halt(callout_t *cs)
callout_halt(callout_t *cs, kmutex_t *interlock)
{
callout_impl_t *c = (callout_impl_t *)cs;
struct callout_cpu *cc;
struct lwp *l;
kmutex_t *lock;
kmutex_t *lock, *relock;
bool expired;
KASSERT(c->c_magic == CALLOUT_MAGIC);
KASSERT(!cpu_intr_p());
lock = callout_lock(c);
relock = NULL;
expired = ((c->c_flags & CALLOUT_FIRED) != 0);
if ((c->c_flags & CALLOUT_PENDING) != 0)
@ -483,18 +486,34 @@ callout_halt(callout_t *cs)
cc = c->c_cpu;
if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
break;
KASSERT(l->l_wchan == NULL);
cc->cc_nwait++;
cc->cc_ev_block.ev_count++;
l->l_kpriority = true;
sleepq_enter(&cc->cc_sleepq, l);
sleepq_enqueue(&cc->cc_sleepq, cc, "callout", &sleep_syncobj);
KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
sleepq_block(0, false);
if (interlock != NULL) {
/*
* Avoid potential scheduler lock order problems by
* dropping the interlock without the callout lock
* held.
*/
mutex_spin_exit(lock);
mutex_exit(interlock);
relock = interlock;
interlock = NULL;
} else {
/* XXX Better to do priority inheritance. */
KASSERT(l->l_wchan == NULL);
cc->cc_nwait++;
cc->cc_ev_block.ev_count++;
l->l_kpriority = true;
sleepq_enter(&cc->cc_sleepq, l);
sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
&sleep_syncobj);
KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
sleepq_block(0, false);
}
lock = callout_lock(c);
}
mutex_spin_exit(lock);
if (__predict_false(relock != NULL))
mutex_enter(relock);
return expired;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: tty.c,v 1.218 2008/04/21 12:49:20 ad Exp $ */
/* $NetBSD: tty.c,v 1.219 2008/04/22 12:04:22 ad Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
@ -70,7 +70,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tty.c,v 1.218 2008/04/21 12:49:20 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: tty.c,v 1.219 2008/04/22 12:04:22 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -2664,7 +2664,7 @@ ttyfree(struct tty *tp)
mutex_exit(&tty_lock);
mutex_exit(&proclist_lock);
callout_halt(&tp->t_rstrt_ch);
callout_halt(&tp->t_rstrt_ch, NULL);
callout_destroy(&tp->t_rstrt_ch);
ttyldisc_release(tp->t_linesw);
clfree(&tp->t_rawq);

View File

@ -1,4 +1,4 @@
/* $NetBSD: callout.h,v 1.28 2008/04/22 11:45:28 ad Exp $ */
/* $NetBSD: callout.h,v 1.29 2008/04/22 12:04:22 ad Exp $ */
/*-
* Copyright (c) 2000, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@ -102,6 +102,8 @@ typedef struct callout_impl {
#endif /* _CALLOUT_PRIVATE */
#ifdef _KERNEL
#include <sys/mutex.h>
struct cpu_info;
void callout_startup(void);
@ -114,7 +116,7 @@ void callout_setfunc(callout_t *, void (*)(void *), void *);
void callout_reset(callout_t *, int, void (*)(void *), void *);
void callout_schedule(callout_t *, int);
bool callout_stop(callout_t *);
bool callout_halt(callout_t *);
bool callout_halt(callout_t *, kmutex_t *);
bool callout_pending(callout_t *);
bool callout_expired(callout_t *);
bool callout_active(callout_t *);