From 6f1556138612d45b480faf1e036fc72e8e7ec6d1 Mon Sep 17 00:00:00 2001
From: ozaki-r
Date: Mon, 21 Nov 2016 00:54:21 +0000
Subject: [PATCH] Fix a race condition of low priority xcall

xc_lowpri and xc_thread are racy: xc_wait may return before or while
the xcall callbacks are being executed, resulting in a kernel panic at
worst.

xc_lowpri serializes multiple jobs with a mutex and a cv. Once all
xcall callbacks are done, xc_wait returns and xc_lowpri accepts the
next job.

The problem is that the counter of finished xcall callbacks is
incremented *before* an xcall callback is actually executed (see
xc_tailp++ in xc_thread). So xc_lowpri accepts the next job before all
xcall callbacks of the previous job have completed, and the next job
begins to run its xcall callbacks. Even worse, the counter is global
and shared between jobs, so when an xcall callback of the next job
completes, the shared counter is incremented; this misleads xc_wait of
the previous job into treating all of its own xcall callbacks as done,
so xc_wait of the previous job returns before or while its xcall
callbacks are executed.

How to fix: for historical reasons (I guess) there are actually two
counters of finished xcall callbacks for low priority xcall: xc_tailp
and xc_low_pri.xc_donep. xc_low_pri.xc_donep is incremented correctly,
i.e., after a callback has run, while xc_tailp is incremented wrongly,
i.e., before executing an xcall callback. We can fix the issue by
dropping xc_tailp and using only xc_low_pri.xc_donep (see the sketch
after the diff below).

PR kern/51632
---
 sys/kern/subr_xcall.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/sys/kern/subr_xcall.c b/sys/kern/subr_xcall.c
index fb4630ffa181..f0265e759da5 100644
--- a/sys/kern/subr_xcall.c
+++ b/sys/kern/subr_xcall.c
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_xcall.c,v 1.18 2013/11/26 21:13:05 rmind Exp $ */
+/* $NetBSD: subr_xcall.c,v 1.19 2016/11/21 00:54:21 ozaki-r Exp $ */
 
 /*-
  * Copyright (c) 2007-2010 The NetBSD Foundation, Inc.
@@ -74,7 +74,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.18 2013/11/26 21:13:05 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.19 2016/11/21 00:54:21 ozaki-r Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -105,7 +105,6 @@ typedef struct {
 
 /* Low priority xcall structures. */
 static xc_state_t xc_low_pri __cacheline_aligned;
-static uint64_t xc_tailp __cacheline_aligned;
 
 /* High priority xcall structures. */
 static xc_state_t xc_high_pri __cacheline_aligned;
@@ -134,7 +133,6 @@ xc_init(void)
 	memset(xclo, 0, sizeof(xc_state_t));
 	mutex_init(&xclo->xc_lock, MUTEX_DEFAULT, IPL_NONE);
 	cv_init(&xclo->xc_busy, "xclocv");
-	xc_tailp = 0;
 
 	memset(xchi, 0, sizeof(xc_state_t));
 	mutex_init(&xchi->xc_lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
@@ -256,7 +254,7 @@ xc_lowpri(xcfunc_t func, void *arg1, void *arg2, struct cpu_info *ci)
 	uint64_t where;
 
 	mutex_enter(&xc->xc_lock);
-	while (xc->xc_headp != xc_tailp) {
+	while (xc->xc_headp != xc->xc_donep) {
 		cv_wait(&xc->xc_busy, &xc->xc_lock);
 	}
 	xc->xc_arg1 = arg1;
@@ -277,7 +275,7 @@ xc_lowpri(xcfunc_t func, void *arg1, void *arg2, struct cpu_info *ci)
 		ci->ci_data.cpu_xcall_pending = true;
 		cv_signal(&ci->ci_data.cpu_xcall);
 	}
-	KASSERT(xc_tailp < xc->xc_headp);
+	KASSERT(xc->xc_donep < xc->xc_headp);
 	where = xc->xc_headp;
 	mutex_exit(&xc->xc_lock);
 
@@ -302,7 +300,7 @@ xc_thread(void *cookie)
 	mutex_enter(&xc->xc_lock);
 	for (;;) {
 		while (!ci->ci_data.cpu_xcall_pending) {
-			if (xc->xc_headp == xc_tailp) {
+			if (xc->xc_headp == xc->xc_donep) {
 				cv_broadcast(&xc->xc_busy);
 			}
 			cv_wait(&ci->ci_data.cpu_xcall, &xc->xc_lock);
@@ -312,7 +310,6 @@ xc_thread(void *cookie)
 		func = xc->xc_func;
 		arg1 = xc->xc_arg1;
 		arg2 = xc->xc_arg2;
-		xc_tailp++;
 		mutex_exit(&xc->xc_lock);
 
 		KASSERT(func != NULL);
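
For illustration only, below is a minimal userland sketch of the race and
of the fix, using POSIX threads in place of the kernel's mutex/condvar
primitives. The names (post_and_wait, worker, head, done, pending) are
hypothetical stand-ins for xc_lowpri/xc_wait, xc_thread, xc_headp,
xc_donep and cpu_xcall_pending; it is a sketch under those assumptions,
not the kernel code.

/* cc -o xcrace xcrace.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t busy = PTHREAD_COND_INITIALIZER;   /* ~ xc_busy */
static pthread_cond_t cvwork = PTHREAD_COND_INITIALIZER; /* ~ cpu_xcall */

static unsigned head;       /* jobs posted, ~ xc_headp */
static unsigned done;       /* callbacks finished, ~ xc_donep */
static bool pending;        /* ~ cpu_xcall_pending */
static void (*job)(void);   /* current callback, ~ xc_func */

/* Worker: rough analogue of xc_thread(). */
static void *
worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (!pending) {
			if (head == done)
				pthread_cond_broadcast(&busy);
			pthread_cond_wait(&cvwork, &lock);
		}
		pending = false;
		void (*func)(void) = job;
		/*
		 * The bug was the analogue of "done++" here (xc_tailp++),
		 * i.e., before running func: post_and_wait() could then
		 * hand out the next job while func was still running.
		 */
		pthread_mutex_unlock(&lock);

		func();             /* run the callback unlocked */

		pthread_mutex_lock(&lock);
		done++;             /* the fix: count it only once done */
	}
}

/* Poster: rough analogue of xc_lowpri() followed by xc_wait(). */
static void
post_and_wait(void (*func)(void))
{
	pthread_mutex_lock(&lock);
	while (head != done)        /* previous job must be finished */
		pthread_cond_wait(&busy, &lock);
	job = func;
	head++;
	pending = true;
	pthread_cond_signal(&cvwork);
	while (done < head)         /* ~ xc_wait() */
		pthread_cond_wait(&busy, &lock);
	pthread_mutex_unlock(&lock);
}

static void
callback(void)
{
	usleep(1000);
	printf("callback ran to completion\n");
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	post_and_wait(callback);
	post_and_wait(callback);
	return 0;               /* worker is detached with the process */
}

With the increment kept behind the callback, neither wait loop in
post_and_wait can observe head == done while a callback is still
running, which is exactly what dropping xc_tailp in favor of
xc_low_pri.xc_donep achieves in the kernel.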