From 7c330ba82f90ae56c518469715dd07a836a9dfc0 Mon Sep 17 00:00:00 2001
From: rmind
Date: Mon, 14 Jul 2008 01:27:15 +0000
Subject: [PATCH] Fix the locking against oneself, migrate LWPs only from
 runqueue.

Part of the fix for PR/38882.
---
 sys/kern/kern_cpu.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/sys/kern/kern_cpu.c b/sys/kern/kern_cpu.c
index c4717eb7c288..69fde9b3f1bb 100644
--- a/sys/kern/kern_cpu.c
+++ b/sys/kern/kern_cpu.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_cpu.c,v 1.33 2008/06/22 13:59:06 ad Exp $	*/
+/*	$NetBSD: kern_cpu.c,v 1.34 2008/07/14 01:27:15 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
@@ -57,7 +57,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.33 2008/06/22 13:59:06 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.34 2008/07/14 01:27:15 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -275,19 +275,24 @@ cpu_xc_offline(struct cpu_info *ci)
 	mutex_enter(proc_lock);
 	spc_dlock(ci, mci);
 	LIST_FOREACH(l, &alllwp, l_list) {
-		lwp_lock(l);
-		if (l->l_cpu != ci || (l->l_pflag & LP_BOUND) != 0) {
-			lwp_unlock(l);
+		/*
+		 * Since runqueues are locked - LWPs cannot be enqueued (and
+		 * cannot change the state), thus is safe to perform the
+		 * checks without locking each LWP.
+		 */
+		if (l->l_cpu != ci || (l->l_pflag & LP_BOUND) != 0 ||
+		    l->l_stat != LSRUN)
 			continue;
-		}
-		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
+		/* At this point, we are sure about the state of LWP */
+		KASSERT(lwp_locked(l, spc->spc_mutex));
+		if ((l->l_flag & LW_INMEM) != 0) {
 			sched_dequeue(l);
 			l->l_cpu = mci;
 			lwp_setlock(l, mspc->spc_mutex);
 			sched_enqueue(l, false);
-			lwp_unlock(l);
 		} else {
-			lwp_migrate(l, mci);
+			l->l_cpu = mci;
+			lwp_setlock(l, mspc->spc_mutex);
 		}
 	}
 	spc_dunlock(ci, mci);
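
For reference, this is roughly how the migration loop in cpu_xc_offline() reads
once the patch is applied. It is a sketch reconstructed from the hunks above;
the declarations of mci, spc, mspc and l come from surrounding code that the
diff does not show and are assumptions here.

	/*
	 * Assumed from context not shown in the diff:
	 *	struct cpu_info *mci;
	 *	struct schedstate_percpu *spc, *mspc;
	 *	struct lwp *l;
	 */
	mutex_enter(proc_lock);
	spc_dlock(ci, mci);
	LIST_FOREACH(l, &alllwp, l_list) {
		/*
		 * Both run queues are locked, so these LWPs cannot be
		 * enqueued or change state; the checks are safe without
		 * taking each LWP's lock.
		 */
		if (l->l_cpu != ci || (l->l_pflag & LP_BOUND) != 0 ||
		    l->l_stat != LSRUN)
			continue;
		/* A runnable LWP on this CPU is locked by spc_mutex. */
		KASSERT(lwp_locked(l, spc->spc_mutex));
		if ((l->l_flag & LW_INMEM) != 0) {
			/* In memory: move it onto the run queue of mci. */
			sched_dequeue(l);
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
			sched_enqueue(l, false);
		} else {
			/* Swapped out: just retarget it to mci. */
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
		}
	}
	spc_dunlock(ci, mci);

Compared with the previous code, the per-LWP lwp_lock()/lwp_unlock() pair is
gone: for an LSRUN LWP that lock is the run queue lock already held via
spc_dlock(), which appears to be the "locking against oneself" the commit
message refers to, and only LWPs actually found on the run queue are migrated,
so lwp_migrate() is no longer called from here.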