For secondary CPUs, the idle LWP is the first to run, and it's directly
entered from MD code without a trip through mi_switch().  Make the picture
look good in case the CPU takes an interrupt before it calls idle_loop().
This commit is contained in:
ad 2020-01-25 20:29:43 +00:00
parent 56ca6a8885
commit 9c6efdb46e
1 changed file with 18 additions and 9 deletions

View File

@@ -1,4 +1,4 @@
/* $NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $ */
/* $NetBSD: kern_idle.c,v 1.31 2020/01/25 20:29:43 ad Exp $ */
/*-
* Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@@ -28,7 +28,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.31 2020/01/25 20:29:43 ad Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@@ -49,17 +49,15 @@ idle_loop(void *dummy)
struct schedstate_percpu *spc;
struct lwp *l = curlwp;
kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
spc = &ci->ci_schedstate;
ci->ci_onproc = l;
/* Update start time for this thread. */
lwp_lock(l);
spc = &ci->ci_schedstate;
KASSERT(lwp_locked(l, spc->spc_lwplock));
kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
/* Update start time for this thread. */
binuptime(&l->l_stime);
spc->spc_flags |= SPCF_RUNNING;
l->l_stat = LSONPROC;
l->l_flag |= LW_RUNNING;
KASSERT(l->l_stat == LSONPROC);
KASSERT((l->l_flag & LW_RUNNING) != 0);
lwp_unlock(l);
/*
@@ -114,6 +112,17 @@ create_idle_lwp(struct cpu_info *ci)
panic("create_idle_lwp: error %d", error);
lwp_lock(l);
l->l_flag |= LW_IDLE;
if (ci != lwp0.l_cpu) {
/*
* For secondary CPUs, the idle LWP is the first to run, and
* it's directly entered from MD code without a trip through
* mi_switch(). Make the picture look good in case the CPU
* takes an interrupt before it calls idle_loop().
*/
l->l_stat = LSONPROC;
l->l_flag |= LW_RUNNING;
ci->ci_onproc = l;
}
lwp_unlock(l);
ci->ci_data.cpu_idlelwp = l;