Leave the idle LWPs in state LSIDL even when running, so they don't mess up
output from ps/top/etc.  Correctness isn't at stake: LWPs in other states
are temporarily on the CPU at times too (e.g. LSZOMB, LSSLEEP).
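To see why this matters for ps/top, consider how a status tool classifies threads by their advertised state. Below is a minimal userland sketch of such a mapping; the enum values and display letters are illustrative stand-ins modelled on <sys/lwp.h>, not ps(1)'s actual table.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's LWP state constants. */
enum lwp_stat { LSIDL, LSRUN, LSSLEEP, LSSTOP, LSZOMB, LSONPROC, LSSUSPENDED };

static char
lwp_state_char(enum lwp_stat stat)
{

	switch (stat) {
	case LSONPROC:
		return 'O';	/* executing on a CPU: counted as running */
	case LSRUN:
		return 'R';	/* waiting on a run queue */
	case LSSLEEP:
		return 'S';	/* asleep */
	case LSIDL:
		return 'I';	/* idle/nascent: not counted as running */
	default:
		return '?';
	}
}

int
main(void)
{

	/*
	 * With the idle LWP advertised as LSONPROC, every idle CPU
	 * contributed an 'O' (running) entry; left in LSIDL it shows
	 * as 'I' and stays out of the running columns.
	 */
	printf("idle LWP, before: %c  after: %c\n",
	    lwp_state_char(LSONPROC), lwp_state_char(LSIDL));
	return 0;
}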
ad 2020-03-26 19:42:39 +00:00
parent 4186b1557d
commit abbb7ed584
2 changed files with 11 additions and 18 deletions

sys/kern/kern_idle.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_idle.c,v 1.32 2020/02/15 18:12:15 ad Exp $	*/
+/*	$NetBSD: kern_idle.c,v 1.33 2020/03/26 19:42:39 ad Exp $	*/
 
 /*-
  * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@@ -28,7 +28,7 @@
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.32 2020/02/15 18:12:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.33 2020/03/26 19:42:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -56,8 +56,8 @@ idle_loop(void *dummy)
 	/* Update start time for this thread. */
 	binuptime(&l->l_stime);
 	spc->spc_flags |= SPCF_RUNNING;
-	KASSERT(l->l_stat == LSONPROC);
 	KASSERT((l->l_pflag & LP_RUNNING) != 0);
+	l->l_stat = LSIDL;
 	lwp_unlock(l);
 
 	/*
@@ -91,11 +91,10 @@ idle_loop(void *dummy)
 		}
 
 		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
 		lwp_lock(l);
-		l->l_stat = LSIDL;
 		spc_lock(l->l_cpu);
 		mi_switch(l);
 		KASSERT(curlwp == l);
-		KASSERT(l->l_stat == LSONPROC);
+		KASSERT(l->l_stat == LSIDL);
 	}
 }
@@ -119,7 +118,7 @@ create_idle_lwp(struct cpu_info *ci)
 	 * mi_switch(). Make the picture look good in case the CPU
 	 * takes an interrupt before it calls idle_loop().
 	 */
-	l->l_stat = LSONPROC;
+	l->l_stat = LSIDL;
 	l->l_pflag |= LP_RUNNING;
 	ci->ci_onproc = l;
 }
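The net effect of the kern_idle.c hunks: l_stat no longer says whether the idle LWP is executing; that is carried by LP_RUNNING (and ci_onproc/curlwp). A minimal sketch of the resulting invariant, as a hypothetical helper (not in the tree) using field names from NetBSD's struct lwp:

#include <sys/param.h>
#include <sys/lwp.h>

/*
 * Hypothetical helper: after this change, "is this LWP on a CPU right
 * now?" must be answered from LP_RUNNING, not from l_stat, because the
 * idle LWP executes while in LSIDL (and LSZOMB/LSSLEEP LWPs can also
 * be on the CPU briefly during a switch).
 */
static inline bool
lwp_is_on_cpu(const struct lwp *l)
{

	return (l->l_pflag & LP_RUNNING) != 0;
}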

sys/kern/kern_synch.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_synch.c,v 1.344 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.345 2020/03/26 19:42:39 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.344 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.345 2020/03/26 19:42:39 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -387,11 +387,7 @@ kpreempt(uintptr_t where)
 			atomic_swap_uint(&l->l_dopreempt, 0);
 			return true;
 		}
-		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
-			/* Can't preempt idle loop, don't count as failure. */
-			atomic_swap_uint(&l->l_dopreempt, 0);
-			return true;
-		}
+		KASSERT((l->l_flag & LW_IDLE) == 0);
 		if (__predict_false(l->l_nopreempt != 0)) {
 			/* LWP holds preemption disabled, explicitly. */
 			if ((dop & DOPREEMPT_COUNTED) == 0) {
@@ -547,12 +543,10 @@ nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
 		lwp_setlock(newl, spc->spc_lwplock);
 	} else {
 		/*
-		 * Updates to newl here are unlocked, but newl is the idle
-		 * LWP and thus sheltered from outside interference, so no
-		 * harm is going to come of it.
+		 * The idle LWP does not get set to LSONPROC, because
+		 * otherwise it screws up the output from top(1) etc.
 		 */
 		newl = ci->ci_data.cpu_idlelwp;
-		newl->l_stat = LSONPROC;
 		newl->l_pflag |= LP_RUNNING;
 		spc->spc_curpriority = PRI_IDLE;
 		spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
@@ -840,7 +834,7 @@ mi_switch(lwp_t *l)
 	}
 
 	KASSERT(l == curlwp);
-	KASSERT(l->l_stat == LSONPROC);
+	KASSERT(l->l_stat == LSONPROC || (l->l_flag & LW_IDLE) != 0);
 
 	SYSCALL_TIME_WAKEUP(l);
 	LOCKDEBUG_BARRIER(NULL, 1);
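Taken together, the asserts above encode the new rule: ordinary LWPs must still be LSONPROC when they enter mi_switch(), the idle LWP (LW_IDLE) is exempt because it spends its whole life in LSIDL, and kpreempt() may assume it never runs against the idle LWP at all. A hedged restatement as a hypothetical predicate, combining this assert with the one in idle_loop() (not code from the tree):

#include <sys/param.h>
#include <sys/lwp.h>

/*
 * Hypothetical predicate: the states an LWP may legitimately be in
 * when it calls mi_switch() after this change.
 */
static inline bool
lwp_switch_state_ok(const struct lwp *l)
{

	if ((l->l_flag & LW_IDLE) != 0) {
		/* The idle LWP now stays in LSIDL for its whole life. */
		return l->l_stat == LSIDL;
	}
	/* Everyone else must be running on a CPU in the usual sense. */
	return l->l_stat == LSONPROC;
}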