- Move the LW_RUNNING flag back into l_pflag (as LP_RUNNING): updating l_flag
  without the LWP locked in softint_dispatch() is risky. May help with the
  "softint screwup" panic.
- Correct the memory barriers around zombies switching into oblivion.
This commit is contained in:
This commit is contained in:
parent
f48ddf7b41
commit
82002773ec
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: pmap.c,v 1.188 2020/01/08 17:38:42 ad Exp $ */
|
||||
/* $NetBSD: pmap.c,v 1.189 2020/02/15 18:12:14 ad Exp $ */
|
||||
/*
|
||||
* Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Lule}, Sweden.
|
||||
* All rights reserved.
|
||||
@ -25,7 +25,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.188 2020/01/08 17:38:42 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.189 2020/02/15 18:12:14 ad Exp $");
|
||||
|
||||
#include "opt_ddb.h"
|
||||
#include "opt_cputype.h"
|
||||
@ -699,7 +699,7 @@ pmap_vax_swappable(struct lwp *l, struct pmap *pm)
|
||||
return false;
|
||||
if (l->l_proc->p_vmspace->vm_map.pmap == pm)
|
||||
return false;
|
||||
if ((l->l_flag & LW_RUNNING) != 0)
|
||||
if ((l->l_pflag & LP_RUNNING) != 0)
|
||||
return false;
|
||||
if (l->l_class != SCHED_OTHER)
|
||||
return false;
|
||||
|
@ -1,7 +1,7 @@
|
||||
/* $NetBSD: db_proc.c,v 1.9 2020/01/08 17:38:42 ad Exp $ */
|
||||
/* $NetBSD: db_proc.c,v 1.10 2020/02/15 18:12:14 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2009 The NetBSD Foundation, Inc.
|
||||
* Copyright (c) 2009, 2020 The NetBSD Foundation, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to The NetBSD Foundation
|
||||
@ -61,7 +61,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: db_proc.c,v 1.9 2020/01/08 17:38:42 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: db_proc.c,v 1.10 2020/02/15 18:12:14 ad Exp $");
|
||||
|
||||
#ifndef _KERNEL
|
||||
#include <stdbool.h>
|
||||
@ -196,7 +196,7 @@ db_show_all_procs(db_expr_t addr, bool haddr, db_expr_t count,
|
||||
sizeof(db_nbuf));
|
||||
}
|
||||
run = (l.l_stat == LSONPROC ||
|
||||
(l.l_flag & LW_RUNNING) != 0);
|
||||
(l.l_pflag & LP_RUNNING) != 0);
|
||||
if (l.l_cpu != NULL) {
|
||||
db_read_bytes((db_addr_t)
|
||||
&l.l_cpu->ci_data.cpu_index,
|
||||
@ -254,7 +254,7 @@ db_show_all_procs(db_expr_t addr, bool haddr, db_expr_t count,
|
||||
wbuf[0] = '\0';
|
||||
}
|
||||
run = (l.l_stat == LSONPROC ||
|
||||
(l.l_flag & LW_RUNNING) != 0);
|
||||
(l.l_pflag & LP_RUNNING) != 0);
|
||||
db_read_bytes((db_addr_t)&p.p_emul->e_name,
|
||||
sizeof(ename), (char *)&ename);
|
||||
|
||||
@ -332,7 +332,7 @@ db_show_proc(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
|
||||
db_read_bytes((db_addr_t)lp, sizeof(l), (char *)&l);
|
||||
|
||||
run = (l.l_stat == LSONPROC ||
|
||||
(l.l_flag & LW_RUNNING) != 0);
|
||||
(l.l_pflag & LP_RUNNING) != 0);
|
||||
|
||||
db_printf("%slwp %d", (run ? "> " : " "), l.l_lid);
|
||||
if (l.l_name != NULL) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: init_main.c,v 1.519 2020/01/28 16:35:39 ad Exp $ */
|
||||
/* $NetBSD: init_main.c,v 1.520 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
|
||||
@ -97,7 +97,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.519 2020/01/28 16:35:39 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.520 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include "opt_ddb.h"
|
||||
#include "opt_inet.h"
|
||||
@ -290,7 +290,7 @@ main(void)
|
||||
#ifndef LWP0_CPU_INFO
|
||||
l->l_cpu = curcpu();
|
||||
#endif
|
||||
l->l_flag |= LW_RUNNING;
|
||||
l->l_pflag |= LP_RUNNING;
|
||||
|
||||
/*
|
||||
* Attempt to find console and initialize
|
||||
|
@ -1,7 +1,7 @@
|
||||
/* $NetBSD: kern_exit.c,v 1.282 2020/01/29 15:47:52 ad Exp $ */
|
||||
/* $NetBSD: kern_exit.c,v 1.283 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
|
||||
* Copyright (c) 1998, 1999, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to The NetBSD Foundation
|
||||
@ -67,7 +67,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.282 2020/01/29 15:47:52 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.283 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include "opt_ktrace.h"
|
||||
#include "opt_dtrace.h"
|
||||
@ -592,9 +592,6 @@ exit1(struct lwp *l, int exitcode, int signo)
|
||||
*/
|
||||
cpu_lwp_free(l, 1);
|
||||
|
||||
/* For the LW_RUNNING check in lwp_free(). */
|
||||
membar_exit();
|
||||
|
||||
/* Switch away into oblivion. */
|
||||
lwp_lock(l);
|
||||
spc_lock(l->l_cpu);
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: kern_idle.c,v 1.31 2020/01/25 20:29:43 ad Exp $ */
|
||||
/* $NetBSD: kern_idle.c,v 1.32 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
|
||||
@ -28,7 +28,7 @@
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.31 2020/01/25 20:29:43 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.32 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/cpu.h>
|
||||
@ -57,7 +57,7 @@ idle_loop(void *dummy)
|
||||
binuptime(&l->l_stime);
|
||||
spc->spc_flags |= SPCF_RUNNING;
|
||||
KASSERT(l->l_stat == LSONPROC);
|
||||
KASSERT((l->l_flag & LW_RUNNING) != 0);
|
||||
KASSERT((l->l_pflag & LP_RUNNING) != 0);
|
||||
lwp_unlock(l);
|
||||
|
||||
/*
|
||||
@ -120,7 +120,7 @@ create_idle_lwp(struct cpu_info *ci)
|
||||
* takes an interrupt before it calls idle_loop().
|
||||
*/
|
||||
l->l_stat = LSONPROC;
|
||||
l->l_flag |= LW_RUNNING;
|
||||
l->l_pflag |= LP_RUNNING;
|
||||
ci->ci_onproc = l;
|
||||
}
|
||||
lwp_unlock(l);
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: kern_lwp.c,v 1.226 2020/02/15 17:13:55 ad Exp $ */
|
||||
/* $NetBSD: kern_lwp.c,v 1.227 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
|
||||
@ -80,7 +80,7 @@
|
||||
* LWP. The LWP may in fact be executing on a processor, may be
|
||||
* sleeping or idle. It is expected to take the necessary action to
|
||||
* stop executing or become "running" again within a short timeframe.
|
||||
* The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
|
||||
* The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
|
||||
* Importantly, it indicates that its state is tied to a CPU.
|
||||
*
|
||||
* LSZOMB:
|
||||
@ -211,7 +211,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.226 2020/02/15 17:13:55 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.227 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include "opt_ddb.h"
|
||||
#include "opt_lockdebug.h"
|
||||
@ -1044,16 +1044,26 @@ lwp_start(lwp_t *l, int flags)
|
||||
void
|
||||
lwp_startup(struct lwp *prev, struct lwp *new_lwp)
|
||||
{
|
||||
kmutex_t *lock;
|
||||
|
||||
KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
|
||||
KASSERT(kpreempt_disabled());
|
||||
KASSERT(prev != NULL);
|
||||
KASSERT((prev->l_flag & LW_RUNNING) != 0);
|
||||
KASSERT((prev->l_pflag & LP_RUNNING) != 0);
|
||||
KASSERT(curcpu()->ci_mtx_count == -2);
|
||||
|
||||
/* Immediately mark previous LWP as no longer running, and unlock. */
|
||||
prev->l_flag &= ~LW_RUNNING;
|
||||
lwp_unlock(prev);
|
||||
/*
|
||||
* Immediately mark the previous LWP as no longer running and unlock
|
||||
* (to keep lock wait times short as possible). If a zombie, don't
|
||||
* touch after clearing LP_RUNNING as it could be reaped by another
|
||||
* CPU. Issue a memory barrier to ensure this.
|
||||
*/
|
||||
lock = prev->l_mutex;
|
||||
if (__predict_false(prev->l_stat == LSZOMB)) {
|
||||
membar_sync();
|
||||
}
|
||||
prev->l_pflag &= ~LP_RUNNING;
|
||||
mutex_spin_exit(lock);
|
||||
|
||||
/* Correct spin mutex count after mi_switch(). */
|
||||
curcpu()->ci_mtx_count = 0;
|
||||
@ -1224,8 +1234,6 @@ lwp_exit(struct lwp *l)
|
||||
cpu_lwp_free(l, 0);
|
||||
|
||||
if (current) {
|
||||
/* For the LW_RUNNING check in lwp_free(). */
|
||||
membar_exit();
|
||||
/* Switch away into oblivion. */
|
||||
lwp_lock(l);
|
||||
spc_lock(l->l_cpu);
|
||||
@ -1304,8 +1312,8 @@ lwp_free(struct lwp *l, bool recycle, bool last)
|
||||
* all locks to avoid deadlock against interrupt handlers on
|
||||
* the target CPU.
|
||||
*/
|
||||
membar_enter();
|
||||
while (__predict_false((l->l_flag & LW_RUNNING) != 0)) {
|
||||
membar_consumer();
|
||||
while (__predict_false((l->l_pflag & LP_RUNNING) != 0)) {
|
||||
SPINLOCK_BACKOFF_HOOK;
|
||||
}
|
||||
#endif
|
||||
@ -1371,7 +1379,7 @@ lwp_migrate(lwp_t *l, struct cpu_info *tci)
|
||||
KASSERT(tci != NULL);
|
||||
|
||||
/* If LWP is still on the CPU, it must be handled like LSONPROC */
|
||||
if ((l->l_flag & LW_RUNNING) != 0) {
|
||||
if ((l->l_pflag & LP_RUNNING) != 0) {
|
||||
lstat = LSONPROC;
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: kern_resource.c,v 1.184 2020/01/08 17:38:42 ad Exp $ */
|
||||
/* $NetBSD: kern_resource.c,v 1.185 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1982, 1986, 1991, 1993
|
||||
@ -37,7 +37,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.184 2020/01/08 17:38:42 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.185 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
@ -506,7 +506,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp,
|
||||
LIST_FOREACH(l, &p->p_lwps, l_sibling) {
|
||||
lwp_lock(l);
|
||||
bintime_add(&tm, &l->l_rtime);
|
||||
if ((l->l_flag & LW_RUNNING) != 0 &&
|
||||
if ((l->l_pflag & LP_RUNNING) != 0 &&
|
||||
(l->l_pflag & (LP_INTR | LP_TIMEINTR)) != LP_INTR) {
|
||||
struct bintime diff;
|
||||
/*
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: kern_sleepq.c,v 1.60 2020/02/01 19:29:27 christos Exp $ */
|
||||
/* $NetBSD: kern_sleepq.c,v 1.61 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc.
|
||||
@ -35,7 +35,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.60 2020/02/01 19:29:27 christos Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.61 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/kernel.h>
|
||||
@ -143,7 +143,7 @@ sleepq_remove(sleepq_t *sq, lwp_t *l)
|
||||
* If the LWP is still on the CPU, mark it as LSONPROC. It may be
|
||||
* about to call mi_switch(), in which case it will yield.
|
||||
*/
|
||||
if ((l->l_flag & LW_RUNNING) != 0) {
|
||||
if ((l->l_pflag & LP_RUNNING) != 0) {
|
||||
l->l_stat = LSONPROC;
|
||||
l->l_slptime = 0;
|
||||
lwp_setlock(l, spc->spc_lwplock);
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: kern_softint.c,v 1.59 2020/01/26 18:52:55 ad Exp $ */
|
||||
/* $NetBSD: kern_softint.c,v 1.60 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
|
||||
@ -170,7 +170,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.59 2020/01/26 18:52:55 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.60 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/proc.h>
|
||||
@ -842,7 +842,7 @@ softint_dispatch(lwp_t *pinned, int s)
|
||||
lwp_t *l;
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if ((pinned->l_flag & LW_RUNNING) == 0 || curlwp->l_stat != LSIDL) {
|
||||
if ((pinned->l_pflag & LP_RUNNING) == 0 || curlwp->l_stat != LSIDL) {
|
||||
struct lwp *onproc = curcpu()->ci_onproc;
|
||||
int s2 = splhigh();
|
||||
printf("curcpu=%d, spl=%d curspl=%d\n"
|
||||
@ -881,7 +881,7 @@ softint_dispatch(lwp_t *pinned, int s)
|
||||
membar_producer(); /* for calcru */
|
||||
l->l_pflag |= LP_TIMEINTR;
|
||||
}
|
||||
l->l_flag |= LW_RUNNING;
|
||||
l->l_pflag |= LP_RUNNING;
|
||||
softint_execute(si, l, s);
|
||||
if (timing) {
|
||||
binuptime(&now);
|
||||
@ -911,7 +911,7 @@ softint_dispatch(lwp_t *pinned, int s)
|
||||
/* NOTREACHED */
|
||||
}
|
||||
l->l_switchto = NULL;
|
||||
l->l_flag &= ~LW_RUNNING;
|
||||
l->l_pflag &= ~LP_RUNNING;
|
||||
}
|
||||
|
||||
#endif /* !__HAVE_FAST_SOFTINTS */
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: kern_synch.c,v 1.338 2020/01/24 20:05:15 ad Exp $ */
|
||||
/* $NetBSD: kern_synch.c,v 1.339 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
|
||||
@ -69,7 +69,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.338 2020/01/24 20:05:15 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.339 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include "opt_kstack.h"
|
||||
#include "opt_dtrace.h"
|
||||
@ -484,17 +484,15 @@ nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
|
||||
sched_dequeue(newl);
|
||||
KASSERT(lwp_locked(newl, spc->spc_mutex));
|
||||
KASSERT(newl->l_cpu == ci);
|
||||
newl->l_stat = LSONPROC;
|
||||
newl->l_flag |= LW_RUNNING;
|
||||
lwp_setlock(newl, spc->spc_lwplock);
|
||||
spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
|
||||
} else {
|
||||
newl = ci->ci_data.cpu_idlelwp;
|
||||
newl->l_stat = LSONPROC;
|
||||
newl->l_flag |= LW_RUNNING;
|
||||
spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
|
||||
SPCF_IDLE;
|
||||
}
|
||||
newl->l_stat = LSONPROC;
|
||||
newl->l_pflag |= LP_RUNNING;
|
||||
|
||||
/*
|
||||
* Only clear want_resched if there are no pending (slow) software
|
||||
@ -515,7 +513,7 @@ nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
|
||||
* NOTE: l->l_cpu is not changed in this routine, because an LWP never
|
||||
* changes its own l_cpu (that would screw up curcpu on many ports and could
|
||||
* cause all kinds of other evil stuff). l_cpu is always changed by some
|
||||
* other actor, when it's known the LWP is not running (the LW_RUNNING flag
|
||||
* other actor, when it's known the LWP is not running (the LP_RUNNING flag
|
||||
* is checked under lock).
|
||||
*/
|
||||
void
|
||||
@ -524,6 +522,7 @@ mi_switch(lwp_t *l)
|
||||
struct cpu_info *ci;
|
||||
struct schedstate_percpu *spc;
|
||||
struct lwp *newl;
|
||||
kmutex_t *lock;
|
||||
int oldspl;
|
||||
struct bintime bt;
|
||||
bool returning;
|
||||
@ -538,7 +537,7 @@ mi_switch(lwp_t *l)
|
||||
binuptime(&bt);
|
||||
|
||||
KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
|
||||
KASSERT((l->l_flag & LW_RUNNING) != 0);
|
||||
KASSERT((l->l_pflag & LP_RUNNING) != 0);
|
||||
KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN);
|
||||
ci = curcpu();
|
||||
spc = &ci->ci_schedstate;
|
||||
@ -567,7 +566,7 @@ mi_switch(lwp_t *l)
|
||||
/* There are pending soft interrupts, so pick one. */
|
||||
newl = softint_picklwp();
|
||||
newl->l_stat = LSONPROC;
|
||||
newl->l_flag |= LW_RUNNING;
|
||||
newl->l_pflag |= LP_RUNNING;
|
||||
}
|
||||
#endif /* !__HAVE_FAST_SOFTINTS */
|
||||
|
||||
@ -694,7 +693,7 @@ mi_switch(lwp_t *l)
|
||||
*/
|
||||
if (returning) {
|
||||
/* Keep IPL_SCHED after this; MD code will fix up. */
|
||||
l->l_flag &= ~LW_RUNNING;
|
||||
l->l_pflag &= ~LP_RUNNING;
|
||||
lwp_unlock(l);
|
||||
} else {
|
||||
/* A normal LWP: save old VM context. */
|
||||
@ -732,12 +731,20 @@ mi_switch(lwp_t *l)
|
||||
KASSERT(ci->ci_mtx_count == -2);
|
||||
|
||||
/*
|
||||
* Immediately mark the previous LWP as no longer running,
|
||||
* and unlock it. We'll still be at IPL_SCHED afterwards.
|
||||
* Immediately mark the previous LWP as no longer running
|
||||
* and unlock (to keep lock wait times short as possible).
|
||||
* We'll still be at IPL_SCHED afterwards. If a zombie,
|
||||
* don't touch after clearing LP_RUNNING as it could be
|
||||
* reaped by another CPU. Issue a memory barrier to ensure
|
||||
* this.
|
||||
*/
|
||||
KASSERT((prevlwp->l_flag & LW_RUNNING) != 0);
|
||||
prevlwp->l_flag &= ~LW_RUNNING;
|
||||
lwp_unlock(prevlwp);
|
||||
KASSERT((prevlwp->l_pflag & LP_RUNNING) != 0);
|
||||
lock = prevlwp->l_mutex;
|
||||
if (__predict_false(prevlwp->l_stat == LSZOMB)) {
|
||||
membar_sync();
|
||||
}
|
||||
prevlwp->l_pflag &= ~LP_RUNNING;
|
||||
mutex_spin_exit(lock);
|
||||
|
||||
/*
|
||||
* Switched away - we have new curlwp.
|
||||
@ -833,7 +840,7 @@ setrunnable(struct lwp *l)
|
||||
* If the LWP is still on the CPU, mark it as LSONPROC. It may be
|
||||
* about to call mi_switch(), in which case it will yield.
|
||||
*/
|
||||
if ((l->l_flag & LW_RUNNING) != 0) {
|
||||
if ((l->l_pflag & LP_RUNNING) != 0) {
|
||||
l->l_stat = LSONPROC;
|
||||
l->l_slptime = 0;
|
||||
lwp_unlock(l);
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: lwproc.c,v 1.43 2020/01/08 17:38:42 ad Exp $ */
|
||||
/* $NetBSD: lwproc.c,v 1.44 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
|
||||
@ -28,7 +28,7 @@
|
||||
#define RUMP__CURLWP_PRIVATE
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.43 2020/01/08 17:38:42 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.44 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/atomic.h>
|
||||
@ -476,12 +476,12 @@ rump_lwproc_switch(struct lwp *newlwp)
|
||||
|
||||
KASSERT(!(l->l_flag & LW_WEXIT) || newlwp);
|
||||
|
||||
if (__predict_false(newlwp && (newlwp->l_flag & LW_RUNNING)))
|
||||
if (__predict_false(newlwp && (newlwp->l_pflag & LP_RUNNING)))
|
||||
panic("lwp %p (%d:%d) already running",
|
||||
newlwp, newlwp->l_proc->p_pid, newlwp->l_lid);
|
||||
|
||||
if (newlwp == NULL) {
|
||||
l->l_flag &= ~LW_RUNNING;
|
||||
l->l_pflag &= ~LP_RUNNING;
|
||||
l->l_flag |= LW_RUMP_CLEAR;
|
||||
return;
|
||||
}
|
||||
@ -496,7 +496,7 @@ rump_lwproc_switch(struct lwp *newlwp)
|
||||
|
||||
newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
|
||||
newlwp->l_mutex = l->l_mutex;
|
||||
newlwp->l_flag |= LW_RUNNING;
|
||||
newlwp->l_pflag |= LP_RUNNING;
|
||||
|
||||
lwproc_curlwpop(RUMPUSER_LWP_SET, newlwp);
|
||||
curcpu()->ci_curlwp = newlwp;
|
||||
@ -513,7 +513,7 @@ rump_lwproc_switch(struct lwp *newlwp)
|
||||
mutex_exit(newlwp->l_proc->p_lock);
|
||||
|
||||
l->l_mutex = &unruntime_lock;
|
||||
l->l_flag &= ~LW_RUNNING;
|
||||
l->l_pflag &= ~LP_RUNNING;
|
||||
l->l_flag &= ~LW_PENDSIG;
|
||||
l->l_stat = LSRUN;
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: scheduler.c,v 1.49 2020/01/08 17:38:42 ad Exp $ */
|
||||
/* $NetBSD: scheduler.c,v 1.50 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
|
||||
@ -26,7 +26,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.49 2020/01/08 17:38:42 ad Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.50 2020/02/15 18:12:15 ad Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/atomic.h>
|
||||
@ -409,7 +409,7 @@ rump_unschedule()
|
||||
/* release lwp0 */
|
||||
rump_unschedule_cpu(&lwp0);
|
||||
lwp0.l_mutex = &unruntime_lock;
|
||||
lwp0.l_flag &= ~LW_RUNNING;
|
||||
lwp0.l_pflag &= ~LP_RUNNING;
|
||||
lwp0rele();
|
||||
rump_lwproc_curlwp_clear(&lwp0);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: lwp.h,v 1.201 2020/02/15 17:13:55 ad Exp $ */
|
||||
/* $NetBSD: lwp.h,v 1.202 2020/02/15 18:12:15 ad Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019, 2020
|
||||
@ -235,7 +235,10 @@ extern int maxlwp __read_mostly; /* max number of lwps */
|
||||
|
||||
#endif /* _KERNEL || _KMEMUSER */
|
||||
|
||||
/* These flags are kept in l_flag. */
|
||||
/*
|
||||
* These flags are kept in l_flag, and they are modified only with the LWP
|
||||
* locked.
|
||||
*/
|
||||
#define LW_IDLE 0x00000001 /* Idle lwp. */
|
||||
#define LW_LWPCTL 0x00000002 /* Adjust lwpctl in userret */
|
||||
#define LW_CVLOCKDEBUG 0x00000004 /* Waker does lockdebug */
|
||||
@ -250,11 +253,15 @@ extern int maxlwp __read_mostly; /* max number of lwps */
|
||||
#define LW_CANCELLED 0x02000000 /* tsleep should not sleep */
|
||||
#define LW_WREBOOT 0x08000000 /* System is rebooting, please suspend */
|
||||
#define LW_UNPARKED 0x10000000 /* Unpark op pending */
|
||||
#define LW_RUNNING 0x20000000 /* Active on a CPU */
|
||||
#define LW_RUMP_CLEAR 0x40000000 /* Clear curlwp in RUMP scheduler */
|
||||
#define LW_RUMP_QEXIT 0x80000000 /* LWP should exit ASAP */
|
||||
|
||||
/* The second set of flags is kept in l_pflag. */
|
||||
/*
|
||||
* The second set of flags is kept in l_pflag, and they are modified only by
|
||||
* the LWP itself, or modified when it's known the LWP cannot be running.
|
||||
* LP_RUNNING is typically updated with the LWP locked, but not always in
|
||||
* the case of soft interrupt handlers.
|
||||
*/
|
||||
#define LP_KTRACTIVE 0x00000001 /* Executing ktrace operation */
|
||||
#define LP_KTRCSW 0x00000002 /* ktrace context switch marker */
|
||||
#define LP_KTRCSWUSER 0x00000004 /* ktrace context switch marker */
|
||||
@ -267,10 +274,14 @@ extern int maxlwp __read_mostly; /* max number of lwps */
|
||||
#define LP_SINGLESTEP 0x00000400 /* Single step thread in ptrace(2) */
|
||||
#define LP_TIMEINTR 0x00010000 /* Time this soft interrupt */
|
||||
#define LP_PREEMPTING 0x00020000 /* mi_switch called involuntarily */
|
||||
#define LP_RUNNING 0x20000000 /* Active on a CPU */
|
||||
#define LP_TELEPORT 0x40000000 /* Teleport to new CPU on preempt() */
|
||||
#define LP_BOUND 0x80000000 /* Bound to a CPU */
|
||||
|
||||
/* The third set is kept in l_prflag. */
|
||||
/*
|
||||
* The third set of flags is kept in l_prflag and they are modified only
|
||||
* with p_lock held.
|
||||
*/
|
||||
#define LPR_DETACHED 0x00800000 /* Won't be waited for. */
|
||||
#define LPR_CRMOD 0x00000100 /* Credentials modified */
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user