Hopefully fix some problems seen with MP support on non-x86, in particular where curcpu() is defined as curlwp->l_cpu:

- mi_switch(): undo the ~2007ish optimisation to unlock curlwp before
  calling cpu_switchto().  It's not safe to let other actors mess with the
  LWP (in particular l->l_cpu) while it's still context switching.  This
  removes l->l_ctxswtch.  (A trimmed sketch of the new switch tail follows
  this list.)

- Move the LP_RUNNING flag into l->l_flag and rename to LW_RUNNING since
  it's now covered by the LWP's lock.

- Ditch lwp_exit_switchaway() and just call mi_switch() instead.  Everything
  is in cache anyway, so trying to avoid saving the old state wasn't buying
  much.  This means cpu_switchto() will never be called with prevlwp ==
  NULL.

- Remove some KERNEL_LOCK handling which hasn't been needed for years.
ad 2020-01-08 17:38:41 +00:00
parent b4d38fb000
commit 2ddceed1d9
40 changed files with 189 additions and 340 deletions


@ -1,4 +1,4 @@
/* $NetBSD: cpuswitch.S,v 1.13 2019/12/20 07:16:43 ryo Exp $ */
/* $NetBSD: cpuswitch.S,v 1.14 2020/01/08 17:38:41 ad Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
#include "opt_ddb.h"
#include "opt_kasan.h"
RCSID("$NetBSD: cpuswitch.S,v 1.13 2019/12/20 07:16:43 ryo Exp $")
RCSID("$NetBSD: cpuswitch.S,v 1.14 2020/01/08 17:38:41 ad Exp $")
/*
* At IPL_SCHED:
@ -178,7 +178,6 @@ END(cpu_switchto_softint)
* cpu_switchto() bottom half arranges to start this when softlwp.
* kernel thread is to yield CPU for the pinned_lwp in the above.
* curcpu()->ci_mtx_count += 1;
* softlwp->l_ctxswtch = 0;
* this returns as if cpu_switchto_softint finished normally.
* }
*/
@ -189,7 +188,6 @@ ENTRY_NP(softint_cleanup)
ldr w2, [x3, #CI_MTX_COUNT] /* ->ci_mtx_count */
add w2, w2, #1
str w2, [x3, #CI_MTX_COUNT]
str wzr, [x0, #L_CTXSWTCH] /* softlwp->l_ctxswtch = 0 */
msr daif, x19 /* restore interrupt mask */
ldp x19, x20, [sp], #16 /* restore */


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.17 2019/12/28 17:19:43 jmcneill Exp $
# $NetBSD: genassym.cf,v 1.18 2020/01/08 17:38:41 ad Exp $
#-
# Copyright (c) 2014 The NetBSD Foundation, Inc.
# All rights reserved.
@ -148,7 +148,6 @@ define L_PRIORITY offsetof(struct lwp, l_priority)
define L_WCHAN offsetof(struct lwp, l_wchan)
define L_STAT offsetof(struct lwp, l_stat)
define L_PROC offsetof(struct lwp, l_proc)
define L_CTXSWTCH offsetof(struct lwp, l_ctxswtch)
define L_PRIVATE offsetof(struct lwp, l_private)
define L_MD_FLAGS offsetof(struct lwp, l_md.md_flags)
define L_MD_UTF offsetof(struct lwp, l_md.md_utf)


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.80 2019/12/30 23:32:29 thorpej Exp $
# $NetBSD: genassym.cf,v 1.81 2020/01/08 17:38:41 ad Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@ -150,7 +150,6 @@ define VM_MAXUSER_ADDRESS (unsigned long long)VM_MAXUSER_ADDRESS
define L_PCB offsetof(struct lwp, l_addr)
define L_FLAG offsetof(struct lwp, l_flag)
define L_PROC offsetof(struct lwp, l_proc)
define L_CTXSWTCH offsetof(struct lwp, l_ctxswtch)
define L_NCSW offsetof(struct lwp, l_ncsw)
define L_NOPREEMPT offsetof(struct lwp, l_nopreempt)
define L_DOPREEMPT offsetof(struct lwp, l_dopreempt)


@ -1,4 +1,4 @@
/* $NetBSD: locore.S,v 1.195 2019/12/15 02:58:21 manu Exp $ */
/* $NetBSD: locore.S,v 1.196 2020/01/08 17:38:41 ad Exp $ */
/*
* Copyright-o-rama!
@ -1836,14 +1836,10 @@ ENTRY(cpu_switchto)
movq %rdi,%r13 /* oldlwp */
movq %rsi,%r12 /* newlwp */
testq %r13,%r13 /* oldlwp = NULL ? */
jz .Lskip_save
/* Save old context. */
movq L_PCB(%r13),%rax
movq %rsp,PCB_RSP(%rax)
movq %rbp,PCB_RBP(%rax)
.Lskip_save:
/* Switch to newlwp's stack. */
movq L_PCB(%r12),%r14


@ -1,4 +1,4 @@
/* $NetBSD: spl.S,v 1.42 2019/11/14 16:23:52 maxv Exp $ */
/* $NetBSD: spl.S,v 1.43 2020/01/08 17:38:41 ad Exp $ */
/*
* Copyright (c) 2003 Wasabi Systems, Inc.
@ -174,7 +174,6 @@ IDTVEC_END(softintr)
*/
ENTRY(softintr_ret)
incl CPUVAR(MTX_COUNT) /* re-adjust after mi_switch */
movl $0,L_CTXSWTCH(%rax) /* %rax from cpu_switchto */
cli
jmp *%r13 /* back to Xspllower/Xdoreti */
END(softintr_ret)


@ -1,4 +1,4 @@
/* $NetBSD: cpuswitch.S,v 1.95 2019/10/29 16:18:23 joerg Exp $ */
/* $NetBSD: cpuswitch.S,v 1.96 2020/01/08 17:38:41 ad Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@ -87,7 +87,7 @@
#include <arm/asm.h>
#include <arm/locore.h>
RCSID("$NetBSD: cpuswitch.S,v 1.95 2019/10/29 16:18:23 joerg Exp $")
RCSID("$NetBSD: cpuswitch.S,v 1.96 2020/01/08 17:38:41 ad Exp $")
/* LINTSTUB: include <sys/param.h> */
@ -460,9 +460,6 @@ ENTRY_NP(softint_tramp)
add r3, r3, #1
str r3, [r7, #(CI_MTX_COUNT)]
mov r3, #0 /* tell softint_dispatch */
str r3, [r0, #(L_CTXSWTCH)] /* the soft lwp blocked */
msr cpsr_c, r6 /* restore interrupts */
pop {r4, r6, r7, pc} /* pop stack and return */
END(softint_tramp)


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.82 2019/11/24 11:23:16 skrll Exp $
# $NetBSD: genassym.cf,v 1.83 2020/01/08 17:38:41 ad Exp $
# Copyright (c) 1982, 1990 The Regents of the University of California.
# All rights reserved.
@ -160,7 +160,6 @@ define L_PRIORITY offsetof(struct lwp, l_priority)
define L_WCHAN offsetof(struct lwp, l_wchan)
define L_STAT offsetof(struct lwp, l_stat)
define L_PROC offsetof(struct lwp, l_proc)
define L_CTXSWTCH offsetof(struct lwp, l_ctxswtch)
define L_PRIVATE offsetof(struct lwp, l_private)
define L_FLAG offsetof(struct lwp, l_flag)
define L_MD_FLAGS offsetof(struct lwp, l_md.md_flags)


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.1 2014/02/24 07:23:43 skrll Exp $
# $NetBSD: genassym.cf,v 1.2 2020/01/08 17:38:41 ad Exp $
# $OpenBSD: genassym.cf,v 1.18 2001/09/20 18:31:14 mickey Exp $
@ -196,7 +196,6 @@ member L_STAT l_stat
member L_WCHAN l_wchan
member L_MD l_md
member L_MD_REGS l_md.md_regs
member L_CTXSWTCH l_ctxswtch
struct pcb
member PCB_FPREGS pcb_fpregs


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.117 2019/12/30 23:32:29 thorpej Exp $
# $NetBSD: genassym.cf,v 1.118 2020/01/08 17:38:41 ad Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@ -166,7 +166,6 @@ define L_FLAG offsetof(struct lwp, l_flag)
define L_PROC offsetof(struct lwp, l_proc)
define L_MD_REGS offsetof(struct lwp, l_md.md_regs)
define L_MD_FLAGS offsetof(struct lwp, l_md.md_flags)
define L_CTXSWTCH offsetof(struct lwp, l_ctxswtch)
define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending)
define L_CPU offsetof(struct lwp, l_cpu)
define L_NCSW offsetof(struct lwp, l_ncsw)


@ -1,4 +1,4 @@
/* $NetBSD: locore.S,v 1.174 2019/11/21 19:27:54 ad Exp $ */
/* $NetBSD: locore.S,v 1.175 2020/01/08 17:38:41 ad Exp $ */
/*
* Copyright-o-rama!
@ -128,7 +128,7 @@
*/
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.174 2019/11/21 19:27:54 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.175 2020/01/08 17:38:41 ad Exp $");
#include "opt_copy_symtab.h"
#include "opt_ddb.h"
@ -1316,14 +1316,10 @@ ENTRY(cpu_switchto)
movl 20(%esp),%edi /* newlwp */
movl 24(%esp),%edx /* returning */
testl %esi,%esi /* oldlwp = NULL ? */
jz skip_save
/* Save old context. */
movl L_PCB(%esi),%eax
movl %esp,PCB_ESP(%eax)
movl %ebp,PCB_EBP(%eax)
skip_save:
/* Switch to newlwp's stack. */
movl L_PCB(%edi),%ebx


@ -1,4 +1,4 @@
/* $NetBSD: spl.S,v 1.49 2019/10/12 06:31:03 maxv Exp $ */
/* $NetBSD: spl.S,v 1.50 2020/01/08 17:38:41 ad Exp $ */
/*
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@ -30,7 +30,7 @@
*/
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.49 2019/10/12 06:31:03 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.50 2020/01/08 17:38:41 ad Exp $");
#include "opt_ddb.h"
#include "opt_spldebug.h"
@ -404,7 +404,6 @@ IDTVEC_END(softintr)
*/
ENTRY(softintr_ret)
incl CPUVAR(MTX_COUNT) /* re-adjust after mi_switch */
movl $0,L_CTXSWTCH(%eax) /* %eax from cpu_switchto */
cli
jmp *%esi /* back to splx/doreti */
END(softintr_ret)


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.67 2016/07/11 16:15:36 matt Exp $
# $NetBSD: genassym.cf,v 1.68 2020/01/08 17:38:42 ad Exp $
#
# Copyright (c) 1992, 1993
# The Regents of the University of California. All rights reserved.
@ -137,7 +137,6 @@ define MIPS_XKSEG_START MIPS_XKSEG_START
# Important offsets into the lwp and proc structs & associated constants
define L_CPU offsetof(struct lwp, l_cpu)
define L_CTXSWITCH offsetof(struct lwp, l_ctxswtch)
define L_PCB offsetof(struct lwp, l_addr)
define L_PRIORITY offsetof(struct lwp, l_priority)
define L_PRIVATE offsetof(struct lwp, l_private)


@ -1,4 +1,4 @@
/* $NetBSD: locore.S,v 1.220 2019/09/05 15:48:13 skrll Exp $ */
/* $NetBSD: locore.S,v 1.221 2020/01/08 17:38:42 ad Exp $ */
/*
* Copyright (c) 1992, 1993
@ -63,7 +63,7 @@
#include <mips/trap.h>
#include <mips/locore.h>
RCSID("$NetBSD: locore.S,v 1.220 2019/09/05 15:48:13 skrll Exp $")
RCSID("$NetBSD: locore.S,v 1.221 2020/01/08 17:38:42 ad Exp $")
#include "assym.h"
@ -377,7 +377,6 @@ softint_cleanup:
REG_L ra, CALLFRAME_RA(sp)
REG_L v0, CALLFRAME_S0(sp) # get softint lwp
NOP_L # load delay
PTR_S zero, L_CTXSWITCH(v0) # clear l_ctxswtch
#if IPL_SCHED != IPL_HIGH
j _C_LABEL(splhigh_noprof)
#else


@ -1,4 +1,4 @@
/* $NetBSD: mips_softint.c,v 1.7 2015/06/06 04:43:41 matt Exp $ */
/* $NetBSD: mips_softint.c,v 1.8 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mips_softint.c,v 1.7 2015/06/06 04:43:41 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: mips_softint.c,v 1.8 2020/01/08 17:38:42 ad Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@ -100,7 +100,6 @@ softint_trigger(uintptr_t si)
ci->ci_softints ^= SOFTINT_##level##_MASK; \
softint_fast_dispatch(ci->ci_softlwps[SOFTINT_##level], \
IPL_SOFT##level); \
KASSERT(ci->ci_softlwps[SOFTINT_##level]->l_ctxswtch == 0); \
KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl (%d) != HIGH", ci->ci_cpl); \
continue; \
}


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.11 2019/11/23 19:40:36 ad Exp $
# $NetBSD: genassym.cf,v 1.12 2020/01/08 17:38:42 ad Exp $
#-
# Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
@ -151,7 +151,6 @@ define PCB_ONFAULT offsetof(struct pcb, pcb_onfault)
define PCB_USPRG0 offsetof(struct pcb, pcb_usprg0)
define L_CPU offsetof(struct lwp, l_cpu)
define L_CTXSWTCH offsetof(struct lwp, l_ctxswtch)
define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending)
define L_MD_UTF offsetof(struct lwp, l_md.md_utf)
define L_PCB offsetof(struct lwp, l_addr)


@ -1,4 +1,4 @@
/* $NetBSD: locore_subr.S,v 1.57 2019/04/06 03:06:27 thorpej Exp $ */
/* $NetBSD: locore_subr.S,v 1.58 2020/01/08 17:38:42 ad Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@ -319,8 +319,6 @@ _ENTRY(softint_cleanup)
ldint %r5, CI_MTX_COUNT(%r7)
addi %r5, %r5, 1
stint %r5, CI_MTX_COUNT(%r7)
li %r0, 0
stptr %r0, L_CTXSWTCH(%r3) /* clear ctxswitch of old lwp */
ldreg %r0, CFRAME_R31(%r1) /* get saved MSR */
#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
wrtee %r0 /* restore EE */


@ -1,4 +1,4 @@
/* $NetBSD: softint_machdep.c,v 1.3 2011/09/27 01:02:36 jym Exp $ */
/* $NetBSD: softint_machdep.c,v 1.4 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
* All rights reserved.
@ -58,7 +58,6 @@ softint_deliver(struct cpu_info *ci, int ipl)
KASSERT(ci->ci_data.cpu_softints & (1 << ipl));
ci->ci_data.cpu_softints ^= 1 << ipl;
softint_fast_dispatch(ci->ci_softlwps[si_level], ipl);
KASSERT(ci->ci_softlwps[si_level]->l_ctxswtch == 0);
KASSERTMSG(ci->ci_cpl == IPL_HIGH,
"%s: cpl (%d) != HIGH", __func__, ci->ci_cpl);
}


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.6 2019/11/23 19:40:36 ad Exp $
# $NetBSD: genassym.cf,v 1.7 2020/01/08 17:38:42 ad Exp $
#-
# Copyright (c) 2014 The NetBSD Foundation, Inc.
# All rights reserved.
@ -111,7 +111,6 @@ define TF_BADADDR offsetof(struct trapframe, tf_badaddr)
define TF_SR offsetof(struct trapframe, tf_sr)
define L_CPU offsetof(struct lwp, l_cpu)
define L_CTXSWTCH offsetof(struct lwp, l_ctxswtch)
define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending)
define L_MD_ONFAULT offsetof(struct lwp, l_md.md_onfault)
define L_MD_USP offsetof(struct lwp, l_md.md_usp)


@ -1,4 +1,4 @@
/* $NetBSD: locore.S,v 1.9 2019/06/16 07:42:52 maxv Exp $ */
/* $NetBSD: locore.S,v 1.10 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
* All rights reserved.
@ -239,7 +239,6 @@ ENTRY_NP(cpu_fast_switchto_cleanup)
REG_L a0, CALLFRAME_S0(sp) // get pinned LWP
addi t0, t0, 1 // increment mutex count
INT_S t0, CI_MTX_COUNT(a1) // save it
PTR_S zero, L_CTXSWTCH(a0) // clear l_ctxswitch
addi sp, sp, CALLFRAME_SIZ // remove callframe
#if IPL_SCHED != IPL_HIGH
tail _C_LABEL(splhigh) // go back to IPL HIGH


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.82 2019/11/23 19:40:37 ad Exp $
# $NetBSD: genassym.cf,v 1.83 2020/01/08 17:38:42 ad Exp $
#
# Copyright (c) 1997 The NetBSD Foundation, Inc.
@ -112,7 +112,6 @@ define USRSTACK USRSTACK
define PAGE_SIZE PAGE_SIZE
# Important offsets into the lwp and proc structs & associated constants
define L_CTXSWTCH offsetof(struct lwp, l_ctxswtch)
define L_PCB offsetof(struct lwp, l_addr)
define L_PROC offsetof(struct lwp, l_proc)
define L_TF offsetof(struct lwp, l_md.md_tf)


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.421 2019/07/18 18:21:45 palle Exp $ */
/* $NetBSD: locore.s,v 1.422 2020/01/08 17:38:42 ad Exp $ */
/*
* Copyright (c) 2006-2010 Matthew R. Green
@ -6693,7 +6693,6 @@ softint_fastintr_ret:
ld [%l0 + CI_MTX_COUNT], %o1
inc %o1 ! ci_mtx_count++
st %o1, [%l0 + CI_MTX_COUNT]
st %g0, [%o0 + L_CTXSWTCH] ! prev->l_ctxswtch = 0
STPTR %l6, [%l0 + CI_EINTSTACK] ! restore ci_eintstack
wrpr %g0, %l7, %pil ! restore ipl


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.53 2018/04/25 09:28:42 ragge Exp $
# $NetBSD: genassym.cf,v 1.54 2020/01/08 17:38:42 ad Exp $
#
# Copyright (c) 1997 Ludd, University of Lule}, Sweden.
# All rights reserved.
@ -51,7 +51,6 @@ define L_PCB offsetof(struct lwp, l_addr)
define L_CPU offsetof(struct lwp, l_cpu)
define L_STAT offsetof(struct lwp, l_stat)
define L_PROC offsetof(struct lwp, l_proc)
define L_CTXSWTCH offsetof(struct lwp, l_ctxswtch)
define L_PRIVATE offsetof(struct lwp, l_private)
define P_VMSPACE offsetof(struct proc, p_vmspace)


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.187 2019/11/10 21:16:33 chs Exp $ */
/* $NetBSD: pmap.c,v 1.188 2020/01/08 17:38:42 ad Exp $ */
/*
* Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Lule}, Sweden.
* All rights reserved.
@ -25,7 +25,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.187 2019/11/10 21:16:33 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.188 2020/01/08 17:38:42 ad Exp $");
#include "opt_ddb.h"
#include "opt_cputype.h"
@ -699,7 +699,7 @@ pmap_vax_swappable(struct lwp *l, struct pmap *pm)
return false;
if (l->l_proc->p_vmspace->vm_map.pmap == pm)
return false;
if ((l->l_pflag & LP_RUNNING) != 0)
if ((l->l_flag & LW_RUNNING) != 0)
return false;
if (l->l_class != SCHED_OTHER)
return false;


@ -1,4 +1,4 @@
/* $NetBSD: subr.S,v 1.36 2019/04/06 03:06:28 thorpej Exp $ */
/* $NetBSD: subr.S,v 1.37 2020/01/08 17:38:42 ad Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@ -297,7 +297,6 @@ _C_LABEL(vax_mp_tramp):
softint_cleanup:
movl L_CPU(%r0),%r1 /* get cpu_info */
incl CI_MTX_COUNT(%r1) /* increment mutex count */
clrl L_CTXSWTCH(%r0) /* clear l_ctxswtch of old lwp */
movl L_PCB(%r0),%r1 /* get PCB of softint LWP */
softint_exit:
popr $0x3 /* restore r0 and r1 */


@ -1,4 +1,4 @@
/* $NetBSD: db_proc.c,v 1.8 2018/11/02 11:59:59 maxv Exp $ */
/* $NetBSD: db_proc.c,v 1.9 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
@ -61,7 +61,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: db_proc.c,v 1.8 2018/11/02 11:59:59 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: db_proc.c,v 1.9 2020/01/08 17:38:42 ad Exp $");
#ifndef _KERNEL
#include <stdbool.h>
@ -196,7 +196,7 @@ db_show_all_procs(db_expr_t addr, bool haddr, db_expr_t count,
sizeof(db_nbuf));
}
run = (l.l_stat == LSONPROC ||
(l.l_pflag & LP_RUNNING) != 0);
(l.l_flag & LW_RUNNING) != 0);
if (l.l_cpu != NULL) {
db_read_bytes((db_addr_t)
&l.l_cpu->ci_data.cpu_index,
@ -254,7 +254,7 @@ db_show_all_procs(db_expr_t addr, bool haddr, db_expr_t count,
wbuf[0] = '\0';
}
run = (l.l_stat == LSONPROC ||
(l.l_pflag & LP_RUNNING) != 0);
(l.l_flag & LW_RUNNING) != 0);
db_read_bytes((db_addr_t)&p.p_emul->e_name,
sizeof(ename), (char *)&ename);
@ -332,7 +332,7 @@ db_show_proc(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
db_read_bytes((db_addr_t)lp, sizeof(l), (char *)&l);
run = (l.l_stat == LSONPROC ||
(l.l_pflag & LP_RUNNING) != 0);
(l.l_flag & LW_RUNNING) != 0);
db_printf("%slwp %d", (run ? "> " : " "), l.l_lid);
if (l.l_name != NULL) {


@ -1,4 +1,4 @@
/* $NetBSD: init_main.c,v 1.517 2020/01/02 15:42:27 thorpej Exp $ */
/* $NetBSD: init_main.c,v 1.518 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
@ -97,7 +97,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.517 2020/01/02 15:42:27 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.518 2020/01/08 17:38:42 ad Exp $");
#include "opt_ddb.h"
#include "opt_inet.h"
@ -290,7 +290,7 @@ main(void)
#ifndef LWP0_CPU_INFO
l->l_cpu = curcpu();
#endif
l->l_pflag |= LP_RUNNING;
l->l_flag |= LW_RUNNING;
/*
* Attempt to find console and initialize


@ -1,4 +1,4 @@
/* $NetBSD: kern_exec.c,v 1.485 2019/12/06 21:36:10 ad Exp $ */
/* $NetBSD: kern_exec.c,v 1.486 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.
@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.485 2019/12/06 21:36:10 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.486 2020/01/08 17:38:42 ad Exp $");
#include "opt_exec.h"
#include "opt_execfmt.h"
@ -1366,7 +1366,6 @@ execve_runproc(struct lwp *l, struct execve_data * restrict data,
spc_lock(l->l_cpu);
mi_switch(l);
ksiginfo_queue_drain(&kq);
KERNEL_LOCK(l->l_biglocks, l);
} else {
mutex_exit(proc_lock);
}


@ -1,4 +1,4 @@
/* $NetBSD: kern_exit.c,v 1.278 2019/12/06 21:36:10 ad Exp $ */
/* $NetBSD: kern_exit.c,v 1.279 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.278 2019/12/06 21:36:10 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.279 2020/01/08 17:38:42 ad Exp $");
#include "opt_ktrace.h"
#include "opt_dtrace.h"
@ -204,6 +204,8 @@ exit1(struct lwp *l, int exitcode, int signo)
p = l->l_proc;
/* Verify that we hold no locks other than p->p_lock. */
LOCKDEBUG_BARRIER(p->p_lock, 0);
KASSERT(mutex_owned(p->p_lock));
KASSERT(p->p_vmspace != NULL);
@ -247,7 +249,6 @@ exit1(struct lwp *l, int exitcode, int signo)
lwp_lock(l);
spc_lock(l->l_cpu);
mi_switch(l);
KERNEL_LOCK(l->l_biglocks, l);
mutex_enter(p->p_lock);
}
@ -569,9 +570,6 @@ exit1(struct lwp *l, int exitcode, int signo)
rw_exit(&p->p_reflock);
mutex_exit(proc_lock);
/* Verify that we hold no locks other than the kernel lock. */
LOCKDEBUG_BARRIER(&kernel_lock, 0);
/*
* NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
*/
@ -583,17 +581,14 @@ exit1(struct lwp *l, int exitcode, int signo)
*/
cpu_lwp_free(l, 1);
pmap_deactivate(l);
/* For the LW_RUNNING check in lwp_free(). */
membar_exit();
/* This process no longer needs to hold the kernel lock. */
#ifdef notyet
/* XXXSMP hold in lwp_userret() */
KERNEL_UNLOCK_LAST(l);
#else
KERNEL_UNLOCK_ALL(l, NULL);
#endif
lwp_exit_switchaway(l);
/* Switch away into oblivion. */
lwp_lock(l);
spc_lock(l->l_cpu);
mi_switch(l);
panic("exit1");
}
void
@ -601,9 +596,7 @@ exit_lwps(struct lwp *l)
{
proc_t *p = l->l_proc;
lwp_t *l2;
int nlocks;
KERNEL_UNLOCK_ALL(l, &nlocks);
retry:
KASSERT(mutex_owned(p->p_lock));
@ -637,7 +630,6 @@ retry:
}
}
KERNEL_LOCK(nlocks, l);
KASSERT(p->p_nlwps == 1);
}


@ -1,4 +1,4 @@
/* $NetBSD: kern_idle.c,v 1.29 2019/12/31 22:42:51 ad Exp $ */
/* $NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@ -28,7 +28,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.29 2019/12/31 22:42:51 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@ -59,7 +59,7 @@ idle_loop(void *dummy)
binuptime(&l->l_stime);
spc->spc_flags |= SPCF_RUNNING;
l->l_stat = LSONPROC;
l->l_pflag |= LP_RUNNING;
l->l_flag |= LW_RUNNING;
lwp_unlock(l);
/*


@ -1,4 +1,4 @@
/* $NetBSD: kern_kthread.c,v 1.44 2019/11/23 19:42:52 ad Exp $ */
/* $NetBSD: kern_kthread.c,v 1.45 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 1998, 1999, 2007, 2009, 2019 The NetBSD Foundation, Inc.
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.44 2019/11/23 19:42:52 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.45 2020/01/08 17:38:42 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -178,6 +178,11 @@ kthread_exit(int ecode)
mutex_exit(&kthread_lock);
}
/* If the kernel lock is held, we need to drop it now. */
if ((l->l_pflag & LP_MPSAFE) == 0) {
KERNEL_UNLOCK_LAST(l);
}
/* And exit.. */
lwp_exit(l);
panic("kthread_exit");


@ -1,4 +1,4 @@
/* $NetBSD: kern_lwp.c,v 1.217 2019/12/06 21:36:10 ad Exp $ */
/* $NetBSD: kern_lwp.c,v 1.218 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@ -79,7 +79,7 @@
* LWP. The LWP may in fact be executing on a processor, may be
* sleeping or idle. It is expected to take the necessary action to
* stop executing or become "running" again within a short timeframe.
* The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
* The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
* Importantly, it indicates that its state is tied to a CPU.
*
* LSZOMB:
@ -209,7 +209,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.217 2019/12/06 21:36:10 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.218 2020/01/08 17:38:42 ad Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@ -1015,29 +1015,33 @@ lwp_start(lwp_t *l, int flags)
void
lwp_startup(struct lwp *prev, struct lwp *new_lwp)
{
KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
KASSERT(kpreempt_disabled());
if (prev != NULL) {
/*
* Normalize the count of the spin-mutexes, it was
* increased in mi_switch(). Unmark the state of
* context switch - it is finished for previous LWP.
*/
curcpu()->ci_mtx_count++;
membar_exit();
prev->l_ctxswtch = 0;
}
KPREEMPT_DISABLE(new_lwp);
if (__predict_true(new_lwp->l_proc->p_vmspace))
KASSERT(prev != NULL);
KASSERT((prev->l_flag & LW_RUNNING) != 0);
KASSERT(curcpu()->ci_mtx_count == -2);
/* Immediately mark previous LWP as no longer running, and unlock. */
prev->l_flag &= ~LW_RUNNING;
lwp_unlock(prev);
/* Correct spin mutex count after mi_switch(). */
curcpu()->ci_mtx_count = 0;
/* Install new VM context. */
if (__predict_true(new_lwp->l_proc->p_vmspace)) {
pmap_activate(new_lwp);
}
/* We remain at IPL_SCHED from mi_switch() - reset it. */
spl0();
LOCKDEBUG_BARRIER(NULL, 0);
KPREEMPT_ENABLE(new_lwp);
if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {
SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
/* For kthreads, acquire kernel lock if not MPSAFE. */
if (__predict_false((new_lwp->l_pflag & LP_MPSAFE) == 0)) {
KERNEL_LOCK(1, new_lwp);
}
}
@ -1059,10 +1063,8 @@ lwp_exit(struct lwp *l)
SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
/*
* Verify that we hold no locks other than the kernel lock.
*/
LOCKDEBUG_BARRIER(&kernel_lock, 0);
/* Verify that we hold no locks */
LOCKDEBUG_BARRIER(NULL, 0);
/*
* If we are the last live LWP in a process, we need to exit the
@ -1193,19 +1195,13 @@ lwp_exit(struct lwp *l)
cpu_lwp_free(l, 0);
if (current) {
pmap_deactivate(l);
/*
* Release the kernel lock, and switch away into
* oblivion.
*/
#ifdef notyet
/* XXXSMP hold in lwp_userret() */
KERNEL_UNLOCK_LAST(l);
#else
KERNEL_UNLOCK_ALL(l, NULL);
#endif
lwp_exit_switchaway(l);
/* For the LW_RUNNING check in lwp_free(). */
membar_exit();
/* Switch away into oblivion. */
lwp_lock(l);
spc_lock(l->l_cpu);
mi_switch(l);
panic("lwp_exit");
}
}
@ -1232,6 +1228,7 @@ lwp_free(struct lwp *l, bool recycle, bool last)
*/
if (p != &proc0 && p->p_nlwps != 1)
(void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
/*
* If this was not the last LWP in the process, then adjust
* counters and unlock.
@ -1268,11 +1265,12 @@ lwp_free(struct lwp *l, bool recycle, bool last)
* all locks to avoid deadlock against interrupt handlers on
* the target CPU.
*/
if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
membar_enter();
if ((l->l_flag & LW_RUNNING) != 0) {
int count;
(void)count; /* XXXgcc */
KERNEL_UNLOCK_ALL(curlwp, &count);
while ((l->l_pflag & LP_RUNNING) != 0 ||
while ((l->l_flag & LW_RUNNING) != 0 ||
l->l_cpu->ci_curlwp == l)
SPINLOCK_BACKOFF_HOOK;
KERNEL_LOCK(count, curlwp);
@ -1340,7 +1338,7 @@ lwp_migrate(lwp_t *l, struct cpu_info *tci)
KASSERT(tci != NULL);
/* If LWP is still on the CPU, it must be handled like LSONPROC */
if ((l->l_pflag & LP_RUNNING) != 0) {
if ((l->l_flag & LW_RUNNING) != 0) {
lstat = LSONPROC;
}


@ -1,4 +1,4 @@
/* $NetBSD: kern_resource.c,v 1.183 2019/11/21 17:50:49 ad Exp $ */
/* $NetBSD: kern_resource.c,v 1.184 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 1982, 1986, 1991, 1993
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.183 2019/11/21 17:50:49 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.184 2020/01/08 17:38:42 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -506,7 +506,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp,
LIST_FOREACH(l, &p->p_lwps, l_sibling) {
lwp_lock(l);
bintime_add(&tm, &l->l_rtime);
if ((l->l_pflag & LP_RUNNING) != 0 &&
if ((l->l_flag & LW_RUNNING) != 0 &&
(l->l_pflag & (LP_INTR | LP_TIMEINTR)) != LP_INTR) {
struct bintime diff;
/*


@ -1,4 +1,4 @@
/* $NetBSD: kern_runq.c,v 1.55 2020/01/05 20:26:56 ad Exp $ */
/* $NetBSD: kern_runq.c,v 1.56 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2019 The NetBSD Foundation, Inc.
@ -56,7 +56,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.55 2020/01/05 20:26:56 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.56 2020/01/08 17:38:42 ad Exp $");
#include "opt_dtrace.h"
@ -612,17 +612,6 @@ sched_catchlwp(struct cpu_info *ci)
/* Grab the thread, and move to the local run queue */
sched_dequeue(l);
/*
* If LWP is still context switching, we may need to
* spin-wait before changing its CPU.
*/
if (__predict_false(l->l_ctxswtch != 0)) {
u_int count;
count = SPINLOCK_BACKOFF_MIN;
while (l->l_ctxswtch)
SPINLOCK_BACKOFF(count);
}
l->l_cpu = curci;
lwp_unlock_to(l, curspc->spc_mutex);
sched_enqueue(l);


@ -1,4 +1,4 @@
/* $NetBSD: kern_sleepq.c,v 1.56 2019/12/17 18:08:15 ad Exp $ */
/* $NetBSD: kern_sleepq.c,v 1.57 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.56 2019/12/17 18:08:15 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.57 2020/01/08 17:38:42 ad Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@ -137,7 +137,7 @@ sleepq_remove(sleepq_t *sq, lwp_t *l)
* If the LWP is still on the CPU, mark it as LSONPROC. It may be
* about to call mi_switch(), in which case it will yield.
*/
if ((l->l_pflag & LP_RUNNING) != 0) {
if ((l->l_flag & LW_RUNNING) != 0) {
l->l_stat = LSONPROC;
l->l_slptime = 0;
lwp_setlock(l, spc->spc_lwplock);


@ -1,4 +1,4 @@
/* $NetBSD: kern_softint.c,v 1.56 2019/12/16 22:47:54 ad Exp $ */
/* $NetBSD: kern_softint.c,v 1.57 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 2007, 2008, 2019 The NetBSD Foundation, Inc.
@ -170,7 +170,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.56 2019/12/16 22:47:54 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.57 2020/01/08 17:38:42 ad Exp $");
#include <sys/param.h>
#include <sys/proc.h>
@ -851,7 +851,7 @@ softint_dispatch(lwp_t *pinned, int s)
u_int timing;
lwp_t *l;
KASSERT((pinned->l_pflag & LP_RUNNING) != 0);
KASSERT((pinned->l_flag & LW_RUNNING) != 0);
l = curlwp;
si = l->l_private;
@ -861,7 +861,7 @@ softint_dispatch(lwp_t *pinned, int s)
* the LWP locked, at this point no external agents will want to
* modify the interrupt LWP's state.
*/
timing = (softint_timing ? LP_TIMEINTR : 0);
timing = softint_timing;
l->l_switchto = pinned;
l->l_stat = LSONPROC;
@ -872,8 +872,9 @@ softint_dispatch(lwp_t *pinned, int s)
if (timing) {
binuptime(&l->l_stime);
membar_producer(); /* for calcru */
l->l_pflag |= LP_TIMEINTR;
}
l->l_pflag |= (LP_RUNNING | timing);
l->l_flag |= LW_RUNNING;
softint_execute(si, l, s);
if (timing) {
binuptime(&now);
@ -892,17 +893,18 @@ softint_dispatch(lwp_t *pinned, int s)
* That's not be a problem: we are lowering to level 's' which will
* prevent softint_dispatch() from being reentered at level 's',
* until the priority is finally dropped to IPL_NONE on entry to
* the LWP chosen by lwp_exit_switchaway().
* the LWP chosen by mi_switch().
*/
l->l_stat = LSIDL;
if (l->l_switchto == NULL) {
splx(s);
pmap_deactivate(l);
lwp_exit_switchaway(l);
lwp_lock(l);
spc_lock(l->l_cpu);
mi_switch(l);
/* NOTREACHED */
}
l->l_switchto = NULL;
l->l_pflag &= ~LP_RUNNING;
l->l_flag &= ~LW_RUNNING;
}
#endif /* !__HAVE_FAST_SOFTINTS */


@ -1,4 +1,4 @@
/* $NetBSD: kern_synch.c,v 1.334 2019/12/21 11:54:04 ad Exp $ */
/* $NetBSD: kern_synch.c,v 1.335 2020/01/08 17:38:42 ad Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.334 2019/12/21 11:54:04 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.335 2020/01/08 17:38:42 ad Exp $");
#include "opt_kstack.h"
#include "opt_dtrace.h"
@ -485,13 +485,13 @@ nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
KASSERT(lwp_locked(newl, spc->spc_mutex));
KASSERT(newl->l_cpu == ci);
newl->l_stat = LSONPROC;
newl->l_pflag |= LP_RUNNING;
newl->l_flag |= LW_RUNNING;
lwp_setlock(newl, spc->spc_lwplock);
spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
} else {
newl = ci->ci_data.cpu_idlelwp;
newl->l_stat = LSONPROC;
newl->l_pflag |= LP_RUNNING;
newl->l_flag |= LW_RUNNING;
spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
SPCF_IDLE;
}
@ -512,8 +512,11 @@ nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
/*
* The machine independent parts of context switch.
*
* NOTE: do not use l->l_cpu in this routine. The caller may have enqueued
* itself onto another CPU's run queue, so l->l_cpu may point elsewhere.
* NOTE: l->l_cpu is not changed in this routine, because an LWP never
* changes its own l_cpu (that would screw up curcpu on many ports and could
* cause all kinds of other evil stuff). l_cpu is always changed by some
* other actor, when it's known the LWP is not running (the LW_RUNNING flag
* is checked under lock).
*/
void
mi_switch(lwp_t *l)
@ -534,7 +537,7 @@ mi_switch(lwp_t *l)
binuptime(&bt);
KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
KASSERT((l->l_pflag & LP_RUNNING) != 0);
KASSERT((l->l_flag & LW_RUNNING) != 0);
KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN);
ci = curcpu();
spc = &ci->ci_schedstate;
@ -563,7 +566,7 @@ mi_switch(lwp_t *l)
/* There are pending soft interrupts, so pick one. */
newl = softint_picklwp();
newl->l_stat = LSONPROC;
newl->l_pflag |= LP_RUNNING;
newl->l_flag |= LW_RUNNING;
}
#endif /* !__HAVE_FAST_SOFTINTS */
@ -652,57 +655,48 @@ mi_switch(lwp_t *l)
/* We're down to only one lock, so do debug checks. */
LOCKDEBUG_BARRIER(l->l_mutex, 1);
/*
* Mark that context switch is going to be performed
* for this LWP, to protect it from being switched
* to on another CPU.
*/
KASSERT(l->l_ctxswtch == 0);
l->l_ctxswtch = 1;
/* Count the context switch. */
CPU_COUNT(CPU_COUNT_NSWTCH, 1);
l->l_ncsw++;
if ((l->l_pflag & LP_PREEMPTING) != 0)
if ((l->l_pflag & LP_PREEMPTING) != 0) {
l->l_nivcsw++;
KASSERT((l->l_pflag & LP_RUNNING) != 0);
l->l_pflag &= ~(LP_RUNNING | LP_PREEMPTING);
l->l_pflag &= ~LP_PREEMPTING;
}
/*
* Increase the count of spin-mutexes before the release
* of the last lock - we must remain at IPL_SCHED during
* the context switch.
* of the last lock - we must remain at IPL_SCHED after
* releasing the lock.
*/
KASSERTMSG(ci->ci_mtx_count == -1,
"%s: cpu%u: ci_mtx_count (%d) != -1 "
"(block with spin-mutex held)",
__func__, cpu_index(ci), ci->ci_mtx_count);
oldspl = MUTEX_SPIN_OLDSPL(ci);
ci->ci_mtx_count--;
lwp_unlock(l);
/* Count the context switch on this CPU. */
CPU_COUNT(CPU_COUNT_NSWTCH, 1);
ci->ci_mtx_count = -2;
/* Update status for lwpctl, if present. */
if (l->l_lwpctl != NULL)
l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
/*
* Save old VM context, unless a soft interrupt
* handler is blocking.
*/
if (!returning)
pmap_deactivate(l);
/*
* We may need to spin-wait if 'newl' is still
* context switching on another CPU.
*/
if (__predict_false(newl->l_ctxswtch != 0)) {
u_int count;
count = SPINLOCK_BACKOFF_MIN;
while (newl->l_ctxswtch)
SPINLOCK_BACKOFF(count);
if (l->l_lwpctl != NULL) {
l->l_lwpctl->lc_curcpu = (l->l_stat == LSZOMB ?
LWPCTL_CPU_EXITED : LWPCTL_CPU_NONE);
}
/*
* If curlwp is a soft interrupt LWP, there's nobody on the
* other side to unlock - we're returning into an assembly
* trampoline. Unlock now. This is safe because this is a
* kernel LWP and is bound to current CPU: the worst anyone
* else will do to it, is to put it back onto this CPU's run
* queue (and the CPU is busy here right now!).
*/
if (returning) {
/* Keep IPL_SCHED after this; MD code will fix up. */
l->l_flag &= ~LW_RUNNING;
lwp_unlock(l);
} else {
/* A normal LWP: save old VM context. */
pmap_deactivate(l);
}
membar_enter();
/*
* If DTrace has set the active vtime enum to anything
@ -730,6 +724,17 @@ mi_switch(lwp_t *l)
#endif
KASSERTMSG(l == curlwp, "l %p curlwp %p prevlwp %p",
l, curlwp, prevlwp);
KASSERT(prevlwp != NULL);
KASSERT(l->l_cpu == ci);
KASSERT(ci->ci_mtx_count == -2);
/*
* Immediately mark the previous LWP as no longer running,
* and unlock it. We'll still be at IPL_SCHED afterwards.
*/
KASSERT((prevlwp->l_flag & LW_RUNNING) != 0);
prevlwp->l_flag &= ~LW_RUNNING;
lwp_unlock(prevlwp);
/*
* Switched away - we have new curlwp.
@ -738,14 +743,6 @@ mi_switch(lwp_t *l)
pmap_activate(l);
pcu_switchpoint(l);
if (prevlwp != NULL) {
/* Normalize the count of the spin-mutexes */
ci->ci_mtx_count++;
/* Unmark the state of context switch */
membar_exit();
prevlwp->l_ctxswtch = 0;
}
/* Update status for lwpctl, if present. */
if (l->l_lwpctl != NULL) {
l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
@ -753,17 +750,18 @@ mi_switch(lwp_t *l)
}
/*
* Note that, unless the caller disabled preemption, we can
* be preempted at any time after this splx().
* Normalize the spin mutex count and restore the previous
* SPL. Note that, unless the caller disabled preemption,
* we can be preempted at any time after this splx().
*/
KASSERT(l->l_cpu == ci);
KASSERT(ci->ci_mtx_count == -1);
ci->ci_mtx_count = 0;
splx(oldspl);
} else {
/* Nothing to do - just unlock and return. */
mutex_spin_exit(spc->spc_mutex);
l->l_pflag &= ~LP_PREEMPTING;
/* We're down to only one lock, so do debug checks. */
LOCKDEBUG_BARRIER(l->l_mutex, 1);
lwp_unlock(l);
}
@ -774,105 +772,6 @@ mi_switch(lwp_t *l)
LOCKDEBUG_BARRIER(NULL, 1);
}
/*
* The machine independent parts of context switch to oblivion.
* Does not return. Call with the LWP unlocked.
*/
void
lwp_exit_switchaway(lwp_t *l)
{
struct cpu_info *ci;
struct lwp *newl;
struct bintime bt;
ci = l->l_cpu;
KASSERT(kpreempt_disabled());
KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL);
KASSERT(ci == curcpu());
LOCKDEBUG_BARRIER(NULL, 0);
kstack_check_magic(l);
/* Count time spent in current system call */
SYSCALL_TIME_SLEEP(l);
binuptime(&bt);
updatertime(l, &bt);
/* Must stay at IPL_SCHED even after releasing run queue lock. */
(void)splsched();
/*
* Let sched_nextlwp() select the LWP to run the CPU next.
* If no LWP is runnable, select the idle LWP.
*
* Note that spc_lwplock might not necessary be held, and
* new thread would be unlocked after setting the LWP-lock.
*/
spc_lock(ci);
#ifndef __HAVE_FAST_SOFTINTS
if (ci->ci_data.cpu_softints != 0) {
/* There are pending soft interrupts, so pick one. */
newl = softint_picklwp();
newl->l_stat = LSONPROC;
newl->l_pflag |= LP_RUNNING;
} else
#endif /* !__HAVE_FAST_SOFTINTS */
{
newl = nextlwp(ci, &ci->ci_schedstate);
}
/* Update the new LWP's start time. */
newl->l_stime = bt;
l->l_pflag &= ~LP_RUNNING;
/*
* ci_curlwp changes when a fast soft interrupt occurs.
* We use ci_onproc to keep track of which kernel or
* user thread is running 'underneath' the software
* interrupt. This is important for time accounting,
* itimers and forcing user threads to preempt (aston).
*/
ci->ci_onproc = newl;
/* Unlock the run queue. */
spc_unlock(ci);
/* Count the context switch on this CPU. */
CPU_COUNT(CPU_COUNT_NSWTCH, 1);
/* Update status for lwpctl, if present. */
if (l->l_lwpctl != NULL)
l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
/*
* We may need to spin-wait if 'newl' is still
* context switching on another CPU.
*/
if (__predict_false(newl->l_ctxswtch != 0)) {
u_int count;
count = SPINLOCK_BACKOFF_MIN;
while (newl->l_ctxswtch)
SPINLOCK_BACKOFF(count);
}
membar_enter();
/*
* If DTrace has set the active vtime enum to anything
* other than INACTIVE (0), then it should have set the
* function to call.
*/
if (__predict_false(dtrace_vtime_active)) {
(*dtrace_vtime_switch_func)(newl);
}
/* Switch to the new LWP.. */
(void)cpu_switchto(NULL, newl, false);
for (;;) continue; /* XXX: convince gcc about "noreturn" */
/* NOTREACHED */
}
/*
* setrunnable: change LWP state to be runnable, placing it on the run queue.
*
@ -931,7 +830,7 @@ setrunnable(struct lwp *l)
* If the LWP is still on the CPU, mark it as LSONPROC. It may be
* about to call mi_switch(), in which case it will yield.
*/
if ((l->l_pflag & LP_RUNNING) != 0) {
if ((l->l_flag & LW_RUNNING) != 0) {
l->l_stat = LSONPROC;
l->l_slptime = 0;
lwp_unlock(l);


@ -1,4 +1,4 @@
/* $NetBSD: lwproc.c,v 1.42 2019/05/17 03:34:26 ozaki-r Exp $ */
/* $NetBSD: lwproc.c,v 1.43 2020/01/08 17:38:42 ad Exp $ */
/*
* Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
@ -28,7 +28,7 @@
#define RUMP__CURLWP_PRIVATE
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.42 2019/05/17 03:34:26 ozaki-r Exp $");
__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.43 2020/01/08 17:38:42 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@ -476,12 +476,12 @@ rump_lwproc_switch(struct lwp *newlwp)
KASSERT(!(l->l_flag & LW_WEXIT) || newlwp);
if (__predict_false(newlwp && (newlwp->l_pflag & LP_RUNNING)))
if (__predict_false(newlwp && (newlwp->l_flag & LW_RUNNING)))
panic("lwp %p (%d:%d) already running",
newlwp, newlwp->l_proc->p_pid, newlwp->l_lid);
if (newlwp == NULL) {
l->l_pflag &= ~LP_RUNNING;
l->l_flag &= ~LW_RUNNING;
l->l_flag |= LW_RUMP_CLEAR;
return;
}
@ -496,7 +496,7 @@ rump_lwproc_switch(struct lwp *newlwp)
newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
newlwp->l_mutex = l->l_mutex;
newlwp->l_pflag |= LP_RUNNING;
newlwp->l_flag |= LW_RUNNING;
lwproc_curlwpop(RUMPUSER_LWP_SET, newlwp);
curcpu()->ci_curlwp = newlwp;
@ -513,7 +513,7 @@ rump_lwproc_switch(struct lwp *newlwp)
mutex_exit(newlwp->l_proc->p_lock);
l->l_mutex = &unruntime_lock;
l->l_pflag &= ~LP_RUNNING;
l->l_flag &= ~LW_RUNNING;
l->l_flag &= ~LW_PENDSIG;
l->l_stat = LSRUN;


@ -1,4 +1,4 @@
/* $NetBSD: scheduler.c,v 1.48 2019/12/16 22:47:55 ad Exp $ */
/* $NetBSD: scheduler.c,v 1.49 2020/01/08 17:38:42 ad Exp $ */
/*
* Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.48 2019/12/16 22:47:55 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.49 2020/01/08 17:38:42 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@ -409,7 +409,7 @@ rump_unschedule()
/* release lwp0 */
rump_unschedule_cpu(&lwp0);
lwp0.l_mutex = &unruntime_lock;
lwp0.l_pflag &= ~LP_RUNNING;
lwp0.l_flag &= ~LW_RUNNING;
lwp0rele();
rump_lwproc_curlwp_clear(&lwp0);


@ -1,4 +1,4 @@
/* $NetBSD: lwp.h,v 1.192 2019/12/01 15:34:47 ad Exp $ */
/* $NetBSD: lwp.h,v 1.193 2020/01/08 17:38:43 ad Exp $ */
/*
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019
@ -90,7 +90,6 @@ struct lwp {
} l_sched;
struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
kmutex_t * volatile l_mutex; /* l: ptr to mutex on sched state */
int l_ctxswtch; /* l: performing a context switch */
void *l_addr; /* l: PCB address; use lwp_getpcb() */
struct mdlwp l_md; /* l: machine-dependent fields. */
int l_flag; /* l: misc flag values */
@ -252,6 +251,7 @@ extern int maxlwp __read_mostly; /* max number of lwps */
#define LW_CANCELLED 0x02000000 /* tsleep should not sleep */
#define LW_WREBOOT 0x08000000 /* System is rebooting, please suspend */
#define LW_UNPARKED 0x10000000 /* Unpark op pending */
#define LW_RUNNING 0x20000000 /* Active on a CPU */
#define LW_RUMP_CLEAR 0x40000000 /* Clear curlwp in RUMP scheduler */
#define LW_RUMP_QEXIT 0x80000000 /* LWP should exit ASAP */
@ -268,7 +268,6 @@ extern int maxlwp __read_mostly; /* max number of lwps */
#define LP_SINGLESTEP 0x00000400 /* Single step thread in ptrace(2) */
#define LP_TIMEINTR 0x00010000 /* Time this soft interrupt */
#define LP_PREEMPTING 0x00020000 /* mi_switch called involuntarily */
#define LP_RUNNING 0x20000000 /* Active on a CPU */
#define LP_BOUND 0x80000000 /* Bound to a CPU */
/* The third set is kept in l_prflag. */
@ -341,7 +340,6 @@ void lwp_continue(lwp_t *);
void lwp_unsleep(lwp_t *, bool);
void lwp_unstop(lwp_t *);
void lwp_exit(lwp_t *);
void lwp_exit_switchaway(lwp_t *) __dead;
int lwp_suspend(lwp_t *, lwp_t *);
int lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
void lwp_start(lwp_t *, int);


@ -1,4 +1,4 @@
/* $NetBSD: t_lwproc.c,v 1.9 2017/01/13 21:30:43 christos Exp $ */
/* $NetBSD: t_lwproc.c,v 1.10 2020/01/08 17:38:43 ad Exp $ */
/*
* Copyright (c) 2010 The NetBSD Foundation, Inc.
@ -239,7 +239,7 @@ ATF_TC_BODY(nullswitch, tc)
RZ(rump_pub_lwproc_newlwp(0));
l = rump_pub_lwproc_curlwp();
rump_pub_lwproc_switch(NULL);
/* if remains LP_RUNNING, next call will panic */
/* if remains LW_RUNNING, next call will panic */
rump_pub_lwproc_switch(l);
}