Eliminate l->l_ncsw and l->l_nivcsw. From memory, I think they were added
before we had per-LWP struct rusage; the same is now tracked there.
ad 2023-10-04 20:28:05 +00:00
parent 7521c57073
commit a355028fa4
24 changed files with 133 additions and 150 deletions
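
In short, the counts move into the per-LWP struct rusage and lwp_pctr() is rederived from them. A condensed before/after sketch, drawn from the sys/lwp.h, kern_synch.c and kern_lwp.c hunks below:

    /* Before: struct lwp carried its own counters. */
    volatile uint64_t l_ncsw;       /* l: total context switches */
    volatile uint64_t l_nivcsw;     /* l: involuntary context switches */

    /* After: mi_switch() bumps l->l_ru.ru_nvcsw or l->l_ru.ru_nivcsw, and */
    long
    lwp_pctr(void)
    {
            return curlwp->l_ru.ru_nvcsw + curlwp->l_ru.ru_nivcsw;
    }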

View File

@ -1,7 +1,7 @@
/* $NetBSD: trap.c,v 1.49 2023/07/16 21:36:40 riastradh Exp $ */
/* $NetBSD: trap.c,v 1.50 2023/10/04 20:28:05 ad Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
* Copyright (c) 2014, 2023 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -31,7 +31,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.49 2023/07/16 21:36:40 riastradh Exp $");
__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.50 2023/10/04 20:28:05 ad Exp $");
#include "opt_arm_intr_impl.h"
#include "opt_compat_netbsd32.h"
@ -1034,8 +1034,8 @@ do_trapsignal1(
bool
cpu_intr_p(void)
{
uint64_t ncsw;
int idepth;
long pctr;
lwp_t *l;
#ifdef __HAVE_PIC_FAST_SOFTINTS
@ -1050,11 +1050,9 @@ cpu_intr_p(void)
return false;
}
do {
ncsw = l->l_ncsw;
__insn_barrier();
pctr = lwp_pctr();
idepth = l->l_cpu->ci_intr_depth;
__insn_barrier();
} while (__predict_false(ncsw != l->l_ncsw));
} while (__predict_false(pctr != lwp_pctr()));
return idepth > 0;
}
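
Since the flattened diff interleaves removed and added lines, here is the resulting loop in isolation: a minimal sketch of the lwp_pctr()-based retry pattern the MD cpu_intr_p() implementations now share (this is the aarch64 shape; other ports read ci_idepth, and the fast-softint handling is omitted):

    long pctr;
    int idepth;
    lwp_t *l = curlwp;

    do {
            pctr = lwp_pctr();                 /* changes on every context switch */
            idepth = l->l_cpu->ci_intr_depth;  /* per-CPU interrupt nesting depth */
    } while (__predict_false(pctr != lwp_pctr()));  /* preempted/migrated: retry */
    return idepth > 0;

The explicit __insn_barrier() pairs in the old code are no longer needed here, presumably because lwp_pctr() is an opaque function call that the compiler cannot hoist or cache across.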

View File

@ -1,7 +1,7 @@
/* $NetBSD: cpufunc.S,v 1.65 2020/11/30 17:02:27 bouyer Exp $ */
/* $NetBSD: cpufunc.S,v 1.66 2023/10/04 20:28:05 ad Exp $ */
/*
* Copyright (c) 1998, 2007, 2008, 2020 The NetBSD Foundation, Inc.
* Copyright (c) 1998, 2007, 2008, 2020, 2023 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -234,12 +234,13 @@ END(x86_hotpatch)
#define CPU_COUNTER_FENCE(counter, fence) \
ENTRY(cpu_ ## counter ## _ ## fence) ;\
movq CPUVAR(CURLWP), %rcx ;\
leaq L_RU+RU_NIVCSW(%rcx), %rcx ;\
1: ;\
movq L_NCSW(%rcx), %rdi ;\
movq (%rcx), %rdi ;\
SERIALIZE_ ## fence ;\
rdtsc ;\
ADD_ ## counter ;\
cmpq %rdi, L_NCSW(%rcx) ;\
cmpq %rdi, (%rcx) ;\
jne 2f ;\
KMSAN_INIT_RET(RSIZE_ ## counter) ;\
ret ;\
@ -256,13 +257,14 @@ CPU_COUNTER_FENCE(counter32, mfence)
ENTRY(cpu_ ## counter ## _cpuid) ;\
movq %rbx, %r9 ;\
movq CPUVAR(CURLWP), %r8 ;\
leaq L_RU+RU_NIVCSW(%r8), %r8 ;\
1: ;\
movq L_NCSW(%r8), %rdi ;\
movq (%r8), %rdi ;\
xor %eax, %eax ;\
cpuid ;\
rdtsc ;\
ADD_ ## counter ;\
cmpq %rdi, L_NCSW(%r8) ;\
cmpq %rdi, (%r8) ;\
jne 2f ;\
movq %r9, %rbx ;\
KMSAN_INIT_RET(RSIZE_ ## counter) ;\
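
In C terms, the CPU_COUNTER_FENCE/cpuid macros now work roughly like the sketch below: the TSC is read between two reads of l->l_ru.ru_nivcsw, and the read is retried if the involuntary-switch count changed, presumably because this straight-line code can only be descheduled involuntarily. This is an illustrative rendering, not actual kernel code; cpu_counter_sketch and read_tsc are hypothetical names.

    uint64_t
    cpu_counter_sketch(void)
    {
            volatile long *nivcsw = &curlwp->l_ru.ru_nivcsw;
            long snap;
            uint64_t tsc;

            do {
                    snap = *nivcsw;     /* movq (%rcx), %rdi */
                    /* serializing fence: lfence, mfence or cpuid */
                    tsc = read_tsc();   /* rdtsc */
            } while (snap != *nivcsw);  /* preempted in between: retry */
            return tsc;
    }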

View File

@ -1,7 +1,7 @@
# $NetBSD: genassym.cf,v 1.96 2023/09/23 14:41:15 ad Exp $
# $NetBSD: genassym.cf,v 1.97 2023/10/04 20:28:05 ad Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
# Copyright (c) 1998, 2006, 2007, 2008, 2023 The NetBSD Foundation, Inc.
# All rights reserved.
#
# This code is derived from software contributed to The NetBSD Foundation
@ -157,13 +157,15 @@ define L_PCB offsetof(struct lwp, l_addr)
define L_CPU offsetof(struct lwp, l_cpu)
define L_FLAG offsetof(struct lwp, l_flag)
define L_PROC offsetof(struct lwp, l_proc)
define L_NCSW offsetof(struct lwp, l_ncsw)
define L_RU offsetof(struct lwp, l_ru)
define L_NOPREEMPT offsetof(struct lwp, l_nopreempt)
define L_DOPREEMPT offsetof(struct lwp, l_dopreempt)
define L_MD_REGS offsetof(struct lwp, l_md.md_regs)
define L_MD_FLAGS offsetof(struct lwp, l_md.md_flags)
define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending)
define RU_NIVCSW offsetof(struct rusage, ru_nivcsw)
define LW_SYSTEM LW_SYSTEM
define MDL_IRET MDL_IRET
define MDL_COMPAT32 MDL_COMPAT32
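
With L_NCSW gone, the assembly reaches the counter through two composed offsets: leaq L_RU+RU_NIVCSW(%rcx) yields &l->l_ru.ru_nivcsw. A tiny standalone illustration of that offset arithmetic, using hypothetical stand-in structs rather than the real kernel definitions:

    #include <stddef.h>

    struct ru_like  { long ru_nvcsw, ru_nivcsw; };         /* stand-in for struct rusage */
    struct lwp_like { int l_flag; struct ru_like l_ru; };  /* stand-in for struct lwp */

    /* "L_RU" plus "RU_NIVCSW" is the offset of the nested field. */
    _Static_assert(offsetof(struct lwp_like, l_ru) +
        offsetof(struct ru_like, ru_nivcsw) ==
        offsetof(struct lwp_like, l_ru.ru_nivcsw),
        "L_RU + RU_NIVCSW addresses l_ru.ru_nivcsw");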

View File

@ -1,4 +1,4 @@
/* $NetBSD: arm_machdep.c,v 1.67 2021/02/21 08:47:13 skrll Exp $ */
/* $NetBSD: arm_machdep.c,v 1.68 2023/10/04 20:28:05 ad Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@ -80,7 +80,7 @@
#include <sys/param.h>
__KERNEL_RCSID(0, "$NetBSD: arm_machdep.c,v 1.67 2021/02/21 08:47:13 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: arm_machdep.c,v 1.68 2023/10/04 20:28:05 ad Exp $");
#include <sys/atomic.h>
#include <sys/cpu.h>
@ -284,20 +284,18 @@ cpu_intr_p(void)
#ifdef __HAVE_PIC_FAST_SOFTINTS
int cpl;
#endif
uint64_t ncsw;
int idepth;
long pctr;
lwp_t *l;
l = curlwp;
do {
ncsw = l->l_ncsw;
__insn_barrier();
pctr = lwp_pctr();
idepth = l->l_cpu->ci_intr_depth;
#ifdef __HAVE_PIC_FAST_SOFTINTS
cpl = l->l_cpu->ci_cpl;
#endif
__insn_barrier();
} while (__predict_false(ncsw != l->l_ncsw));
} while (__predict_false(pctr != lwp_pctr()));
#ifdef __HAVE_PIC_FAST_SOFTINTS
if (cpl < IPL_VM)

View File

@ -1,7 +1,7 @@
/* $NetBSD: cpufunc.S,v 1.49 2020/07/19 07:35:08 maxv Exp $ */
/* $NetBSD: cpufunc.S,v 1.50 2023/10/04 20:28:05 ad Exp $ */
/*-
* Copyright (c) 1998, 2007, 2020 The NetBSD Foundation, Inc.
* Copyright (c) 1998, 2007, 2020, 2023 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -38,7 +38,7 @@
#include <sys/errno.h>
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.49 2020/07/19 07:35:08 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.50 2023/10/04 20:28:05 ad Exp $");
#include "opt_xen.h"
@ -174,12 +174,13 @@ END(msr_onfault)
ENTRY(cpu_ ## counter ## _ ## fence) ;\
pushl %ebx ;\
movl CPUVAR(CURLWP), %ecx ;\
leal L_RU+RU_NIVCSW(%ecx), %ecx ;\
1: ;\
movl L_NCSW(%ecx), %ebx ;\
movl (%ecx), %ebx ;\
SERIALIZE_ ## fence ;\
rdtsc ;\
ADD_ ## counter ;\
cmpl %ebx, L_NCSW(%ecx) ;\
cmpl %ebx, (%ecx) ;\
jne 2f ;\
popl %ebx ;\
ret ;\
@ -197,15 +198,16 @@ ENTRY(cpu_ ## counter ## _cpuid) ;\
pushl %ebx ;\
pushl %esi ;\
movl CPUVAR(CURLWP), %ecx ;\
leal L_RU+RU_NIVCSW(%ecx), %ecx ;\
1: ;\
movl L_NCSW(%ecx), %esi ;\
movl (%ecx), %esi ;\
pushl %ecx ;\
xor %eax, %eax ;\
cpuid ;\
rdtsc ;\
ADD_ ## counter ;\
popl %ecx ;\
cmpl %esi, L_NCSW(%ecx) ;\
cmpl %esi, (%ecx) ;\
jne 2f ;\
popl %esi ;\
popl %ebx ;\

View File

@ -1,7 +1,7 @@
# $NetBSD: genassym.cf,v 1.134 2023/09/23 14:41:15 ad Exp $
# $NetBSD: genassym.cf,v 1.135 2023/10/04 20:28:05 ad Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
# Copyright (c) 1998, 2006, 2007, 2008, 2023 The NetBSD Foundation, Inc.
# All rights reserved.
#
# This code is derived from software contributed to The NetBSD Foundation
@ -166,13 +166,15 @@ define L_PCB offsetof(struct lwp, l_addr)
define L_CPU offsetof(struct lwp, l_cpu)
define L_FLAG offsetof(struct lwp, l_flag)
define L_PROC offsetof(struct lwp, l_proc)
define L_NCSW offsetof(struct lwp, l_ncsw)
define L_RU offsetof(struct lwp, l_ru)
define L_NOPREEMPT offsetof(struct lwp, l_nopreempt)
define L_DOPREEMPT offsetof(struct lwp, l_dopreempt)
define L_MD_REGS offsetof(struct lwp, l_md.md_regs)
define L_MD_FLAGS offsetof(struct lwp, l_md.md_flags)
define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending)
define RU_NIVCSW offsetof(struct rusage, ru_nivcsw)
define LW_SYSTEM LW_SYSTEM
define MDL_FPU_IN_CPU MDL_FPU_IN_CPU

View File

@ -1,7 +1,7 @@
/* $NetBSD: cpu_subr.c,v 1.63 2023/02/26 07:13:54 skrll Exp $ */
/* $NetBSD: cpu_subr.c,v 1.64 2023/10/04 20:28:05 ad Exp $ */
/*-
* Copyright (c) 2010, 2019 The NetBSD Foundation, Inc.
* Copyright (c) 2010, 2019, 2023 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.63 2023/02/26 07:13:54 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.64 2023/10/04 20:28:05 ad Exp $");
#include "opt_cputype.h"
#include "opt_ddb.h"
@ -625,17 +625,15 @@ cpu_idle(void)
bool
cpu_intr_p(void)
{
uint64_t ncsw;
int idepth;
long pctr;
lwp_t *l;
l = curlwp;
do {
ncsw = l->l_ncsw;
__insn_barrier();
pctr = lwp_pctr();
idepth = l->l_cpu->ci_idepth;
__insn_barrier();
} while (__predict_false(ncsw != l->l_ncsw));
} while (__predict_false(pctr != lwp_pctr()));
return idepth != 0;
}

View File

@ -1,7 +1,7 @@
/* $NetBSD: machdep.c,v 1.306 2022/10/26 23:38:08 riastradh Exp $ */
/* $NetBSD: machdep.c,v 1.307 2023/10/04 20:28:05 ad Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2019 The NetBSD Foundation, Inc.
* Copyright (c) 1996, 1997, 1998, 2019, 2023 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.306 2022/10/26 23:38:08 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.307 2023/10/04 20:28:05 ad Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@ -2668,17 +2668,15 @@ cpu_signotify(struct lwp *l)
bool
cpu_intr_p(void)
{
uint64_t ncsw;
int idepth;
long pctr;
lwp_t *l;
l = curlwp;
do {
ncsw = l->l_ncsw;
__insn_barrier();
pctr = lwp_pctr();
idepth = l->l_cpu->ci_idepth;
__insn_barrier();
} while (__predict_false(ncsw != l->l_ncsw));
} while (__predict_false(pctr != lwp_pctr()));
return idepth >= 0;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.83 2019/12/03 15:20:59 riastradh Exp $ */
/* $NetBSD: cpu.c,v 1.84 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 2007 Jared D. McNeill <jmcneill@invisible.ca>
@ -30,7 +30,7 @@
#include "opt_hz.h"
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.83 2019/12/03 15:20:59 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.84 2023/10/04 20:28:06 ad Exp $");
#include <sys/param.h>
#include <sys/conf.h>
@ -528,17 +528,15 @@ cpu_rootconf(void)
bool
cpu_intr_p(void)
{
uint64_t ncsw;
int idepth;
long pctr;
lwp_t *l;
l = curlwp;
do {
ncsw = l->l_ncsw;
__insn_barrier();
pctr = lwp_pctr();
idepth = l->l_cpu->ci_idepth;
__insn_barrier();
} while (__predict_false(ncsw != l->l_ncsw));
} while (__predict_false(pctr != lwp_pctr()));
return idepth >= 0;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap_private.h,v 1.4 2022/09/24 11:05:18 riastradh Exp $ */
/* $NetBSD: pmap_private.h,v 1.5 2023/10/04 20:28:06 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -197,7 +197,7 @@ struct pmap {
of pmap */
kcpuset_t *pm_xen_ptp_cpus; /* mask of CPUs which have this pmap's
ptp mapped */
uint64_t pm_ncsw; /* for assertions */
long pm_pctr; /* for assertions */
LIST_HEAD(,vm_page) pm_gc_ptp; /* PTPs queued for free */
/* Used by NVMM and Xen */

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.425 2023/07/26 21:45:28 riastradh Exp $ */
/* $NetBSD: pmap.c,v 1.426 2023/10/04 20:28:06 ad Exp $ */
/*
* Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@ -130,7 +130,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.425 2023/07/26 21:45:28 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.426 2023/10/04 20:28:06 ad Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@ -822,7 +822,7 @@ pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2, pd_entry_t **ptepp,
}
KASSERT(ci->ci_tlbstate == TLBSTATE_VALID);
#ifdef DIAGNOSTIC
pmap->pm_ncsw = lwp_pctr();
pmap->pm_pctr = lwp_pctr();
#endif
*ptepp = PTE_BASE;
@ -861,7 +861,7 @@ pmap_unmap_ptes(struct pmap *pmap, struct pmap * pmap2)
ci = l->l_cpu;
KASSERT(mutex_owned(&pmap->pm_lock));
KASSERT(pmap->pm_ncsw == lwp_pctr());
KASSERT(pmap->pm_pctr == lwp_pctr());
#if defined(XENPV) && defined(__x86_64__)
KASSERT(ci->ci_normal_pdes[PTP_LEVELS - 2] != L4_BASE);
@ -3573,7 +3573,7 @@ pmap_load(void)
struct cpu_info *ci;
struct pmap *pmap, *oldpmap;
struct lwp *l;
uint64_t ncsw;
uint64_t pctr;
int ilevel __diagused;
u_long psl __diagused;
@ -3585,7 +3585,7 @@ pmap_load(void)
return;
}
l = ci->ci_curlwp;
ncsw = l->l_ncsw;
pctr = lwp_pctr();
__insn_barrier();
/* should be able to take ipis. */
@ -3624,7 +3624,7 @@ pmap_load(void)
pmap_destroy(oldpmap);
__insn_barrier();
if (l->l_ncsw != ncsw) {
if (lwp_pctr() != pctr) {
goto retry;
}

View File

@ -1,8 +1,8 @@
/* $NetBSD: x86_machdep.c,v 1.153 2022/12/23 16:05:44 bouyer Exp $ */
/* $NetBSD: x86_machdep.c,v 1.154 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
* Copyright (c) 2005, 2008, 2009, 2019 The NetBSD Foundation, Inc.
* Copyright (c) 2005, 2008, 2009, 2019, 2023 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_machdep.c,v 1.153 2022/12/23 16:05:44 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: x86_machdep.c,v 1.154 2023/10/04 20:28:06 ad Exp $");
#include "opt_modular.h"
#include "opt_physmem.h"
@ -380,8 +380,8 @@ cpu_need_proftick(struct lwp *l)
bool
cpu_intr_p(void)
{
uint64_t ncsw;
int idepth;
long pctr;
lwp_t *l;
l = curlwp;
@ -390,11 +390,9 @@ cpu_intr_p(void)
return false;
}
do {
ncsw = l->l_ncsw;
__insn_barrier();
pctr = lwp_pctr();
idepth = l->l_cpu->ci_idepth;
__insn_barrier();
} while (__predict_false(ncsw != l->l_ncsw));
} while (__predict_false(pctr != lwp_pctr()));
return idepth >= 0;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_cctr.c,v 1.12 2020/10/10 18:18:04 thorpej Exp $ */
/* $NetBSD: kern_cctr.c,v 1.13 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 2020 Jason R. Thorpe
@ -75,7 +75,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cctr.c,v 1.12 2020/10/10 18:18:04 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_cctr.c,v 1.13 2023/10/04 20:28:06 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@ -184,20 +184,14 @@ u_int
cc_get_timecount(struct timecounter *tc)
{
#if defined(MULTIPROCESSOR)
int64_t rcc, ncsw;
int64_t rcc;
long pctr;
retry:
ncsw = curlwp->l_ncsw;
__insn_barrier();
/* N.B. the delta is always 0 on the primary. */
rcc = cpu_counter32() - curcpu()->ci_cc.cc_delta;
__insn_barrier();
if (ncsw != curlwp->l_ncsw) {
/* Was preempted */
goto retry;
}
do {
pctr = lwp_pctr();
/* N.B. the delta is always 0 on the primary. */
rcc = cpu_counter32() - curcpu()->ci_cc.cc_delta;
} while (pctr != lwp_pctr());
return rcc;
#else

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_entropy.c,v 1.65 2023/08/05 11:21:24 riastradh Exp $ */
/* $NetBSD: kern_entropy.c,v 1.66 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 2019 The NetBSD Foundation, Inc.
@ -77,7 +77,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.65 2023/08/05 11:21:24 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.66 2023/10/04 20:28:06 ad Exp $");
#include <sys/param.h>
#include <sys/types.h>
@ -156,7 +156,7 @@ struct entropy_cpu {
*/
struct entropy_cpu_lock {
int ecl_s;
uint64_t ecl_ncsw;
long ecl_pctr;
};
/*
@ -541,7 +541,7 @@ entropy_cpu_get(struct entropy_cpu_lock *lock)
lock->ecl_s = splsoftserial();
KASSERT(!ec->ec_locked);
ec->ec_locked = true;
lock->ecl_ncsw = curlwp->l_ncsw;
lock->ecl_pctr = lwp_pctr();
__insn_barrier();
return ec;
@ -555,7 +555,7 @@ entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
KASSERT(ec->ec_locked);
__insn_barrier();
KASSERT(lock->ecl_ncsw == curlwp->l_ncsw);
KASSERT(lock->ecl_pctr == lwp_pctr());
ec->ec_locked = false;
splx(lock->ecl_s);
percpu_putref(entropy_percpu);
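
The per-CPU entropy lock now snapshots lwp_pctr() when taken and asserts it unchanged when released, i.e. it verifies that no context switch happened while the per-CPU entropy state was in use. Condensed from the two hunks above:

    /* entropy_cpu_get() */
    lock->ecl_s = splsoftserial();          /* raise IPL for the per-CPU state */
    lock->ecl_pctr = lwp_pctr();            /* snapshot the switch counter */

    /* ...caller works on the per-CPU entropy pool... */

    /* entropy_cpu_put() */
    KASSERT(lock->ecl_pctr == lwp_pctr());  /* no context switch in between */
    splx(lock->ecl_s);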

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_exit.c,v 1.293 2021/12/05 08:13:12 msaitoh Exp $ */
/* $NetBSD: kern_exit.c,v 1.294 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 1998, 1999, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.293 2021/12/05 08:13:12 msaitoh Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.294 2023/10/04 20:28:06 ad Exp $");
#include "opt_ktrace.h"
#include "opt_dtrace.h"
@ -1194,8 +1194,6 @@ proc_free(struct proc *p, struct wrusage *wru)
* This cannot be done any earlier else it might get done twice.
*/
l = LIST_FIRST(&p->p_lwps);
p->p_stats->p_ru.ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
p->p_stats->p_ru.ru_nivcsw += l->l_nivcsw;
ruadd(&p->p_stats->p_ru, &l->l_ru);
ruadd(&p->p_stats->p_ru, &p->p_stats->p_cru);
ruadd(&parent->p_stats->p_cru, &p->p_stats->p_ru);

View File

@ -1,7 +1,8 @@
/* $NetBSD: kern_lock.c,v 1.186 2023/07/07 18:02:52 riastradh Exp $ */
/* $NetBSD: kern_lock.c,v 1.187 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
* Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020, 2023
* The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -31,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.186 2023/07/07 18:02:52 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.187 2023/10/04 20:28:06 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_lockdebug.h"
@ -67,9 +68,8 @@ __cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
void
assert_sleepable(void)
{
struct lwp *l = curlwp;
const char *reason;
uint64_t ncsw;
long pctr;
bool idle;
if (__predict_false(panicstr != NULL)) {
@ -83,11 +83,9 @@ assert_sleepable(void)
* routine may be called in delicate situations.
*/
do {
ncsw = l->l_ncsw;
__insn_barrier();
pctr = lwp_pctr();
idle = CURCPU_IDLE_P();
__insn_barrier();
} while (__predict_false(ncsw != l->l_ncsw));
} while (__predict_false(pctr != lwp_pctr()));
reason = NULL;
if (__predict_false(idle) && !cold) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_lwp.c,v 1.257 2023/09/23 20:23:07 ad Exp $ */
/* $NetBSD: kern_lwp.c,v 1.258 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@ -217,7 +217,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.257 2023/09/23 20:23:07 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.258 2023/10/04 20:28:06 ad Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@ -1297,8 +1297,6 @@ lwp_free(struct lwp *l, bool recycle, bool last)
p->p_pctcpu += l->l_pctcpu;
ru = &p->p_stats->p_ru;
ruadd(ru, &l->l_ru);
ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
ru->ru_nivcsw += l->l_nivcsw;
LIST_REMOVE(l, l_sibling);
p->p_nlwps--;
p->p_nzlwps--;
@ -2139,11 +2137,11 @@ lwp_ctl_exit(void)
* preemption across operations that can tolerate preemption without
* crashing, but which may generate incorrect results if preempted.
*/
uint64_t
long
lwp_pctr(void)
{
return curlwp->l_ncsw;
return curlwp->l_ru.ru_nvcsw + curlwp->l_ru.ru_nivcsw;
}
/*

View File

@ -1,7 +1,8 @@
/* $NetBSD: kern_proc.c,v 1.271 2023/09/04 09:13:23 simonb Exp $ */
/* $NetBSD: kern_proc.c,v 1.272 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 1999, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
* Copyright (c) 1999, 2006, 2007, 2008, 2020, 2023
* The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -62,7 +63,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.271 2023/09/04 09:13:23 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.272 2023/10/04 20:28:06 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_kstack.h"
@ -2755,7 +2756,7 @@ void
fill_kproc2(struct proc *p, struct kinfo_proc2 *ki, bool zombie, bool allowaddr)
{
struct tty *tp;
struct lwp *l, *l2;
struct lwp *l;
struct timeval ut, st, rt;
sigset_t ss1, ss2;
struct rusage ru;
@ -2909,13 +2910,9 @@ fill_kproc2(struct proc *p, struct kinfo_proc2 *ki, bool zombie, bool allowaddr)
ki->p_ustime_usec = st.tv_usec;
memcpy(&ru, &p->p_stats->p_ru, sizeof(ru));
ki->p_uru_nvcsw = 0;
ki->p_uru_nivcsw = 0;
LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
ki->p_uru_nvcsw += (l2->l_ncsw - l2->l_nivcsw);
ki->p_uru_nivcsw += l2->l_nivcsw;
ruadd(&ru, &l2->l_ru);
}
rulwps(p, &ru);
ki->p_uru_nvcsw = ru.ru_nvcsw;
ki->p_uru_nivcsw = ru.ru_nivcsw;
ki->p_uru_maxrss = ru.ru_maxrss;
ki->p_uru_ixrss = ru.ru_ixrss;
ki->p_uru_idrss = ru.ru_idrss;

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_resource.c,v 1.194 2023/09/23 18:21:11 ad Exp $ */
/* $NetBSD: kern_resource.c,v 1.195 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 1982, 1986, 1991, 1993
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.194 2023/09/23 18:21:11 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.195 2023/10/04 20:28:06 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -665,8 +665,6 @@ rulwps(proc_t *p, struct rusage *ru)
LIST_FOREACH(l, &p->p_lwps, l_sibling) {
ruadd(ru, &l->l_ru);
ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
ru->ru_nivcsw += l->l_nivcsw;
}
}
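
The explicit ru_nvcsw/ru_nivcsw accumulation can go because the counts now live in each LWP's l_ru, and ruadd() already accumulates ru_nvcsw and ru_nivcsw along with the rest of the struct rusage. What remains of the loop is simply:

    LIST_FOREACH(l, &p->p_lwps, l_sibling) {
            ruadd(ru, &l->l_ru);    /* now covers ru_nvcsw and ru_nivcsw too */
    }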

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_synch.c,v 1.360 2023/09/23 20:23:07 ad Exp $ */
/* $NetBSD: kern_synch.c,v 1.361 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.360 2023/09/23 20:23:07 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.361 2023/10/04 20:28:06 ad Exp $");
#include "opt_kstack.h"
#include "opt_ddb.h"
@ -742,10 +742,11 @@ mi_switch(lwp_t *l)
/* Count the context switch. */
CPU_COUNT(CPU_COUNT_NSWTCH, 1);
l->l_ncsw++;
if ((l->l_pflag & LP_PREEMPTING) != 0) {
l->l_nivcsw++;
l->l_ru.ru_nivcsw++;
l->l_pflag &= ~LP_PREEMPTING;
} else {
l->l_ru.ru_nvcsw++;
}
/*

View File

@ -1,7 +1,7 @@
/* $NetBSD: subr_pserialize.c,v 1.23 2023/04/16 04:52:19 riastradh Exp $ */
/* $NetBSD: subr_pserialize.c,v 1.24 2023/10/04 20:28:06 ad Exp $ */
/*-
* Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
* Copyright (c) 2010, 2011, 2023 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.23 2023/04/16 04:52:19 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.24 2023/10/04 20:28:06 ad Exp $");
#include <sys/param.h>
@ -174,21 +174,18 @@ pserialize_in_read_section(void)
bool
pserialize_not_in_read_section(void)
{
struct lwp *l = curlwp;
uint64_t ncsw;
bool notin;
long pctr;
ncsw = l->l_ncsw;
__insn_barrier();
pctr = lwp_pctr();
notin = __predict_true(curcpu()->ci_psz_read_depth == 0);
__insn_barrier();
/*
* If we had a context switch, we're definitely not in a
* pserialize read section because pserialize read sections
* block preemption.
*/
if (__predict_false(ncsw != l->l_ncsw))
if (__predict_false(pctr != lwp_pctr()))
notin = true;
return notin;

View File

@ -1,4 +1,4 @@
/* $NetBSD: lwproc.c,v 1.54 2023/02/22 21:44:45 riastradh Exp $ */
/* $NetBSD: lwproc.c,v 1.55 2023/10/04 20:28:06 ad Exp $ */
/*
* Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
@ -28,7 +28,7 @@
#define RUMP__CURLWP_PRIVATE
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.54 2023/02/22 21:44:45 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.55 2023/10/04 20:28:06 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@ -513,6 +513,7 @@ rump_lwproc_switch(struct lwp *newlwp)
l->l_pflag &= ~LP_RUNNING;
l->l_flag &= ~LW_PENDSIG;
l->l_stat = LSRUN;
l->l_ru.ru_nvcsw++;
if (l->l_flag & LW_WEXIT) {
l->l_stat = LSIDL;
@ -582,3 +583,10 @@ rump_lwproc_sysent_usenative()
panic("don't use rump_lwproc_sysent_usenative()");
curproc->p_emul = &emul_netbsd;
}
long
lwp_pctr(void)
{
return curlwp->l_ru.ru_nvcsw;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: scheduler.c,v 1.53 2022/04/09 23:45:14 riastradh Exp $ */
/* $NetBSD: scheduler.c,v 1.54 2023/10/04 20:28:06 ad Exp $ */
/*
* Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.53 2022/04/09 23:45:14 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.54 2023/10/04 20:28:06 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@ -372,7 +372,7 @@ rump_schedule_cpu_interlock(struct lwp *l, void *interlock)
ci = rcpu->rcpu_ci;
l->l_cpu = l->l_target_cpu = ci;
l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
l->l_ncsw++;
l->l_ru.ru_nvcsw++;
l->l_stat = LSONPROC;
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: lwp.h,v 1.224 2023/09/25 18:30:44 riastradh Exp $ */
/* $NetBSD: lwp.h,v 1.225 2023/10/04 20:28:06 ad Exp $ */
/*
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019, 2020, 2023
@ -121,8 +121,6 @@ struct lwp {
psetid_t l_psid; /* l: assigned processor-set ID */
fixpt_t l_pctcpu; /* p: %cpu during l_swtime */
fixpt_t l_estcpu; /* l: cpu time for SCHED_4BSD */
volatile uint64_t l_ncsw; /* l: total context switches */
volatile uint64_t l_nivcsw; /* l: involuntary context switches */
SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
struct cpu_info *l_target_cpu; /* l: target CPU to migrate */
struct lwpctl *l_lwpctl; /* p: lwpctl block kernel address */
@ -381,7 +379,7 @@ lwp_t * lwp_find(proc_t *, int);
void lwp_userret(lwp_t *);
void lwp_need_userret(lwp_t *);
void lwp_free(lwp_t *, bool, bool);
uint64_t lwp_pctr(void);
long lwp_pctr(void);
int lwp_setprivate(lwp_t *, void *);
int do_lwp_create(lwp_t *, void *, u_long, lwp_t **, const sigset_t *,
const stack_t *);