Rip out pserialize(9) logic now that the RCU patent has expired.

pserialize_perform() is now basically just xc_barrier(XC_HIGHPRI).
No more tentacles throughout the scheduler.  Simplify the psz read
count for diagnostic assertions by putting it unconditionally into
cpu_info.

From rmind@, tidied up by me.
riastradh 2019-12-03 05:07:48 +00:00
parent 2a8e1ccd61
commit a0c864ecf3
8 changed files with 49 additions and 242 deletions
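In outline, the write side now reduces to a single cross-call barrier. A simplified sketch of the new pserialize_perform() follows (the real version, with its assertions and event counting, is in the subr_pserialize.c diff below):

void
pserialize_perform(pserialize_t psz)
{

	/*
	 * Read sections run at splsoftserial() (see pserialize_read_enter
	 * below), so the high-priority cross-call cannot run on a CPU
	 * while that CPU is inside a read section.  Once every CPU has
	 * completed the barrier, all readers that began before the
	 * caller's update have finished.
	 */
	xc_barrier(XC_HIGHPRI);
}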

rmixl_cpu.c

@@ -1,4 +1,4 @@
/* $NetBSD: rmixl_cpu.c,v 1.10 2019/12/01 15:34:45 ad Exp $ */
/* $NetBSD: rmixl_cpu.c,v 1.11 2019/12/03 05:07:48 riastradh Exp $ */
/*
* Copyright 2002 Wasabi Systems, Inc.
@@ -38,7 +38,7 @@
#include "locators.h"
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.10 2019/12/01 15:34:45 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.11 2019/12/03 05:07:48 riastradh Exp $");
#include "opt_multiprocessor.h"
#include "opt_ddb.h"
@@ -414,8 +414,6 @@ rmixl_cpu_data_print(struct cpu_data *dp)
{
printf("cpu_biglock_wanted %p\n", dp->cpu_biglock_wanted);
printf("cpu_callout %p\n", dp->cpu_callout);
printf("cpu_unused1 %p\n", dp->cpu_unused1);
printf("cpu_unused2 %d\n", dp->cpu_unused2);
printf("&cpu_schedstate %p\n", &dp->cpu_schedstate); /* TBD */
printf("&cpu_xcall %p\n", &dp->cpu_xcall); /* TBD */
printf("cpu_xcall_pending %d\n", dp->cpu_xcall_pending);
@@ -423,9 +421,7 @@ rmixl_cpu_data_print(struct cpu_data *dp)
printf("cpu_lockstat %p\n", dp->cpu_lockstat);
printf("cpu_index %d\n", dp->cpu_index);
printf("cpu_biglock_count %d\n", dp->cpu_biglock_count);
printf("cpu_spin_locks %d\n", dp->cpu_spin_locks);
printf("cpu_simple_locks %d\n", dp->cpu_simple_locks);
printf("cpu_spin_locks2 %d\n", dp->cpu_spin_locks2);
printf("cpu_psz_read_depth %d\n", dp->cpu_psz_read_depth);
printf("cpu_lkdebug_recurse %d\n", dp->cpu_lkdebug_recurse);
printf("cpu_softints %d\n", dp->cpu_softints);
printf("cpu_nsyscall %"PRIu64"\n", dp->cpu_nsyscall);

kern_lwp.c

@@ -1,4 +1,4 @@
/* $NetBSD: kern_lwp.c,v 1.215 2019/12/01 15:27:58 ad Exp $ */
/* $NetBSD: kern_lwp.c,v 1.216 2019/12/03 05:07:48 riastradh Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -209,7 +209,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.215 2019/12/01 15:27:58 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.216 2019/12/03 05:07:48 riastradh Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@@ -225,7 +225,6 @@ __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.215 2019/12/01 15:27:58 ad Exp $");
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/pserialize.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
@@ -1036,9 +1035,6 @@ lwp_startup(struct lwp *prev, struct lwp *new_lwp)
pmap_activate(new_lwp);
spl0();
/* Note trip through cpu_switchto(). */
pserialize_switchpoint();
LOCKDEBUG_BARRIER(NULL, 0);
KPREEMPT_ENABLE(new_lwp);
if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {

kern_softint.c

@@ -1,4 +1,4 @@
/* $NetBSD: kern_softint.c,v 1.52 2019/12/01 15:34:46 ad Exp $ */
/* $NetBSD: kern_softint.c,v 1.53 2019/12/03 05:07:48 riastradh Exp $ */
/*-
* Copyright (c) 2007, 2008, 2019 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.52 2019/12/01 15:34:46 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.53 2019/12/03 05:07:48 riastradh Exp $");
#include <sys/param.h>
#include <sys/proc.h>
@@ -182,7 +182,6 @@ __KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.52 2019/12/01 15:34:46 ad Exp $")
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/pserialize.h>
#include <net/netisr.h>
@@ -884,9 +883,6 @@ softint_dispatch(lwp_t *pinned, int s)
l->l_pflag &= ~LP_TIMEINTR;
}
/* Indicate a soft-interrupt switch. */
pserialize_switchpoint();
/*
* If we blocked while handling the interrupt, the pinned LWP is
* gone so switch to the idle LWP. It will select a new LWP to

kern_synch.c

@@ -1,4 +1,4 @@
/* $NetBSD: kern_synch.c,v 1.327 2019/12/01 15:34:46 ad Exp $ */
/* $NetBSD: kern_synch.c,v 1.328 2019/12/03 05:07:48 riastradh Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
@@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.327 2019/12/01 15:34:46 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.328 2019/12/03 05:07:48 riastradh Exp $");
#include "opt_kstack.h"
#include "opt_dtrace.h"
@@ -743,9 +743,6 @@ mi_switch(lwp_t *l)
l->l_lwpctl->lc_pctr++;
}
/* Note trip through cpu_switchto(). */
pserialize_switchpoint();
KASSERT(l->l_cpu == ci);
splx(oldspl);
/*
@@ -755,7 +752,6 @@ mi_switch(lwp_t *l)
retval = 1;
} else {
/* Nothing to do - just unlock and return. */
pserialize_switchpoint();
mutex_spin_exit(spc->spc_mutex);
l->l_pflag &= ~LP_PREEMPTING;
lwp_unlock(l);

subr_pserialize.c

@@ -1,4 +1,4 @@
/* $NetBSD: subr_pserialize.c,v 1.13 2019/10/06 15:11:17 uwe Exp $ */
/* $NetBSD: subr_pserialize.c,v 1.14 2019/12/03 05:07:49 riastradh Exp $ */
/*-
* Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
@@ -28,57 +28,25 @@
/*
* Passive serialization.
*
* Implementation accurately matches the lapsed US patent 4809168, therefore
* code is patent-free in the United States. Your use of this code is at
* your own risk.
*
* Note for NetBSD developers: all changes to this source file must be
* approved by the <core>.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.13 2019/10/06 15:11:17 uwe Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.14 2019/12/03 05:07:49 riastradh Exp $");
#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/pserialize.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/xcall.h>
struct pserialize {
TAILQ_ENTRY(pserialize) psz_chain;
lwp_t * psz_owner;
kcpuset_t * psz_target;
kcpuset_t * psz_pass;
};
static u_int psz_work_todo __cacheline_aligned;
static kmutex_t psz_lock __cacheline_aligned;
static struct evcnt psz_ev_excl __cacheline_aligned;
/*
* As defined in "Method 1":
* q0: "0 MP checkpoints have occured".
* q1: "1 MP checkpoint has occured".
* q2: "2 MP checkpoints have occured".
*/
static TAILQ_HEAD(, pserialize) psz_queue0 __cacheline_aligned;
static TAILQ_HEAD(, pserialize) psz_queue1 __cacheline_aligned;
static TAILQ_HEAD(, pserialize) psz_queue2 __cacheline_aligned;
#ifdef LOCKDEBUG
#include <sys/percpu.h>
static percpu_t *psz_debug_nreads __cacheline_aligned;
#endif
/*
* pserialize_init:
*
@@ -88,16 +56,8 @@ void
pserialize_init(void)
{
psz_work_todo = 0;
TAILQ_INIT(&psz_queue0);
TAILQ_INIT(&psz_queue1);
TAILQ_INIT(&psz_queue2);
mutex_init(&psz_lock, MUTEX_DEFAULT, IPL_SCHED);
evcnt_attach_dynamic(&psz_ev_excl, EVCNT_TYPE_MISC, NULL,
"pserialize", "exclusive access");
#ifdef LOCKDEBUG
psz_debug_nreads = percpu_alloc(sizeof(uint32_t));
#endif
}
/*
@@ -110,11 +70,7 @@ pserialize_create(void)
{
pserialize_t psz;
psz = kmem_zalloc(sizeof(struct pserialize), KM_SLEEP);
kcpuset_create(&psz->psz_target, true);
kcpuset_create(&psz->psz_pass, true);
psz->psz_owner = NULL;
psz = kmem_zalloc(sizeof(*psz), KM_SLEEP);
return psz;
}
@@ -128,25 +84,19 @@ pserialize_destroy(pserialize_t psz)
{
KASSERT(psz->psz_owner == NULL);
kcpuset_destroy(psz->psz_target);
kcpuset_destroy(psz->psz_pass);
kmem_free(psz, sizeof(struct pserialize));
kmem_free(psz, sizeof(*psz));
}
/*
* pserialize_perform:
*
* Perform the write side of passive serialization. The calling
* thread holds an exclusive lock on the data object(s) being updated.
* We wait until every processor in the system has made at least two
* passes through cpu_switchto(). The wait is made with the caller's
* update lock held, but is short term.
* Perform the write side of passive serialization. This operation
* MUST be serialized at a caller level (e.g. with a mutex or by a
* single-threaded use).
*/
void
pserialize_perform(pserialize_t psz)
{
int n;
KASSERT(!cpu_intr_p());
KASSERT(!cpu_softintr_p());
@@ -155,46 +105,23 @@ pserialize_perform(pserialize_t psz)
return;
}
KASSERT(psz->psz_owner == NULL);
KASSERT(ncpu > 0);
if (__predict_false(mp_online == false)) {
psz_ev_excl.ev_count++;
return;
}
/*
* Set up the object and put it onto the queue. The lock
* activity here provides the necessary memory barrier to
* make the caller's data update completely visible to
* other processors.
*/
psz->psz_owner = curlwp;
kcpuset_copy(psz->psz_target, kcpuset_running);
kcpuset_zero(psz->psz_pass);
mutex_spin_enter(&psz_lock);
TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
psz_work_todo++;
n = 0;
do {
mutex_spin_exit(&psz_lock);
/*
* Force some context switch activity on every CPU, as
* the system may not be busy. Pause to not flood.
*/
if (n++ > 1)
kpause("psrlz", false, 1, NULL);
xc_barrier(XC_HIGHPRI);
mutex_spin_enter(&psz_lock);
} while (!kcpuset_iszero(psz->psz_target));
psz_ev_excl.ev_count++;
mutex_spin_exit(&psz_lock);
/*
* Broadcast a NOP to all CPUs and wait until all of them complete.
*/
xc_barrier(XC_HIGHPRI);
KASSERT(psz->psz_owner == curlwp);
psz->psz_owner = NULL;
atomic_store_relaxed(&psz_ev_excl.ev_count,
1 + atomic_load_relaxed(&psz_ev_excl.ev_count));
}
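For reference, the caller-level serialization required above is normally the existing update lock; a typical writer looks like this (illustrative only, all frob_* names are hypothetical):

	mutex_enter(&frob_lock);	/* caller-level serialization */
	frob_list_remove(f);		/* unpublish: new readers cannot find it */
	pserialize_perform(frob_psz);	/* wait out pre-existing readers */
	mutex_exit(&frob_lock);
	kmem_free(f, sizeof(*f));	/* now safe: no reader can reference it */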
int
@@ -202,18 +129,9 @@ pserialize_read_enter(void)
{
int s;
KASSERT(!cpu_intr_p());
s = splsoftserial();
#ifdef LOCKDEBUG
{
uint32_t *nreads;
nreads = percpu_getref(psz_debug_nreads);
(*nreads)++;
if (*nreads == 0)
panic("nreads overflow");
percpu_putref(psz_debug_nreads);
}
#endif
curcpu()->ci_psz_read_depth++;
__insn_barrier();
return s;
}
@@ -221,138 +139,47 @@ void
pserialize_read_exit(int s)
{
#ifdef LOCKDEBUG
{
uint32_t *nreads;
nreads = percpu_getref(psz_debug_nreads);
(*nreads)--;
if (*nreads == UINT_MAX)
panic("nreads underflow");
percpu_putref(psz_debug_nreads);
}
#endif
KASSERT(kpreempt_disabled());
__insn_barrier();
if (__predict_false(curcpu()->ci_psz_read_depth-- == 0))
panic("mismatching pserialize_read_exit()");
splx(s);
}
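The matching read side (again illustrative; frob_list_lookup() is hypothetical) is simply:

	int s;
	struct frob *f;

	s = pserialize_read_enter();	/* splsoftserial() + per-CPU depth count */
	f = frob_list_lookup(key);	/* lock-free lookup of published data */
	if (f != NULL)
		val = f->f_value;	/* must not sleep inside the section */
	pserialize_read_exit(s);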
/*
* pserialize_switchpoint:
*
* Monitor system context switch activity. Called from machine
* independent code after mi_switch() returns.
*/
void
pserialize_switchpoint(void)
{
pserialize_t psz, next;
cpuid_t cid;
/*
* If no updates pending, bail out. No need to lock in order to
* test psz_work_todo; the only ill effect of missing an update
* would be to delay LWPs waiting in pserialize_perform(). That
* will not happen because updates are on the queue before an
* xcall is generated (serialization) to tickle every CPU.
*/
if (__predict_true(psz_work_todo == 0)) {
return;
}
mutex_spin_enter(&psz_lock);
cid = cpu_index(curcpu());
/*
* At first, scan through the second queue and update each request,
* if passed all processors, then transfer to the third queue.
*/
for (psz = TAILQ_FIRST(&psz_queue1); psz != NULL; psz = next) {
next = TAILQ_NEXT(psz, psz_chain);
kcpuset_set(psz->psz_pass, cid);
if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
continue;
}
kcpuset_zero(psz->psz_pass);
TAILQ_REMOVE(&psz_queue1, psz, psz_chain);
TAILQ_INSERT_TAIL(&psz_queue2, psz, psz_chain);
}
/*
* Scan through the first queue and update each request,
* if passed all processors, then move to the second queue.
*/
for (psz = TAILQ_FIRST(&psz_queue0); psz != NULL; psz = next) {
next = TAILQ_NEXT(psz, psz_chain);
kcpuset_set(psz->psz_pass, cid);
if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
continue;
}
kcpuset_zero(psz->psz_pass);
TAILQ_REMOVE(&psz_queue0, psz, psz_chain);
TAILQ_INSERT_TAIL(&psz_queue1, psz, psz_chain);
}
/*
* Process the third queue: entries have been seen twice on every
* processor, remove from the queue and notify the updating thread.
*/
while ((psz = TAILQ_FIRST(&psz_queue2)) != NULL) {
TAILQ_REMOVE(&psz_queue2, psz, psz_chain);
kcpuset_zero(psz->psz_target);
psz_work_todo--;
}
mutex_spin_exit(&psz_lock);
}
/*
* pserialize_in_read_section:
*
* True if the caller is in a pserialize read section. To be used only
* for diagnostic assertions where we want to guarantee the condition like:
* True if the caller is in a pserialize read section. To be used
* only for diagnostic assertions where we want to guarantee the
* condition like:
*
* KASSERT(pserialize_in_read_section());
* KASSERT(pserialize_in_read_section());
*/
bool
pserialize_in_read_section(void)
{
#ifdef LOCKDEBUG
uint32_t *nreads;
bool in;
/* Not initialized yet */
if (__predict_false(psz_debug_nreads == NULL))
return true;
nreads = percpu_getref(psz_debug_nreads);
in = *nreads != 0;
percpu_putref(psz_debug_nreads);
return in;
#else
return true;
#endif
return kpreempt_disabled() && curcpu()->ci_psz_read_depth > 0;
}
/*
* pserialize_not_in_read_section:
*
* True if the caller is not in a pserialize read section. To be used only
* for diagnostic assertions where we want to guarantee the condition like:
* True if the caller is not in a pserialize read section. To be
* used only for diagnostic assertions where we want to guarantee
* the condition like:
*
* KASSERT(pserialize_not_in_read_section());
* KASSERT(pserialize_not_in_read_section());
*/
bool
pserialize_not_in_read_section(void)
{
#ifdef LOCKDEBUG
uint32_t *nreads;
bool notin;
/* Not initialized yet */
if (__predict_false(psz_debug_nreads == NULL))
return true;
nreads = percpu_getref(psz_debug_nreads);
notin = *nreads == 0;
percpu_putref(psz_debug_nreads);
kpreempt_disable();
notin = (curcpu()->ci_psz_read_depth == 0);
kpreempt_enable();
return notin;
#else
return true;
#endif
}
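Because the read depth now lives unconditionally in cpu_info, both predicates give real answers in any kernel rather than only under LOCKDEBUG. A hypothetical sleepable routine might assert:

	void
	frob_update(struct frob *f)
	{

		/* May sleep, so must never be called from a read section. */
		KASSERT(pserialize_not_in_read_section());

		mutex_enter(&f->f_lock);
		f->f_gen++;
		mutex_exit(&f->f_lock);
	}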

rump.c

@@ -1,4 +1,4 @@
/* $NetBSD: rump.c,v 1.335 2019/10/15 18:36:38 christos Exp $ */
/* $NetBSD: rump.c,v 1.336 2019/12/03 05:07:49 riastradh Exp $ */
/*
* Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.335 2019/10/15 18:36:38 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.336 2019/12/03 05:07:49 riastradh Exp $");
#include <sys/systm.h>
#define ELFSIZE ARCH_ELFSIZE
@@ -57,7 +57,6 @@ __KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.335 2019/10/15 18:36:38 christos Exp $");
#include <sys/percpu.h>
#include <sys/pipe.h>
#include <sys/pool.h>
#include <sys/pserialize.h>
#include <sys/queue.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
@@ -306,7 +305,6 @@ rump_init(void)
kprintf_init();
percpu_init();
pserialize_init();
kauth_init();
@@ -731,14 +729,11 @@ rump_allbetsareoff_setid(pid_t pid, int lid)
p->p_pid = pid;
}
#include <sys/pserialize.h>
static void
ipiemu(void *a1, void *a2)
{
xc__highpri_intr(NULL);
pserialize_switchpoint();
}
void

cpu_data.h

@@ -1,4 +1,4 @@
/* $NetBSD: cpu_data.h,v 1.41 2019/12/02 23:22:43 ad Exp $ */
/* $NetBSD: cpu_data.h,v 1.42 2019/12/03 05:07:49 riastradh Exp $ */
/*-
* Copyright (c) 2004, 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
@@ -68,6 +68,7 @@ struct cpu_data {
lwp_t *cpu_biglock_wanted; /* LWP spinning on biglock */
kcondvar_t cpu_xcall; /* cross-call support */
int cpu_xcall_pending; /* cross-call support */
u_int cpu_psz_read_depth; /* pserialize(9) read depth */
uint32_t cpu_ipipend[IPI_BITWORDS]; /* pending IPIs */
struct schedstate_percpu cpu_schedstate; /* scheduler state */
@@ -127,6 +128,7 @@ struct cpu_data {
#define ci_pcu_curlwp ci_data.cpu_pcu_curlwp
#define ci_kcpuset ci_data.cpu_kcpuset
#define ci_ipipend ci_data.cpu_ipipend
#define ci_psz_read_depth ci_data.cpu_psz_read_depth
#define ci_package_id ci_data.cpu_package_id
#define ci_core_id ci_data.cpu_core_id

pserialize.h

@@ -1,4 +1,4 @@
/* $NetBSD: pserialize.h,v 1.2 2017/11/21 08:49:15 ozaki-r Exp $ */
/* $NetBSD: pserialize.h,v 1.3 2019/12/03 05:07:49 riastradh Exp $ */
/*-
* Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
@@ -35,7 +35,6 @@ struct pserialize;
typedef struct pserialize *pserialize_t;
void pserialize_init(void);
void pserialize_switchpoint(void);
pserialize_t pserialize_create(void);
void pserialize_destroy(pserialize_t);