Pull up following revision(s) (requested by riastradh in ticket #264):

sys/arch/alpha/include/asm.h: revision 1.45 (patch)
        (also applied to locore.s and multiproc.s)

alpha: Add missing barriers in cpu_switchto.
Details in comments.
(accidentally missed in the original commit for ticket #264)
martin 2023-07-31 17:59:23 +00:00
parent eaf1a2261b
commit a9b96c8f60
2 changed files with 30 additions and 5 deletions

Index: sys/arch/alpha/alpha/locore.s

@@ -1,4 +1,4 @@
-/* $NetBSD: locore.s,v 1.123 2019/04/06 03:06:24 thorpej Exp $ */
+/* $NetBSD: locore.s,v 1.123.4.1 2023/07/31 17:59:23 martin Exp $ */
 /*-
  * Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.123 2019/04/06 03:06:24 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.123.4.1 2023/07/31 17:59:23 martin Exp $");
 #include "assym.h"
@@ -686,8 +686,28 @@ LEAF(cpu_switchto, 0)
 	SWITCH_CONTEXT			/* swap the context */
-	GET_CPUINFO
+	/*
+	 * Issue barriers to coordinate mutex_exit on this CPU with
+	 * mutex_vector_enter on another CPU.
+	 *
+	 * 1. Any prior mutex_exit by oldlwp must be visible to other
+	 *    CPUs before we set ci_curlwp := newlwp on this one,
+	 *    requiring a store-before-store barrier.
+	 *
+	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
+	 *    before any subsequent mutex_exit by newlwp can even test
+	 *    whether there might be waiters, requiring a
+	 *    store-before-load barrier.
+	 *
+	 * See kern_mutex.c for details -- this is necessary for
+	 * adaptive mutexes to detect whether the lwp is on the CPU in
+	 * order to safely block without requiring atomic r/m/w in
+	 * mutex_exit.
+	 */
+	GET_CPUINFO			/* v0 = curcpu() */
+	wmb	/* store-before-store XXX patch out if !MP? */
 	stq	s2, CPU_INFO_CURLWP(v0)	/* curlwp = l */
+	mb	/* store-before-load XXX patch out if !MP? */
 	/*
 	 * Now running on the new PCB.
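
The barrier pairing above is easier to see in higher-level terms. What follows is a minimal user-space C11 sketch of the ordering the new wmb/mb pair enforces; it is an illustration, not code from the kernel, and every name in it (cpu_info_model, switchto_model, mutex_exit_model, mtx_owner, mtx_has_waiters) is invented for the example.

	/*
	 * Illustrative C11 model of the ordering cpu_switchto needs.
	 * All identifiers here are hypothetical, not NetBSD's.
	 */
	#include <stdatomic.h>
	#include <stddef.h>

	struct lwp;			/* opaque stand-in for an lwp */

	struct cpu_info_model {
		_Atomic(struct lwp *) ci_curlwp;
	};

	static _Atomic(struct lwp *) mtx_owner;	/* lock owner field */
	static atomic_bool mtx_has_waiters;	/* waiters indication */

	static void
	switchto_model(struct cpu_info_model *ci, struct lwp *newlwp)
	{
		/*
		 * Point 1 of the patch comment: a release fence (at
		 * least as strong as alpha's wmb) orders oldlwp's
		 * earlier store releasing mtx_owner before the
		 * ci_curlwp store below.
		 */
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(&ci->ci_curlwp, newlwp,
		    memory_order_relaxed);
		/*
		 * Point 2: a seq_cst fence (playing the role of
		 * alpha's mb) makes the ci_curlwp store visible
		 * before newlwp's next mutex_exit loads
		 * mtx_has_waiters -- store-before-load, the one
		 * ordering release/acquire alone never provides.
		 */
		atomic_thread_fence(memory_order_seq_cst);
	}

	static _Bool
	mutex_exit_model(void)
	{
		/* Release the lock with a plain store... */
		atomic_store_explicit(&mtx_owner, NULL,
		    memory_order_relaxed);
		/*
		 * ...then test for waiters without an atomic r/m/w.
		 * This is only safe because switchto_model() made the
		 * current ci_curlwp visible to every other CPU first,
		 * so mutex_vector_enter elsewhere can tell whether
		 * the owner is actually on a CPU.
		 */
		return atomic_load_explicit(&mtx_has_waiters,
		    memory_order_relaxed);
	}

Note that alpha's wmb orders only stores against stores, so the release fence in the sketch is somewhat stronger than the hardware barrier it stands in for; the sketch errs on the safe side.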

Index: sys/arch/alpha/alpha/multiproc.s

@@ -1,4 +1,4 @@
-/* $NetBSD: multiproc.s,v 1.13 2008/04/28 20:23:10 martin Exp $ */
+/* $NetBSD: multiproc.s,v 1.13.92.1 2023/07/31 17:59:23 martin Exp $ */
 /*-
  * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
-__KERNEL_RCSID(5, "$NetBSD: multiproc.s,v 1.13 2008/04/28 20:23:10 martin Exp $")
+__KERNEL_RCSID(5, "$NetBSD: multiproc.s,v 1.13.92.1 2023/07/31 17:59:23 martin Exp $")
 /*
  * Multiprocessor glue code.
@@ -65,6 +65,11 @@ NESTED_NOPROFILE(cpu_spinup_trampoline,0,0,ra,0,0)
 	/* Switch to this CPU's idle thread. */
 	ldq	a0, CPU_INFO_IDLE_LWP(s0)
+	/*
+	 * No membar needed because we're not switching from a
+	 * previous lwp, and the idle lwp we're switching to can't be
+	 * holding locks already; see cpu_switchto.
+	 */
 	stq	a0, CPU_INFO_CURLWP(s0)	/* set curlwp */
 	ldq	a0, L_MD_PCBPADDR(a0)
 	SWITCH_CONTEXT
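
By contrast with cpu_switchto, the trampoline can publish curlwp with a plain store. In the same invented C11 terms as the sketch above (spinup_model is again a hypothetical name, reusing struct cpu_info_model and struct lwp from that sketch):

	/*
	 * Hypothetical analogue of cpu_spinup_trampoline's curlwp
	 * store.  No fences: the freshly started CPU has no previous
	 * lwp whose mutex_exit stores need ordering, and its idle lwp
	 * cannot own any locks yet, so there is nothing for
	 * mutex_vector_enter on another CPU to mis-order against.
	 */
	static void
	spinup_model(struct cpu_info_model *ci, struct lwp *idlelwp)
	{
		atomic_store_explicit(&ci->ci_curlwp, idlelwp,
		    memory_order_relaxed);
	}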