Track the SSIR per-cpu, rather than globally.

Author: thorpej
Date:   2020-09-05 18:01:42 +00:00
Parent: eced30c1f4
Commit: e446c2af54
5 changed files with 32 additions and 42 deletions
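The gist of the change, as a condensed C sketch rather than a verbatim excerpt of the diffs below: the single global `ssir` word is replaced by a `ci_ssir` field in each CPU's `struct cpu_info`, so both triggering a software interrupt and lowering the IPL operate on the current CPU's own word. Names follow the diff; the scaffolding is illustrative only and not buildable on its own (the diff's placeholder `(x)` is written as `machdep` here).

/* Illustrative sketch only; not the kernel source. */
struct cpu_info {
	/* ... other per-CPU fields ... */
	volatile u_long ci_ssir;	/* simulated software interrupt reg */
};

void
softint_trigger(uintptr_t machdep)
{
	/* mark a soft interrupt pending on *this* CPU */
	atomic_or_ulong(&curcpu()->ci_ssir, 1UL << machdep);
}

void
spllower(int ipl)
{
	/* dispatch only this CPU's pending soft interrupts, and only
	 * when dropping all the way to IPL 0 */
	if (ipl == ALPHA_PSL_IPL_0 && curcpu()->ci_ssir) {
		(void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT_LO);
		softintr_dispatch();
	}
	(void) alpha_pal_swpipl(ipl);
}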

genassym.cf

@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.24 2020/09/05 16:29:07 thorpej Exp $
+# $NetBSD: genassym.cf,v 1.25 2020/09/05 18:01:42 thorpej Exp $
 #
 # Copyright (c) 1982, 1990, 1993
@@ -188,4 +188,5 @@ define SYS_exit SYS_exit
 # CPU info
 define CPU_INFO_CURLWP offsetof(struct cpu_info, ci_curlwp)
 define CPU_INFO_IDLE_LWP offsetof(struct cpu_info, ci_data.cpu_idlelwp)
+define CPU_INFO_SSIR offsetof(struct cpu_info, ci_ssir)
 define CPU_INFO_SIZEOF sizeof(struct cpu_info)
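For context, genassym.cf is how the hand-written assembly in locore.s (further down) learns structure offsets: each define line becomes a constant in the generated assym.h. A minimal standalone illustration of the idea, not the actual NetBSD tooling:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the real struct cpu_info; the layout is illustrative. */
struct cpu_info_like {
	void *ci_curlwp;
	volatile unsigned long ci_ssir;
};

int
main(void)
{
	/*
	 * The generated header ends up holding lines of the form
	 *   #define CPU_INFO_SSIR <offset>
	 * which locore.s then uses as "ldq t1, CPU_INFO_SSIR(t1)".
	 */
	printf("#define\tCPU_INFO_SSIR\t%zu\n",
	    offsetof(struct cpu_info_like, ci_ssir));
	return 0;
}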

interrupt.c

@@ -1,4 +1,4 @@
-/* $NetBSD: interrupt.c,v 1.83 2020/09/05 16:29:07 thorpej Exp $ */
+/* $NetBSD: interrupt.c,v 1.84 2020/09/05 18:01:42 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.83 2020/09/05 16:29:07 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.84 2020/09/05 18:01:42 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -450,24 +450,21 @@ badaddr_read(void *addr, size_t size, void *rptr)
 	return (rv);
 }
 
-volatile unsigned long ssir;
-
 /*
- * spl0:
+ * spllower:
  *
- *	Lower interrupt priority to IPL 0 -- must check for
- *	software interrupts.
+ *	Lower interrupt priority.  May need to check for software
+ *	interrupts.
  */
 void
-spl0(void)
+spllower(int ipl)
 {
 
-	if (ssir) {
+	if (ipl == ALPHA_PSL_IPL_0 && curcpu()->ci_ssir) {
 		(void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT_LO);
 		softintr_dispatch();
 	}
-
-	(void) alpha_pal_swpipl(ALPHA_PSL_IPL_0);
+	(void) alpha_pal_swpipl(ipl);
 }
 
 /*
@@ -491,9 +488,7 @@ softintr_dispatch(void)
 void
 softint_trigger(uintptr_t machdep)
 {
-
-	/* XXX Needs to be per-CPU */
-	atomic_or_ulong(&ssir, 1 << (x))
+	atomic_or_ulong(&curcpu()->ci_ssir, 1 << (x))
 }
 #endif

locore.s

@@ -1,4 +1,4 @@
-/* $NetBSD: locore.s,v 1.131 2020/09/05 16:29:07 thorpej Exp $ */
+/* $NetBSD: locore.s,v 1.132 2020/09/05 18:01:42 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 #include <machine/asm.h>
 
-__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.131 2020/09/05 16:29:07 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.132 2020/09/05 18:01:42 thorpej Exp $");
 
 #include "assym.h"
@@ -239,8 +239,6 @@ XNESTED(esigcode,0)
  * exception_return: return from trap, exception, or syscall
  */
 
-IMPORT(ssir, 8)
-
 LEAF(exception_return, 1)		/* XXX should be NESTED */
 	br	pv, 1f
 1:	LDGP(pv)
@@ -249,8 +247,13 @@ LEAF(exception_return, 1)		/* XXX should be NESTED */
 	and	s1, ALPHA_PSL_IPL_MASK, t0	/* look at the saved IPL */
 	bne	t0, 5f				/* != 0: can't do AST or SIR */
 
-	/* see if we can do an SIR */
-2:	ldq	t1, ssir			/* SIR pending? */
+	/* GET_CURLWP clobbers v0, t0, t8...t11. */
+	GET_CURLWP
+	mov	v0, s0				/* s0 = curlwp */
+
+	/* see if a soft interrupt is pending. */
+2:	ldq	t1, L_CPU(s0)			/* t1 = curlwp->l_cpu */
+	ldq	t1, CPU_INFO_SSIR(t1)		/* soft int pending? */
 	bne	t1, 6f				/* yes */
 	/* no */
@@ -258,16 +261,13 @@ LEAF(exception_return, 1)		/* XXX should be NESTED */
 	beq	t0, 5f				/* no: just return */
 	/* yes */
 
-	/* GET_CURLWP clobbers v0, t0, t8...t11. */
-3:	GET_CURLWP
-
 	/* check for AST */
-	ldl	t3, L_MD_ASTPENDING(v0)		/* AST pending? */
+3:	ldl	t3, L_MD_ASTPENDING(s0)		/* AST pending? */
 	bne	t3, 7f				/* yes */
 	/* no: headed back to user space */
 
 	/* Enable the FPU based on whether MDLWP_FPACTIVE is set. */
-4:	ldq	t2, L_MD_FLAGS(v0)
+4:	ldq	t2, L_MD_FLAGS(s0)
 	cmplt	t2, zero, a0
 	call_pal PAL_OSF1_wrfen
@@ -294,7 +294,7 @@ LEAF(exception_return, 1)		/* XXX should be NESTED */
 	br	2b
 
 	/* We've got an AST */
-7:	stl	zero, L_MD_ASTPENDING(v0)	/* no AST pending */
+7:	stl	zero, L_MD_ASTPENDING(s0)	/* no AST pending */
 	ldiq	a0, ALPHA_PSL_IPL_0		/* drop IPL to zero */
 	call_pal PAL_OSF1_swpipl
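In C terms, the new exception_return path amounts to the following test before returning to an IPL-0 context. This is only a readability paraphrase of the assembly above, which additionally keeps curlwp cached in s0 so the later AST check can reuse it.

/* Paraphrase of the new pending-soft-interrupt test; illustration only. */
static inline bool
soft_interrupt_pending(struct lwp *l)
{
	struct cpu_info *ci = l->l_cpu;		/* ldq t1, L_CPU(s0) */
	return ci->ci_ssir != 0;		/* ldq t1, CPU_INFO_SSIR(t1) */
						/* bne t1, 6f */
}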

cpu.h

@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.h,v 1.94 2020/09/04 15:50:09 thorpej Exp $ */
+/* $NetBSD: cpu.h,v 1.95 2020/09/05 18:01:42 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -108,6 +108,7 @@ struct cpu_info {
 	volatile int ci_mtx_oldspl;	/* [MI] for spin mutex splx() */
 	u_long ci_intrdepth;		/* interrupt trap depth */
+	volatile u_long ci_ssir;	/* simulated software interrupt reg */
 
 	struct cpu_softc *ci_softc;	/* pointer to our device */
 	struct pmap *ci_pmap;		/* currently-activated pmap */
@@ -137,8 +138,9 @@ struct cpu_info {
 	struct trapframe *ci_db_regs;	/* registers for debuggers */
 };
 
-/* Ensure cpu_info::ci_curlwp is within the signed 16-bit displacement. */
+/* Ensure some cpu_info fields are within the signed 16-bit displacement. */
 __CTASSERT(offsetof(struct cpu_info, ci_curlwp) <= 0x7ff0);
+__CTASSERT(offsetof(struct cpu_info, ci_ssir) <= 0x7ff0);
 
 #endif /* _KERNEL || _KMEMUSER */
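The extra __CTASSERT exists because locore.s now addresses ci_ssir directly off a cpu_info pointer, and Alpha load/store instructions encode a signed 16-bit displacement; if the field ever drifted past that range the assembly would silently break, so the build fails instead. The same idea in portable C11, as a standalone illustration with made-up names:

#include <assert.h>	/* static_assert */
#include <stddef.h>	/* offsetof */

struct example_cpu_info {
	char other_fields[128];			/* stand-in for earlier members */
	volatile unsigned long ci_ssir;		/* accessed directly from assembly */
};

/*
 * Fail the build if the field moves out of reach of a signed 16-bit
 * displacement; the kernel's __CTASSERT uses the same 0x7ff0 bound,
 * a little below the 0x7fff maximum.
 */
static_assert(offsetof(struct example_cpu_info, ci_ssir) <= 0x7ff0,
    "ci_ssir must stay within a 16-bit load/store displacement");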

intr.h

@@ -1,4 +1,4 @@
-/* $NetBSD: intr.h,v 1.74 2020/09/05 16:29:08 thorpej Exp $ */
+/* $NetBSD: intr.h,v 1.75 2020/09/05 18:01:42 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
@@ -132,20 +132,12 @@ makeiplcookie(ipl_t ipl)
 
 #ifdef _KERNEL
 
-/* Simulated software interrupt register. */
-extern volatile unsigned long ssir;
-
 /* IPL-lowering/restoring macros */
-void spl0(void);
-
-static __inline void
-splx(int s)
-{
-	if (s == ALPHA_PSL_IPL_0 && ssir != 0)
-		spl0();
-	else
-		alpha_pal_swpipl(s);
-}
+void	spllower(int);
+
+#define	splx(s)		spllower(s)
+#define	spl0()		spllower(ALPHA_PSL_IPL_0)
 
 /* IPL-raising functions/macros */
 static __inline int
 _splraise(int s)
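Callers are unaffected by this header change: splx() and spl0() keep their meaning but become thin macros over spllower(), which now performs the per-CPU ci_ssir check itself instead of the old inline splx() consulting the global word. A typical caller still looks like this hypothetical sketch:

static void
example_critical_section(void)
{
	int s = splvm();	/* raise the IPL, remember the old level */

	/* ... touch state shared with interrupt handlers ... */

	splx(s);		/* now expands to spllower(s); if s is
				 * ALPHA_PSL_IPL_0 this dispatches any
				 * soft interrupts pending on this CPU */
}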