/* $NetBSD: xen_ipi.c,v 1.39 2020/05/07 19:48:58 bouyer Exp $ */

/*-
 * Copyright (c) 2011, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Cherry G. Mathew <cherry@zyx.in>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID macro */

/*
 * Based on: x86/ipi.c
 */

__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.39 2020/05/07 19:48:58 bouyer Exp $");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/mutex.h>
#include <sys/device.h>
#include <sys/xcall.h>
#include <sys/ipi.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <x86/fpu.h>

#include <machine/frame.h>
#include <machine/segments.h>

#include <xen/evtchn.h>
#include <xen/intr.h>
#include <xen/intrdefs.h>
#include <xen/hypervisor.h>
#include <xen/include/public/vcpu.h>

#ifdef DDB
extern void ddb_ipi(struct trapframe);
static void xen_ipi_ddb(struct cpu_info *, struct intrframe *);
#endif

static void xen_ipi_halt(struct cpu_info *, struct intrframe *);
static void xen_ipi_synch_fpu(struct cpu_info *, struct intrframe *);
static void xen_ipi_xcall(struct cpu_info *, struct intrframe *);
static void xen_ipi_hvcb(struct cpu_info *, struct intrframe *);
static void xen_ipi_generic(struct cpu_info *, struct intrframe *);
static void xen_ipi_ast(struct cpu_info *, struct intrframe *);
static void xen_ipi_kpreempt(struct cpu_info *ci, struct intrframe *);

static void (*xen_ipifunc[XEN_NIPIS])(struct cpu_info *, struct intrframe *) =
{	/* In order of priority (see: xen/include/intrdefs.h) */
	xen_ipi_halt,
	xen_ipi_synch_fpu,
#ifdef DDB
	xen_ipi_ddb,
#else
	NULL,
#endif
	xen_ipi_xcall,
	xen_ipi_hvcb,
	xen_ipi_generic,
	xen_ipi_ast,
	xen_ipi_kpreempt
};
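
/*
 * Interrupt handler bound to each CPU's IPI event channel: atomically
 * fetch and clear this CPU's pending-IPI bitmask, then dispatch the
 * handler for each bit that was set, in table (priority) order.
 */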
static int
xen_ipi_handler(void *arg)
{
	uint32_t pending;
	int bit;
	struct cpu_info *ci;
	struct intrframe *regs;

	ci = curcpu();
	regs = arg;

	KASSERT(ci == arg);

	pending = atomic_swap_32(&ci->ci_ipis, 0);

	KDASSERT((pending >> XEN_NIPIS) == 0);
	while ((bit = ffs(pending)) != 0) {
		bit--;
		pending &= ~(1 << bit);
		ci->ci_ipi_events[bit].ev_count++;
		if (xen_ipifunc[bit] != NULL) {
			(*xen_ipifunc[bit])(ci, regs);
		} else {
			panic("xen_ipifunc[%d] unsupported!\n", bit);
			/* NOTREACHED */
		}
	}

	return 0;
}

/* Must be called once for every cpu that expects to send/recv ipis */
void
xen_ipi_init(void)
{
	cpuid_t vcpu;
	evtchn_port_t evtchn;
	struct cpu_info *ci;
	char intr_xname[INTRDEVNAMEBUF];

	ci = curcpu();

	vcpu = ci->ci_vcpuid;
	KASSERT(vcpu < XEN_LEGACY_MAX_VCPUS);

	evtchn = bind_vcpu_to_evtch(vcpu);
	ci->ci_ipi_evtchn = evtchn;

	KASSERT(evtchn != -1 && evtchn < NR_EVENT_CHANNELS);

	snprintf(intr_xname, sizeof(intr_xname), "%s ipi",
	    device_xname(ci->ci_dev));

	if (event_set_handler(evtchn, xen_ipi_handler, ci, IPL_HIGH, NULL,
	    intr_xname, true, ci) == NULL) {
		panic("%s: unable to register ipi handler\n", __func__);
		/* NOTREACHED */
	}

	hypervisor_unmask_event(evtchn);
}
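
/*
 * DIAGNOSTIC-only sanity check: a valid IPI mask may contain only
 * bits from the set of IPI types known to this file.
 */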
#ifdef DIAGNOSTIC
static inline bool /* helper */
valid_ipimask(uint32_t ipimask)
{
	uint32_t masks = XEN_IPI_GENERIC | XEN_IPI_HVCB | XEN_IPI_XCALL |
	    XEN_IPI_DDB | XEN_IPI_SYNCH_FPU |
	    XEN_IPI_HALT | XEN_IPI_AST | XEN_IPI_KPREEMPT;

	if (ipimask & ~masks) {
		return false;
	} else {
		return true;
	}
}
#endif
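
/*
 * Post the requested IPI bits on a remote CPU and kick its IPI event
 * channel.  Fails with ENOENT if the target CPU is not running yet.
 */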
int
xen_send_ipi(struct cpu_info *ci, uint32_t ipimask)
{
	evtchn_port_t evtchn;

	KASSERT(ci != NULL && ci != curcpu());

	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
		return ENOENT;
	}

	evtchn = ci->ci_ipi_evtchn;

	KASSERTMSG(valid_ipimask(ipimask) == true,
	    "xen_send_ipi() called with invalid ipimask\n");

	atomic_or_32(&ci->ci_ipis, ipimask);
	hypervisor_notify_via_evtchn(evtchn);

	return 0;
}
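
/*
 * Send the given IPI set to every present, running CPU except the
 * calling one.
 */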
void
xen_broadcast_ipi(uint32_t ipimask)
{
	struct cpu_info *ci, *self = curcpu();
	CPU_INFO_ITERATOR cii;

	KASSERTMSG(valid_ipimask(ipimask) == true,
	    "xen_broadcast_ipi() called with invalid ipimask\n");

	/*
	 * XXX-cherry: there's an implicit broadcast sending order
	 * which I dislike. Randomise this ? :-)
	 */

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci == NULL)
			continue;
		if (ci == self)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_RUNNING)) {
			if (0 != xen_send_ipi(ci, ipimask)) {
				panic("xen_ipi of %x from %s to %s failed\n",
				    ipimask, cpu_name(curcpu()),
				    cpu_name(ci));
			}
		}
	}
}
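
/*
 * Per-IPI-type handlers.  Each runs at IPL_HIGH on the CPU that
 * received the IPI, dispatched from xen_ipi_handler() above.
 */

/* XEN_IPI_HALT: bring the local VCPU down via VCPUOP_down. */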
static void
xen_ipi_halt(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci == curcpu());
	KASSERT(ci != NULL);
	if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_vcpuid, NULL)) {
		panic("%s shutdown failed.\n", device_xname(ci->ci_dev));
	}
}
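
/* XEN_IPI_SYNCH_FPU: never expected to be delivered; panic if it is. */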
static void
xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

	panic("%s: impossible", __func__);
}
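
/*
 * XEN_IPI_DDB: enter the kernel debugger on this CPU.  On i386 the
 * trapframe that ddb_ipi() expects is first rebuilt field by field
 * from the interrupt frame.
 */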
#ifdef DDB
static void
xen_ipi_ddb(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

#ifdef __x86_64__
	ddb_ipi(intrf->if_tf);
#else
	struct trapframe tf;
	tf.tf_gs = intrf->if_gs;
	tf.tf_fs = intrf->if_fs;
	tf.tf_es = intrf->if_es;
	tf.tf_ds = intrf->if_ds;
	tf.tf_edi = intrf->if_edi;
	tf.tf_esi = intrf->if_esi;
	tf.tf_ebp = intrf->if_ebp;
	tf.tf_ebx = intrf->if_ebx;
	tf.tf_ecx = intrf->if_ecx;
	tf.tf_eax = intrf->if_eax;
	tf.tf_trapno = intrf->__if_trapno;
	tf.tf_err = intrf->__if_err;
	tf.tf_eip = intrf->if_eip;
	tf.tf_cs = intrf->if_cs;
	tf.tf_eflags = intrf->if_eflags;
	tf.tf_esp = intrf->if_esp;
	tf.tf_ss = intrf->if_ss;

	ddb_ipi(tf);
#endif
}
#endif /* DDB */
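
/*
 * XEN_IPI_XCALL: MD wrapper for the xcall(9) callback;
 * xc_ipi_handler() runs the pending cross-calls on this CPU.
 */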
static void
xen_ipi_xcall(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

	xc_ipi_handler();
}
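
/* XEN_IPI_AST: request an AST from the LWP currently on this CPU. */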
static void
xen_ipi_ast(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

	aston(ci->ci_onproc);
}
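
/* XEN_IPI_GENERIC: MD hook for the MI ipi(9) handler. */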
static void
xen_ipi_generic(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

	ipi_cpu_handler();
}
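
/*
 * XEN_IPI_HVCB: force the hypervisor event callback so that events
 * pending on this VCPU are processed.
 */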
static void
xen_ipi_hvcb(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);
	KASSERT(ci == curcpu());

	KASSERT(!ci->ci_vcpu->evtchn_upcall_mask);
	hypervisor_force_callback();
}
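
/* XEN_IPI_KPREEMPT: trigger the kernel preemption soft interrupt. */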
static void
xen_ipi_kpreempt(struct cpu_info *ci, struct intrframe *intrf)
{
	softint_trigger(1 << SIR_PREEMPT);
}
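
/*
 * MI entry points: xcall(9) and ipi(9) use these to reach remote CPUs
 * on XENPV.  A NULL ci means broadcast to all other running CPUs.
 */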
#ifdef XENPV
void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	if (ci) {
		if (0 != xen_send_ipi(ci, XEN_IPI_XCALL)) {
			panic("xen_send_ipi(XEN_IPI_XCALL) failed\n");
		}
	} else {
		xen_broadcast_ipi(XEN_IPI_XCALL);
	}
}

void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	if (ci) {
		if (0 != xen_send_ipi(ci, XEN_IPI_GENERIC)) {
			panic("xen_send_ipi(XEN_IPI_GENERIC) failed\n");
		}
	} else {
		xen_broadcast_ipi(XEN_IPI_GENERIC);
	}
}
#endif /* XENPV */