/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
|
2020-09-30 13:04:40 +03:00
|
|
|
#include "qapi/qapi-events-run-state.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 11:01:28 +03:00
|
|
|
#include "qapi/error.h"
|
2022-09-29 10:20:14 +03:00
|
|
|
#include "qapi/visitor.h"
|
2008-11-05 19:29:27 +03:00
|
|
|
#include <sys/ioctl.h>
|
2010-10-21 19:35:04 +04:00
|
|
|
#include <sys/utsname.h>
|
2022-02-17 09:04:29 +03:00
|
|
|
#include <sys/syscall.h>
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
#include <linux/kvm.h>
|
2018-04-17 21:47:50 +03:00
|
|
|
#include "standard-headers/asm-x86/kvm_para.h"
|
2022-12-06 13:48:53 +03:00
|
|
|
#include "hw/xen/interface/arch-x86/cpuid.h"
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2016-03-15 18:58:45 +03:00
|
|
|
#include "cpu.h"
|
2021-03-22 16:27:40 +03:00
|
|
|
#include "host-cpu.h"
|
2012-12-17 21:20:04 +04:00
|
|
|
#include "sysemu/sysemu.h"
|
2017-01-10 13:59:55 +03:00
|
|
|
#include "sysemu/hw_accel.h"
|
2015-06-18 19:30:16 +03:00
|
|
|
#include "sysemu/kvm_int.h"
|
2019-08-12 08:23:59 +03:00
|
|
|
#include "sysemu/runstate.h"
|
2012-07-26 18:35:13 +04:00
|
|
|
#include "kvm_i386.h"
|
2021-10-07 19:17:07 +03:00
|
|
|
#include "sev.h"
|
2022-12-03 20:51:13 +03:00
|
|
|
#include "xen-emu.h"
|
2015-11-10 15:52:43 +03:00
|
|
|
#include "hyperv.h"
|
2017-07-13 23:15:21 +03:00
|
|
|
#include "hyperv-proto.h"
|
2015-11-10 15:52:43 +03:00
|
|
|
|
2012-12-17 21:19:49 +04:00
|
|
|
#include "exec/gdbstub.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/host-utils.h"
|
Include qemu/main-loop.h less
In my "build everything" tree, changing qemu/main-loop.h triggers a
recompile of some 5600 out of 6600 objects (not counting tests and
objects that don't depend on qemu/osdep.h). It includes block/aio.h,
which in turn includes qemu/event_notifier.h, qemu/notify.h,
qemu/processor.h, qemu/qsp.h, qemu/queue.h, qemu/thread-posix.h,
qemu/thread.h, qemu/timer.h, and a few more.
Include qemu/main-loop.h only where it's needed. Touching it now
recompiles only some 1700 objects. For block/aio.h and
qemu/event_notifier.h, these numbers drop from 5600 to 2800. For the
others, they shrink only slightly.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20190812052359.30071-21-armbru@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
2019-08-12 08:23:50 +03:00
|
|
|
#include "qemu/main-loop.h"
|
2022-12-21 16:35:49 +03:00
|
|
|
#include "qemu/ratelimit.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/config-file.h"
|
2015-10-16 18:38:22 +03:00
|
|
|
#include "qemu/error-report.h"
|
2022-02-26 21:07:23 +03:00
|
|
|
#include "qemu/memalign.h"
|
2019-12-12 16:14:40 +03:00
|
|
|
#include "hw/i386/x86.h"
|
2023-01-14 02:35:46 +03:00
|
|
|
#include "hw/i386/kvm/xen_evtchn.h"
|
2022-12-03 20:51:13 +03:00
|
|
|
#include "hw/i386/pc.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/i386/apic.h"
|
2013-03-08 22:21:50 +04:00
|
|
|
#include "hw/i386/apic_internal.h"
|
|
|
|
#include "hw/i386/apic-msidef.h"
|
2016-07-14 08:56:25 +03:00
|
|
|
#include "hw/i386/intel_iommu.h"
|
2016-07-14 08:56:32 +03:00
|
|
|
#include "hw/i386/x86-iommu.h"
|
2019-08-19 01:54:01 +03:00
|
|
|
#include "hw/i386/e820_memory_layout.h"
|
2015-11-10 15:52:43 +03:00
|
|
|
|
2022-12-03 20:51:13 +03:00
|
|
|
#include "hw/xen/xen.h"
|
|
|
|
|
2012-12-12 16:24:50 +04:00
|
|
|
#include "hw/pci/pci.h"
|
2015-12-17 19:16:08 +03:00
|
|
|
#include "hw/pci/msi.h"
|
2017-05-09 09:00:44 +03:00
|
|
|
#include "hw/pci/msix.h"
|
2017-04-06 13:00:28 +03:00
|
|
|
#include "migration/blocker.h"
|
2015-04-08 14:30:58 +03:00
|
|
|
#include "exec/memattrs.h"
|
2016-07-14 08:56:25 +03:00
|
|
|
#include "trace.h"
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2022-02-16 13:25:00 +03:00
|
|
|
#include CONFIG_DEVICES
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_INFO(SIGNAL_MSI),
    KVM_CAP_INFO(IRQ_ROUTING),
    KVM_CAP_INFO(DEBUGREGS),
    KVM_CAP_INFO(XSAVE),
    KVM_CAP_INFO(VCPU_EVENTS),
    KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
    KVM_CAP_INFO(MCE),
    KVM_CAP_INFO(ADJUST_CLOCK),
    KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_hv_syndbg_options;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave2;
static int has_xcrs;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

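/*
 * KVM_CAP_ADJUST_CLOCK reports the clock flags the host supports; test
 * whether that mask includes KVM_CLOCK_TSC_STABLE.
 */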
bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret & KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

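/* Enable the given KVM_CAP_X2APIC_API flags on the VM; true on success. */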
static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

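/*
 * Evaluate "fn" only once and cache its value in "_result"; later calls
 * return the cached value directly from the enclosing function.
 */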
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
             has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

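/*
 * Read the vCPU's current TSC through MSR_IA32_TSC and cache it in
 * env->tsc; the read is skipped while the cached value is still valid.
 */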
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t value;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    env->tsc_valid = !runstate_is_running();

    ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
    if (ret < 0) {
        return ret;
    }

    env->tsc = value;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

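/* Refresh the cached TSC of every vCPU, each in its own vCPU thread. */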
void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

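/*
 * Issue KVM_GET_SUPPORTED_CPUID with room for "max" entries.  Returns NULL
 * when the buffer is too small, so the caller can retry with a larger one;
 * any other failure is fatal.
 */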
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

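/*
 * Identify host CPUs whose HLE/RTM (TSX) implementation is known to be
 * broken, so the corresponding CPUID bits can be masked out below.
 */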
static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

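/*
 * Return the value KVM supports for one CPUID register, starting from
 * KVM_GET_SUPPORTED_CPUID and then applying the QEMU-specific fixups
 * documented inline below.
 */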
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx, unused;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
        /* KVM never reports CPUID_HT but QEMU can support when vcpus > 1 */
        ret |= CPUID_HT;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        /* Not new instructions, just an optimization.  */
        uint32_t ebx;
        host_cpuid(7, 0, &unused, &ebx, &unused, &unused);
        ret |= ebx & CPUID_7_0_EBX_ERMS;

        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /* Not new instructions, just an optimization.  */
        uint32_t edx;
        host_cpuid(7, 0, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_0_EDX_FSRM;

        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 7 && index == 1 && reg == R_EAX) {
        /* Not new instructions, just an optimization.  */
        uint32_t eax;
        host_cpuid(7, 1, &eax, &unused, &unused, &unused);
        ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC);
    } else if (function == 7 && index == 2 && reg == R_EDX) {
        uint32_t edx;
        host_cpuid(7, 2, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_2_EDX_MCDT_NO;
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        /*
         * The value returned by KVM_GET_SUPPORTED_CPUID does not include
         * features that still have to be enabled with the arch_prctl
         * system call.  QEMU needs the full value, which is retrieved
         * with KVM_GET_DEVICE_ATTR.
         */
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            return ret;
        }

        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc < 0) {
            if (rc != -ENXIO) {
                warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                            "error: %d", rc);
            }
            return ret;
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    return ret;
}

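/*
 * Read one feature MSR via KVM_GET_MSRS.  For the VMX capability MSRs the
 * raw value is converted into a mask of the control bits that are allowed
 * to be set.
 */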
uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
            index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    *max_banks = kvm_check_extension(s, KVM_CAP_MCE);
    return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
}

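/*
 * Build MCi_STATUS/MCG_STATUS values for the given SIGBUS code and inject a
 * machine check.  The error is broadcast to all vCPUs when the CPU model
 * supports MCA broadcast, unless the guest has enabled LMCE.
 */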
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_RIPV | MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running.  An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                "for memory used by QEMU itself instead of guest system!",
                addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just notify an event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

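/*
 * Record a pending exception in CPUX86State.  When the kernel supports
 * exception payloads the payload is kept separate; otherwise it is applied
 * to DR6/CR2 directly and the exception is marked as already injected.
 */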
static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}

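/*
 * Program the vCPU's TSC frequency when the user requested one.  A failing
 * KVM_SET_TSC_KHZ is only an error if the host frequency does not already
 * match the requested value.
 */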
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)

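/*
 * Per-enlightenment table: for each hv-* feature, its human-readable name,
 * the Hyper-V CPUID leaf bits that KVM must expose for it, and the other
 * hv-* features it depends on.
 */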
static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
                     HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
                     HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
    [HYPERV_FEAT_AVIC] = {
        .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
        }
    },
#ifdef CONFIG_SYNDBG
    [HYPERV_FEAT_SYNDBG] = {
        .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
    },
#endif
    [HYPERV_FEAT_MSR_BITMAP] = {
|
|
|
|
.desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
|
|
|
|
.flags = {
|
|
|
|
{.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
|
|
|
|
.bits = HV_NESTED_MSR_BITMAP}
|
|
|
|
}
|
|
|
|
},
|
2022-05-25 14:59:46 +03:00
|
|
|
[HYPERV_FEAT_XMM_INPUT] = {
|
|
|
|
.desc = "XMM fast hypercall input (hv-xmm-input)",
|
|
|
|
.flags = {
|
|
|
|
{.func = HV_CPUID_FEATURES, .reg = R_EDX,
|
|
|
|
.bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
|
|
|
|
}
|
|
|
|
},
|
2022-05-25 14:59:47 +03:00
|
|
|
[HYPERV_FEAT_TLBFLUSH_EXT] = {
|
|
|
|
.desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
|
|
|
|
.flags = {
|
|
|
|
{.func = HV_CPUID_FEATURES, .reg = R_EDX,
|
|
|
|
.bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
|
|
|
|
},
|
|
|
|
.dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
|
|
|
|
},
|
2022-05-25 14:59:48 +03:00
|
|
|
[HYPERV_FEAT_TLBFLUSH_DIRECT] = {
|
|
|
|
.desc = "direct TLB flush (hv-tlbflush-direct)",
|
|
|
|
.flags = {
|
|
|
|
{.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
|
|
|
|
.bits = HV_NESTED_DIRECT_FLUSH}
|
|
|
|
},
|
|
|
|
.dependencies = BIT(HYPERV_FEAT_VAPIC)
|
|
|
|
},
|
2019-05-17 17:19:17 +03:00
|
|
|
};
|
|
|
|
|
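/*
 * Issue KVM_GET_SUPPORTED_HV_CPUID once with room for 'max' entries,
 * either as a system ioctl (when KVM_CAP_SYS_HYPERV_CPUID is available)
 * or on the vCPU fd.  Returns NULL when the buffer was too small so the
 * caller can retry with a bigger one.
 */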
2021-04-22 19:11:24 +03:00
|
|
|
static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
|
|
|
|
bool do_sys_ioctl)
|
2019-05-17 17:19:17 +03:00
|
|
|
{
|
|
|
|
struct kvm_cpuid2 *cpuid;
|
|
|
|
int r, size;
|
|
|
|
|
|
|
|
size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
|
|
|
|
cpuid = g_malloc0(size);
|
|
|
|
cpuid->nent = max;
|
|
|
|
|
2021-04-22 19:11:24 +03:00
|
|
|
if (do_sys_ioctl) {
|
|
|
|
r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
|
|
|
|
} else {
|
|
|
|
r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
|
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
if (r == 0 && cpuid->nent >= max) {
|
|
|
|
r = -E2BIG;
|
|
|
|
}
|
|
|
|
if (r < 0) {
|
|
|
|
if (r == -E2BIG) {
|
|
|
|
g_free(cpuid);
|
|
|
|
return NULL;
|
|
|
|
} else {
|
|
|
|
fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
|
|
|
|
strerror(-r));
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return cpuid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
|
|
|
|
* for all entries.
|
|
|
|
*/
|
|
|
|
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
|
|
|
|
{
|
|
|
|
struct kvm_cpuid2 *cpuid;
|
2022-02-16 13:24:59 +03:00
|
|
|
/* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
|
|
|
|
int max = 11;
|
2021-04-22 19:11:21 +03:00
|
|
|
int i;
|
2021-04-22 19:11:24 +03:00
|
|
|
bool do_sys_ioctl;
|
|
|
|
|
|
|
|
do_sys_ioctl =
|
|
|
|
kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;
|
2019-05-17 17:19:17 +03:00
|
|
|
|
2021-07-16 14:58:51 +03:00
|
|
|
/*
|
|
|
|
* Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
|
|
|
|
* unsupported; kvm_hyperv_expand_features() checks for that.
|
|
|
|
*/
|
|
|
|
assert(do_sys_ioctl || cs->kvm_state);
|
|
|
|
|
2019-05-17 17:19:17 +03:00
|
|
|
/*
|
|
|
|
* When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
|
|
|
|
* -E2BIG, however, it doesn't report back the right size. Keep increasing
|
|
|
|
* it and re-trying until we succeed.
|
|
|
|
*/
|
2021-04-22 19:11:24 +03:00
|
|
|
while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
|
2019-05-17 17:19:17 +03:00
|
|
|
max++;
|
|
|
|
}
|
2021-04-22 19:11:21 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
|
|
|
|
* KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled, but we want to get the
|
|
|
|
* information early, so just check for the capability and set the bit
|
|
|
|
* manually.
|
|
|
|
*/
|
2021-04-22 19:11:24 +03:00
|
|
|
if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
|
2021-04-22 19:11:21 +03:00
|
|
|
KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
|
|
|
|
for (i = 0; i < cpuid->nent; i++) {
|
|
|
|
if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
|
|
|
|
cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-17 17:19:17 +03:00
|
|
|
return cpuid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
|
|
|
|
* leaves from KVM_CAP_HYPERV* and present MSRs data.
|
|
|
|
*/
|
|
|
|
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
|
2016-06-24 13:49:36 +03:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
2019-05-17 17:19:17 +03:00
|
|
|
struct kvm_cpuid2 *cpuid;
|
|
|
|
struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;
|
|
|
|
|
|
|
|
/* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
|
|
|
|
cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
|
|
|
|
cpuid->nent = 2;
|
|
|
|
|
|
|
|
/* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
|
|
|
|
entry_feat = &cpuid->entries[0];
|
|
|
|
entry_feat->function = HV_CPUID_FEATURES;
|
|
|
|
|
|
|
|
entry_recomm = &cpuid->entries[1];
|
|
|
|
entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
|
|
|
|
entry_recomm->ebx = cpu->hyperv_spinlock_attempts;
|
|
|
|
|
|
|
|
if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
|
|
|
|
entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
|
|
|
|
entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
|
|
|
|
entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
|
|
|
|
entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
|
|
|
|
entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
|
|
|
|
}
|
2016-06-24 13:49:36 +03:00
|
|
|
|
2019-05-17 17:19:17 +03:00
|
|
|
if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
|
|
|
|
entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
|
|
|
|
entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (has_msr_hv_frequencies) {
|
|
|
|
entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
|
|
|
|
entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (has_msr_hv_crash) {
|
|
|
|
entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
|
2018-03-30 20:02:08 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (has_msr_hv_reenlightenment) {
|
|
|
|
entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (has_msr_hv_reset) {
|
|
|
|
entry_feat->eax |= HV_RESET_AVAILABLE;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (has_msr_hv_vpindex) {
|
|
|
|
entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
|
2018-04-11 14:50:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (has_msr_hv_runtime) {
|
|
|
|
entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (has_msr_hv_synic) {
|
|
|
|
unsigned int cap = cpu->hyperv_synic_kvm_only ?
|
|
|
|
KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
|
|
|
|
|
|
|
|
if (kvm_check_extension(cs->kvm_state, cap) > 0) {
|
|
|
|
entry_feat->eax |= HV_SYNIC_AVAILABLE;
|
2018-03-30 20:02:09 +03:00
|
|
|
}
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (has_msr_hv_stimer) {
|
|
|
|
entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2018-09-21 11:22:10 +03:00
|
|
|
|
2022-02-16 13:24:59 +03:00
|
|
|
if (has_msr_hv_syndbg_options) {
|
|
|
|
entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
|
|
|
|
entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
|
|
|
|
entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
|
|
|
|
}
|
|
|
|
|
2019-05-17 17:19:17 +03:00
|
|
|
if (kvm_check_extension(cs->kvm_state,
|
|
|
|
KVM_CAP_HYPERV_TLBFLUSH) > 0) {
|
|
|
|
entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
|
|
|
|
entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
|
|
|
|
}
|
2016-06-24 13:49:36 +03:00
|
|
|
|
2019-05-17 17:19:17 +03:00
|
|
|
if (kvm_check_extension(cs->kvm_state,
|
|
|
|
KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
|
|
|
|
entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
if (kvm_check_extension(cs->kvm_state,
|
|
|
|
KVM_CAP_HYPERV_SEND_IPI) > 0) {
|
|
|
|
entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
|
|
|
|
entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
|
|
|
return cpuid;
|
|
|
|
}
|
|
|
|
|
2021-04-22 19:11:19 +03:00
|
|
|
static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
|
2021-04-22 19:11:17 +03:00
|
|
|
{
|
|
|
|
struct kvm_cpuid_entry2 *entry;
|
2021-04-22 19:11:19 +03:00
|
|
|
struct kvm_cpuid2 *cpuid;
|
|
|
|
|
|
|
|
if (hv_cpuid_cache) {
|
|
|
|
cpuid = hv_cpuid_cache;
|
|
|
|
} else {
|
|
|
|
if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
|
|
|
|
cpuid = get_supported_hv_cpuid(cs);
|
|
|
|
} else {
|
2021-07-16 14:58:51 +03:00
|
|
|
/*
|
|
|
|
* 'cs->kvm_state' may be NULL when Hyper-V features are expanded
|
|
|
|
* before KVM context is created but this is only done when
|
|
|
|
* KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
|
|
|
|
* KVM_CAP_HYPERV_CPUID.
|
|
|
|
*/
|
|
|
|
assert(cs->kvm_state);
|
|
|
|
|
2021-04-22 19:11:19 +03:00
|
|
|
cpuid = get_supported_hv_cpuid_legacy(cs);
|
|
|
|
}
|
|
|
|
hv_cpuid_cache = cpuid;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!cpuid) {
|
|
|
|
return 0;
|
|
|
|
}
|
2021-04-22 19:11:17 +03:00
|
|
|
|
|
|
|
entry = cpuid_find_entry(cpuid, func, 0);
|
|
|
|
if (!entry) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return cpuid_entry_get_reg(entry, reg);
|
|
|
|
}
|
|
|
|
|
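/*
 * A Hyper-V feature is reported as supported only when every CPUID bit
 * listed for it in kvm_hyperv_properties[] is present in the host data;
 * empty flag slots (func == 0) are skipped.
 */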
2021-04-22 19:11:19 +03:00
|
|
|
static bool hyperv_feature_supported(CPUState *cs, int feature)
|
2021-04-22 19:11:16 +03:00
|
|
|
{
|
2021-04-22 19:11:18 +03:00
|
|
|
uint32_t func, bits;
|
|
|
|
int i, reg;
|
2021-04-22 19:11:16 +03:00
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
|
2021-04-22 19:11:18 +03:00
|
|
|
|
|
|
|
func = kvm_hyperv_properties[feature].flags[i].func;
|
|
|
|
reg = kvm_hyperv_properties[feature].flags[i].reg;
|
2021-04-22 19:11:16 +03:00
|
|
|
bits = kvm_hyperv_properties[feature].flags[i].bits;
|
|
|
|
|
2021-04-22 19:11:18 +03:00
|
|
|
if (!func) {
|
2021-04-22 19:11:16 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2021-04-22 19:11:19 +03:00
|
|
|
if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
|
2021-04-22 19:11:16 +03:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-06-08 15:08:14 +03:00
|
|
|
/* Checks that all feature dependencies are enabled */
|
|
|
|
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
|
2019-05-17 17:19:17 +03:00
|
|
|
{
|
2019-05-17 17:19:21 +03:00
|
|
|
uint64_t deps;
|
2021-04-22 19:11:16 +03:00
|
|
|
int dep_feat;
|
2019-05-17 17:19:17 +03:00
|
|
|
|
2019-05-17 17:19:21 +03:00
|
|
|
deps = kvm_hyperv_properties[feature].dependencies;
|
2019-06-24 22:39:13 +03:00
|
|
|
while (deps) {
|
|
|
|
dep_feat = ctz64(deps);
|
2019-05-17 17:19:21 +03:00
|
|
|
if (!(hyperv_feat_enabled(cpu, dep_feat))) {
|
2021-04-22 19:11:22 +03:00
|
|
|
error_setg(errp, "Hyper-V %s requires Hyper-V %s",
|
|
|
|
kvm_hyperv_properties[feature].desc,
|
|
|
|
kvm_hyperv_properties[dep_feat].desc);
|
2021-06-08 15:08:14 +03:00
|
|
|
return false;
|
2019-05-17 17:19:21 +03:00
|
|
|
}
|
2019-06-24 22:39:13 +03:00
|
|
|
deps &= ~(1ull << dep_feat);
|
2019-05-17 17:19:21 +03:00
|
|
|
}
|
|
|
|
|
2021-06-08 15:08:14 +03:00
|
|
|
return true;
|
2019-05-17 17:19:17 +03:00
|
|
|
}
|
|
|
|
|
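/*
 * Build the guest-visible value of one Hyper-V CPUID register by OR-ing
 * together the bits of every enabled feature that lives in the given
 * leaf (func) and register (reg).  The eVMCS version range is added to
 * HV_CPUID_NESTED_FEATURES.EAX separately below.
 */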
2021-04-22 19:11:18 +03:00
|
|
|
static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
|
2021-04-22 19:11:15 +03:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
uint32_t r = 0;
|
|
|
|
int i, j;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
|
|
|
|
if (!hyperv_feat_enabled(cpu, i)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
|
2021-04-22 19:11:18 +03:00
|
|
|
if (kvm_hyperv_properties[i].flags[j].func != func) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (kvm_hyperv_properties[i].flags[j].reg != reg) {
|
2021-04-22 19:11:15 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
r |= kvm_hyperv_properties[i].flags[j].bits;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-25 14:59:44 +03:00
|
|
|
/* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
|
|
|
|
if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
|
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
|
|
|
|
r |= DEFAULT_EVMCS_VERSION;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-22 19:11:15 +03:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2019-05-17 17:19:18 +03:00
|
|
|
/*
|
2021-04-22 19:11:20 +03:00
|
|
|
* Expand Hyper-V CPU features. In particular, check that all the requested
|
|
|
|
* features are supported by the host and the sanity of the configuration
|
|
|
|
* (that all the required dependencies are included). Also, this takes care
|
|
|
|
* of 'hv_passthrough' mode and fills the environment with all supported
|
|
|
|
* Hyper-V features.
|
2019-05-17 17:19:18 +03:00
|
|
|
*/
|
2021-06-08 15:08:13 +03:00
|
|
|
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
|
2019-05-17 17:19:17 +03:00
|
|
|
{
|
2021-06-08 15:08:13 +03:00
|
|
|
CPUState *cs = CPU(cpu);
|
2021-06-08 15:08:14 +03:00
|
|
|
Error *local_err = NULL;
|
|
|
|
int feat;
|
2019-05-17 17:19:17 +03:00
|
|
|
|
2019-05-17 17:19:18 +03:00
|
|
|
if (!hyperv_enabled(cpu))
|
2021-06-08 15:08:12 +03:00
|
|
|
return true;
|
2019-05-17 17:19:18 +03:00
|
|
|
|
2021-06-08 15:08:13 +03:00
|
|
|
/*
|
|
|
|
* When kvm_hyperv_expand_features is called at CPU feature expansion
|
|
|
|
* time, per-CPU kvm_state is not available yet, so we can only proceed
|
|
|
|
* when KVM_CAP_SYS_HYPERV_CPUID is supported.
|
|
|
|
*/
|
|
|
|
if (!cs->kvm_state &&
|
|
|
|
!kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID))
|
|
|
|
return true;
|
|
|
|
|
2019-05-17 17:19:20 +03:00
|
|
|
if (cpu->hyperv_passthrough) {
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_vendor_id[0] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_vendor_id[1] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_vendor_id[2] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
|
|
|
|
sizeof(cpu->hyperv_vendor_id) + 1);
|
|
|
|
memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
|
|
|
|
sizeof(cpu->hyperv_vendor_id));
|
|
|
|
cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;
|
|
|
|
|
|
|
|
cpu->hyperv_interface_id[0] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_interface_id[1] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_interface_id[2] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_interface_id[3] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);
|
2021-04-22 19:11:17 +03:00
|
|
|
|
2021-09-02 12:35:29 +03:00
|
|
|
cpu->hyperv_ver_id_build =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
|
2021-09-02 12:35:29 +03:00
|
|
|
cpu->hyperv_ver_id_major =
|
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
|
|
|
|
cpu->hyperv_ver_id_minor =
|
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
|
|
|
|
cpu->hyperv_ver_id_sp =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
|
2021-09-02 12:35:29 +03:00
|
|
|
cpu->hyperv_ver_id_sb =
|
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
|
|
|
|
cpu->hyperv_ver_id_sn =
|
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;
|
2021-04-22 19:11:17 +03:00
|
|
|
|
2021-04-22 19:11:19 +03:00
|
|
|
cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
|
2021-04-22 19:11:17 +03:00
|
|
|
R_EAX);
|
|
|
|
cpu->hyperv_limits[0] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_limits[1] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
|
2021-04-22 19:11:17 +03:00
|
|
|
cpu->hyperv_limits[2] =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);
|
2021-04-22 19:11:17 +03:00
|
|
|
|
|
|
|
cpu->hyperv_spinlock_attempts =
|
2021-04-22 19:11:19 +03:00
|
|
|
hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);
|
2019-10-18 19:39:08 +03:00
|
|
|
|
2021-06-08 15:08:14 +03:00
|
|
|
/*
|
|
|
|
* Mark feature as enabled in 'cpu->hyperv_features' as
|
|
|
|
* hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
|
|
|
|
*/
|
|
|
|
for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
|
|
|
|
if (hyperv_feature_supported(cs, feat)) {
|
|
|
|
cpu->hyperv_features |= BIT(feat);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* Check features availability and dependencies */
|
|
|
|
for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
|
|
|
|
/* If the feature was not requested skip it. */
|
|
|
|
if (!hyperv_feat_enabled(cpu, feat)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if the feature is supported by KVM */
|
|
|
|
if (!hyperv_feature_supported(cs, feat)) {
|
|
|
|
error_setg(errp, "Hyper-V %s is not supported by kernel",
|
|
|
|
kvm_hyperv_properties[feat].desc);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check dependencies */
|
|
|
|
if (!hv_feature_check_deps(cpu, feat, &local_err)) {
|
|
|
|
error_propagate(errp, local_err);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2021-04-22 19:11:22 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
2019-05-17 17:19:21 +03:00
|
|
|
/* Additional dependencies not covered by kvm_hyperv_properties[] */
|
2019-05-17 17:19:17 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
|
|
|
|
!cpu->hyperv_synic_kvm_only &&
|
|
|
|
!hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
|
2021-04-22 19:11:22 +03:00
|
|
|
error_setg(errp, "Hyper-V %s requires Hyper-V %s",
|
|
|
|
kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
|
|
|
|
kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
|
2021-06-08 15:08:12 +03:00
|
|
|
return false;
|
2019-05-17 17:19:17 +03:00
|
|
|
}
|
2021-06-08 15:08:12 +03:00
|
|
|
|
|
|
|
return true;
|
2021-04-22 19:11:20 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
|
|
|
|
*/
|
|
|
|
static int hyperv_fill_cpuids(CPUState *cs,
|
|
|
|
struct kvm_cpuid_entry2 *cpuid_ent)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
struct kvm_cpuid_entry2 *c;
|
2022-02-16 13:24:59 +03:00
|
|
|
uint32_t signature[3];
|
|
|
|
uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
|
2022-05-25 14:59:44 +03:00
|
|
|
uint32_t nested_eax =
|
|
|
|
hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);
|
2022-02-16 13:24:59 +03:00
|
|
|
|
2022-05-25 14:59:44 +03:00
|
|
|
max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
|
|
|
|
HV_CPUID_IMPLEMENT_LIMITS;
|
2022-02-16 13:24:59 +03:00
|
|
|
|
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
|
|
|
|
max_cpuid_leaf =
|
|
|
|
MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
|
|
|
|
}
|
2021-04-22 19:11:20 +03:00
|
|
|
|
2019-05-17 17:19:18 +03:00
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
|
2022-02-16 13:24:59 +03:00
|
|
|
c->eax = max_cpuid_leaf;
|
2020-11-19 13:32:17 +03:00
|
|
|
c->ebx = cpu->hyperv_vendor_id[0];
|
|
|
|
c->ecx = cpu->hyperv_vendor_id[1];
|
|
|
|
c->edx = cpu->hyperv_vendor_id[2];
|
2019-05-17 17:19:18 +03:00
|
|
|
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_INTERFACE;
|
2020-11-19 13:32:18 +03:00
|
|
|
c->eax = cpu->hyperv_interface_id[0];
|
|
|
|
c->ebx = cpu->hyperv_interface_id[1];
|
|
|
|
c->ecx = cpu->hyperv_interface_id[2];
|
|
|
|
c->edx = cpu->hyperv_interface_id[3];
|
2019-05-17 17:19:18 +03:00
|
|
|
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_VERSION;
|
2021-09-02 12:35:29 +03:00
|
|
|
c->eax = cpu->hyperv_ver_id_build;
|
|
|
|
c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
|
|
|
|
cpu->hyperv_ver_id_minor;
|
|
|
|
c->ecx = cpu->hyperv_ver_id_sp;
|
|
|
|
c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
|
|
|
|
(cpu->hyperv_ver_id_sn & 0xffffff);
|
2019-05-17 17:19:18 +03:00
|
|
|
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_FEATURES;
|
2021-04-22 19:11:18 +03:00
|
|
|
c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
|
|
|
|
c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
|
|
|
|
c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);
|
2021-04-22 19:11:15 +03:00
|
|
|
|
2021-06-08 15:08:15 +03:00
|
|
|
/* Unconditionally required with any Hyper-V enlightenment */
|
|
|
|
c->eax |= HV_HYPERCALL_AVAILABLE;
|
|
|
|
|
2021-06-08 15:08:16 +03:00
|
|
|
/* SynIC and Vmbus devices require messages/signals hypercalls */
|
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
|
|
|
|
!cpu->hyperv_synic_kvm_only) {
|
|
|
|
c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
|
|
|
|
}
|
|
|
|
|
2021-09-02 12:35:27 +03:00
|
|
|
|
2021-04-22 19:11:15 +03:00
|
|
|
/* Not exposed by KVM but needed to make CPU hotplug in Windows work */
|
|
|
|
c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
|
2019-05-17 17:19:18 +03:00
|
|
|
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_ENLIGHTMENT_INFO;
|
2021-04-22 19:11:18 +03:00
|
|
|
c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
|
2019-05-17 17:19:18 +03:00
|
|
|
c->ebx = cpu->hyperv_spinlock_attempts;
|
|
|
|
|
2021-09-02 12:35:28 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
|
|
|
|
!hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
|
2021-09-02 12:35:27 +03:00
|
|
|
c->eax |= HV_APIC_ACCESS_RECOMMENDED;
|
|
|
|
}
|
|
|
|
|
2021-04-22 19:11:15 +03:00
|
|
|
if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
|
|
|
|
c->eax |= HV_NO_NONARCH_CORESHARING;
|
|
|
|
} else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
|
2021-04-22 19:11:19 +03:00
|
|
|
c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
|
2021-04-22 19:11:17 +03:00
|
|
|
HV_NO_NONARCH_CORESHARING;
|
2021-04-22 19:11:15 +03:00
|
|
|
}
|
|
|
|
|
2019-05-17 17:19:18 +03:00
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_IMPLEMENT_LIMITS;
|
|
|
|
c->eax = cpu->hv_max_vps;
|
2020-11-19 13:32:20 +03:00
|
|
|
c->ebx = cpu->hyperv_limits[0];
|
|
|
|
c->ecx = cpu->hyperv_limits[1];
|
|
|
|
c->edx = cpu->hyperv_limits[2];
|
2019-05-17 17:19:18 +03:00
|
|
|
|
2022-05-25 14:59:44 +03:00
|
|
|
if (nested_eax) {
|
2021-11-16 22:39:55 +03:00
|
|
|
uint32_t function;
|
2019-05-17 17:19:18 +03:00
|
|
|
|
|
|
|
/* Create zeroed 0x40000006..0x40000009 leaves */
|
|
|
|
for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
|
|
|
|
function < HV_CPUID_NESTED_FEATURES; function++) {
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = function;
|
|
|
|
}
|
|
|
|
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_NESTED_FEATURES;
|
2022-05-25 14:59:44 +03:00
|
|
|
c->eax = nested_eax;
|
2019-05-17 17:19:18 +03:00
|
|
|
}
|
2019-05-17 17:19:17 +03:00
|
|
|
|
2022-02-16 13:24:59 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
|
|
|
|
c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
|
|
|
|
HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
|
|
|
|
memcpy(signature, "Microsoft VS", 12);
|
|
|
|
c->eax = 0;
|
|
|
|
c->ebx = signature[0];
|
|
|
|
c->ecx = signature[1];
|
|
|
|
c->edx = signature[2];
|
|
|
|
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_SYNDBG_INTERFACE;
|
|
|
|
memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
|
|
|
|
c->eax = signature[0];
|
|
|
|
c->ebx = 0;
|
|
|
|
c->ecx = 0;
|
|
|
|
c->edx = 0;
|
|
|
|
|
|
|
|
c = &cpuid_ent[cpuid_i++];
|
|
|
|
c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
|
|
|
|
c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
|
|
|
|
c->ebx = 0;
|
|
|
|
c->ecx = 0;
|
|
|
|
c->edx = 0;
|
|
|
|
}
|
|
|
|
|
2021-04-22 19:11:19 +03:00
|
|
|
return cpuid_i;
|
2016-06-24 13:49:36 +03:00
|
|
|
}
|
|
|
|
|
2019-05-17 17:19:20 +03:00
|
|
|
static Error *hv_passthrough_mig_blocker;
|
2019-10-18 19:39:08 +03:00
|
|
|
static Error *hv_no_nonarch_cs_mig_blocker;
|
2019-05-17 17:19:20 +03:00
|
|
|
|
2021-06-08 15:08:11 +03:00
|
|
|
/* Checks that the exposed eVMCS version range is supported by KVM */
|
|
|
|
static bool evmcs_version_supported(uint16_t evmcs_version,
|
|
|
|
uint16_t supported_evmcs_version)
|
|
|
|
{
|
|
|
|
uint8_t min_version = evmcs_version & 0xff;
|
|
|
|
uint8_t max_version = evmcs_version >> 8;
|
|
|
|
uint8_t min_supported_version = supported_evmcs_version & 0xff;
|
|
|
|
uint8_t max_supported_version = supported_evmcs_version >> 8;
|
|
|
|
|
|
|
|
return (min_version >= min_supported_version) &&
|
|
|
|
(max_version <= max_supported_version);
|
|
|
|
}
|
|
|
|
|
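/*
 * Per-vCPU Hyper-V setup that has to go through KVM ioctls rather than
 * CPUID alone: migration blockers for 'hv-passthrough' and auto
 * 'hv-no-nonarch-coresharing', the VP_INDEX consistency check, SynIC
 * setup, enlightened VMCS version negotiation and CPUID enforcement.
 */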
2018-07-02 16:41:56 +03:00
|
|
|
static int hyperv_init_vcpu(X86CPU *cpu)
|
|
|
|
{
|
2018-09-21 11:22:08 +03:00
|
|
|
CPUState *cs = CPU(cpu);
|
2019-05-17 17:19:20 +03:00
|
|
|
Error *local_err = NULL;
|
2018-09-21 11:22:08 +03:00
|
|
|
int ret;
|
|
|
|
|
2019-05-17 17:19:20 +03:00
|
|
|
if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
|
|
|
|
error_setg(&hv_passthrough_mig_blocker,
|
|
|
|
"'hv-passthrough' CPU flag prevents migration, use explicit"
|
|
|
|
" set of hv-* flags instead");
|
2023-10-18 16:03:36 +03:00
|
|
|
ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
|
2021-07-20 15:54:01 +03:00
|
|
|
if (ret < 0) {
|
2019-05-17 17:19:20 +03:00
|
|
|
error_report_err(local_err);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-18 19:39:08 +03:00
|
|
|
if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
|
|
|
|
hv_no_nonarch_cs_mig_blocker == NULL) {
|
|
|
|
error_setg(&hv_no_nonarch_cs_mig_blocker,
|
|
|
|
"'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
|
|
|
|
" use explicit 'hv-no-nonarch-coresharing=on' instead (but"
|
|
|
|
" make sure SMT is disabled and/or that vCPUs are properly"
|
|
|
|
" pinned)");
|
2023-10-18 16:03:36 +03:00
|
|
|
ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
|
2021-07-20 15:54:01 +03:00
|
|
|
if (ret < 0) {
|
2019-10-18 19:39:08 +03:00
|
|
|
error_report_err(local_err);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
|
2018-07-02 16:41:56 +03:00
|
|
|
/*
|
|
|
|
* the kernel doesn't support setting vp_index; assert that its value
|
|
|
|
* is in sync
|
|
|
|
*/
|
2022-02-15 22:52:53 +03:00
|
|
|
uint64_t value;
|
2018-07-02 16:41:56 +03:00
|
|
|
|
2022-02-15 22:52:53 +03:00
|
|
|
ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
|
2018-07-02 16:41:56 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-02-15 22:52:53 +03:00
|
|
|
if (value != hyperv_vp_index(CPU(cpu))) {
|
2018-07-02 16:41:56 +03:00
|
|
|
error_report("kernel's vp_index != QEMU's vp_index");
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
|
2018-09-21 11:22:10 +03:00
|
|
|
uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
|
|
|
|
KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
|
|
|
|
ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
|
2018-09-21 11:22:08 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
error_report("failed to turn on HyperV SynIC in KVM: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
2018-09-21 11:22:09 +03:00
|
|
|
|
2018-09-21 11:22:10 +03:00
|
|
|
if (!cpu->hyperv_synic_kvm_only) {
|
|
|
|
ret = hyperv_x86_synic_add(cpu);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("failed to create HyperV SynIC: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
2018-09-21 11:22:09 +03:00
|
|
|
}
|
2018-09-21 11:22:08 +03:00
|
|
|
}
|
|
|
|
|
2021-04-22 19:11:21 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
|
2021-06-08 15:08:11 +03:00
|
|
|
uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
|
|
|
|
uint16_t supported_evmcs_version;
|
2021-04-22 19:11:21 +03:00
|
|
|
|
|
|
|
ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
|
2021-06-08 15:08:11 +03:00
|
|
|
(uintptr_t)&supported_evmcs_version);
|
2021-04-22 19:11:21 +03:00
|
|
|
|
2021-06-08 15:08:11 +03:00
|
|
|
/*
|
|
|
|
* KVM is required to support EVMCS ver.1, as that's what the 'hv-evmcs'
|
|
|
|
* option sets. Note: we hardcode the maximum supported eVMCS version
|
|
|
|
* to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
|
|
|
|
* ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
|
|
|
|
* to be added.
|
|
|
|
*/
|
2021-04-22 19:11:21 +03:00
|
|
|
if (ret < 0) {
|
2021-06-08 15:08:11 +03:00
|
|
|
error_report("Hyper-V %s is not supported by kernel",
|
|
|
|
kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
|
2021-04-22 19:11:21 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-06-08 15:08:11 +03:00
|
|
|
if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
|
|
|
|
error_report("eVMCS version range [%d..%d] is not supported by "
|
|
|
|
"kernel (supported: [%d..%d])", evmcs_version & 0xff,
|
|
|
|
evmcs_version >> 8, supported_evmcs_version & 0xff,
|
|
|
|
supported_evmcs_version >> 8);
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
2021-04-22 19:11:21 +03:00
|
|
|
}
|
|
|
|
|
2021-09-02 12:35:26 +03:00
|
|
|
if (cpu->hyperv_enforce_cpuid) {
|
|
|
|
ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-02 16:41:56 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-05-14 23:30:09 +04:00
|
|
|
static Error *invtsc_mig_blocker;
|
|
|
|
|
2013-01-28 15:49:26 +04:00
|
|
|
#define KVM_MAX_CPUID_ENTRIES 100
|
2013-01-30 02:57:41 +04:00
|
|
|
|
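/*
 * Allocate the per-vCPU buffer used by KVM_GET_XSAVE/KVM_SET_XSAVE.
 * When KVM_CAP_XSAVE2 is available its return value gives the required
 * size (rounded up to a page); otherwise the legacy fixed-size
 * struct kvm_xsave is used.
 */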
2022-02-17 09:04:32 +03:00
|
|
|
static void kvm_init_xsave(CPUX86State *env)
|
|
|
|
{
|
|
|
|
if (has_xsave2) {
|
|
|
|
env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
|
|
|
|
} else {
|
2023-10-17 14:18:15 +03:00
|
|
|
env->xsave_buf_len = sizeof(struct kvm_xsave);
|
2022-02-17 09:04:32 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
|
|
|
|
memset(env->xsave_buf, 0, env->xsave_buf_len);
|
|
|
|
/*
|
|
|
|
* The allocated storage must be large enough for all of the
|
|
|
|
* possible XSAVE state components.
|
|
|
|
*/
|
|
|
|
assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
|
|
|
|
env->xsave_buf_len);
|
|
|
|
}
|
|
|
|
|
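/*
 * (Re)initialize the preallocated nested-virtualization state buffer.
 * For VMX the vmxon/vmcs12 addresses are set to -1 to mean "no VMXON
 * region, no current VMCS"; for SVM only the format is recorded.
 */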
2022-08-18 18:01:12 +03:00
|
|
|
static void kvm_init_nested_state(CPUX86State *env)
|
|
|
|
{
|
|
|
|
struct kvm_vmx_nested_state_hdr *vmx_hdr;
|
|
|
|
uint32_t size;
|
|
|
|
|
|
|
|
if (!env->nested_state) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
size = env->nested_state->size;
|
|
|
|
|
|
|
|
memset(env->nested_state, 0, size);
|
|
|
|
env->nested_state->size = size;
|
|
|
|
|
|
|
|
if (cpu_has_vmx(env)) {
|
|
|
|
env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
|
|
|
|
vmx_hdr = &env->nested_state->hdr.vmx;
|
|
|
|
vmx_hdr->vmxon_pa = -1ull;
|
|
|
|
vmx_hdr->vmcs12_pa = -1ull;
|
|
|
|
} else if (cpu_has_svm(env)) {
|
|
|
|
env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
int kvm_arch_init_vcpu(CPUState *cs)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
|
|
|
struct {
|
2009-02-09 18:50:31 +03:00
|
|
|
struct kvm_cpuid2 cpuid;
|
2013-01-28 15:49:26 +04:00
|
|
|
struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
|
2018-12-10 14:46:54 +03:00
|
|
|
} cpuid_data;
|
|
|
|
/*
|
|
|
|
* The kernel defines these structs with padding fields so there
|
|
|
|
* should be no extra padding in our cpuid_data struct.
|
|
|
|
*/
|
|
|
|
QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
|
|
|
|
sizeof(struct kvm_cpuid2) +
|
|
|
|
sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
CPUX86State *env = &cpu->env;
|
2009-02-09 18:50:31 +03:00
|
|
|
uint32_t limit, i, j, cpuid_i;
|
2009-04-18 00:50:54 +04:00
|
|
|
uint32_t unused;
|
2010-01-13 16:25:06 +03:00
|
|
|
struct kvm_cpuid_entry2 *c;
|
|
|
|
uint32_t signature[3];
|
2014-01-23 22:27:24 +04:00
|
|
|
int kvm_base = KVM_CPUID_SIGNATURE;
|
2019-06-19 19:21:38 +03:00
|
|
|
int max_nested_state_len;
|
2011-07-07 18:13:13 +04:00
|
|
|
int r;
|
2017-01-16 14:31:53 +03:00
|
|
|
Error *local_err = NULL;
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2013-11-07 01:35:27 +04:00
|
|
|
memset(&cpuid_data, 0, sizeof(cpuid_data));
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
cpuid_i = 0;
|
|
|
|
|
2022-02-17 09:04:32 +03:00
|
|
|
has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);
|
|
|
|
|
2017-08-07 11:57:01 +03:00
|
|
|
r = kvm_arch_set_tsc_khz(cs);
|
|
|
|
if (r < 0) {
|
2019-06-19 19:21:31 +03:00
|
|
|
return r;
|
2017-08-07 11:57:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vcpu's TSC frequency is either specified by the user, or follows
|
|
|
|
* the value used by KVM if the former is not present. In the
|
|
|
|
* latter case, we query it from KVM and record in env->tsc_khz,
|
|
|
|
* so that vcpu's TSC frequency can be migrated later via this field.
|
|
|
|
*/
|
|
|
|
if (!env->tsc_khz) {
|
|
|
|
r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
|
|
|
|
kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
|
|
|
|
-ENOTSUP;
|
|
|
|
if (r > 0) {
|
|
|
|
env->tsc_khz = r;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-12 19:54:29 +03:00
|
|
|
env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;
|
|
|
|
|
2021-06-08 15:08:13 +03:00
|
|
|
/*
|
|
|
|
* kvm_hyperv_expand_features() is called here for the second time in case
|
|
|
|
* KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
|
|
|
|
* 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
|
|
|
|
* check which Hyper-V enlightenments are supported and which are not, we
|
|
|
|
* can still proceed and check/expand Hyper-V enlightenments here so legacy
|
|
|
|
* behavior is preserved.
|
|
|
|
*/
|
|
|
|
if (!kvm_hyperv_expand_features(cpu, &local_err)) {
|
2021-04-22 19:11:22 +03:00
|
|
|
error_report_err(local_err);
|
|
|
|
return -ENOSYS;
|
2021-04-22 19:11:20 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (hyperv_enabled(cpu)) {
|
2021-04-22 19:11:21 +03:00
|
|
|
r = hyperv_init_vcpu(cpu);
|
|
|
|
if (r) {
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2021-04-22 19:11:20 +03:00
|
|
|
cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
|
2014-01-23 22:27:24 +04:00
|
|
|
kvm_base = KVM_CPUID_SIGNATURE_NEXT;
|
2014-01-23 22:16:12 +04:00
|
|
|
has_msr_hv_hypercall = true;
|
2011-12-19 00:48:14 +04:00
|
|
|
}
|
|
|
|
|
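/*
 * Xen HVM guest support: advertise the Xen signature, version, hypercall
 * MSR and timing leaves at kvm_base and set up the vCPU for Xen
 * emulation.
 */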
2022-12-06 13:48:53 +03:00
|
|
|
if (cs->kvm_state->xen_version) {
|
|
|
|
#ifdef CONFIG_XEN_EMU
|
|
|
|
struct kvm_cpuid_entry2 *xen_max_leaf;
|
|
|
|
|
|
|
|
memcpy(signature, "XenVMMXenVMM", 12);
|
|
|
|
|
|
|
|
xen_max_leaf = c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = kvm_base + XEN_CPUID_SIGNATURE;
|
|
|
|
c->eax = kvm_base + XEN_CPUID_TIME;
|
|
|
|
c->ebx = signature[0];
|
|
|
|
c->ecx = signature[1];
|
|
|
|
c->edx = signature[2];
|
|
|
|
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = kvm_base + XEN_CPUID_VENDOR;
|
|
|
|
c->eax = cs->kvm_state->xen_version;
|
|
|
|
c->ebx = 0;
|
|
|
|
c->ecx = 0;
|
|
|
|
c->edx = 0;
|
|
|
|
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = kvm_base + XEN_CPUID_HVM_MSR;
|
|
|
|
/* Number of hypercall-transfer pages */
|
|
|
|
c->eax = 1;
|
|
|
|
/* Hypercall MSR base address */
|
|
|
|
if (hyperv_enabled(cpu)) {
|
|
|
|
c->ebx = XEN_HYPERCALL_MSR_HYPERV;
|
|
|
|
kvm_xen_init(cs->kvm_state, c->ebx);
|
|
|
|
} else {
|
|
|
|
c->ebx = XEN_HYPERCALL_MSR;
|
|
|
|
}
|
|
|
|
c->ecx = 0;
|
|
|
|
c->edx = 0;
|
|
|
|
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = kvm_base + XEN_CPUID_TIME;
|
|
|
|
c->eax = ((!!tsc_is_stable_and_known(env) << 1) |
|
|
|
|
(!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2));
|
|
|
|
/* default=0 (emulate if necessary) */
|
|
|
|
c->ebx = 0;
|
|
|
|
/* guest tsc frequency */
|
|
|
|
c->ecx = env->user_tsc_khz;
|
|
|
|
/* guest tsc incarnation (migration count) */
|
|
|
|
c->edx = 0;
|
|
|
|
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = kvm_base + XEN_CPUID_HVM;
|
|
|
|
xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM;
|
|
|
|
if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) {
|
|
|
|
c->function = kvm_base + XEN_CPUID_HVM;
|
|
|
|
|
|
|
|
if (cpu->xen_vapic) {
|
|
|
|
c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
|
|
|
|
c->eax |= XEN_HVM_CPUID_X2APIC_VIRT;
|
|
|
|
}
|
|
|
|
|
|
|
|
c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
|
|
|
|
|
|
|
|
if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) {
|
|
|
|
c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
|
|
|
|
c->ebx = cs->cpu_index;
|
|
|
|
}
|
2023-10-12 01:50:02 +03:00
|
|
|
|
|
|
|
if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) {
|
|
|
|
c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR;
|
|
|
|
}
|
2022-12-06 13:48:53 +03:00
|
|
|
}
|
|
|
|
|
2022-12-16 14:05:29 +03:00
|
|
|
r = kvm_xen_init_vcpu(cs);
|
|
|
|
if (r) {
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2022-12-06 13:48:53 +03:00
|
|
|
kvm_base += 0x100;
|
|
|
|
#else /* CONFIG_XEN_EMU */
|
|
|
|
/* This should never happen as kvm_arch_init() would have died first. */
|
|
|
|
fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n");
|
|
|
|
abort();
|
|
|
|
#endif
|
|
|
|
} else if (cpu->expose_kvm) {
|
2014-06-02 21:28:50 +04:00
|
|
|
memcpy(signature, "KVMKVMKVM\0\0\0", 12);
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = KVM_CPUID_SIGNATURE | kvm_base;
|
2014-06-04 05:10:06 +04:00
|
|
|
c->eax = KVM_CPUID_FEATURES | kvm_base;
|
2014-06-02 21:28:50 +04:00
|
|
|
c->ebx = signature[0];
|
|
|
|
c->ecx = signature[1];
|
|
|
|
c->edx = signature[2];
|
2014-01-23 22:27:24 +04:00
|
|
|
|
2014-06-02 21:28:50 +04:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = KVM_CPUID_FEATURES | kvm_base;
|
|
|
|
c->eax = env->features[FEAT_KVM];
|
2018-02-09 17:15:25 +03:00
|
|
|
c->edx = env->features[FEAT_KVM_HINTS];
|
2014-06-02 21:28:50 +04:00
|
|
|
}
|
2013-02-20 06:27:20 +04:00
|
|
|
|
2009-04-18 00:50:54 +04:00
|
|
|
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2021-09-02 12:35:25 +03:00
|
|
|
if (cpu->kvm_pv_enforce_cpuid) {
|
|
|
|
r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
|
|
|
|
if (r < 0) {
|
|
|
|
fprintf(stderr,
|
|
|
|
"failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s",
|
|
|
|
strerror(-r));
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
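/*
 * Copy the basic CPUID leaves (0..limit) into cpuid_data, expanding the
 * stateful leaf 2 and the index-significant leaves (4, 7, 0xb, 0xd,
 * 0x12, 0x14, 0x1d, 0x1e, 0x1f) into one entry per valid subleaf.
 */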
2008-11-05 19:29:27 +03:00
|
|
|
for (i = 0; i <= limit; i++) {
|
2013-01-28 15:49:26 +04:00
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "unsupported level value: 0x%x\n", limit);
|
|
|
|
abort();
|
|
|
|
}
|
2010-01-13 16:25:06 +03:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
2009-02-09 18:50:31 +03:00
|
|
|
|
|
|
|
switch (i) {
|
2009-02-09 18:50:36 +03:00
|
|
|
case 2: {
|
|
|
|
/* Keep reading function 2 till all the input is received */
|
|
|
|
int times;
|
|
|
|
|
|
|
|
c->function = i;
|
2009-04-18 00:50:54 +04:00
|
|
|
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
|
|
|
|
KVM_CPUID_FLAG_STATE_READ_NEXT;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
|
|
|
times = c->eax & 0xff;
|
2009-02-09 18:50:36 +03:00
|
|
|
|
|
|
|
for (j = 1; j < times; ++j) {
|
2013-01-28 15:49:26 +04:00
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "cpuid_data is full, no space for "
|
|
|
|
"cpuid(eax:2):eax & 0xf = 0x%x\n", times);
|
|
|
|
abort();
|
|
|
|
}
|
2009-04-18 00:50:54 +04:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
2009-02-09 18:50:36 +03:00
|
|
|
c->function = i;
|
2009-04-18 00:50:54 +04:00
|
|
|
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
2009-02-09 18:50:36 +03:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2019-06-20 08:45:23 +03:00
|
|
|
case 0x1f:
|
|
|
|
if (env->nr_dies < 2) {
|
2024-01-25 05:40:14 +03:00
|
|
|
cpuid_i--;
|
2019-06-20 08:45:23 +03:00
|
|
|
break;
|
|
|
|
}
|
2020-09-11 15:53:01 +03:00
|
|
|
/* fallthrough */
|
2009-02-09 18:50:31 +03:00
|
|
|
case 4:
|
|
|
|
case 0xb:
|
|
|
|
case 0xd:
|
|
|
|
for (j = 0; ; j++) {
|
2011-06-10 17:56:28 +04:00
|
|
|
if (i == 0xd && j == 64) {
|
|
|
|
break;
|
|
|
|
}
|
2019-06-20 08:45:23 +03:00
|
|
|
|
2009-02-09 18:50:31 +03:00
|
|
|
c->function = i;
|
|
|
|
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
|
|
|
c->index = j;
|
2009-04-18 00:50:54 +04:00
|
|
|
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
2009-02-09 18:50:31 +03:00
|
|
|
|
2010-12-27 18:19:29 +03:00
|
|
|
if (i == 4 && c->eax == 0) {
|
2009-02-09 18:50:31 +03:00
|
|
|
break;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
|
|
|
if (i == 0xb && !(c->ecx & 0xff00)) {
|
2009-02-09 18:50:31 +03:00
|
|
|
break;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2019-06-20 08:45:23 +03:00
|
|
|
if (i == 0x1f && !(c->ecx & 0xff00)) {
|
|
|
|
break;
|
|
|
|
}
|
2010-12-27 18:19:29 +03:00
|
|
|
if (i == 0xd && c->eax == 0) {
|
2011-06-10 17:56:28 +04:00
|
|
|
continue;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2013-01-28 15:49:26 +04:00
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "cpuid_data is full, no space for "
|
|
|
|
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
|
|
|
abort();
|
|
|
|
}
|
2009-04-18 00:50:54 +04:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
2009-02-09 18:50:31 +03:00
|
|
|
}
|
|
|
|
break;
|
2019-07-25 09:14:16 +03:00
|
|
|
case 0x7:
|
2021-07-19 14:21:17 +03:00
|
|
|
case 0x12:
|
|
|
|
for (j = 0; ; j++) {
|
|
|
|
c->function = i;
|
|
|
|
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
|
|
|
c->index = j;
|
|
|
|
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
|
|
|
|
|
|
|
if (j > 1 && (c->eax & 0xf) != 1) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "cpuid_data is full, no space for "
|
|
|
|
"cpuid(eax:0x12,ecx:0x%x)\n", j);
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
}
|
|
|
|
break;
|
2022-02-17 09:04:31 +03:00
|
|
|
case 0x14:
|
|
|
|
case 0x1d:
|
|
|
|
case 0x1e: {
|
2018-03-04 19:48:35 +03:00
|
|
|
uint32_t times;
|
|
|
|
|
|
|
|
c->function = i;
|
|
|
|
c->index = 0;
|
|
|
|
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
|
|
|
times = c->eax;
|
|
|
|
|
|
|
|
for (j = 1; j <= times; ++j) {
|
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "cpuid_data is full, no space for "
|
2019-07-25 09:14:16 +03:00
|
|
|
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
2018-03-04 19:48:35 +03:00
|
|
|
abort();
|
|
|
|
}
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = i;
|
|
|
|
c->index = j;
|
|
|
|
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
|
|
|
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2009-02-09 18:50:31 +03:00
|
|
|
default:
|
|
|
|
c->function = i;
|
2009-04-18 00:50:54 +04:00
|
|
|
c->flags = 0;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
2019-08-23 01:52:10 +03:00
|
|
|
if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
|
|
|
|
/*
|
|
|
|
* KVM already returns all zeroes if a CPUID entry is missing,
|
|
|
|
* so we can omit it and avoid hitting KVM's 80-entry limit.
|
|
|
|
*/
|
|
|
|
cpuid_i--;
|
|
|
|
}
|
2009-02-09 18:50:31 +03:00
|
|
|
break;
|
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
2013-07-25 19:05:22 +04:00
|
|
|
|
|
|
|
if (limit >= 0x0a) {
|
2017-12-27 17:04:26 +03:00
|
|
|
uint32_t eax, edx;
|
2013-07-25 19:05:22 +04:00
|
|
|
|
2017-12-27 17:04:26 +03:00
|
|
|
cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
|
|
|
|
|
|
|
|
has_architectural_pmu_version = eax & 0xff;
|
|
|
|
if (has_architectural_pmu_version > 0) {
|
|
|
|
num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
|
2013-07-25 19:05:22 +04:00
|
|
|
|
|
|
|
/* Shouldn't be more than 32, since that's the number of bits
|
|
|
|
* available in EBX to tell us _which_ counters are available.
|
|
|
|
* Play it safe.
|
|
|
|
*/
|
2017-12-27 17:04:26 +03:00
|
|
|
if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
|
|
|
|
num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (has_architectural_pmu_version > 1) {
|
|
|
|
num_architectural_pmu_fixed_counters = edx & 0x1f;
|
|
|
|
|
|
|
|
if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
|
|
|
|
num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
|
|
|
|
}
|
2013-07-25 19:05:22 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-04-18 00:50:54 +04:00
|
|
|
cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
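/* Extended leaves 0x80000000..limit; 0x8000001d (AMD cache info) gets one entry per cache level. */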
for (i = 0x80000000; i <= limit; i++) {
|
2013-01-28 15:49:26 +04:00
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
|
|
|
|
abort();
|
|
|
|
}
|
2010-01-13 16:25:06 +03:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2018-05-24 18:43:31 +03:00
|
|
|
switch (i) {
|
|
|
|
case 0x8000001d:
|
|
|
|
/* Query for all AMD cache information leaves */
|
|
|
|
for (j = 0; ; j++) {
|
|
|
|
c->function = i;
|
|
|
|
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
|
|
|
c->index = j;
|
|
|
|
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
|
|
|
|
|
|
|
if (c->eax == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "cpuid_data is full, no space for "
|
|
|
|
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
c->function = i;
|
|
|
|
c->flags = 0;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
2019-08-23 01:52:10 +03:00
|
|
|
if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
|
|
|
|
/*
|
|
|
|
* KVM already returns all zeroes if a CPUID entry is missing,
|
|
|
|
* so we can omit it and avoid hitting KVM's 80-entry limit.
|
|
|
|
*/
|
|
|
|
cpuid_i--;
|
|
|
|
}
|
2018-05-24 18:43:31 +03:00
|
|
|
break;
|
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2011-06-01 05:59:52 +04:00
|
|
|
/* Call Centaur's CPUID instructions if they are supported. */
|
|
|
|
if (env->cpuid_xlevel2 > 0) {
|
|
|
|
cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
|
|
|
|
|
|
|
|
for (i = 0xC0000000; i <= limit; i++) {
|
2013-01-28 15:49:26 +04:00
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
|
|
|
|
abort();
|
|
|
|
}
|
2011-06-01 05:59:52 +04:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
|
|
|
|
c->function = i;
|
|
|
|
c->flags = 0;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
cpuid_data.cpuid.nent = cpuid_i;
|
|
|
|
|
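/*
 * Machine-check setup: require that the host offers at least the MCE
 * bank count and capability bits (LMCE in particular) requested by the
 * guest CPU model, then program them with KVM_X86_SETUP_MCE.
 */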
2010-10-11 22:31:18 +04:00
|
|
|
if (((env->cpuid_version >> 8)&0xF) >= 6
|
2013-04-22 23:00:15 +04:00
|
|
|
&& (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
|
2023-10-17 15:30:44 +03:00
|
|
|
(CPUID_MCE | CPUID_MCA)) {
|
2015-11-25 20:19:16 +03:00
|
|
|
uint64_t mcg_cap, unsupported_caps;
|
2010-10-11 22:31:18 +04:00
|
|
|
int banks;
|
2011-03-02 10:56:17 +03:00
|
|
|
int ret;
|
2010-10-11 22:31:18 +04:00
|
|
|
|
2012-12-01 08:35:08 +04:00
|
|
|
ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
|
2011-03-02 10:56:18 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
|
|
|
|
return ret;
|
2010-10-11 22:31:18 +04:00
|
|
|
}
|
2011-03-02 10:56:18 +03:00
|
|
|
|
2015-11-25 20:19:15 +03:00
|
|
|
if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
|
2015-11-25 20:19:14 +03:00
|
|
|
error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
|
2015-11-25 20:19:15 +03:00
|
|
|
(int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
|
2015-11-25 20:19:14 +03:00
|
|
|
return -ENOTSUP;
|
2011-03-02 10:56:18 +03:00
|
|
|
}
|
2015-11-25 20:19:14 +03:00
|
|
|
|
2015-11-25 20:19:16 +03:00
|
|
|
unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
|
|
|
|
if (unsupported_caps) {
|
2016-06-22 09:56:21 +03:00
|
|
|
if (unsupported_caps & MCG_LMCE_P) {
|
|
|
|
error_report("kvm: LMCE not supported");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
2017-07-12 16:57:41 +03:00
|
|
|
warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
|
|
|
|
unsupported_caps);
|
2015-11-25 20:19:16 +03:00
|
|
|
}
|
|
|
|
|
2015-11-25 20:19:15 +03:00
|
|
|
env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
|
|
|
|
ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
|
2011-03-02 10:56:18 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
2010-10-11 22:31:18 +04:00
|
|
|
}
|
|
|
|
|
2020-05-13 16:26:30 +03:00
|
|
|
cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
|
2011-02-03 22:19:53 +03:00
|
|
|
|
2013-08-19 05:33:30 +04:00
|
|
|
c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
|
|
|
|
if (c) {
|
|
|
|
has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
|
|
|
|
!!(c->ecx & CPUID_EXT_SMX);
|
|
|
|
}
|
|
|
|
|
2021-07-19 14:21:14 +03:00
|
|
|
c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0);
|
|
|
|
if (c && (c->ebx & CPUID_7_0_EBX_SGX)) {
|
|
|
|
has_msr_feature_control = true;
|
|
|
|
}
|
|
|
|
|
2016-06-22 09:56:21 +03:00
|
|
|
if (env->mcg_cap & MCG_LMCE_P) {
|
|
|
|
has_msr_mcg_ext_ctl = has_msr_feature_control = true;
|
|
|
|
}
|
|
|
|
|
2017-01-08 20:32:34 +03:00
|
|
|
if (!env->user_tsc_khz) {
|
|
|
|
if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
|
|
|
|
invtsc_mig_blocker == NULL) {
|
|
|
|
error_setg(&invtsc_mig_blocker,
|
|
|
|
"State blocked by non-migratable CPU device"
|
|
|
|
" (invtsc flag)");
|
2023-10-18 16:03:36 +03:00
|
|
|
r = migrate_add_blocker(&invtsc_mig_blocker, &local_err);
|
2021-07-20 15:54:01 +03:00
|
|
|
if (r < 0) {
|
2017-01-16 14:31:53 +03:00
|
|
|
error_report_err(local_err);
|
2019-07-06 00:06:36 +03:00
|
|
|
return r;
|
2017-01-16 14:31:53 +03:00
|
|
|
}
|
2017-01-08 20:32:34 +03:00
|
|
|
}
|
2014-05-14 23:30:09 +04:00
|
|
|
}
|
|
|
|
|
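/*
 * VMware-compatible leaf 0x40000010 advertising the TSC and APIC bus
 * frequencies, so a guest that understands it can skip calibration.
 */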
2017-01-20 17:11:34 +03:00
|
|
|
if (cpu->vmware_cpuid_freq
|
|
|
|
/* Guests depend on 0x40000000 to detect this feature, so only expose
|
|
|
|
* it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
|
|
|
|
&& cpu->expose_kvm
|
|
|
|
&& kvm_base == KVM_CPUID_SIGNATURE
|
|
|
|
/* TSC clock must be stable and known for this feature. */
|
2017-08-07 11:57:02 +03:00
|
|
|
&& tsc_is_stable_and_known(env)) {
|
2017-01-20 17:11:34 +03:00
|
|
|
|
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
|
|
|
c->function = KVM_CPUID_SIGNATURE | 0x10;
|
|
|
|
c->eax = env->tsc_khz;
|
2020-03-12 19:54:29 +03:00
|
|
|
c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
|
2017-01-20 17:11:34 +03:00
|
|
|
c->ecx = c->edx = 0;
|
|
|
|
|
|
|
|
c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
|
|
|
|
c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
|
|
|
|
}
|
|
|
|
|
|
|
|
cpuid_data.cpuid.nent = cpuid_i;
|
|
|
|
|
|
|
|
cpuid_data.cpuid.padding = 0;
|
|
|
|
r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
|
|
|
|
if (r) {
|
|
|
|
goto fail;
|
|
|
|
}
|
2022-02-17 09:04:32 +03:00
|
|
|
kvm_init_xsave(env);
|
2019-06-19 19:21:38 +03:00
|
|
|
|
|
|
|
max_nested_state_len = kvm_max_nested_state_length();
|
|
|
|
if (max_nested_state_len > 0) {
|
|
|
|
assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
|
|
|
|
|
2020-05-20 17:49:22 +03:00
|
|
|
if (cpu_has_vmx(env) || cpu_has_svm(env)) {
|
2019-07-11 16:41:48 +03:00
|
|
|
env->nested_state = g_malloc0(max_nested_state_len);
|
|
|
|
env->nested_state->size = max_nested_state_len;
|
|
|
|
|
2022-08-18 18:01:12 +03:00
|
|
|
kvm_init_nested_state(env);
|
2019-06-19 19:21:38 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-16 22:06:42 +03:00
|
|
|
cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
|
2011-10-27 21:25:58 +04:00
|
|
|
|
2016-03-30 23:47:47 +03:00
|
|
|
if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
|
|
|
|
has_msr_tsc_aux = false;
|
|
|
|
}
|
2014-08-15 01:39:33 +04:00
|
|
|
|
2020-01-20 21:21:42 +03:00
|
|
|
kvm_init_msrs(cpu);
|
|
|
|
|
2011-07-07 18:13:13 +04:00
|
|
|
return 0;
|
2017-01-16 14:31:53 +03:00
|
|
|
|
|
|
|
fail:
|
2023-10-18 16:03:36 +03:00
|
|
|
migrate_del_blocker(&invtsc_mig_blocker);
|
2019-06-19 19:21:31 +03:00
|
|
|
|
2017-01-16 14:31:53 +03:00
|
|
|
return r;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
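/*
 * Minimal sketch (a hypothetical helper, not the one cpu_pre_save()
 * actually uses): the "may have enabled VMX" condition described in the
 * nested-VMX migration notes above can be expressed with state this file
 * already manipulates.  CR4_VMXE_MASK is the CR4.VMXE bit and
 * HF_SMM_MASK is the SMM flag kept in env->hflags.
 */
static inline bool kvm_vcpu_may_have_enabled_vmx(CPUX86State *env)
{
    /* VMX can only be in use if the vCPU exposes it at all... */
    return cpu_has_vmx(env) &&
           /* ...and either CR4.VMXE is set, or the vCPU is in SMM, where
            * CR4.VMXE may have been temporarily cleared. */
           ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
}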
|
|
|
|
|
2019-06-19 19:21:32 +03:00
|
|
|
int kvm_arch_destroy_vcpu(CPUState *cs)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
2019-06-19 19:21:38 +03:00
|
|
|
CPUX86State *env = &cpu->env;
|
2019-06-19 19:21:32 +03:00
|
|
|
|
2022-03-22 15:05:22 +03:00
|
|
|
g_free(env->xsave_buf);
|
|
|
|
|
2022-09-23 12:04:28 +03:00
|
|
|
g_free(cpu->kvm_msr_buf);
|
|
|
|
cpu->kvm_msr_buf = NULL;
|
2019-06-19 19:21:32 +03:00
|
|
|
|
2022-09-23 12:04:28 +03:00
|
|
|
g_free(env->nested_state);
|
|
|
|
env->nested_state = NULL;
|
2019-06-19 19:21:38 +03:00
|
|
|
|
2020-05-13 16:26:30 +03:00
|
|
|
qemu_del_vm_change_state_handler(cpu->vmsentry);
|
|
|
|
|
2019-06-19 19:21:32 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-03-20 16:11:56 +04:00
|
|
|
void kvm_arch_reset_vcpu(X86CPU *cpu)
|
2009-11-06 21:39:24 +03:00
|
|
|
{
|
2012-10-31 09:57:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2012-07-23 17:22:27 +04:00
|
|
|
|
2011-01-21 23:48:12 +03:00
|
|
|
env->xcr0 = 1;
|
2010-03-23 19:37:14 +03:00
|
|
|
if (kvm_irqchip_in_kernel()) {
|
2012-07-23 17:22:27 +04:00
|
|
|
env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
|
2010-03-23 19:37:14 +03:00
|
|
|
KVM_MP_STATE_UNINITIALIZED;
|
|
|
|
} else {
|
|
|
|
env->mp_state = KVM_MP_STATE_RUNNABLE;
|
|
|
|
}
|
2017-11-22 21:14:17 +03:00
|
|
|
|
2022-09-30 18:52:03 +03:00
|
|
|
/* enabled by default */
|
|
|
|
env->poll_control_msr = 1;
|
|
|
|
|
|
|
|
kvm_init_nested_state(env);
|
|
|
|
|
|
|
|
sev_es_set_reset_vector(CPU(cpu));
|
|
|
|
}
|
|
|
|
|
|
|
|
void kvm_arch_after_reset_vcpu(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset SynIC after all other devices have been reset to let them remove
|
|
|
|
* their SINT routes first.
|
|
|
|
*/
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
|
2017-11-22 21:14:17 +03:00
|
|
|
for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
|
|
|
|
env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
|
|
|
|
}
|
2018-09-21 11:22:09 +03:00
|
|
|
|
|
|
|
hyperv_x86_synic_reset(cpu);
|
2017-11-22 21:14:17 +03:00
|
|
|
}
|
2009-11-06 21:39:24 +03:00
|
|
|
}
|
|
|
|
|
2013-03-08 22:21:50 +04:00
|
|
|
void kvm_arch_do_init_vcpu(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
|
|
|
|
/* APs get directly into wait-for-SIPI state. */
|
|
|
|
if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
|
|
|
|
env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-15 07:47:23 +03:00
|
|
|
static int kvm_get_supported_feature_msrs(KVMState *s)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (kvm_feature_msrs != NULL) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct kvm_msr_list msr_list;
|
|
|
|
|
|
|
|
msr_list.nmsrs = 0;
|
|
|
|
ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
|
|
|
|
if (ret < 0 && ret != -E2BIG) {
|
|
|
|
error_report("Fetch KVM feature MSR list failed: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(msr_list.nmsrs > 0);
|
2022-09-23 15:00:24 +03:00
|
|
|
kvm_feature_msrs = g_malloc0(sizeof(msr_list) +
|
2018-10-15 07:47:23 +03:00
|
|
|
msr_list.nmsrs * sizeof(msr_list.indices[0]));
|
|
|
|
|
|
|
|
kvm_feature_msrs->nmsrs = msr_list.nmsrs;
|
|
|
|
ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("Fetch KVM feature MSR list failed: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
g_free(kvm_feature_msrs);
|
|
|
|
kvm_feature_msrs = NULL;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
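/*
 * Illustrative helper (hypothetical, not used elsewhere in this file):
 * once kvm_get_supported_feature_msrs() has populated kvm_feature_msrs,
 * checking whether the kernel reports a given feature MSR is a linear
 * scan of the returned index list.
 */
static bool kvm_feature_msr_supported(uint32_t index)
{
    uint32_t i;

    if (kvm_feature_msrs == NULL) {
        return false;
    }
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            return true;
        }
    }
    return false;
}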
|
|
|
|
|
2011-01-21 23:48:13 +03:00
|
|
|
static int kvm_get_supported_msrs(KVMState *s)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2011-01-21 23:48:13 +03:00
|
|
|
int ret = 0;
|
2019-07-25 18:16:39 +03:00
|
|
|
struct kvm_msr_list msr_list, *kvm_msr_list;
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2019-07-25 18:16:39 +03:00
|
|
|
/*
|
|
|
|
* Obtain MSR list from KVM. These are the MSRs that we must
|
|
|
|
* save/restore.
|
|
|
|
*/
|
|
|
|
msr_list.nmsrs = 0;
|
|
|
|
ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
|
|
|
|
if (ret < 0 && ret != -E2BIG) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Old kernel modules had a bug and could write beyond the provided
|
|
|
|
* memory. Allocate at least a safe amount of 1K.
|
|
|
|
*/
|
|
|
|
kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
|
|
|
|
msr_list.nmsrs *
|
|
|
|
sizeof(msr_list.indices[0])));
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2019-07-25 18:16:39 +03:00
|
|
|
kvm_msr_list->nmsrs = msr_list.nmsrs;
|
|
|
|
ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
|
|
|
|
if (ret >= 0) {
|
|
|
|
int i;
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2019-07-25 18:16:39 +03:00
|
|
|
for (i = 0; i < kvm_msr_list->nmsrs; i++) {
|
|
|
|
switch (kvm_msr_list->indices[i]) {
|
|
|
|
case MSR_STAR:
|
|
|
|
has_msr_star = true;
|
|
|
|
break;
|
|
|
|
case MSR_VM_HSAVE_PA:
|
|
|
|
has_msr_hsave_pa = true;
|
|
|
|
break;
|
|
|
|
case MSR_TSC_AUX:
|
|
|
|
has_msr_tsc_aux = true;
|
|
|
|
break;
|
|
|
|
case MSR_TSC_ADJUST:
|
|
|
|
has_msr_tsc_adjust = true;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_TSCDEADLINE:
|
|
|
|
has_msr_tsc_deadline = true;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_SMBASE:
|
|
|
|
has_msr_smbase = true;
|
|
|
|
break;
|
|
|
|
case MSR_SMI_COUNT:
|
|
|
|
has_msr_smi_count = true;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_MISC_ENABLE:
|
|
|
|
has_msr_misc_enable = true;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_BNDCFGS:
|
|
|
|
has_msr_bndcfgs = true;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_XSS:
|
|
|
|
has_msr_xss = true;
|
|
|
|
break;
|
2019-10-11 10:41:03 +03:00
|
|
|
case MSR_IA32_UMWAIT_CONTROL:
|
|
|
|
has_msr_umwait = true;
|
|
|
|
break;
|
2019-07-25 18:16:39 +03:00
|
|
|
case HV_X64_MSR_CRASH_CTL:
|
|
|
|
has_msr_hv_crash = true;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_RESET:
|
|
|
|
has_msr_hv_reset = true;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_VP_INDEX:
|
|
|
|
has_msr_hv_vpindex = true;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_VP_RUNTIME:
|
|
|
|
has_msr_hv_runtime = true;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_SCONTROL:
|
|
|
|
has_msr_hv_synic = true;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_STIMER0_CONFIG:
|
|
|
|
has_msr_hv_stimer = true;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_TSC_FREQUENCY:
|
|
|
|
has_msr_hv_frequencies = true;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
|
|
|
|
has_msr_hv_reenlightenment = true;
|
|
|
|
break;
|
2022-02-16 13:24:59 +03:00
|
|
|
case HV_X64_MSR_SYNDBG_OPTIONS:
|
|
|
|
has_msr_hv_syndbg_options = true;
|
|
|
|
break;
|
2019-07-25 18:16:39 +03:00
|
|
|
case MSR_IA32_SPEC_CTRL:
|
|
|
|
has_msr_spec_ctrl = true;
|
|
|
|
break;
|
2021-11-01 16:23:00 +03:00
|
|
|
case MSR_AMD64_TSC_RATIO:
|
|
|
|
has_tsc_scale_msr = true;
|
|
|
|
break;
|
2019-11-20 15:19:22 +03:00
|
|
|
case MSR_IA32_TSX_CTRL:
|
|
|
|
has_msr_tsx_ctrl = true;
|
|
|
|
break;
|
2019-07-25 18:16:39 +03:00
|
|
|
case MSR_VIRT_SSBD:
|
|
|
|
has_msr_virt_ssbd = true;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_ARCH_CAPABILITIES:
|
|
|
|
has_msr_arch_capabs = true;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_CORE_CAPABILITY:
|
|
|
|
has_msr_core_capabs = true;
|
|
|
|
break;
|
2020-05-29 10:43:47 +03:00
|
|
|
case MSR_IA32_PERF_CAPABILITIES:
|
|
|
|
has_msr_perf_capabs = true;
|
|
|
|
break;
|
2019-07-01 19:32:17 +03:00
|
|
|
case MSR_IA32_VMX_VMFUNC:
|
|
|
|
has_msr_vmx_vmfunc = true;
|
|
|
|
break;
|
2020-02-11 20:55:16 +03:00
|
|
|
case MSR_IA32_UCODE_REV:
|
|
|
|
has_msr_ucode_rev = true;
|
|
|
|
break;
|
2020-03-31 19:27:52 +03:00
|
|
|
case MSR_IA32_VMX_PROCBASED_CTLS2:
|
|
|
|
has_msr_vmx_procbased_ctls2 = true;
|
|
|
|
break;
|
2021-02-05 11:33:24 +03:00
|
|
|
case MSR_IA32_PKRS:
|
|
|
|
has_msr_pkrs = true;
|
|
|
|
break;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-25 18:16:39 +03:00
|
|
|
g_free(kvm_msr_list);
|
|
|
|
|
2011-01-21 23:48:13 +03:00
|
|
|
return ret;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2022-10-05 01:56:43 +03:00
|
|
|
static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr,
|
|
|
|
uint64_t *val)
|
|
|
|
{
|
|
|
|
CPUState *cs = CPU(cpu);
|
|
|
|
|
|
|
|
*val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
|
|
|
|
*val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
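/*
 * Illustrative sketch only (a hypothetical guest-side helper, not part of
 * the emulation above): decoding the value composed by
 * kvm_rdmsr_core_thread_count() follows the field layout noted in its
 * comments.
 */
static inline void decode_core_thread_count(uint64_t val,
                                            unsigned *threads,
                                            unsigned *cores)
{
    *threads = val & 0xffff;        /* bits 15..0: logical processor count */
    *cores = (val >> 16) & 0xffff;  /* bits 31..16: core count */
}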
|
|
|
|
|
2015-06-18 19:30:16 +03:00
|
|
|
static Notifier smram_machine_done;
|
|
|
|
static KVMMemoryListener smram_listener;
|
|
|
|
static AddressSpace smram_address_space;
|
|
|
|
static MemoryRegion smram_as_root;
|
|
|
|
static MemoryRegion smram_as_mem;
|
|
|
|
|
|
|
|
static void register_smram_listener(Notifier *n, void *unused)
|
|
|
|
{
|
|
|
|
MemoryRegion *smram =
|
|
|
|
(MemoryRegion *) object_resolve_path("/machine/smram", NULL);
|
|
|
|
|
|
|
|
/* Outer container... */
|
|
|
|
memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
|
|
|
|
memory_region_set_enabled(&smram_as_root, true);
|
|
|
|
|
|
|
|
/* ... with two regions inside: normal system memory with low
|
|
|
|
* priority, and...
|
|
|
|
*/
|
|
|
|
memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
|
|
|
|
get_system_memory(), 0, ~0ull);
|
|
|
|
memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
|
|
|
|
memory_region_set_enabled(&smram_as_mem, true);
|
|
|
|
|
|
|
|
if (smram) {
|
|
|
|
/* ... SMRAM with higher priority */
|
|
|
|
memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
|
|
|
|
memory_region_set_enabled(smram, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
|
|
|
|
kvm_memory_listener_register(kvm_state, &smram_listener,
|
2021-08-17 04:35:52 +03:00
|
|
|
&smram_address_space, 1, "kvm-smram");
|
2015-06-18 19:30:16 +03:00
|
|
|
}
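/*
 * Design note (a summary of the code above, not new behaviour): because
 * the SMRAM alias is mapped with priority 10 and the system-memory alias
 * with priority 0, addresses covered by /machine/smram resolve to SMRAM
 * in this address space, while everything else falls through to normal
 * system memory.  KVM uses this second memory slot address space only
 * while a vCPU executes in SMM.
 */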
|
|
|
|
|
2023-08-22 19:31:02 +03:00
|
|
|
int kvm_arch_get_default_type(MachineState *ms)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-02-04 18:43:51 +03:00
|
|
|
int kvm_arch_init(MachineState *ms, KVMState *s)
|
2010-03-23 19:37:12 +03:00
|
|
|
{
|
2011-01-21 23:48:18 +03:00
|
|
|
uint64_t identity_base = 0xfffbc000;
|
2012-01-25 21:14:15 +04:00
|
|
|
uint64_t shadow_mem;
|
2010-03-23 19:37:12 +03:00
|
|
|
int ret;
|
2010-10-21 19:35:04 +04:00
|
|
|
struct utsname utsname;
|
2020-10-16 06:52:30 +03:00
|
|
|
Error *local_err = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize SEV context, if required
|
|
|
|
*
|
|
|
|
* If no memory encryption is requested (ms->cgs == NULL) this is
|
|
|
|
* a no-op.
|
|
|
|
*
|
|
|
|
* It's also a no-op if a non-SEV confidential guest support
|
|
|
|
* mechanism is selected. SEV is the only mechanism available to
|
|
|
|
* select on x86 at present, so this doesn't arise, but if new
|
|
|
|
* mechanisms are supported in future (e.g. TDX), they'll need
|
|
|
|
* their own initialization either here or elsewhere.
|
|
|
|
*/
|
|
|
|
ret = sev_kvm_init(ms->cgs, &local_err);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report_err(local_err);
|
|
|
|
return ret;
|
|
|
|
}
|
2010-03-23 19:37:12 +03:00
|
|
|
|
2015-10-15 21:30:20 +03:00
|
|
|
has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
|
2021-11-01 16:22:58 +03:00
|
|
|
has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
|
2015-10-15 21:30:20 +03:00
|
|
|
|
2018-07-02 16:41:56 +03:00
|
|
|
hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
|
|
|
|
|
2019-06-19 19:21:39 +03:00
|
|
|
has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
|
|
|
|
if (has_exception_payload) {
|
|
|
|
ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("kvm: Failed to enable exception payload cap: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-09-29 10:20:11 +03:00
|
|
|
has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
|
|
|
|
if (has_triple_fault_event) {
|
|
|
|
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("kvm: Failed to enable triple fault event cap: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-12-03 20:51:13 +03:00
|
|
|
if (s->xen_version) {
|
|
|
|
#ifdef CONFIG_XEN_EMU
|
|
|
|
if (!object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)) {
|
|
|
|
error_report("kvm: Xen support only available in PC machine");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
2022-12-06 13:48:53 +03:00
|
|
|
/* hyperv_enabled() doesn't work yet. */
|
|
|
|
uint32_t msr = XEN_HYPERCALL_MSR;
|
|
|
|
ret = kvm_xen_init(s, msr);
|
2022-12-03 20:51:13 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
error_report("kvm: Xen support not enabled in qemu");
|
|
|
|
return -ENOTSUP;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2011-01-21 23:48:13 +03:00
|
|
|
ret = kvm_get_supported_msrs(s);
|
2010-03-23 19:37:12 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2010-10-21 19:35:04 +04:00
|
|
|
|
2018-10-15 07:47:23 +03:00
|
|
|
kvm_get_supported_feature_msrs(s);
|
|
|
|
|
2010-10-21 19:35:04 +04:00
|
|
|
uname(&utsname);
|
|
|
|
lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
|
|
|
|
|
2010-02-15 20:33:46 +03:00
|
|
|
/*
|
2011-01-21 23:48:18 +03:00
|
|
|
* On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
|
|
|
|
* In order to use vm86 mode, an EPT identity map and a TSS are needed.
|
|
|
|
* Since these must be part of guest physical memory, we need to allocate
|
|
|
|
* them, both by setting their start addresses in the kernel and by
|
2023-10-17 15:30:44 +03:00
|
|
|
* creating a corresponding e820 entry. We need 4 pages before the BIOS,
|
|
|
|
* so this value allows up to 16M BIOSes.
|
2010-02-15 20:33:46 +03:00
|
|
|
*/
|
2023-10-17 15:30:44 +03:00
|
|
|
identity_base = 0xfeffc000;
|
|
|
|
ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
2010-02-15 20:33:46 +03:00
|
|
|
}
|
2011-06-08 18:11:02 +04:00
|
|
|
|
2011-01-21 23:48:18 +03:00
|
|
|
/* Set TSS base one page after EPT identity map. */
|
|
|
|
ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
|
2010-03-23 19:37:12 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-01-21 23:48:18 +03:00
|
|
|
/* Tell fw_cfg to notify the BIOS to reserve the range. */
|
|
|
|
ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
|
2010-03-23 19:37:12 +03:00
|
|
|
if (ret < 0) {
|
2011-01-21 23:48:18 +03:00
|
|
|
fprintf(stderr, "e820_add_entry() table is full\n");
|
2010-03-23 19:37:12 +03:00
|
|
|
return ret;
|
|
|
|
}
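/*
 * Resulting layout, for illustration (derived from the values above):
 *   0xfeffc000  EPT identity map page   (KVM_SET_IDENTITY_MAP_ADDR)
 *   0xfeffd000  TSS page 1              (KVM_SET_TSS_ADDR, 3 pages)
 *   0xfeffe000  TSS page 2
 *   0xfefff000  TSS page 3
 * which is exactly the 0x4000-byte range reserved via e820_add_entry().
 */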
|
|
|
|
|
2019-11-13 12:56:53 +03:00
|
|
|
shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
|
Fix -machine options accel, kernel_irqchip, kvm_shadow_mem
Multiple -machine options with the same ID are merged. All but the
one without an ID are to be silently ignored.
In most places, we query these options with a null ID. This is
correct.
In some places, we instead query whatever options come first in the
list. This is wrong. When the -machine processed first happens to
have an ID, options are taken from that ID, and the ones specified
without ID are silently ignored.
Example:
$ upstream-qemu -nodefaults -S -display none -monitor stdio -machine id=foo -machine accel=kvm,usb=on
$ upstream-qemu -nodefaults -S -display none -monitor stdio -machine id=foo,accel=kvm,usb=on -machine accel=xen
$ upstream-qemu -nodefaults -S -display none -monitor stdio -machine accel=xen -machine id=foo,accel=kvm,usb=on
$ qemu-system-x86_64 -nodefaults -S -display none -monitor stdio -machine accel=kvm,usb=on
QEMU 1.5.50 monitor - type 'help' for more information
(qemu) info kvm
kvm support: enabled
(qemu) info usb
(qemu) q
$ qemu-system-x86_64 -nodefaults -S -display none -monitor stdio -machine id=foo -machine accel=kvm,usb=on
QEMU 1.5.50 monitor - type 'help' for more information
(qemu) info kvm
kvm support: disabled
(qemu) info usb
(qemu) q
$ qemu-system-x86_64 -nodefaults -S -display none -monitor stdio -machine id=foo,accel=kvm,usb=on -machine accel=xen
QEMU 1.5.50 monitor - type 'help' for more information
(qemu) info kvm
kvm support: enabled
(qemu) info usb
USB support not enabled
(qemu) q
$ qemu-system-x86_64 -nodefaults -S -display none -monitor stdio -machine accel=xen -machine id=foo,accel=kvm,usb=on
xc: error: Could not obtain handle on privileged command interface (2 = No such file or directory): Internal error
xen be core: can't open xen interface
failed to initialize Xen: Operation not permitted
Option usb is queried correctly, and the one without an ID wins,
regardless of option order.
Option accel is queried incorrectly, and which one wins depends on
option order and ID.
Affected options are accel (and its sugared forms -enable-kvm and
-no-kvm), kernel_irqchip, kvm_shadow_mem.
Additionally, option kernel_irqchip is normally on by default, except
it's off when no -machine options are given. Bug can't bite, because
kernel_irqchip is used only when KVM is enabled, KVM is off by
default, and enabling always creates -machine options. Downstreams
that enable KVM by default do get bitten, though.
Use qemu_get_machine_opts() to fix these bugs.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-id: 1372943363-24081-5-git-send-email-armbru@redhat.com
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
2013-07-04 17:09:20 +04:00
|
|
|
if (shadow_mem != -1) {
|
|
|
|
shadow_mem /= 4096;
|
|
|
|
ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
2012-01-25 21:14:15 +04:00
|
|
|
}
|
|
|
|
}
|
2015-06-18 19:30:16 +03:00
|
|
|
|
2017-06-01 14:35:15 +03:00
|
|
|
if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
|
2019-12-30 11:00:51 +03:00
|
|
|
object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
|
2019-12-12 19:28:01 +03:00
|
|
|
x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
|
2015-06-18 19:30:16 +03:00
|
|
|
smram_machine_done.notify = register_smram_listener;
|
|
|
|
qemu_add_machine_init_done_notifier(&smram_machine_done);
|
|
|
|
}
|
2018-06-22 22:22:05 +03:00
|
|
|
|
|
|
|
if (enable_cpu_pm) {
|
|
|
|
int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
|
|
|
|
/* Workaround for a kernel header with a typo. TODO: fix header and drop. */
|
|
|
|
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
|
|
|
|
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
|
|
|
|
#endif
|
|
|
|
if (disable_exits) {
|
|
|
|
disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
|
|
|
|
KVM_X86_DISABLE_EXITS_HLT |
|
2019-07-15 04:28:44 +03:00
|
|
|
KVM_X86_DISABLE_EXITS_PAUSE |
|
|
|
|
KVM_X86_DISABLE_EXITS_CSTATE);
|
2018-06-22 22:22:05 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
|
|
|
|
disable_exits);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("kvm: guest stopping CPU not supported: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-21 07:38:20 +03:00
|
|
|
if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
|
|
|
|
X86MachineState *x86ms = X86_MACHINE(ms);
|
|
|
|
|
|
|
|
if (x86ms->bus_lock_ratelimit > 0) {
|
|
|
|
ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
|
|
|
|
if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
|
|
|
|
error_report("kvm: bus lock detection unsupported");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
|
|
|
|
KVM_BUS_LOCK_DETECTION_EXIT);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("kvm: Failed to enable bus lock detection cap: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
ratelimit_init(&bus_lock_ratelimit_ctrl);
|
|
|
|
ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
|
|
|
|
x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-09-29 10:20:14 +03:00
|
|
|
if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
|
|
|
|
kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
|
|
|
|
uint64_t notify_window_flags =
|
|
|
|
((uint64_t)s->notify_window << 32) |
|
|
|
|
KVM_X86_NOTIFY_VMEXIT_ENABLED |
|
|
|
|
KVM_X86_NOTIFY_VMEXIT_USER;
|
|
|
|
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
|
|
|
|
notify_window_flags);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("kvm: Failed to enable notify vmexit cap: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
2022-10-05 01:56:42 +03:00
|
|
|
if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
|
2022-10-05 01:56:43 +03:00
|
|
|
bool r;
|
|
|
|
|
2022-10-05 01:56:42 +03:00
|
|
|
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
|
|
|
|
KVM_MSR_EXIT_REASON_FILTER);
|
|
|
|
if (ret) {
|
|
|
|
error_report("Could not enable user space MSRs: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
exit(1);
|
|
|
|
}
|
2022-10-05 01:56:43 +03:00
|
|
|
|
|
|
|
r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
|
|
|
|
kvm_rdmsr_core_thread_count, NULL);
|
|
|
|
if (!r) {
|
|
|
|
error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
|
|
|
|
strerror(-ret));
|
|
|
|
exit(1);
|
|
|
|
}
|
2022-10-05 01:56:42 +03:00
|
|
|
}
|
2022-09-29 10:20:14 +03:00
|
|
|
|
2011-01-21 23:48:18 +03:00
|
|
|
return 0;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
2010-12-27 18:19:29 +03:00
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
|
|
|
|
{
|
|
|
|
lhs->selector = rhs->selector;
|
|
|
|
lhs->base = rhs->base;
|
|
|
|
lhs->limit = rhs->limit;
|
|
|
|
lhs->type = 3;
|
|
|
|
lhs->present = 1;
|
|
|
|
lhs->dpl = 3;
|
|
|
|
lhs->db = 0;
|
|
|
|
lhs->s = 1;
|
|
|
|
lhs->l = 0;
|
|
|
|
lhs->g = 0;
|
|
|
|
lhs->avl = 0;
|
|
|
|
lhs->unusable = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
|
|
|
|
{
|
|
|
|
unsigned flags = rhs->flags;
|
|
|
|
lhs->selector = rhs->selector;
|
|
|
|
lhs->base = rhs->base;
|
|
|
|
lhs->limit = rhs->limit;
|
|
|
|
lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
|
|
|
|
lhs->present = (flags & DESC_P_MASK) != 0;
|
2010-12-27 17:56:44 +03:00
|
|
|
lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
|
2008-11-05 19:29:27 +03:00
|
|
|
lhs->db = (flags >> DESC_B_SHIFT) & 1;
|
|
|
|
lhs->s = (flags & DESC_S_MASK) != 0;
|
|
|
|
lhs->l = (flags >> DESC_L_SHIFT) & 1;
|
|
|
|
lhs->g = (flags & DESC_G_MASK) != 0;
|
|
|
|
lhs->avl = (flags & DESC_AVL_MASK) != 0;
|
2015-12-07 07:54:07 +03:00
|
|
|
lhs->unusable = !lhs->present;
|
2012-02-29 19:54:29 +04:00
|
|
|
lhs->padding = 0;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
|
|
|
|
{
|
|
|
|
lhs->selector = rhs->selector;
|
|
|
|
lhs->base = rhs->base;
|
|
|
|
lhs->limit = rhs->limit;
|
2017-06-01 11:56:04 +03:00
|
|
|
lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
|
|
|
|
((rhs->present && !rhs->unusable) * DESC_P_MASK) |
|
|
|
|
(rhs->dpl << DESC_DPL_SHIFT) |
|
|
|
|
(rhs->db << DESC_B_SHIFT) |
|
|
|
|
(rhs->s * DESC_S_MASK) |
|
|
|
|
(rhs->l << DESC_L_SHIFT) |
|
|
|
|
(rhs->g * DESC_G_MASK) |
|
|
|
|
(rhs->avl * DESC_AVL_MASK);
|
2008-11-05 19:29:27 +03:00
|
|
|
}
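/*
 * Worked example (illustration only): a 64-bit kernel code segment with
 * type 0xb, S=1, DPL=0, P=1, L=1 and G=1 comes back from get_seg() with
 *   flags = (0xb << DESC_TYPE_SHIFT) | DESC_S_MASK | DESC_P_MASK |
 *           (1 << DESC_L_SHIFT) | DESC_G_MASK
 *         = 0x00a09b00
 * i.e. the familiar 0x9b access byte and 0xa attribute nibble of such a
 * descriptor, placed in bits 8..23.
 */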
|
|
|
|
|
|
|
|
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
|
|
|
|
{
|
2010-12-27 18:19:29 +03:00
|
|
|
if (set) {
|
2008-11-05 19:29:27 +03:00
|
|
|
*kvm_reg = *qemu_reg;
|
2010-12-27 18:19:29 +03:00
|
|
|
} else {
|
2008-11-05 19:29:27 +03:00
|
|
|
*qemu_reg = *kvm_reg;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_getput_regs(X86CPU *cpu, int set)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2008-11-05 19:29:27 +03:00
|
|
|
struct kvm_regs regs;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (!set) {
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2008-11-05 19:29:27 +03:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
|
|
|
|
kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
|
|
|
|
kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
|
|
|
|
kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
|
|
|
|
kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
|
|
|
|
kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
|
|
|
|
kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
|
|
|
|
kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
|
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
kvm_getput_reg(&regs.r8, &env->regs[8], set);
|
|
|
|
kvm_getput_reg(&regs.r9, &env->regs[9], set);
|
|
|
|
kvm_getput_reg(&regs.r10, &env->regs[10], set);
|
|
|
|
kvm_getput_reg(&regs.r11, &env->regs[11], set);
|
|
|
|
kvm_getput_reg(&regs.r12, &env->regs[12], set);
|
|
|
|
kvm_getput_reg(&regs.r13, &env->regs[13], set);
|
|
|
|
kvm_getput_reg(&regs.r14, &env->regs[14], set);
|
|
|
|
kvm_getput_reg(&regs.r15, &env->regs[15], set);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
kvm_getput_reg(&regs.rflags, &env->eflags, set);
|
|
|
|
kvm_getput_reg(&regs.rip, &env->eip, set);
|
|
|
|
|
2010-12-27 18:19:29 +03:00
|
|
|
if (set) {
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_put_xsave(X86CPU *cpu)
|
2010-06-17 13:53:07 +04:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2021-07-05 13:46:28 +03:00
|
|
|
void *xsave = env->xsave_buf;
|
2010-06-17 13:53:07 +04:00
|
|
|
|
2021-07-05 13:46:28 +03:00
|
|
|
x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
|
2010-06-17 13:53:07 +04:00
|
|
|
|
2016-06-14 00:57:58 +03:00
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
|
2010-06-17 13:53:07 +04:00
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_put_xcrs(X86CPU *cpu)
|
2010-06-17 13:53:07 +04:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2014-10-30 11:23:41 +03:00
|
|
|
struct kvm_xcrs xcrs = {};
|
2010-06-17 13:53:07 +04:00
|
|
|
|
2015-10-15 21:30:20 +03:00
|
|
|
if (!has_xcrs) {
|
2010-06-17 13:53:07 +04:00
|
|
|
return 0;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2010-06-17 13:53:07 +04:00
|
|
|
|
|
|
|
xcrs.nr_xcrs = 1;
|
|
|
|
xcrs.flags = 0;
|
|
|
|
xcrs.xcrs[0].xcr = 0;
|
|
|
|
xcrs.xcrs[0].value = env->xcr0;
|
2012-10-31 09:06:49 +04:00
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
|
2010-06-17 13:53:07 +04:00
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_put_sregs(X86CPU *cpu)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2008-11-05 19:29:27 +03:00
|
|
|
struct kvm_sregs sregs;
|
|
|
|
|
2021-12-21 12:12:53 +03:00
|
|
|
/*
|
|
|
|
* The interrupt_bitmap is ignored because KVM_SET_SREGS is
|
|
|
|
* always followed by KVM_SET_VCPU_EVENTS.
|
|
|
|
*/
|
2009-11-06 21:39:24 +03:00
|
|
|
memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
if ((env->eflags & VM_MASK)) {
|
2010-12-27 18:19:29 +03:00
|
|
|
set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
|
|
|
|
set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
|
|
|
|
set_v8086_seg(&sregs.es, &env->segs[R_ES]);
|
|
|
|
set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
|
|
|
|
set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
|
|
|
|
set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
|
2008-11-05 19:29:27 +03:00
|
|
|
} else {
|
2010-12-27 18:19:29 +03:00
|
|
|
set_seg(&sregs.cs, &env->segs[R_CS]);
|
|
|
|
set_seg(&sregs.ds, &env->segs[R_DS]);
|
|
|
|
set_seg(&sregs.es, &env->segs[R_ES]);
|
|
|
|
set_seg(&sregs.fs, &env->segs[R_FS]);
|
|
|
|
set_seg(&sregs.gs, &env->segs[R_GS]);
|
|
|
|
set_seg(&sregs.ss, &env->segs[R_SS]);
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
set_seg(&sregs.tr, &env->tr);
|
|
|
|
set_seg(&sregs.ldt, &env->ldt);
|
|
|
|
|
|
|
|
sregs.idt.limit = env->idt.limit;
|
|
|
|
sregs.idt.base = env->idt.base;
|
2012-02-29 19:54:29 +04:00
|
|
|
memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
|
2008-11-05 19:29:27 +03:00
|
|
|
sregs.gdt.limit = env->gdt.limit;
|
|
|
|
sregs.gdt.base = env->gdt.base;
|
2012-02-29 19:54:29 +04:00
|
|
|
memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
sregs.cr0 = env->cr[0];
|
|
|
|
sregs.cr2 = env->cr[2];
|
|
|
|
sregs.cr3 = env->cr[3];
|
|
|
|
sregs.cr4 = env->cr[4];
|
|
|
|
|
2013-12-23 13:04:02 +04:00
|
|
|
sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
|
|
|
|
sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
sregs.efer = env->efer;
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2021-11-01 16:22:58 +03:00
|
|
|
static int kvm_put_sregs2(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
struct kvm_sregs2 sregs;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
sregs.flags = 0;
|
|
|
|
|
|
|
|
if ((env->eflags & VM_MASK)) {
|
|
|
|
set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
|
|
|
|
set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
|
|
|
|
set_v8086_seg(&sregs.es, &env->segs[R_ES]);
|
|
|
|
set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
|
|
|
|
set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
|
|
|
|
set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
|
|
|
|
} else {
|
|
|
|
set_seg(&sregs.cs, &env->segs[R_CS]);
|
|
|
|
set_seg(&sregs.ds, &env->segs[R_DS]);
|
|
|
|
set_seg(&sregs.es, &env->segs[R_ES]);
|
|
|
|
set_seg(&sregs.fs, &env->segs[R_FS]);
|
|
|
|
set_seg(&sregs.gs, &env->segs[R_GS]);
|
|
|
|
set_seg(&sregs.ss, &env->segs[R_SS]);
|
|
|
|
}
|
|
|
|
|
|
|
|
set_seg(&sregs.tr, &env->tr);
|
|
|
|
set_seg(&sregs.ldt, &env->ldt);
|
|
|
|
|
|
|
|
sregs.idt.limit = env->idt.limit;
|
|
|
|
sregs.idt.base = env->idt.base;
|
|
|
|
memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
|
|
|
|
sregs.gdt.limit = env->gdt.limit;
|
|
|
|
sregs.gdt.base = env->gdt.base;
|
|
|
|
memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
|
|
|
|
|
|
|
|
sregs.cr0 = env->cr[0];
|
|
|
|
sregs.cr2 = env->cr[2];
|
|
|
|
sregs.cr3 = env->cr[3];
|
|
|
|
sregs.cr4 = env->cr[4];
|
|
|
|
|
|
|
|
sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
|
|
|
|
sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
|
|
|
|
|
|
|
|
sregs.efer = env->efer;
|
|
|
|
|
|
|
|
if (env->pdptrs_valid) {
|
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
sregs.pdptrs[i] = env->pdptrs[i];
|
|
|
|
}
|
|
|
|
sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
|
|
|
|
}
|
|
|
|
|
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-12-16 22:06:42 +03:00
|
|
|
static void kvm_msr_buf_reset(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
|
|
|
|
}
|
|
|
|
|
2015-12-16 22:06:44 +03:00
|
|
|
static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
|
|
|
|
{
|
|
|
|
struct kvm_msrs *msrs = cpu->kvm_msr_buf;
|
|
|
|
void *limit = ((void *)msrs) + MSR_BUF_SIZE;
|
|
|
|
struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
|
|
|
|
|
|
|
|
assert((void *)(entry + 1) <= limit);
|
|
|
|
|
2015-12-16 22:06:46 +03:00
|
|
|
entry->index = index;
|
|
|
|
entry->reserved = 0;
|
|
|
|
entry->data = value;
|
2015-12-16 22:06:44 +03:00
|
|
|
msrs->nmsrs++;
|
|
|
|
}
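/*
 * Capacity note (illustration): kvm_msr_buf is a fixed MSR_BUF_SIZE
 * allocation holding a struct kvm_msrs header followed by its entries,
 * so the assertion above fires once a caller adds more than
 *   (MSR_BUF_SIZE - sizeof(struct kvm_msrs)) / sizeof(struct kvm_msr_entry)
 * entries between two kvm_msr_buf_reset() calls.
 */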
|
|
|
|
|
2016-09-22 15:50:00 +03:00
|
|
|
static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
|
|
|
|
{
|
|
|
|
kvm_msr_buf_reset(cpu);
|
|
|
|
kvm_msr_entry_add(cpu, index, value);
|
|
|
|
|
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
|
|
|
|
}
|
|
|
|
|
2022-02-15 22:52:53 +03:00
|
|
|
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct {
|
|
|
|
struct kvm_msrs info;
|
|
|
|
struct kvm_msr_entry entries[1];
|
|
|
|
} msr_data = {
|
|
|
|
.info.nmsrs = 1,
|
|
|
|
.entries[0].index = index,
|
|
|
|
};
|
|
|
|
|
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
assert(ret == 1);
|
|
|
|
*value = msr_data.entries[0].data;
|
|
|
|
return ret;
|
|
|
|
}
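/*
 * Usage sketch (hypothetical helper, not used below): kvm_get_one_msr()
 * returns the number of MSRs read, so success is signalled by a return
 * value of 1.  A caller that wants a default on failure could wrap it
 * like this.
 */
static uint64_t kvm_get_one_msr_or(X86CPU *cpu, int index, uint64_t fallback)
{
    uint64_t val;

    if (kvm_get_one_msr(cpu, index, &val) != 1) {
        return fallback;
    }
    return val;
}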
|
2016-09-22 15:49:17 +03:00
|
|
|
void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
|
|
|
|
assert(ret == 1);
|
|
|
|
}
|
|
|
|
|
2013-08-19 21:13:42 +04:00
|
|
|
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
2016-03-30 23:55:29 +03:00
|
|
|
int ret;
|
2013-08-19 21:13:42 +04:00
|
|
|
|
|
|
|
if (!has_msr_tsc_deadline) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-09-22 15:50:00 +03:00
|
|
|
ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
|
2016-03-30 23:55:29 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(ret == 1);
|
|
|
|
return 0;
|
2013-08-19 21:13:42 +04:00
|
|
|
}
|
|
|
|
|
2013-12-17 23:05:13 +04:00
|
|
|
/*
|
|
|
|
* Provide a separate write service for the feature control MSR in order to
|
|
|
|
* kick the VCPU out of VMXON or even guest mode on reset. This has to be done
|
|
|
|
* before writing any other state because forcibly leaving nested mode
|
|
|
|
* invalidates the VCPU state.
|
|
|
|
*/
|
|
|
|
static int kvm_put_msr_feature_control(X86CPU *cpu)
|
|
|
|
{
|
2016-03-30 23:55:29 +03:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!has_msr_feature_control) {
|
|
|
|
return 0;
|
|
|
|
}
|
2013-12-17 23:05:13 +04:00
|
|
|
|
2016-09-22 15:50:00 +03:00
|
|
|
ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
|
|
|
|
cpu->env.msr_ia32_feature_control);
|
2016-03-30 23:55:29 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(ret == 1);
|
|
|
|
return 0;
|
2013-12-17 23:05:13 +04:00
|
|
|
}
|
|
|
|
|
2019-07-01 19:32:17 +03:00
|
|
|
static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
|
|
|
|
{
|
|
|
|
uint32_t default1, can_be_one, can_be_zero;
|
|
|
|
uint32_t must_be_one;
|
|
|
|
|
|
|
|
switch (index) {
|
|
|
|
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
|
|
|
|
default1 = 0x00000016;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
|
|
|
|
default1 = 0x0401e172;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
|
|
|
|
default1 = 0x000011ff;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
|
|
|
|
default1 = 0x00036dff;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_VMX_PROCBASED_CTLS2:
|
|
|
|
default1 = 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If a feature bit is set, the control can be either set or clear.
|
|
|
|
* Otherwise the value is limited to either 0 or 1 by default1.
|
|
|
|
*/
|
|
|
|
can_be_one = features | default1;
|
|
|
|
can_be_zero = features | ~default1;
|
|
|
|
must_be_one = ~can_be_zero;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
|
|
|
|
* Bit 32:63 -> 1 if the control bit can be one.
|
|
|
|
*/
|
|
|
|
return must_be_one | (((uint64_t)can_be_one) << 32);
|
|
|
|
}
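/*
 * Worked example (illustration only): for MSR_IA32_VMX_TRUE_PINBASED_CTLS
 * with features == 0, default1 is 0x00000016, so
 *   can_be_one  = 0x00000016
 *   can_be_zero = ~0x00000016
 *   must_be_one = 0x00000016
 * and the function returns 0x0000001600000016ULL: only the default1 bits
 * may be one, and those same bits must be one.
 */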
|
|
|
|
|
|
|
|
static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
|
|
|
|
{
|
|
|
|
uint64_t kvm_vmx_basic =
|
|
|
|
kvm_arch_get_supported_msr_feature(kvm_state,
|
|
|
|
MSR_IA32_VMX_BASIC);
|
2019-12-06 10:11:11 +03:00
|
|
|
|
|
|
|
if (!kvm_vmx_basic) {
|
|
|
|
/* If the kernel doesn't support VMX feature (kvm_intel.nested=0),
|
|
|
|
* then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
|
|
|
|
*/
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-07-01 19:32:17 +03:00
|
|
|
uint64_t kvm_vmx_misc =
|
|
|
|
kvm_arch_get_supported_msr_feature(kvm_state,
|
|
|
|
MSR_IA32_VMX_MISC);
|
|
|
|
uint64_t kvm_vmx_ept_vpid =
|
|
|
|
kvm_arch_get_supported_msr_feature(kvm_state,
|
|
|
|
MSR_IA32_VMX_EPT_VPID_CAP);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the guest is 64-bit, a value of 1 is allowed for the host address
|
|
|
|
* space size vmexit control.
|
|
|
|
*/
|
|
|
|
uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
|
|
|
|
? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Bits 0-30, 32-44 and 50-53 come from the host. KVM should
|
|
|
|
* not change them for backwards compatibility.
|
|
|
|
*/
|
|
|
|
uint64_t fixed_vmx_basic = kvm_vmx_basic &
|
|
|
|
(MSR_VMX_BASIC_VMCS_REVISION_MASK |
|
|
|
|
MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
|
|
|
|
MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can
|
|
|
|
* change in the future but are always zero for now, clear them to be
|
|
|
|
* future proof. Bits 32-63 in theory could change, though KVM does
|
|
|
|
* not support dual-monitor treatment and probably never will; mask
|
|
|
|
* them out as well.
|
|
|
|
*/
|
|
|
|
uint64_t fixed_vmx_misc = kvm_vmx_misc &
|
|
|
|
(MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
|
|
|
|
MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* EPT memory types should not change either, so we do not bother
|
|
|
|
* adding features for them.
|
|
|
|
*/
|
|
|
|
uint64_t fixed_vmx_ept_mask =
|
|
|
|
(f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
|
|
|
|
MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
|
|
|
|
uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
|
|
|
|
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
|
|
|
|
make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
|
|
|
|
f[FEAT_VMX_PROCBASED_CTLS]));
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
|
|
|
|
make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
|
|
|
|
f[FEAT_VMX_PINBASED_CTLS]));
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
|
|
|
|
make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
|
|
|
|
f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
|
|
|
|
make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
|
|
|
|
f[FEAT_VMX_ENTRY_CTLS]));
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
|
|
|
|
make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
|
|
|
|
f[FEAT_VMX_SECONDARY_CTLS]));
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
|
|
|
|
f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
|
|
|
|
f[FEAT_VMX_BASIC] | fixed_vmx_basic);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
|
|
|
|
f[FEAT_VMX_MISC] | fixed_vmx_misc);
|
|
|
|
if (has_msr_vmx_vmfunc) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Just to be safe, write these with constant values. The CRn_FIXED1
|
|
|
|
* MSRs are generated by KVM based on the vCPU's CPUID.
|
|
|
|
*/
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
|
|
|
|
CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
|
|
|
|
CR4_VMXE_MASK);
|
2021-06-21 19:31:52 +03:00
|
|
|
|
|
|
|
if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
|
|
|
|
/* TSC multiplier (0x2032). */
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
|
|
|
|
} else {
|
|
|
|
/* Preemption timer (0x482E). */
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E);
|
|
|
|
}
|
2019-07-01 19:32:17 +03:00
|
|
|
}
|
|
|
|
|
2020-05-29 10:43:47 +03:00
|
|
|
static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
|
|
|
|
{
|
|
|
|
uint64_t kvm_perf_cap =
|
|
|
|
kvm_arch_get_supported_msr_feature(kvm_state,
|
|
|
|
MSR_IA32_PERF_CAPABILITIES);
|
|
|
|
|
|
|
|
if (kvm_perf_cap) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
|
|
|
|
kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-20 21:21:42 +03:00
|
|
|
static int kvm_buf_set_msrs(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret < cpu->kvm_msr_buf->nmsrs) {
|
|
|
|
struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
|
|
|
|
error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
|
|
|
|
(uint32_t)e->index, (uint64_t)e->data);
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(ret == cpu->kvm_msr_buf->nmsrs);
|
|
|
|
return 0;
|
|
|
|
}
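/*
 * Note on the check above (summarising KVM behaviour): KVM_SET_MSRS
 * returns how many entries were written before the first failure, so a
 * return value smaller than nmsrs means the entry at that index is the
 * MSR the kernel rejected, which is what gets reported.
 */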
|
|
|
|
|
|
|
|
static void kvm_init_msrs(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
|
|
|
|
kvm_msr_buf_reset(cpu);
|
|
|
|
if (has_msr_arch_capabs) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
|
|
|
|
env->features[FEAT_ARCH_CAPABILITIES]);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (has_msr_core_capabs) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
|
|
|
|
env->features[FEAT_CORE_CAPABILITY]);
|
|
|
|
}
|
|
|
|
|
2020-05-29 10:43:47 +03:00
|
|
|
if (has_msr_perf_capabs && cpu->enable_pmu) {
|
|
|
|
kvm_msr_entry_add_perf(cpu, env->features);
|
|
|
|
}
|
|
|
|
|
2020-02-11 20:55:16 +03:00
|
|
|
if (has_msr_ucode_rev) {
|
2020-01-20 21:21:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
|
|
|
|
}
|
|
|
|
|
2020-01-20 21:21:42 +03:00
|
|
|
/*
|
|
|
|
* Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
|
|
|
|
* all kernels with MSR features should have them.
|
|
|
|
*/
|
|
|
|
if (kvm_feature_msrs && cpu_has_vmx(env)) {
|
|
|
|
kvm_msr_entry_add_vmx(cpu, env->features);
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(kvm_buf_set_msrs(cpu) == 0);
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_put_msrs(X86CPU *cpu, int level)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2015-12-16 22:06:44 +03:00
|
|
|
int i;
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2015-12-16 22:06:42 +03:00
|
|
|
kvm_msr_buf_reset(cpu);
|
|
|
|
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
|
2011-01-21 23:48:13 +03:00
|
|
|
if (has_msr_star) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_STAR, env->star);
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2011-01-21 23:48:13 +03:00
|
|
|
if (has_msr_hsave_pa) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2015-09-23 09:27:33 +03:00
|
|
|
if (has_msr_tsc_aux) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
|
2015-09-23 09:27:33 +03:00
|
|
|
}
|
2012-11-27 09:32:18 +04:00
|
|
|
if (has_msr_tsc_adjust) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
|
2012-11-27 09:32:18 +04:00
|
|
|
}
|
2011-10-04 18:26:35 +04:00
|
|
|
if (has_msr_misc_enable) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
|
2011-10-04 18:26:35 +04:00
|
|
|
env->msr_ia32_misc_enable);
|
|
|
|
}
|
2015-06-18 19:28:42 +03:00
|
|
|
if (has_msr_smbase) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
|
2015-06-18 19:28:42 +03:00
|
|
|
}
|
2018-02-27 13:22:12 +03:00
|
|
|
if (has_msr_smi_count) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
|
|
|
|
}
|
2021-02-05 11:33:24 +03:00
|
|
|
if (has_msr_pkrs) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs);
|
|
|
|
}
|
2014-01-20 17:22:25 +04:00
|
|
|
if (has_msr_bndcfgs) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
|
2014-01-20 17:22:25 +04:00
|
|
|
}
|
2014-12-03 05:36:23 +03:00
|
|
|
if (has_msr_xss) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
|
2014-12-03 05:36:23 +03:00
|
|
|
}
|
2019-10-11 10:41:03 +03:00
|
|
|
if (has_msr_umwait) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
|
|
|
|
}
|
2018-01-09 18:45:14 +03:00
|
|
|
if (has_msr_spec_ctrl) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
|
|
|
|
}
|
2021-11-01 16:23:00 +03:00
|
|
|
if (has_tsc_scale_msr) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr);
|
|
|
|
}
|
|
|
|
|
2019-11-20 15:19:22 +03:00
|
|
|
if (has_msr_tsx_ctrl) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
|
|
|
|
}
|
2018-05-22 00:54:24 +03:00
|
|
|
if (has_msr_virt_ssbd) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
|
|
|
|
}
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
#ifdef TARGET_X86_64
|
2010-10-21 19:35:04 +04:00
|
|
|
if (lm_capable_kernel) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
|
2010-10-21 19:35:04 +04:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
#endif
|
2018-01-09 18:45:14 +03:00
|
|
|
|
2011-01-21 23:48:14 +03:00
|
|
|
/*
|
2013-07-25 19:05:22 +04:00
|
|
|
* The following MSRs have side effects on the guest or are too heavy
|
|
|
|
* for normal writeback. Limit them to reset or full state updates.
|
2011-01-21 23:48:14 +03:00
|
|
|
*/
|
|
|
|
if (level >= KVM_PUT_RESET_STATE) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
|
2020-09-17 13:23:16 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
|
|
|
|
}
|
2016-09-27 01:03:24 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
|
2011-01-21 23:48:22 +03:00
|
|
|
}
|
2016-09-27 01:03:24 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
|
2012-08-28 21:43:56 +04:00
|
|
|
}
|
2016-09-27 01:03:24 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
|
2013-02-20 06:27:20 +04:00
|
|
|
}
|
2019-06-04 02:04:08 +03:00
|
|
|
|
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
|
|
|
|
}
|
|
|
|
|
2017-12-27 17:04:26 +03:00
|
|
|
if (has_architectural_pmu_version > 0) {
|
|
|
|
if (has_architectural_pmu_version > 1) {
|
|
|
|
/* Stop the counter. */
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
|
|
|
|
}
|
2013-07-25 19:05:22 +04:00
|
|
|
|
|
|
|
/* Set the counter values. */
|
2017-12-27 17:04:26 +03:00
|
|
|
for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
|
2013-07-25 19:05:22 +04:00
|
|
|
env->msr_fixed_counters[i]);
|
|
|
|
}
|
2017-12-27 17:04:26 +03:00
|
|
|
for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
|
2013-07-25 19:05:22 +04:00
|
|
|
env->msr_gp_counters[i]);
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
|
2013-07-25 19:05:22 +04:00
|
|
|
env->msr_gp_evtsel[i]);
|
|
|
|
}
|
2017-12-27 17:04:26 +03:00
|
|
|
if (has_architectural_pmu_version > 1) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
|
|
|
|
env->msr_global_status);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
|
|
|
|
env->msr_global_ovf_ctrl);
|
|
|
|
|
|
|
|
/* Now start the PMU. */
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
|
|
|
|
env->msr_fixed_ctr_ctrl);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
|
|
|
|
env->msr_global_ctrl);
|
|
|
|
}
|
2013-07-25 19:05:22 +04:00
|
|
|
}
|
2017-11-22 21:14:16 +03:00
|
|
|
/*
|
|
|
|
* Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
|
|
|
|
* only sync them to KVM on the first cpu
|
|
|
|
*/
|
|
|
|
if (current_cpu == first_cpu) {
|
|
|
|
if (has_msr_hv_hypercall) {
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
|
|
|
|
env->msr_hv_guest_os_id);
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
|
|
|
|
env->msr_hv_hypercall);
|
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
|
2017-11-22 21:14:16 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
|
|
|
|
env->msr_hv_tsc);
|
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
|
2018-04-11 14:50:36 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
|
|
|
|
env->msr_hv_reenlightenment_control);
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
|
|
|
|
env->msr_hv_tsc_emulation_control);
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
|
|
|
|
env->msr_hv_tsc_emulation_status);
|
|
|
|
}
|
2022-02-16 13:25:00 +03:00
|
|
|
#ifdef CONFIG_SYNDBG
|
2022-02-16 13:24:59 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) &&
|
|
|
|
has_msr_hv_syndbg_options) {
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS,
|
|
|
|
hyperv_syndbg_query_options());
|
|
|
|
}
|
2022-02-16 13:25:00 +03:00
|
|
|
#endif
|
2011-12-19 00:48:14 +04:00
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
|
2014-01-23 17:40:48 +04:00
|
|
|
env->msr_hv_vapic);
|
2011-12-19 00:48:14 +04:00
|
|
|
}
|
2015-09-09 15:41:30 +03:00
|
|
|
if (has_msr_hv_crash) {
|
|
|
|
int j;
|
|
|
|
|
2017-07-13 23:15:21 +03:00
|
|
|
for (j = 0; j < HV_CRASH_PARAMS; j++)
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
|
2015-09-09 15:41:30 +03:00
|
|
|
env->msr_hv_crash_params[j]);
|
|
|
|
|
2017-07-13 23:15:21 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
|
2015-09-09 15:41:30 +03:00
|
|
|
}
|
2015-09-16 12:59:44 +03:00
|
|
|
if (has_msr_hv_runtime) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
|
2015-09-16 12:59:44 +03:00
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
|
|
|
|
&& hv_vpindex_settable) {
|
2018-09-21 11:20:39 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
|
|
|
|
hyperv_vp_index(CPU(cpu)));
|
2018-07-02 16:41:56 +03:00
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
|
2015-11-11 13:18:38 +03:00
|
|
|
int j;
|
|
|
|
|
2017-11-22 21:14:18 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
|
|
|
|
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
|
2015-11-11 13:18:38 +03:00
|
|
|
env->msr_hv_synic_control);
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
|
2015-11-11 13:18:38 +03:00
|
|
|
env->msr_hv_synic_evt_page);
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
|
2015-11-11 13:18:38 +03:00
|
|
|
env->msr_hv_synic_msg_page);
|
|
|
|
|
|
|
|
for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
|
2015-11-11 13:18:38 +03:00
|
|
|
env->msr_hv_synic_sint[j]);
|
|
|
|
}
|
|
|
|
}
|
2015-11-25 18:21:25 +03:00
|
|
|
if (has_msr_hv_stimer) {
|
|
|
|
int j;
|
|
|
|
|
|
|
|
for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
|
2015-11-25 18:21:25 +03:00
|
|
|
env->msr_hv_stimer_config[j]);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
|
2015-11-25 18:21:25 +03:00
|
|
|
env->msr_hv_stimer_count[j]);
|
|
|
|
}
|
|
|
|
}
|
2016-09-27 01:03:29 +03:00
|
|
|
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
|
2016-07-08 18:01:37 +03:00
|
|
|
uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
|
|
|
|
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
|
2014-08-15 01:39:33 +04:00
|
|
|
for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
|
2016-07-08 18:01:37 +03:00
|
|
|
/* The CPU GPs if we write to a bit above the physical limit of
|
|
|
|
* the host CPU (and KVM emulates that)
|
|
|
|
*/
|
|
|
|
uint64_t mask = env->mtrr_var[i].mask;
|
|
|
|
mask &= phys_mask;
|
|
|
|
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
|
|
|
|
env->mtrr_var[i].base);
|
2016-07-08 18:01:37 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
|
2014-08-15 01:39:33 +04:00
|
|
|
}
|
|
|
|
}
|
2018-03-04 19:48:36 +03:00
|
|
|
if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
|
|
|
|
int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
|
|
|
|
0x14, 1, R_EAX) & 0x7;
|
|
|
|
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
|
|
|
|
env->msr_rtit_ctrl);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
|
|
|
|
env->msr_rtit_status);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
|
|
|
|
env->msr_rtit_output_base);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
|
|
|
|
env->msr_rtit_output_mask);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
|
|
|
|
env->msr_rtit_cr3_match);
|
|
|
|
for (i = 0; i < addr_num; i++) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
|
|
|
|
env->msr_rtit_addrs[i]);
|
|
|
|
}
|
|
|
|
}
|
2013-12-17 23:05:13 +04:00
|
|
|
|
2021-07-19 14:21:13 +03:00
|
|
|
if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
|
|
|
|
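/* With SGX Launch Control, the launch-enclave public key hash is split
 * across four 64-bit MSRs and is part of the migratable vCPU state. */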
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0,
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[0]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1,
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[1]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2,
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[2]);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3,
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[3]);
|
|
|
|
}
|
|
|
|
|
2022-02-17 09:04:33 +03:00
|
|
|
if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
|
|
|
|
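/* XFD and XFD_ERR gate lazy enabling of dynamically-enabled XSAVE
 * features (currently AMX tile state), so they are saved with the vCPU. */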
kvm_msr_entry_add(cpu, MSR_IA32_XFD,
|
|
|
|
env->msr_xfd);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
|
|
|
|
env->msr_xfd_err);
|
|
|
|
}
|
|
|
|
|
2022-02-15 22:52:56 +03:00
|
|
|
if (kvm_enabled() && cpu->enable_pmu &&
|
|
|
|
(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
|
|
|
|
uint64_t depth;
|
2023-09-25 13:19:34 +03:00
|
|
|
int ret;
|
2022-02-15 22:52:56 +03:00
|
|
|
|
|
|
|
/*
|
2022-05-17 18:50:24 +03:00
|
|
|
* Only migrate Arch LBR states when the host Arch LBR depth
|
|
|
|
* equals that of the source guest's; this avoids a mismatch
|
|
|
|
* of guest/host config for the MSR and hence unexpected
|
|
|
|
* misbehavior.
|
2022-02-15 22:52:56 +03:00
|
|
|
*/
|
|
|
|
ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
|
|
|
|
|
2022-05-17 18:50:24 +03:00
|
|
|
if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
|
2022-02-15 22:52:56 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
|
|
|
|
|
|
|
|
for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
|
|
|
|
if (!env->lbr_records[i].from) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
|
|
|
|
env->lbr_records[i].from);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
|
|
|
|
env->lbr_records[i].to);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
|
|
|
|
env->lbr_records[i].info);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-17 23:05:13 +04:00
|
|
|
/* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
|
|
|
|
* kvm_put_msr_feature_control. */
|
2010-03-01 21:10:31 +03:00
|
|
|
}
|
2019-07-01 19:32:17 +03:00
|
|
|
|
2010-10-11 22:31:22 +04:00
|
|
|
if (env->mcg_cap) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
|
2016-06-22 09:56:21 +03:00
|
|
|
if (has_msr_mcg_ext_ctl) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
|
|
|
|
}
|
2011-03-02 10:56:16 +03:00
|
|
|
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
|
2010-10-11 22:31:22 +04:00
|
|
|
}
|
|
|
|
}
|
2009-10-22 16:26:56 +04:00
|
|
|
|
2020-01-20 21:21:42 +03:00
|
|
|
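/* Flush the accumulated MSR list to KVM in a single KVM_SET_MSRS call. */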
return kvm_buf_set_msrs(cpu);
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_get_xsave(X86CPU *cpu)
|
2010-06-17 13:53:07 +04:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2021-07-05 13:46:28 +03:00
|
|
|
void *xsave = env->xsave_buf;
|
2022-02-17 09:04:32 +03:00
|
|
|
int type, ret;
|
2010-06-17 13:53:07 +04:00
|
|
|
|
2022-02-17 09:04:32 +03:00
|
|
|
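/* KVM_GET_XSAVE2 is required when the xsave area can exceed the legacy
 * 4KiB layout (e.g. with AMX state); otherwise KVM_GET_XSAVE suffices. */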
type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
|
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
|
2010-10-19 15:00:34 +04:00
|
|
|
if (ret < 0) {
|
2010-06-17 13:53:07 +04:00
|
|
|
return ret;
|
2010-10-19 15:00:34 +04:00
|
|
|
}
|
2021-07-05 13:46:28 +03:00
|
|
|
x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);
|
2010-06-17 13:53:07 +04:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_get_xcrs(X86CPU *cpu)
|
2010-06-17 13:53:07 +04:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2010-06-17 13:53:07 +04:00
|
|
|
int i, ret;
|
|
|
|
struct kvm_xcrs xcrs;
|
|
|
|
|
2015-10-15 21:30:20 +03:00
|
|
|
if (!has_xcrs) {
|
2010-06-17 13:53:07 +04:00
|
|
|
return 0;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2010-06-17 13:53:07 +04:00
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2010-06-17 13:53:07 +04:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2010-06-17 13:53:07 +04:00
|
|
|
|
2010-12-27 18:19:29 +03:00
|
|
|
for (i = 0; i < xcrs.nr_xcrs; i++) {
|
2010-06-17 13:53:07 +04:00
|
|
|
/* Only support xcr0 now */
|
2013-10-17 18:47:52 +04:00
|
|
|
if (xcrs.xcrs[i].xcr == 0) {
|
|
|
|
env->xcr0 = xcrs.xcrs[i].value;
|
2010-06-17 13:53:07 +04:00
|
|
|
break;
|
|
|
|
}
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2010-06-17 13:53:07 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_get_sregs(X86CPU *cpu)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2008-11-05 19:29:27 +03:00
|
|
|
struct kvm_sregs sregs;
|
2021-12-21 12:12:53 +03:00
|
|
|
int ret;
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2008-11-05 19:29:27 +03:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2021-12-21 12:12:53 +03:00
|
|
|
/*
|
|
|
|
* The interrupt_bitmap is ignored because KVM_GET_SREGS is
|
|
|
|
* always preceded by KVM_GET_VCPU_EVENTS.
|
|
|
|
*/
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
get_seg(&env->segs[R_CS], &sregs.cs);
|
|
|
|
get_seg(&env->segs[R_DS], &sregs.ds);
|
|
|
|
get_seg(&env->segs[R_ES], &sregs.es);
|
|
|
|
get_seg(&env->segs[R_FS], &sregs.fs);
|
|
|
|
get_seg(&env->segs[R_GS], &sregs.gs);
|
|
|
|
get_seg(&env->segs[R_SS], &sregs.ss);
|
|
|
|
|
|
|
|
get_seg(&env->tr, &sregs.tr);
|
|
|
|
get_seg(&env->ldt, &sregs.ldt);
|
|
|
|
|
|
|
|
env->idt.limit = sregs.idt.limit;
|
|
|
|
env->idt.base = sregs.idt.base;
|
|
|
|
env->gdt.limit = sregs.gdt.limit;
|
|
|
|
env->gdt.base = sregs.gdt.base;
|
|
|
|
|
|
|
|
env->cr[0] = sregs.cr0;
|
|
|
|
env->cr[2] = sregs.cr2;
|
|
|
|
env->cr[3] = sregs.cr3;
|
|
|
|
env->cr[4] = sregs.cr4;
|
|
|
|
|
|
|
|
env->efer = sregs.efer;
|
i386/sev: Avoid SEV-ES crash due to missing MSR_EFER_LMA bit
Commit 7191f24c7fcf ("accel/kvm/kvm-all: Handle register access errors")
added error checking for KVM_SET_SREGS/KVM_SET_SREGS2. In doing so, it
exposed a long-running bug in current KVM support for SEV-ES where the
kernel assumes that MSR_EFER_LMA will be set explicitly by the guest
kernel, in which case EFER write traps would result in KVM eventually
seeing MSR_EFER_LMA get set and recording it in such a way that it would
be subsequently visible when accessing it via KVM_GET_SREGS/etc.
However, guest kernels currently rely on MSR_EFER_LMA getting set
automatically when MSR_EFER_LME is set and paging is enabled via
CR0_PG_MASK. As a result, the EFER write traps don't actually expose the
MSR_EFER_LMA bit, even though it is set internally, and when QEMU
subsequently tries to pass this EFER value back to KVM via
KVM_SET_SREGS* it will fail various sanity checks and return -EINVAL,
which is now considered fatal due to the aforementioned QEMU commit.
This can be addressed by inferring the MSR_EFER_LMA bit being set when
paging is enabled and MSR_EFER_LME is set, and synthesizing it to ensure
the expected bits are all present in subsequent handling on the host
side.
Ultimately, this handling will be implemented in the host kernel, but to
avoid breaking QEMU's SEV-ES support when using older host kernels, the
same handling can be done in QEMU just after fetching the register
values via KVM_GET_SREGS*. Implement that here.
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Akihiko Odaki <akihiko.odaki@daynix.com>
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
Cc: Lara Lazier <laramglazier@gmail.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Maxim Levitsky <mlevitsk@redhat.com>
Cc: <kvm@vger.kernel.org>
Fixes: 7191f24c7fcf ("accel/kvm/kvm-all: Handle register access errors")
Signed-off-by: Michael Roth <michael.roth@amd.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20231206155821.1194551-1-michael.roth@amd.com>
2023-12-06 18:58:21 +03:00
|
|
|
if (sev_es_enabled() && env->efer & MSR_EFER_LME &&
|
|
|
|
env->cr[0] & CR0_PG_MASK) {
|
|
|
|
env->efer |= MSR_EFER_LMA;
|
|
|
|
}
|
2011-10-26 15:09:45 +04:00
|
|
|
|
|
|
|
/* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
|
2018-01-10 22:50:54 +03:00
|
|
|
x86_update_hflags(env);
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-11-01 16:22:58 +03:00
|
|
|
static int kvm_get_sregs2(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
struct kvm_sregs2 sregs;
|
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
get_seg(&env->segs[R_CS], &sregs.cs);
|
|
|
|
get_seg(&env->segs[R_DS], &sregs.ds);
|
|
|
|
get_seg(&env->segs[R_ES], &sregs.es);
|
|
|
|
get_seg(&env->segs[R_FS], &sregs.fs);
|
|
|
|
get_seg(&env->segs[R_GS], &sregs.gs);
|
|
|
|
get_seg(&env->segs[R_SS], &sregs.ss);
|
|
|
|
|
|
|
|
get_seg(&env->tr, &sregs.tr);
|
|
|
|
get_seg(&env->ldt, &sregs.ldt);
|
|
|
|
|
|
|
|
env->idt.limit = sregs.idt.limit;
|
|
|
|
env->idt.base = sregs.idt.base;
|
|
|
|
env->gdt.limit = sregs.gdt.limit;
|
|
|
|
env->gdt.base = sregs.gdt.base;
|
|
|
|
|
|
|
|
env->cr[0] = sregs.cr0;
|
|
|
|
env->cr[2] = sregs.cr2;
|
|
|
|
env->cr[3] = sregs.cr3;
|
|
|
|
env->cr[4] = sregs.cr4;
|
|
|
|
|
|
|
|
env->efer = sregs.efer;
|
2023-12-06 18:58:21 +03:00
|
|
|
if (sev_es_enabled() && env->efer & MSR_EFER_LME &&
|
|
|
|
env->cr[0] & CR0_PG_MASK) {
|
|
|
|
env->efer |= MSR_EFER_LMA;
|
|
|
|
}
|
2021-11-01 16:22:58 +03:00
|
|
|
|
|
|
|
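/* Unlike KVM_GET_SREGS, the SREGS2 interface also carries the vCPU's
 * cached PDPTEs, so they can be migrated rather than reloaded from
 * guest memory on the destination. */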
env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
|
|
|
|
|
|
|
|
if (env->pdptrs_valid) {
|
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
env->pdptrs[i] = sregs.pdptrs[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
|
|
|
|
x86_update_hflags(env);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_get_msrs(X86CPU *cpu)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2015-12-16 22:06:42 +03:00
|
|
|
struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
|
2015-12-16 22:06:44 +03:00
|
|
|
int ret, i;
|
2016-07-08 18:01:38 +03:00
|
|
|
uint64_t mtrr_top_bits;
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2015-12-16 22:06:42 +03:00
|
|
|
kvm_msr_buf_reset(cpu);
|
|
|
|
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_PAT, 0);
|
2011-01-21 23:48:13 +03:00
|
|
|
if (has_msr_star) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_STAR, 0);
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2011-01-21 23:48:13 +03:00
|
|
|
if (has_msr_hsave_pa) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2015-09-23 09:27:33 +03:00
|
|
|
if (has_msr_tsc_aux) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
|
2015-09-23 09:27:33 +03:00
|
|
|
}
|
2012-11-27 09:32:18 +04:00
|
|
|
if (has_msr_tsc_adjust) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
|
2012-11-27 09:32:18 +04:00
|
|
|
}
|
2011-10-05 23:52:32 +04:00
|
|
|
if (has_msr_tsc_deadline) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
|
2011-10-05 23:52:32 +04:00
|
|
|
}
|
2011-10-04 18:26:35 +04:00
|
|
|
if (has_msr_misc_enable) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
|
2011-10-04 18:26:35 +04:00
|
|
|
}
|
2015-06-18 19:28:42 +03:00
|
|
|
if (has_msr_smbase) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
|
2015-06-18 19:28:42 +03:00
|
|
|
}
|
2018-02-27 13:22:12 +03:00
|
|
|
if (has_msr_smi_count) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
|
|
|
|
}
|
2013-08-19 05:33:30 +04:00
|
|
|
if (has_msr_feature_control) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
|
2013-08-19 05:33:30 +04:00
|
|
|
}
|
2021-02-05 11:33:24 +03:00
|
|
|
if (has_msr_pkrs) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
|
|
|
|
}
|
2013-12-05 04:32:12 +04:00
|
|
|
if (has_msr_bndcfgs) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
|
2013-12-05 04:32:12 +04:00
|
|
|
}
|
2014-12-03 05:36:23 +03:00
|
|
|
if (has_msr_xss) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
|
2014-12-03 05:36:23 +03:00
|
|
|
}
|
2019-10-11 10:41:03 +03:00
|
|
|
if (has_msr_umwait) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
|
|
|
|
}
|
2018-01-09 18:45:14 +03:00
|
|
|
if (has_msr_spec_ctrl) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
|
|
|
|
}
|
2021-11-01 16:23:00 +03:00
|
|
|
if (has_tsc_scale_msr) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0);
|
|
|
|
}
|
|
|
|
|
2019-11-20 15:19:22 +03:00
|
|
|
if (has_msr_tsx_ctrl) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
|
|
|
|
}
|
2018-05-22 00:54:24 +03:00
|
|
|
if (has_msr_virt_ssbd) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
|
|
|
|
}
|
2011-02-03 22:19:53 +03:00
|
|
|
if (!env->tsc_valid) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
|
2011-07-29 22:36:43 +04:00
|
|
|
env->tsc_valid = !runstate_is_running();
|
2011-02-03 22:19:53 +03:00
|
|
|
}
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
#ifdef TARGET_X86_64
|
2010-10-21 19:35:04 +04:00
|
|
|
if (lm_capable_kernel) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_FMASK, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
|
2010-10-21 19:35:04 +04:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
#endif
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
|
2020-09-08 17:12:06 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
|
|
|
|
}
|
2020-09-17 13:23:16 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
|
|
|
|
}
|
2016-09-27 01:03:24 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
|
2012-08-28 21:43:56 +04:00
|
|
|
}
|
2016-09-27 01:03:24 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
|
2013-02-20 06:27:20 +04:00
|
|
|
}
|
2019-06-04 02:04:08 +03:00
|
|
|
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
|
|
|
|
}
|
2017-12-27 17:04:26 +03:00
|
|
|
if (has_architectural_pmu_version > 0) {
|
|
|
|
if (has_architectural_pmu_version > 1) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
|
|
|
|
}
|
|
|
|
for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
|
2013-07-25 19:05:22 +04:00
|
|
|
}
|
2017-12-27 17:04:26 +03:00
|
|
|
for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
|
2013-07-25 19:05:22 +04:00
|
|
|
}
|
|
|
|
}
|
2009-10-22 16:26:56 +04:00
|
|
|
|
2010-10-11 22:31:22 +04:00
|
|
|
if (env->mcg_cap) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
|
2016-06-22 09:56:21 +03:00
|
|
|
if (has_msr_mcg_ext_ctl) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
|
|
|
|
}
|
2010-12-27 18:19:29 +03:00
|
|
|
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2010-10-11 22:31:22 +04:00
|
|
|
}
|
|
|
|
|
2014-01-23 17:40:47 +04:00
|
|
|
if (has_msr_hv_hypercall) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
|
2014-01-23 17:40:47 +04:00
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
|
2014-01-23 17:40:48 +04:00
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
|
2014-01-23 17:40:49 +04:00
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
|
2018-04-11 14:50:36 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
|
|
|
|
}
|
2022-02-16 13:24:59 +03:00
|
|
|
if (has_msr_hv_syndbg_options) {
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0);
|
|
|
|
}
|
2015-09-09 15:41:30 +03:00
|
|
|
if (has_msr_hv_crash) {
|
|
|
|
int j;
|
|
|
|
|
2017-07-13 23:15:21 +03:00
|
|
|
for (j = 0; j < HV_CRASH_PARAMS; j++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
|
2015-09-09 15:41:30 +03:00
|
|
|
}
|
|
|
|
}
|
2015-09-16 12:59:44 +03:00
|
|
|
if (has_msr_hv_runtime) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
|
2015-09-16 12:59:44 +03:00
|
|
|
}
|
2019-05-17 17:19:16 +03:00
|
|
|
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
|
2015-11-11 13:18:38 +03:00
|
|
|
uint32_t msr;
|
|
|
|
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
|
|
|
|
kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
|
2015-11-11 13:18:38 +03:00
|
|
|
for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, msr, 0);
|
2015-11-11 13:18:38 +03:00
|
|
|
}
|
|
|
|
}
|
2015-11-25 18:21:25 +03:00
|
|
|
if (has_msr_hv_stimer) {
|
|
|
|
uint32_t msr;
|
|
|
|
|
|
|
|
for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
|
|
|
|
msr++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, msr, 0);
|
2015-11-25 18:21:25 +03:00
|
|
|
}
|
|
|
|
}
|
2016-09-27 01:03:29 +03:00
|
|
|
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
|
2014-08-15 01:39:33 +04:00
|
|
|
for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
|
2015-12-16 22:06:44 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
|
2014-08-15 01:39:33 +04:00
|
|
|
}
|
|
|
|
}
|
2014-01-23 17:40:48 +04:00
|
|
|
|
2018-03-04 19:48:36 +03:00
|
|
|
if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
|
|
|
|
int addr_num =
|
|
|
|
kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
|
|
|
|
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
|
|
|
|
for (i = 0; i < addr_num; i++) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-19 14:21:13 +03:00
|
|
|
if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
|
|
|
|
}
|
|
|
|
|
2022-02-17 09:04:33 +03:00
|
|
|
if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
|
|
|
|
}
|
|
|
|
|
2022-02-15 22:52:56 +03:00
|
|
|
if (kvm_enabled() && cpu->enable_pmu &&
|
|
|
|
(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
|
2022-05-17 18:50:24 +03:00
|
|
|
uint64_t depth;
|
2022-02-15 22:52:56 +03:00
|
|
|
|
2022-05-17 18:50:24 +03:00
|
|
|
ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
|
|
|
|
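/* kvm_get_one_msr() returns the number of MSRs read, i.e. 1 on success;
 * only request the LBR stack when the host reports the full
 * architectural depth (ARCH_LBR_NR_ENTRIES). */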
if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
|
2022-02-15 22:52:56 +03:00
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
|
|
|
|
|
|
|
|
for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
|
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
|
|
|
|
kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-16 22:06:42 +03:00
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2008-11-05 19:29:27 +03:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2017-03-09 22:46:34 +03:00
|
|
|
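/* KVM_GET_MSRS returns the number of entries successfully read, so a
 * short count identifies the first MSR the kernel could not provide. */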
if (ret < cpu->kvm_msr_buf->nmsrs) {
|
|
|
|
struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
|
|
|
|
error_report("error: failed to get MSR 0x%" PRIx32,
|
|
|
|
(uint32_t)e->index);
|
|
|
|
}
|
|
|
|
|
2015-12-16 22:06:44 +03:00
|
|
|
assert(ret == cpu->kvm_msr_buf->nmsrs);
|
2016-07-08 18:01:38 +03:00
|
|
|
/*
|
|
|
|
* MTRR masks: Each mask consists of 5 parts
|
|
|
|
* a 10..0: must be zero
|
|
|
|
* b 11 : valid bit
|
|
|
|
* c n-1..12: actual mask bits
|
|
|
|
* d 51..n: reserved must be zero
|
|
|
|
* e 63..52: reserved must be zero
|
|
|
|
*
|
|
|
|
* 'n' is the number of physical bits supported by the CPU and is
|
|
|
|
* apparently always <= 52. We know our 'n' but don't know what
|
|
|
|
* the destination's 'n' is; it might be smaller, in which case
|
|
|
|
* it masks (c) on loading. It might be larger, in which case
|
|
|
|
* we fill 'd' so that d..c is consistent irrespective of the 'n'
|
|
|
|
* we're migrating to.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (cpu->fill_mtrr_mask) {
|
|
|
|
QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
|
|
|
|
assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
|
|
|
|
mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
|
|
|
|
} else {
|
|
|
|
mtrr_top_bits = 0;
|
|
|
|
}
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
for (i = 0; i < ret; i++) {
|
2013-07-25 19:05:22 +04:00
|
|
|
uint32_t index = msrs[i].index;
|
|
|
|
switch (index) {
|
2008-11-05 19:29:27 +03:00
|
|
|
case MSR_IA32_SYSENTER_CS:
|
|
|
|
env->sysenter_cs = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_SYSENTER_ESP:
|
|
|
|
env->sysenter_esp = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_SYSENTER_EIP:
|
|
|
|
env->sysenter_eip = msrs[i].data;
|
|
|
|
break;
|
2011-03-15 14:26:23 +03:00
|
|
|
case MSR_PAT:
|
|
|
|
env->pat = msrs[i].data;
|
|
|
|
break;
|
2008-11-05 19:29:27 +03:00
|
|
|
case MSR_STAR:
|
|
|
|
env->star = msrs[i].data;
|
|
|
|
break;
|
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
case MSR_CSTAR:
|
|
|
|
env->cstar = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_KERNELGSBASE:
|
|
|
|
env->kernelgsbase = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_FMASK:
|
|
|
|
env->fmask = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_LSTAR:
|
|
|
|
env->lstar = msrs[i].data;
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
case MSR_IA32_TSC:
|
|
|
|
env->tsc = msrs[i].data;
|
|
|
|
break;
|
2015-09-23 09:27:33 +03:00
|
|
|
case MSR_TSC_AUX:
|
|
|
|
env->tsc_aux = msrs[i].data;
|
|
|
|
break;
|
2012-11-27 09:32:18 +04:00
|
|
|
case MSR_TSC_ADJUST:
|
|
|
|
env->tsc_adjust = msrs[i].data;
|
|
|
|
break;
|
2011-10-05 23:52:32 +04:00
|
|
|
case MSR_IA32_TSCDEADLINE:
|
|
|
|
env->tsc_deadline = msrs[i].data;
|
|
|
|
break;
|
2010-10-21 19:35:01 +04:00
|
|
|
case MSR_VM_HSAVE_PA:
|
|
|
|
env->vm_hsave = msrs[i].data;
|
|
|
|
break;
|
2009-10-22 16:26:56 +04:00
|
|
|
case MSR_KVM_SYSTEM_TIME:
|
|
|
|
env->system_time_msr = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_KVM_WALL_CLOCK:
|
|
|
|
env->wall_clock_msr = msrs[i].data;
|
|
|
|
break;
|
2010-10-11 22:31:22 +04:00
|
|
|
case MSR_MCG_STATUS:
|
|
|
|
env->mcg_status = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MCG_CTL:
|
|
|
|
env->mcg_ctl = msrs[i].data;
|
|
|
|
break;
|
2016-06-22 09:56:21 +03:00
|
|
|
case MSR_MCG_EXT_CTL:
|
|
|
|
env->mcg_ext_ctl = msrs[i].data;
|
|
|
|
break;
|
2011-10-04 18:26:35 +04:00
|
|
|
case MSR_IA32_MISC_ENABLE:
|
|
|
|
env->msr_ia32_misc_enable = msrs[i].data;
|
|
|
|
break;
|
2015-06-18 19:28:42 +03:00
|
|
|
case MSR_IA32_SMBASE:
|
|
|
|
env->smbase = msrs[i].data;
|
|
|
|
break;
|
2018-02-27 13:22:12 +03:00
|
|
|
case MSR_SMI_COUNT:
|
|
|
|
env->msr_smi_count = msrs[i].data;
|
|
|
|
break;
|
2013-07-07 19:13:37 +04:00
|
|
|
case MSR_IA32_FEATURE_CONTROL:
|
|
|
|
env->msr_ia32_feature_control = msrs[i].data;
|
2013-08-19 05:33:30 +04:00
|
|
|
break;
|
2013-12-05 04:32:12 +04:00
|
|
|
case MSR_IA32_BNDCFGS:
|
|
|
|
env->msr_bndcfgs = msrs[i].data;
|
|
|
|
break;
|
2014-12-03 05:36:23 +03:00
|
|
|
case MSR_IA32_XSS:
|
|
|
|
env->xss = msrs[i].data;
|
|
|
|
break;
|
2019-10-11 10:41:03 +03:00
|
|
|
case MSR_IA32_UMWAIT_CONTROL:
|
|
|
|
env->umwait = msrs[i].data;
|
|
|
|
break;
|
2021-02-05 11:33:24 +03:00
|
|
|
case MSR_IA32_PKRS:
|
|
|
|
env->pkrs = msrs[i].data;
|
|
|
|
break;
|
2010-10-11 22:31:22 +04:00
|
|
|
default:
|
|
|
|
if (msrs[i].index >= MSR_MC0_CTL &&
|
|
|
|
msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
|
|
|
|
env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
|
|
|
|
}
|
2010-10-21 12:23:14 +04:00
|
|
|
break;
|
2010-10-24 16:27:55 +04:00
|
|
|
case MSR_KVM_ASYNC_PF_EN:
|
|
|
|
env->async_pf_en_msr = msrs[i].data;
|
|
|
|
break;
|
2020-09-08 17:12:06 +03:00
|
|
|
case MSR_KVM_ASYNC_PF_INT:
|
|
|
|
env->async_pf_int_msr = msrs[i].data;
|
|
|
|
break;
|
2012-08-28 21:43:56 +04:00
|
|
|
case MSR_KVM_PV_EOI_EN:
|
|
|
|
env->pv_eoi_en_msr = msrs[i].data;
|
|
|
|
break;
|
2013-02-20 06:27:20 +04:00
|
|
|
case MSR_KVM_STEAL_TIME:
|
|
|
|
env->steal_time_msr = msrs[i].data;
|
|
|
|
break;
|
2019-06-04 02:04:08 +03:00
|
|
|
case MSR_KVM_POLL_CONTROL: {
|
|
|
|
env->poll_control_msr = msrs[i].data;
|
|
|
|
break;
|
|
|
|
}
|
2013-07-25 19:05:22 +04:00
|
|
|
case MSR_CORE_PERF_FIXED_CTR_CTRL:
|
|
|
|
env->msr_fixed_ctr_ctrl = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_CORE_PERF_GLOBAL_CTRL:
|
|
|
|
env->msr_global_ctrl = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_CORE_PERF_GLOBAL_STATUS:
|
|
|
|
env->msr_global_status = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
|
|
|
|
env->msr_global_ovf_ctrl = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
|
|
|
|
env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
|
|
|
|
env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
|
|
|
|
env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
|
|
|
|
break;
|
2014-01-23 17:40:47 +04:00
|
|
|
case HV_X64_MSR_HYPERCALL:
|
|
|
|
env->msr_hv_hypercall = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_GUEST_OS_ID:
|
|
|
|
env->msr_hv_guest_os_id = msrs[i].data;
|
|
|
|
break;
|
2014-01-23 17:40:48 +04:00
|
|
|
case HV_X64_MSR_APIC_ASSIST_PAGE:
|
|
|
|
env->msr_hv_vapic = msrs[i].data;
|
|
|
|
break;
|
2014-01-23 17:40:49 +04:00
|
|
|
case HV_X64_MSR_REFERENCE_TSC:
|
|
|
|
env->msr_hv_tsc = msrs[i].data;
|
|
|
|
break;
|
2015-09-09 15:41:30 +03:00
|
|
|
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
|
|
|
|
env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
|
|
|
|
break;
|
2015-09-16 12:59:44 +03:00
|
|
|
case HV_X64_MSR_VP_RUNTIME:
|
|
|
|
env->msr_hv_runtime = msrs[i].data;
|
|
|
|
break;
|
2015-11-11 13:18:38 +03:00
|
|
|
case HV_X64_MSR_SCONTROL:
|
|
|
|
env->msr_hv_synic_control = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_SIEFP:
|
|
|
|
env->msr_hv_synic_evt_page = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_SIMP:
|
|
|
|
env->msr_hv_synic_msg_page = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
|
|
|
|
env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
|
2015-11-25 18:21:25 +03:00
|
|
|
break;
|
|
|
|
case HV_X64_MSR_STIMER0_CONFIG:
|
|
|
|
case HV_X64_MSR_STIMER1_CONFIG:
|
|
|
|
case HV_X64_MSR_STIMER2_CONFIG:
|
|
|
|
case HV_X64_MSR_STIMER3_CONFIG:
|
|
|
|
env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
|
|
|
|
msrs[i].data;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_STIMER0_COUNT:
|
|
|
|
case HV_X64_MSR_STIMER1_COUNT:
|
|
|
|
case HV_X64_MSR_STIMER2_COUNT:
|
|
|
|
case HV_X64_MSR_STIMER3_COUNT:
|
|
|
|
env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
|
|
|
|
msrs[i].data;
|
2015-11-11 13:18:38 +03:00
|
|
|
break;
|
2018-04-11 14:50:36 +03:00
|
|
|
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
|
|
|
|
env->msr_hv_reenlightenment_control = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_TSC_EMULATION_CONTROL:
|
|
|
|
env->msr_hv_tsc_emulation_control = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case HV_X64_MSR_TSC_EMULATION_STATUS:
|
|
|
|
env->msr_hv_tsc_emulation_status = msrs[i].data;
|
|
|
|
break;
|
2022-02-16 13:24:59 +03:00
|
|
|
case HV_X64_MSR_SYNDBG_OPTIONS:
|
|
|
|
env->msr_hv_syndbg_options = msrs[i].data;
|
|
|
|
break;
|
2014-08-15 01:39:33 +04:00
|
|
|
case MSR_MTRRdefType:
|
|
|
|
env->mtrr_deftype = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix64K_00000:
|
|
|
|
env->mtrr_fixed[0] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix16K_80000:
|
|
|
|
env->mtrr_fixed[1] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix16K_A0000:
|
|
|
|
env->mtrr_fixed[2] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix4K_C0000:
|
|
|
|
env->mtrr_fixed[3] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix4K_C8000:
|
|
|
|
env->mtrr_fixed[4] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix4K_D0000:
|
|
|
|
env->mtrr_fixed[5] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix4K_D8000:
|
|
|
|
env->mtrr_fixed[6] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix4K_E0000:
|
|
|
|
env->mtrr_fixed[7] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix4K_E8000:
|
|
|
|
env->mtrr_fixed[8] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix4K_F0000:
|
|
|
|
env->mtrr_fixed[9] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRfix4K_F8000:
|
|
|
|
env->mtrr_fixed[10] = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
|
|
|
|
if (index & 1) {
|
2016-07-08 18:01:38 +03:00
|
|
|
env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
|
|
|
|
mtrr_top_bits;
|
2014-08-15 01:39:33 +04:00
|
|
|
} else {
|
|
|
|
env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
|
|
|
|
}
|
|
|
|
break;
|
2018-01-09 18:45:14 +03:00
|
|
|
case MSR_IA32_SPEC_CTRL:
|
|
|
|
env->spec_ctrl = msrs[i].data;
|
|
|
|
break;
|
2021-11-01 16:23:00 +03:00
|
|
|
case MSR_AMD64_TSC_RATIO:
|
|
|
|
env->amd_tsc_scale_msr = msrs[i].data;
|
|
|
|
break;
|
2019-11-20 15:19:22 +03:00
|
|
|
case MSR_IA32_TSX_CTRL:
|
|
|
|
env->tsx_ctrl = msrs[i].data;
|
|
|
|
break;
|
2018-05-22 00:54:24 +03:00
|
|
|
case MSR_VIRT_SSBD:
|
|
|
|
env->virt_ssbd = msrs[i].data;
|
|
|
|
break;
|
2018-03-04 19:48:36 +03:00
|
|
|
case MSR_IA32_RTIT_CTL:
|
|
|
|
env->msr_rtit_ctrl = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_RTIT_STATUS:
|
|
|
|
env->msr_rtit_status = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_RTIT_OUTPUT_BASE:
|
|
|
|
env->msr_rtit_output_base = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_RTIT_OUTPUT_MASK:
|
|
|
|
env->msr_rtit_output_mask = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_RTIT_CR3_MATCH:
|
|
|
|
env->msr_rtit_cr3_match = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
|
|
|
|
env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
|
|
|
|
break;
|
2021-07-19 14:21:13 +03:00
|
|
|
case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
|
|
|
|
msrs[i].data;
|
|
|
|
break;
|
2022-02-17 09:04:33 +03:00
|
|
|
case MSR_IA32_XFD:
|
|
|
|
env->msr_xfd = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_IA32_XFD_ERR:
|
|
|
|
env->msr_xfd_err = msrs[i].data;
|
|
|
|
break;
|
2022-02-15 22:52:56 +03:00
|
|
|
case MSR_ARCH_LBR_CTL:
|
|
|
|
env->msr_lbr_ctl = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_ARCH_LBR_DEPTH:
|
|
|
|
env->msr_lbr_depth = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
|
|
|
|
env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
|
|
|
|
env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
|
|
|
|
break;
|
|
|
|
case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
|
|
|
|
env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
|
|
|
|
break;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_put_mp_state(X86CPU *cpu)
|
2009-11-10 00:05:37 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
|
2009-11-10 00:05:37 +03:00
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
|
2009-11-10 00:05:37 +03:00
|
|
|
}
|
|
|
|
|
2012-05-03 18:56:46 +04:00
|
|
|
static int kvm_get_mp_state(X86CPU *cpu)
|
2009-11-10 00:05:37 +03:00
|
|
|
{
|
2013-01-17 21:51:17 +04:00
|
|
|
CPUState *cs = CPU(cpu);
|
2012-05-03 18:56:46 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2009-11-10 00:05:37 +03:00
|
|
|
struct kvm_mp_state mp_state;
|
|
|
|
int ret;
|
|
|
|
|
2013-01-17 21:51:17 +04:00
|
|
|
ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
|
2009-11-10 00:05:37 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
env->mp_state = mp_state.mp_state;
|
2011-01-21 23:48:10 +03:00
|
|
|
if (kvm_irqchip_in_kernel()) {
|
2013-01-17 21:51:17 +04:00
|
|
|
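/* With the in-kernel irqchip, HLT is handled inside KVM, so mirror the
 * halted state back into the QEMU CPUState. */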
cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
|
2011-01-21 23:48:10 +03:00
|
|
|
}
|
2009-11-10 00:05:37 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_get_apic(X86CPU *cpu)
|
2011-10-16 15:23:26 +04:00
|
|
|
{
|
2013-12-23 13:04:02 +04:00
|
|
|
DeviceState *apic = cpu->apic_state;
|
2011-10-16 15:23:26 +04:00
|
|
|
struct kvm_lapic_state kapic;
|
|
|
|
int ret;
|
|
|
|
|
2012-01-31 22:17:52 +04:00
|
|
|
if (apic && kvm_irqchip_in_kernel()) {
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
|
2011-10-16 15:23:26 +04:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
kvm_get_apic_state(apic, &kapic);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
|
2009-11-25 02:33:03 +03:00
|
|
|
{
|
2015-06-18 19:28:42 +03:00
|
|
|
CPUState *cs = CPU(cpu);
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2014-10-30 11:33:43 +03:00
|
|
|
struct kvm_vcpu_events events = {};
|
2009-11-25 02:33:03 +03:00
|
|
|
|
2019-06-19 19:21:39 +03:00
|
|
|
events.flags = 0;
|
|
|
|
|
|
|
|
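/* With exception payload support, a still-pending exception and its
 * payload (e.g. the faulting address of a #PF) are transferred
 * separately from an already-injected one. */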
if (has_exception_payload) {
|
|
|
|
events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
|
|
|
|
events.exception.pending = env->exception_pending;
|
|
|
|
events.exception_has_payload = env->exception_has_payload;
|
|
|
|
events.exception_payload = env->exception_payload;
|
|
|
|
}
|
|
|
|
events.exception.nr = env->exception_nr;
|
|
|
|
events.exception.injected = env->exception_injected;
|
2009-11-25 02:33:03 +03:00
|
|
|
events.exception.has_error_code = env->has_error_code;
|
|
|
|
events.exception.error_code = env->error_code;
|
|
|
|
|
|
|
|
events.interrupt.injected = (env->interrupt_injected >= 0);
|
|
|
|
events.interrupt.nr = env->interrupt_injected;
|
|
|
|
events.interrupt.soft = env->soft_interrupt;
|
|
|
|
|
|
|
|
events.nmi.injected = env->nmi_injected;
|
|
|
|
events.nmi.pending = env->nmi_pending;
|
|
|
|
events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
|
|
|
|
|
|
|
|
events.sipi_vector = env->sipi_vector;
|
|
|
|
|
2015-06-18 19:28:42 +03:00
|
|
|
if (has_msr_smbase) {
|
|
|
|
events.smi.smm = !!(env->hflags & HF_SMM_MASK);
|
|
|
|
events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
|
|
|
|
if (kvm_irqchip_in_kernel()) {
|
|
|
|
/* As soon as these are moved to the kernel, remove them
|
|
|
|
* from cs->interrupt_request.
|
|
|
|
*/
|
|
|
|
events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
|
|
|
|
events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
|
|
|
|
cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
|
|
|
|
} else {
|
|
|
|
/* Keep these in cs->interrupt_request. */
|
|
|
|
events.smi.pending = 0;
|
|
|
|
events.smi.latched_init = 0;
|
|
|
|
}
|
2017-02-23 16:34:41 +03:00
|
|
|
/* Stop SMI delivery on old machine types to avoid a reboot
|
|
|
|
* on an incoming migration of an old VM.
|
|
|
|
*/
|
|
|
|
if (!cpu->kvm_no_smi_migration) {
|
|
|
|
events.flags |= KVM_VCPUEVENT_VALID_SMM;
|
|
|
|
}
|
2015-06-18 19:28:42 +03:00
|
|
|
}
|
|
|
|
|
2010-03-01 21:10:31 +03:00
|
|
|
if (level >= KVM_PUT_RESET_STATE) {
|
2017-07-14 18:47:36 +03:00
|
|
|
events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
|
|
|
|
if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
|
|
|
|
events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
|
|
|
|
}
|
2010-03-01 21:10:31 +03:00
|
|
|
}
|
2010-01-28 11:30:51 +03:00
|
|
|
|
2022-09-29 10:20:11 +03:00
|
|
|
if (has_triple_fault_event) {
|
|
|
|
events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
|
|
|
|
events.triple_fault.pending = env->triple_fault_pending;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
|
2009-11-25 02:33:03 +03:00
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_get_vcpu_events(X86CPU *cpu)
|
2009-11-25 02:33:03 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2009-11-25 02:33:03 +03:00
|
|
|
struct kvm_vcpu_events events;
|
|
|
|
int ret;
|
|
|
|
|
2015-06-18 19:28:42 +03:00
|
|
|
memset(&events, 0, sizeof(events));
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
|
2009-11-25 02:33:03 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2019-06-19 19:21:39 +03:00
|
|
|
|
|
|
|
if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
|
|
|
|
env->exception_pending = events.exception.pending;
|
|
|
|
env->exception_has_payload = events.exception_has_payload;
|
|
|
|
env->exception_payload = events.exception_payload;
|
|
|
|
} else {
|
|
|
|
env->exception_pending = 0;
|
|
|
|
env->exception_has_payload = false;
|
|
|
|
}
|
|
|
|
env->exception_injected = events.exception.injected;
|
|
|
|
env->exception_nr =
|
|
|
|
(env->exception_pending || env->exception_injected) ?
|
|
|
|
events.exception.nr : -1;
|
2009-11-25 02:33:03 +03:00
|
|
|
env->has_error_code = events.exception.has_error_code;
|
|
|
|
env->error_code = events.exception.error_code;
|
|
|
|
|
|
|
|
env->interrupt_injected =
|
|
|
|
events.interrupt.injected ? events.interrupt.nr : -1;
|
|
|
|
env->soft_interrupt = events.interrupt.soft;
|
|
|
|
|
|
|
|
env->nmi_injected = events.nmi.injected;
|
|
|
|
env->nmi_pending = events.nmi.pending;
|
|
|
|
if (events.nmi.masked) {
|
|
|
|
env->hflags2 |= HF2_NMI_MASK;
|
|
|
|
} else {
|
|
|
|
env->hflags2 &= ~HF2_NMI_MASK;
|
|
|
|
}
|
|
|
|
|
2015-06-18 19:28:42 +03:00
|
|
|
if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
|
|
|
|
if (events.smi.smm) {
|
|
|
|
env->hflags |= HF_SMM_MASK;
|
|
|
|
} else {
|
|
|
|
env->hflags &= ~HF_SMM_MASK;
|
|
|
|
}
|
|
|
|
if (events.smi.pending) {
|
|
|
|
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
|
|
|
|
} else {
|
|
|
|
cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
|
|
|
|
}
|
|
|
|
if (events.smi.smm_inside_nmi) {
|
|
|
|
env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
|
|
|
|
} else {
|
|
|
|
env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
|
|
|
|
}
|
|
|
|
if (events.smi.latched_init) {
|
|
|
|
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
|
|
|
|
} else {
|
|
|
|
cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-09-29 10:20:11 +03:00
|
|
|
if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
|
|
|
|
env->triple_fault_pending = events.triple_fault.pending;
|
|
|
|
}
|
|
|
|
|
2009-11-25 02:33:03 +03:00
|
|
|
env->sipi_vector = events.sipi_vector;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_put_debugregs(X86CPU *cpu)
|
2010-03-12 17:20:49 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2010-03-12 17:20:49 +03:00
|
|
|
struct kvm_debugregs dbgregs;
|
|
|
|
int i;
|
|
|
|
|
2019-07-30 19:01:38 +03:00
|
|
|
memset(&dbgregs, 0, sizeof(dbgregs));
|
2010-03-12 17:20:49 +03:00
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
dbgregs.db[i] = env->dr[i];
|
|
|
|
}
|
|
|
|
dbgregs.dr6 = env->dr[6];
|
|
|
|
dbgregs.dr7 = env->dr[7];
|
|
|
|
dbgregs.flags = 0;
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
|
2010-03-12 17:20:49 +03:00
|
|
|
}
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
static int kvm_get_debugregs(X86CPU *cpu)
|
2010-03-12 17:20:49 +03:00
|
|
|
{
|
2012-10-31 09:06:49 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2010-03-12 17:20:49 +03:00
|
|
|
struct kvm_debugregs dbgregs;
|
|
|
|
int i, ret;
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
|
2010-03-12 17:20:49 +03:00
|
|
|
if (ret < 0) {
|
2010-12-27 18:19:29 +03:00
|
|
|
return ret;
|
2010-03-12 17:20:49 +03:00
|
|
|
}
|
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
env->dr[i] = dbgregs.db[i];
|
|
|
|
}
|
|
|
|
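/* DR4 and DR5 are architectural aliases of DR6 and DR7 (when CR4.DE is
 * clear), so keep the shadow copies in sync. */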
env->dr[4] = env->dr[6] = dbgregs.dr6;
|
|
|
|
env->dr[5] = env->dr[7] = dbgregs.dr7;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-19 19:21:38 +03:00
|
|
|
static int kvm_put_nested_state(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
int max_nested_state_len = kvm_max_nested_state_length();
|
|
|
|
|
2019-07-11 16:41:48 +03:00
|
|
|
if (!env->nested_state) {
|
2019-06-19 19:21:38 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-05-20 17:49:22 +03:00
|
|
|
/*
|
|
|
|
* Copy flags that are affected by reset from env->hflags and env->hflags2.
|
|
|
|
*/
|
|
|
|
if (env->hflags & HF_GUEST_MASK) {
|
|
|
|
env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
|
|
|
|
} else {
|
|
|
|
env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
|
|
|
|
}
|
2020-07-23 17:27:01 +03:00
|
|
|
|
|
|
|
/* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
|
|
|
|
if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
|
2020-05-20 17:49:22 +03:00
|
|
|
env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
|
|
|
|
} else {
|
|
|
|
env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
|
|
|
|
}
|
|
|
|
|
2019-06-19 19:21:38 +03:00
|
|
|
assert(env->nested_state->size <= max_nested_state_len);
|
|
|
|
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int kvm_get_nested_state(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
int max_nested_state_len = kvm_max_nested_state_length();
|
|
|
|
int ret;
|
|
|
|
|
2019-07-11 16:41:48 +03:00
|
|
|
if (!env->nested_state) {
|
2019-06-19 19:21:38 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* It is possible that migration restored a smaller size into
|
|
|
|
* nested_state->hdr.size than what our kernel supports.
|
|
|
|
* We preserve the migration origin's nested_state->hdr.size for the
|
|
|
|
* call to KVM_SET_NESTED_STATE, but want our next call to
|
|
|
|
* KVM_GET_NESTED_STATE to use the max size our kernel supports.
|
|
|
|
*/
|
|
|
|
env->nested_state->size = max_nested_state_len;
|
|
|
|
|
|
|
|
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-05-20 17:49:22 +03:00
|
|
|
/*
|
|
|
|
* Copy flags that are affected by reset to env->hflags and env->hflags2.
|
|
|
|
*/
|
2019-06-19 19:21:38 +03:00
|
|
|
if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
|
|
|
|
env->hflags |= HF_GUEST_MASK;
|
|
|
|
} else {
|
|
|
|
env->hflags &= ~HF_GUEST_MASK;
|
|
|
|
}
|
2020-07-23 17:27:01 +03:00
|
|
|
|
|
|
|
/* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
|
|
|
|
if (cpu_has_svm(env)) {
|
|
|
|
if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
|
|
|
|
env->hflags2 |= HF2_GIF_MASK;
|
|
|
|
} else {
|
|
|
|
env->hflags2 &= ~HF2_GIF_MASK;
|
|
|
|
}
|
2020-05-20 17:49:22 +03:00
|
|
|
}
|
2019-06-19 19:21:38 +03:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
int kvm_arch_put_registers(CPUState *cpu, int level)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:57:49 +04:00
|
|
|
X86CPU *x86_cpu = X86_CPU(cpu);
|
2008-11-05 19:29:27 +03:00
|
|
|
int ret;
|
|
|
|
|
2012-05-03 01:38:39 +04:00
|
|
|
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
|
2010-05-04 16:45:26 +04:00
|
|
|
|
2022-08-18 18:01:13 +03:00
|
|
|
/*
|
|
|
|
* Put MSR_IA32_FEATURE_CONTROL first; this ensures the VM gets out of VMX
|
|
|
|
* root operation upon vCPU reset. kvm_put_msr_feature_control() should also
|
2023-07-14 14:16:12 +03:00
|
|
|
* precede kvm_put_nested_state() when 'real' nested state is set.
|
2022-08-18 18:01:13 +03:00
|
|
|
*/
|
|
|
|
if (level >= KVM_PUT_RESET_STATE) {
|
|
|
|
ret = kvm_put_msr_feature_control(x86_cpu);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-20 17:49:22 +03:00
|
|
|
/* must be before kvm_put_nested_state so that EFER.SVME is set */
|
2021-11-01 16:22:58 +03:00
|
|
|
ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
|
2020-05-20 17:49:22 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-03-30 23:55:29 +03:00
|
|
|
if (level >= KVM_PUT_RESET_STATE) {
|
2019-07-22 07:00:08 +03:00
|
|
|
ret = kvm_put_nested_state(x86_cpu);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2013-12-17 23:05:13 +04:00
|
|
|
}
|
|
|
|
|
2015-11-24 06:33:57 +03:00
|
|
|
if (level == KVM_PUT_FULL_STATE) {
|
|
|
|
/* We don't check for kvm_arch_set_tsc_khz() errors here,
|
|
|
|
* because TSC frequency mismatch shouldn't abort migration,
|
|
|
|
* unless the user explicitly asked for a more strict TSC
|
|
|
|
* setting (e.g. using an explicit "tsc-freq" option).
|
|
|
|
*/
|
|
|
|
kvm_arch_set_tsc_khz(cpu);
|
|
|
|
}
|
|
|
|
|
2018-06-29 17:54:50 +03:00
|
|
|
#ifdef CONFIG_XEN_EMU
|
|
|
|
if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
|
|
|
|
ret = kvm_put_xen_state(cpu);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_getput_regs(x86_cpu, 1);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2008-11-05 19:29:27 +03:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_put_xsave(x86_cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2010-06-17 13:53:07 +04:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_put_xcrs(x86_cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2008-11-05 19:29:27 +03:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_put_msrs(x86_cpu, level);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2008-11-05 19:29:27 +03:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2017-07-14 18:47:36 +03:00
|
|
|
ret = kvm_put_vcpu_events(x86_cpu, level);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2010-03-01 21:10:31 +03:00
|
|
|
if (level >= KVM_PUT_RESET_STATE) {
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_put_mp_state(x86_cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2011-10-16 15:23:26 +04:00
|
|
|
return ret;
|
|
|
|
}
|
2010-03-01 21:10:31 +03:00
|
|
|
}
|
2013-08-19 21:13:42 +04:00
|
|
|
|
|
|
|
ret = kvm_put_tscdeadline_msr(x86_cpu);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_put_debugregs(x86_cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2010-03-01 21:10:29 +03:00
|
|
|
return ret;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
int kvm_arch_get_registers(CPUState *cs)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:57:49 +04:00
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
2008-11-05 19:29:27 +03:00
|
|
|
int ret;
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
|
2010-05-04 16:45:26 +04:00
|
|
|
|
2017-07-14 18:47:36 +03:00
|
|
|
ret = kvm_get_vcpu_events(cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2017-07-14 18:47:36 +03:00
|
|
|
/*
|
|
|
|
* KVM_GET_MPSTATE can modify CS and RIP, call it before
|
|
|
|
* KVM_GET_REGS and KVM_GET_SREGS.
|
|
|
|
*/
|
|
|
|
ret = kvm_get_mp_state(cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2017-07-14 18:47:36 +03:00
|
|
|
ret = kvm_getput_regs(cpu, 0);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2017-07-14 18:47:36 +03:00
|
|
|
ret = kvm_get_xsave(cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2017-07-14 18:47:36 +03:00
|
|
|
ret = kvm_get_xcrs(cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2021-11-01 16:22:58 +03:00
|
|
|
ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2017-07-14 18:47:36 +03:00
|
|
|
ret = kvm_get_msrs(cpu);
|
2011-10-16 15:23:26 +04:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2011-10-16 15:23:26 +04:00
|
|
|
}
|
2017-07-14 18:47:36 +03:00
|
|
|
ret = kvm_get_apic(cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_get_debugregs(cpu);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (ret < 0) {
|
2015-07-02 17:57:14 +03:00
|
|
|
goto out;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2019-06-19 19:21:38 +03:00
|
|
|
ret = kvm_get_nested_state(cpu);
|
|
|
|
if (ret < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
2018-06-29 17:54:50 +03:00
|
|
|
#ifdef CONFIG_XEN_EMU
|
|
|
|
if (xen_mode == XEN_EMULATE) {
|
|
|
|
ret = kvm_get_xen_state(cs);
|
|
|
|
if (ret < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2015-07-02 17:57:14 +03:00
|
|
|
ret = 0;
|
|
|
|
out:
|
|
|
|
cpu_sync_bndcs_hflags(&cpu->env);
|
|
|
|
return ret;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:57:49 +04:00
|
|
|
X86CPU *x86_cpu = X86_CPU(cpu);
|
|
|
|
CPUX86State *env = &x86_cpu->env;
|
2011-02-07 14:19:21 +03:00
|
|
|
int ret;
|
|
|
|
|
2010-12-10 10:42:53 +03:00
|
|
|
/* Inject NMI */
|
2015-06-18 19:28:42 +03:00
|
|
|
if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
|
|
|
|
if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_lock();
|
2015-06-18 19:28:42 +03:00
|
|
|
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_unlock();
|
2015-06-18 19:28:42 +03:00
|
|
|
DPRINTF("injected NMI\n");
|
|
|
|
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
|
|
|
|
if (ret < 0) {
|
|
|
|
fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
|
|
|
|
strerror(-ret));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_lock();
|
2015-06-18 19:28:42 +03:00
|
|
|
cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_unlock();
|
2015-06-18 19:28:42 +03:00
|
|
|
DPRINTF("injected SMI\n");
|
|
|
|
ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
|
|
|
|
if (ret < 0) {
|
|
|
|
fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
|
|
|
|
strerror(-ret));
|
|
|
|
}
|
2011-02-07 14:19:21 +03:00
|
|
|
}
|
2010-12-10 10:42:53 +03:00
|
|
|
}
|
|
|
|
|
2015-12-17 19:16:08 +03:00
|
|
|
if (!kvm_pic_in_kernel()) {
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_lock();
|
2015-06-18 19:47:23 +03:00
|
|
|
}
|
|
|
|
|
2013-03-08 22:21:50 +04:00
|
|
|
/* Force the VCPU out of its inner loop to process any INIT requests
|
|
|
|
* or (for userspace APIC, but it is cheap to combine the checks here)
|
|
|
|
* pending TPR access reports.
|
|
|
|
*/
|
|
|
|
if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
|
2015-06-18 19:28:42 +03:00
|
|
|
if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
|
|
|
|
!(env->hflags & HF_SMM_MASK)) {
|
|
|
|
cpu->exit_request = 1;
|
|
|
|
}
|
|
|
|
if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
|
|
|
|
cpu->exit_request = 1;
|
|
|
|
}
|
2013-03-08 22:21:50 +04:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2015-12-17 19:16:08 +03:00
|
|
|
if (!kvm_pic_in_kernel()) {
|
2011-02-07 14:19:19 +03:00
|
|
|
/* Try to inject an interrupt if the guest can accept it */
|
|
|
|
if (run->ready_for_interrupt_injection &&
|
2013-01-17 21:51:17 +04:00
|
|
|
(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
|
2011-02-07 14:19:19 +03:00
|
|
|
(env->eflags & IF_MASK)) {
|
|
|
|
int irq;
|
|
|
|
|
2013-01-17 21:51:17 +04:00
|
|
|
cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
|
2011-02-07 14:19:19 +03:00
|
|
|
irq = cpu_get_pic_interrupt(env);
|
|
|
|
if (irq >= 0) {
|
|
|
|
struct kvm_interrupt intr;
|
|
|
|
|
|
|
|
intr.irq = irq;
|
|
|
|
DPRINTF("injected interrupt %d\n", irq);
|
2012-10-31 09:06:49 +04:00
|
|
|
ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
|
2011-02-07 14:19:21 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
fprintf(stderr,
|
|
|
|
"KVM: injection failed, interrupt lost (%s)\n",
|
|
|
|
strerror(-ret));
|
|
|
|
}
|
2011-02-07 14:19:19 +03:00
|
|
|
}
|
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2011-02-07 14:19:19 +03:00
|
|
|
/* If we have an interrupt but the guest is not ready to receive an
|
|
|
|
* interrupt, request an interrupt window exit. This will
|
|
|
|
* cause a return to userspace as soon as the guest is ready to
|
|
|
|
* receive interrupts. */
|
2013-01-17 21:51:17 +04:00
|
|
|
if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
|
2011-02-07 14:19:19 +03:00
|
|
|
run->request_interrupt_window = 1;
|
|
|
|
} else {
|
|
|
|
run->request_interrupt_window = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
DPRINTF("setting tpr\n");
|
2013-12-23 13:04:02 +04:00
|
|
|
run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
|
2015-06-18 19:47:23 +03:00
|
|
|
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_unlock();
|
2011-02-07 14:19:19 +03:00
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2021-05-21 07:38:20 +03:00
|
|
|
static void kvm_rate_limit_on_bus_lock(void)
|
|
|
|
{
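    /*
     * Throttle a guest that keeps triggering bus-lock VM exits: charge one
     * token against the global rate limit and sleep for the resulting delay.
     */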
|
|
|
|
uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);
|
|
|
|
|
|
|
|
if (delay_ns) {
|
|
|
|
g_usleep(delay_ns / SCALE_US);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-08 14:30:58 +03:00
|
|
|
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
|
2012-10-31 09:57:49 +04:00
|
|
|
X86CPU *x86_cpu = X86_CPU(cpu);
|
|
|
|
CPUX86State *env = &x86_cpu->env;
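    /* Mirror the SMM state that KVM reports in run->flags into env->hflags. */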
|
|
|
|
|
2015-06-18 19:28:42 +03:00
|
|
|
if (run->flags & KVM_RUN_X86_SMM) {
|
|
|
|
env->hflags |= HF_SMM_MASK;
|
|
|
|
} else {
|
2016-11-02 22:58:25 +03:00
|
|
|
env->hflags &= ~HF_SMM_MASK;
|
2015-06-18 19:28:42 +03:00
|
|
|
}
|
2010-12-27 18:19:29 +03:00
|
|
|
if (run->if_flag) {
|
2008-11-05 19:29:27 +03:00
|
|
|
env->eflags |= IF_MASK;
|
2010-12-27 18:19:29 +03:00
|
|
|
} else {
|
2008-11-05 19:29:27 +03:00
|
|
|
env->eflags &= ~IF_MASK;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2021-05-21 07:38:20 +03:00
|
|
|
if (run->flags & KVM_RUN_X86_BUS_LOCK) {
|
|
|
|
kvm_rate_limit_on_bus_lock();
|
|
|
|
}
|
2015-06-18 19:47:23 +03:00
|
|
|
|
2023-08-07 18:57:00 +03:00
|
|
|
#ifdef CONFIG_XEN_EMU
|
hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback
The GSI callback (and later PCI_INTX) is a level triggered interrupt. It
is asserted when an event channel is delivered to vCPU0, and is supposed
to be cleared when the vcpu_info->evtchn_upcall_pending field for vCPU0
is cleared again.
Thankfully, Xen does *not* assert the GSI if the guest sets its own
evtchn_upcall_pending field; we only need to assert the GSI when we
have delivered an event for ourselves. So that's the easy part, kind of.
There's a slight complexity in that we need to hold the BQL before we
can call qemu_set_irq(), and we definitely can't do that while holding
our own port_lock (because we'll need to take that from the qemu-side
functions that the PV backend drivers will call). So if we end up
wanting to set the IRQ in a context where we *don't* already hold the
BQL, defer to a BH.
However, we *do* need to poll for the evtchn_upcall_pending flag being
cleared. In an ideal world we would poll that when the EOI happens on
the PIC/IOAPIC. That's how it works in the kernel with the VFIO eventfd
pairs — one is used to trigger the interrupt, and the other works in the
other direction to 'resample' on EOI, and trigger the first eventfd
again if the line is still active.
However, QEMU doesn't seem to do that. Even VFIO level interrupts seem
to be supported by temporarily unmapping the device's BARs from the
guest when an interrupt happens, then trapping *all* MMIO to the device
and sending the 'resample' event on *every* MMIO access until the IRQ
is cleared! Maybe in future we'll plumb the 'resample' concept through
QEMU's irq framework but for now we'll do what Xen itself does: just
check the flag on every vmexit if the upcall GSI is known to be
asserted.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
2022-12-15 23:35:24 +03:00
|
|
|
/*
|
|
|
|
* If the callback is asserted as a GSI (or PCI INTx) then check if
|
|
|
|
* vcpu_info->evtchn_upcall_pending has been cleared, and deassert
|
|
|
|
* the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC
|
|
|
|
* EOI and only resample then, exactly how the VFIO eventfd pairs
|
|
|
|
* are designed to work for level triggered interrupts.
|
|
|
|
*/
|
|
|
|
if (x86_cpu->env.xen_callback_asserted) {
|
|
|
|
kvm_xen_maybe_deassert_callback(cpu);
|
|
|
|
}
|
2023-03-08 16:05:57 +03:00
|
|
|
#endif
|
2022-12-15 23:35:24 +03:00
|
|
|
|
2015-06-18 19:47:23 +03:00
|
|
|
/* We need to protect the apic state against concurrent accesses from
|
|
|
|
* different threads in case the userspace irqchip is used. */
|
|
|
|
if (!kvm_irqchip_in_kernel()) {
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_lock();
|
2015-06-18 19:47:23 +03:00
|
|
|
}
|
2013-12-23 13:04:02 +04:00
|
|
|
cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
|
|
|
|
cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
|
2015-06-18 19:47:23 +03:00
|
|
|
if (!kvm_irqchip_in_kernel()) {
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_unlock();
|
2015-06-18 19:47:23 +03:00
|
|
|
}
|
2015-04-08 15:52:04 +03:00
|
|
|
return cpu_get_mem_attrs(env);
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
int kvm_arch_process_async_events(CPUState *cs)
|
2010-05-04 16:45:27 +04:00
|
|
|
{
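    /*
     * Handle events that have to be processed outside KVM_RUN: machine
     * checks, INIT/SIPI and, for the userspace APIC, interrupt polling,
     * wakeups and TPR access reports.  The return value indicates whether
     * the vCPU should remain halted.
     */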
|
2012-10-31 09:57:49 +04:00
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
CPUX86State *env = &cpu->env;
|
2012-05-05 03:14:41 +04:00
|
|
|
|
2013-01-17 21:51:17 +04:00
|
|
|
if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
|
2011-03-02 10:56:14 +03:00
|
|
|
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
|
|
|
|
assert(env->mcg_cap);
|
|
|
|
|
2013-01-17 21:51:17 +04:00
|
|
|
cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
|
2011-03-02 10:56:14 +03:00
|
|
|
|
2013-05-01 15:45:44 +04:00
|
|
|
kvm_cpu_synchronize_state(cs);
|
2011-03-02 10:56:14 +03:00
|
|
|
|
2019-06-19 19:21:39 +03:00
|
|
|
if (env->exception_nr == EXCP08_DBLE) {
|
2011-03-02 10:56:14 +03:00
|
|
|
/* this means triple fault */
|
2017-05-16 00:41:13 +03:00
|
|
|
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
|
2012-12-17 11:02:44 +04:00
|
|
|
cs->exit_request = 1;
|
2011-03-02 10:56:14 +03:00
|
|
|
return 0;
|
|
|
|
}
|
2019-06-19 19:21:39 +03:00
|
|
|
kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
|
2011-03-02 10:56:14 +03:00
|
|
|
env->has_error_code = 0;
|
|
|
|
|
2013-01-17 21:51:17 +04:00
|
|
|
cs->halted = 0;
|
2011-03-02 10:56:14 +03:00
|
|
|
if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
|
|
|
|
env->mp_state = KVM_MP_STATE_RUNNABLE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-18 19:28:42 +03:00
|
|
|
if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
|
|
|
|
!(env->hflags & HF_SMM_MASK)) {
|
2013-03-08 22:21:50 +04:00
|
|
|
kvm_cpu_synchronize_state(cs);
|
|
|
|
do_cpu_init(cpu);
|
|
|
|
}
|
|
|
|
|
2011-02-07 14:19:19 +03:00
|
|
|
if (kvm_irqchip_in_kernel()) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-01-17 21:51:17 +04:00
|
|
|
if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
|
|
|
|
cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
|
2013-12-23 13:04:02 +04:00
|
|
|
apic_poll_irq(cpu->apic_state);
|
2012-07-09 18:42:32 +04:00
|
|
|
}
|
2013-01-17 21:51:17 +04:00
|
|
|
if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
|
2011-03-15 14:26:19 +03:00
|
|
|
(env->eflags & IF_MASK)) ||
|
2013-01-17 21:51:17 +04:00
|
|
|
(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
|
|
|
|
cs->halted = 0;
|
2011-02-07 14:19:18 +03:00
|
|
|
}
|
2013-01-17 21:51:17 +04:00
|
|
|
if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
|
2013-05-01 15:45:44 +04:00
|
|
|
kvm_cpu_synchronize_state(cs);
|
2012-05-05 03:14:41 +04:00
|
|
|
do_cpu_sipi(cpu);
|
2010-05-04 16:45:27 +04:00
|
|
|
}
|
2013-01-17 21:51:17 +04:00
|
|
|
if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
|
|
|
|
cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
|
2013-05-01 15:45:44 +04:00
|
|
|
kvm_cpu_synchronize_state(cs);
|
2013-12-23 13:04:02 +04:00
|
|
|
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
|
2012-02-17 21:31:17 +04:00
|
|
|
env->tpr_access_type);
|
|
|
|
}
|
2010-05-04 16:45:27 +04:00
|
|
|
|
2013-01-17 21:51:17 +04:00
|
|
|
return cs->halted;
|
2010-05-04 16:45:27 +04:00
|
|
|
}
|
|
|
|
|
2012-05-03 19:00:31 +04:00
|
|
|
static int kvm_handle_halt(X86CPU *cpu)
|
2008-11-05 19:29:27 +03:00
|
|
|
{
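    /*
     * Only go to a halted state if no unmasked interrupt and no NMI is
     * pending; otherwise let the vCPU continue running.
     */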
|
2013-01-17 21:51:17 +04:00
|
|
|
CPUState *cs = CPU(cpu);
|
2012-05-03 19:00:31 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
|
2013-01-17 21:51:17 +04:00
|
|
|
if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
|
2008-11-05 19:29:27 +03:00
|
|
|
(env->eflags & IF_MASK)) &&
|
2013-01-17 21:51:17 +04:00
|
|
|
!(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
|
|
|
|
cs->halted = 1;
|
2011-03-15 14:26:28 +03:00
|
|
|
return EXCP_HLT;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2011-03-15 14:26:28 +03:00
|
|
|
return 0;
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
|
|
|
|
2012-12-01 09:18:14 +04:00
|
|
|
static int kvm_handle_tpr_access(X86CPU *cpu)
|
2012-02-17 21:31:17 +04:00
|
|
|
{
|
2012-12-01 09:18:14 +04:00
|
|
|
CPUState *cs = CPU(cpu);
|
|
|
|
struct kvm_run *run = cs->kvm_run;
|
2012-02-17 21:31:17 +04:00
|
|
|
|
2013-12-23 13:04:02 +04:00
|
|
|
apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
|
2012-02-17 21:31:17 +04:00
|
|
|
run->tpr_access.is_write ? TPR_ACCESS_WRITE
|
|
|
|
: TPR_ACCESS_READ);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2013-06-29 21:40:58 +04:00
|
|
|
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
|
2009-03-12 23:12:48 +03:00
|
|
|
{
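    /* Save the original byte at bp->pc and patch in an int3 (0xcc) opcode. */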
|
2009-09-23 03:19:02 +04:00
|
|
|
static const uint8_t int3 = 0xcc;
|
2009-03-28 20:51:40 +03:00
|
|
|
|
2013-06-29 21:40:58 +04:00
|
|
|
if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
|
|
|
|
cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
|
2009-03-12 23:12:48 +03:00
|
|
|
return -EINVAL;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-06-29 21:40:58 +04:00
|
|
|
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
|
2009-03-12 23:12:48 +03:00
|
|
|
{
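    /* Restore the saved byte, but only if the int3 we planted is still there. */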
|
|
|
|
uint8_t int3;
|
|
|
|
|
2021-03-01 14:02:44 +03:00
|
|
|
if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
if (int3 != 0xcc) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
|
2009-03-12 23:12:48 +03:00
|
|
|
return -EINVAL;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct {
|
|
|
|
target_ulong addr;
|
|
|
|
int len;
|
|
|
|
int type;
|
|
|
|
} hw_breakpoint[4];
|
|
|
|
|
|
|
|
static int nb_hw_breakpoint;
|
|
|
|
|
|
|
|
static int find_hw_breakpoint(target_ulong addr, int len, int type)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
2010-12-27 18:19:29 +03:00
|
|
|
for (n = 0; n < nb_hw_breakpoint; n++) {
|
2009-03-12 23:12:48 +03:00
|
|
|
if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
|
2010-12-27 18:19:29 +03:00
|
|
|
(hw_breakpoint[n].len == len || len == -1)) {
|
2009-03-12 23:12:48 +03:00
|
|
|
return n;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2023-08-07 18:57:00 +03:00
|
|
|
int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
|
2009-03-12 23:12:48 +03:00
|
|
|
{
|
|
|
|
switch (type) {
|
|
|
|
case GDB_BREAKPOINT_HW:
|
|
|
|
len = 1;
|
|
|
|
break;
|
|
|
|
case GDB_WATCHPOINT_WRITE:
|
|
|
|
case GDB_WATCHPOINT_ACCESS:
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
case 4:
|
|
|
|
case 8:
|
2010-12-27 18:19:29 +03:00
|
|
|
if (addr & (len - 1)) {
|
2009-03-12 23:12:48 +03:00
|
|
|
return -EINVAL;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
|
2010-12-27 18:19:29 +03:00
|
|
|
if (nb_hw_breakpoint == 4) {
|
2009-03-12 23:12:48 +03:00
|
|
|
return -ENOBUFS;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
|
|
|
if (find_hw_breakpoint(addr, len, type) >= 0) {
|
2009-03-12 23:12:48 +03:00
|
|
|
return -EEXIST;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
hw_breakpoint[nb_hw_breakpoint].addr = addr;
|
|
|
|
hw_breakpoint[nb_hw_breakpoint].len = len;
|
|
|
|
hw_breakpoint[nb_hw_breakpoint].type = type;
|
|
|
|
nb_hw_breakpoint++;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-08-07 18:57:00 +03:00
|
|
|
int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
|
2009-03-12 23:12:48 +03:00
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
|
|
|
n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
|
2010-12-27 18:19:29 +03:00
|
|
|
if (n < 0) {
|
2009-03-12 23:12:48 +03:00
|
|
|
return -ENOENT;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
nb_hw_breakpoint--;
|
|
|
|
hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void kvm_arch_remove_all_hw_breakpoints(void)
|
|
|
|
{
|
|
|
|
nb_hw_breakpoint = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static CPUWatchpoint hw_watchpoint;
|
|
|
|
|
2012-12-01 08:35:08 +04:00
|
|
|
static int kvm_handle_debug(X86CPU *cpu,
|
2012-09-08 16:43:16 +04:00
|
|
|
struct kvm_debug_exit_arch *arch_info)
|
2009-03-12 23:12:48 +03:00
|
|
|
{
|
2013-06-21 22:20:45 +04:00
|
|
|
CPUState *cs = CPU(cpu);
|
2012-12-01 08:35:08 +04:00
|
|
|
CPUX86State *env = &cpu->env;
|
2011-03-15 14:26:30 +03:00
|
|
|
int ret = 0;
|
2009-03-12 23:12:48 +03:00
|
|
|
int n;
|
|
|
|
|
2019-06-19 19:21:33 +03:00
|
|
|
if (arch_info->exception == EXCP01_DB) {
|
|
|
|
if (arch_info->dr6 & DR6_BS) {
|
2013-06-21 22:20:45 +04:00
|
|
|
if (cs->singlestep_enabled) {
|
2011-03-15 14:26:30 +03:00
|
|
|
ret = EXCP_DEBUG;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
} else {
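            /* DR6 bits 0-3 report which hardware breakpoint or watchpoint fired. */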
|
2010-12-27 18:19:29 +03:00
|
|
|
for (n = 0; n < 4; n++) {
|
|
|
|
if (arch_info->dr6 & (1 << n)) {
|
2009-03-12 23:12:48 +03:00
|
|
|
switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
|
|
|
|
case 0x0:
|
2011-03-15 14:26:30 +03:00
|
|
|
ret = EXCP_DEBUG;
|
2009-03-12 23:12:48 +03:00
|
|
|
break;
|
|
|
|
case 0x1:
|
2011-03-15 14:26:30 +03:00
|
|
|
ret = EXCP_DEBUG;
|
2013-08-26 20:23:18 +04:00
|
|
|
cs->watchpoint_hit = &hw_watchpoint;
|
2009-03-12 23:12:48 +03:00
|
|
|
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
|
|
|
|
hw_watchpoint.flags = BP_MEM_WRITE;
|
|
|
|
break;
|
|
|
|
case 0x3:
|
2011-03-15 14:26:30 +03:00
|
|
|
ret = EXCP_DEBUG;
|
2013-08-26 20:23:18 +04:00
|
|
|
cs->watchpoint_hit = &hw_watchpoint;
|
2009-03-12 23:12:48 +03:00
|
|
|
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
|
|
|
|
hw_watchpoint.flags = BP_MEM_ACCESS;
|
|
|
|
break;
|
|
|
|
}
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
}
|
2013-08-26 20:23:18 +04:00
|
|
|
} else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
|
2011-03-15 14:26:30 +03:00
|
|
|
ret = EXCP_DEBUG;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2011-03-15 14:26:30 +03:00
|
|
|
if (ret == 0) {
|
2013-08-26 20:23:18 +04:00
|
|
|
cpu_synchronize_state(cs);
|
2019-06-19 19:21:39 +03:00
|
|
|
assert(env->exception_nr == -1);
|
2010-03-01 21:10:29 +03:00
|
|
|
|
2011-03-15 14:26:30 +03:00
|
|
|
/* pass to guest */
|
2019-06-19 19:21:39 +03:00
|
|
|
kvm_queue_exception(env, arch_info->exception,
|
|
|
|
arch_info->exception == EXCP01_DB,
|
|
|
|
arch_info->dr6);
|
2012-09-08 16:43:16 +04:00
|
|
|
env->has_error_code = 0;
|
2010-03-01 21:10:29 +03:00
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
|
2011-03-15 14:26:30 +03:00
|
|
|
return ret;
|
2009-03-12 23:12:48 +03:00
|
|
|
}
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
|
2009-03-12 23:12:48 +03:00
|
|
|
{
|
|
|
|
const uint8_t type_code[] = {
|
|
|
|
[GDB_BREAKPOINT_HW] = 0x0,
|
|
|
|
[GDB_WATCHPOINT_WRITE] = 0x1,
|
|
|
|
[GDB_WATCHPOINT_ACCESS] = 0x3
|
|
|
|
};
|
|
|
|
const uint8_t len_code[] = {
|
|
|
|
[1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
|
|
|
|
};
|
|
|
|
int n;
|
|
|
|
|
2012-12-01 08:35:08 +04:00
|
|
|
if (kvm_sw_breakpoints_active(cpu)) {
|
2009-03-12 23:12:48 +03:00
|
|
|
dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2009-03-12 23:12:48 +03:00
|
|
|
if (nb_hw_breakpoint > 0) {
|
|
|
|
dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
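        /*
         * DR7 layout: bit 2n+1 globally enables breakpoint n, bits 16-17+4n
         * encode the access type and bits 18-19+4n the length; the 0x0600
         * seed sets GE (bit 9) plus the always-one reserved bit 10.
         */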
|
|
|
|
dbg->arch.debugreg[7] = 0x0600;
|
|
|
|
for (n = 0; n < nb_hw_breakpoint; n++) {
|
|
|
|
dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
|
|
|
|
dbg->arch.debugreg[7] |= (2 << (n * 2)) |
|
|
|
|
(type_code[hw_breakpoint[n].type] << (16 + n*4)) |
|
2010-12-27 17:58:23 +03:00
|
|
|
((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
|
2009-03-12 23:12:48 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-05-10 12:21:34 +04:00
|
|
|
|
2022-10-05 01:56:42 +03:00
|
|
|
static bool kvm_install_msr_filters(KVMState *s)
|
|
|
|
{
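    /*
     * Build one single-MSR filter range per registered handler (covering
     * reads and/or writes as requested) so those accesses exit to userspace,
     * while every other MSR keeps the default-allow behaviour.
     */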
|
|
|
|
uint64_t zero = 0;
|
|
|
|
struct kvm_msr_filter filter = {
|
|
|
|
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
|
|
|
|
};
|
|
|
|
int r, i, j = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
|
|
|
|
KVMMSRHandlers *handler = &msr_handlers[i];
|
|
|
|
if (handler->msr) {
|
|
|
|
struct kvm_msr_filter_range *range = &filter.ranges[j++];
|
|
|
|
|
|
|
|
*range = (struct kvm_msr_filter_range) {
|
|
|
|
.flags = 0,
|
|
|
|
.nmsrs = 1,
|
|
|
|
.base = handler->msr,
|
|
|
|
.bitmap = (__u8 *)&zero,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (handler->rdmsr) {
|
|
|
|
range->flags |= KVM_MSR_FILTER_READ;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (handler->wrmsr) {
|
|
|
|
range->flags |= KVM_MSR_FILTER_WRITE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
|
|
|
|
if (r) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
|
|
|
|
QEMUWRMSRHandler *wrmsr)
|
|
|
|
{
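    /*
     * Find a free slot, record the handler and reinstall the in-kernel MSR
     * filter; returns false if all slots are taken or installation fails.
     */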
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
|
|
|
|
if (!msr_handlers[i].msr) {
|
|
|
|
msr_handlers[i] = (KVMMSRHandlers) {
|
|
|
|
.msr = msr,
|
|
|
|
.rdmsr = rdmsr,
|
|
|
|
.wrmsr = wrmsr,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!kvm_install_msr_filters(s)) {
|
|
|
|
msr_handlers[i] = (KVMMSRHandlers) { };
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
|
|
|
|
{
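    /*
     * MSR filter exits can only happen for MSRs that kvm_filter_msr()
     * registered a handler for, so failing to find one here is a bug.
     */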
|
|
|
|
int i;
|
|
|
|
bool r;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
|
|
|
|
KVMMSRHandlers *handler = &msr_handlers[i];
|
|
|
|
if (run->msr.index == handler->msr) {
|
|
|
|
if (handler->rdmsr) {
|
|
|
|
r = handler->rdmsr(cpu, handler->msr,
|
|
|
|
(uint64_t *)&run->msr.data);
|
|
|
|
run->msr.error = r ? 0 : 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
bool r;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
|
|
|
|
KVMMSRHandlers *handler = &msr_handlers[i];
|
|
|
|
if (run->msr.index == handler->msr) {
|
|
|
|
if (handler->wrmsr) {
|
|
|
|
r = handler->wrmsr(cpu, handler->msr, run->msr.data);
|
|
|
|
run->msr.error = r ? 0 : 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(false);
|
|
|
|
}
|
|
|
|
|
2021-07-19 14:21:16 +03:00
|
|
|
static bool has_sgx_provisioning;
|
|
|
|
|
|
|
|
static bool __kvm_enable_sgx_provisioning(KVMState *s)
|
|
|
|
{
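    /*
     * Grant the guest the SGX PROVISIONKEY attribute: open /dev/sgx_provision
     * and hand the descriptor to KVM via KVM_CAP_SGX_ATTRIBUTE.
     */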
|
|
|
|
int fd, ret;
|
|
|
|
|
|
|
|
if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
|
|
|
|
if (fd < 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
|
|
|
|
if (ret) {
|
|
|
|
error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
close(fd);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool kvm_enable_sgx_provisioning(KVMState *s)
|
|
|
|
{
|
|
|
|
return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
|
|
|
|
}
|
|
|
|
|
2011-03-15 14:26:29 +03:00
|
|
|
static bool host_supports_vmx(void)
|
|
|
|
{
|
|
|
|
uint32_t ecx, unused;
|
|
|
|
|
|
|
|
host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
|
|
|
|
return ecx & CPUID_EXT_VMX;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define VMX_INVALID_GUEST_STATE 0x80000021
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
|
2011-03-15 14:26:29 +03:00
|
|
|
{
|
2012-10-31 09:57:49 +04:00
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
2011-03-15 14:26:29 +03:00
|
|
|
uint64_t code;
|
|
|
|
int ret;
|
2022-09-29 10:20:14 +03:00
|
|
|
bool ctx_invalid;
|
|
|
|
char str[256];
|
|
|
|
KVMState *state;
|
2011-03-15 14:26:29 +03:00
|
|
|
|
|
|
|
switch (run->exit_reason) {
|
|
|
|
case KVM_EXIT_HLT:
|
|
|
|
DPRINTF("handle_hlt\n");
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_lock();
|
2012-05-03 19:00:31 +04:00
|
|
|
ret = kvm_handle_halt(cpu);
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_unlock();
|
2011-03-15 14:26:29 +03:00
|
|
|
break;
|
|
|
|
case KVM_EXIT_SET_TPR:
|
|
|
|
ret = 0;
|
|
|
|
break;
|
2012-02-17 21:31:17 +04:00
|
|
|
case KVM_EXIT_TPR_ACCESS:
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_lock();
|
2012-12-01 09:18:14 +04:00
|
|
|
ret = kvm_handle_tpr_access(cpu);
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_unlock();
|
2012-02-17 21:31:17 +04:00
|
|
|
break;
|
2011-03-15 14:26:29 +03:00
|
|
|
case KVM_EXIT_FAIL_ENTRY:
|
|
|
|
code = run->fail_entry.hardware_entry_failure_reason;
|
|
|
|
fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
|
|
|
|
code);
|
|
|
|
if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
|
|
|
|
fprintf(stderr,
|
2011-11-15 02:06:23 +04:00
|
|
|
"\nIf you're running a guest on an Intel machine without "
|
2011-03-15 14:26:29 +03:00
|
|
|
"unrestricted mode\n"
|
|
|
|
"support, the failure can be most likely due to the guest "
|
|
|
|
"entering an invalid\n"
|
|
|
|
"state for Intel VT. For example, the guest maybe running "
|
|
|
|
"in big real mode\n"
|
|
|
|
"which is not supported on less recent Intel processors."
|
|
|
|
"\n\n");
|
|
|
|
}
|
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
case KVM_EXIT_EXCEPTION:
|
|
|
|
fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
|
|
|
|
run->ex.exception, run->ex.error_code);
|
|
|
|
ret = -1;
|
|
|
|
break;
|
2011-03-15 14:26:30 +03:00
|
|
|
case KVM_EXIT_DEBUG:
|
|
|
|
DPRINTF("kvm_exit_debug\n");
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_lock();
|
2012-12-01 08:35:08 +04:00
|
|
|
ret = kvm_handle_debug(cpu, &run->debug.arch);
|
2024-01-02 18:35:25 +03:00
|
|
|
bql_unlock();
|
2011-03-15 14:26:30 +03:00
|
|
|
break;
|
2015-11-10 15:52:43 +03:00
|
|
|
case KVM_EXIT_HYPERV:
|
|
|
|
ret = kvm_hv_handle_exit(cpu, &run->hyperv);
|
|
|
|
break;
|
2015-12-17 19:16:08 +03:00
|
|
|
case KVM_EXIT_IOAPIC_EOI:
|
|
|
|
ioapic_eoi_broadcast(run->eoi.vector);
|
|
|
|
ret = 0;
|
|
|
|
break;
|
2021-05-21 07:38:20 +03:00
|
|
|
case KVM_EXIT_X86_BUS_LOCK:
|
|
|
|
/* already handled in kvm_arch_post_run */
|
|
|
|
ret = 0;
|
|
|
|
break;
|
2022-09-29 10:20:14 +03:00
|
|
|
case KVM_EXIT_NOTIFY:
|
|
|
|
ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID);
|
|
|
|
state = KVM_STATE(current_accel());
|
|
|
|
sprintf(str, "Encounter a notify exit with %svalid context in"
|
|
|
|
" guest. There can be possible misbehaves in guest."
|
|
|
|
" Please have a look.", ctx_invalid ? "in" : "");
|
|
|
|
if (ctx_invalid ||
|
|
|
|
state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) {
|
|
|
|
warn_report("KVM internal error: %s", str);
|
|
|
|
ret = -1;
|
|
|
|
} else {
|
|
|
|
warn_report_once("KVM: %s", str);
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
break;
|
2022-10-05 01:56:42 +03:00
|
|
|
case KVM_EXIT_X86_RDMSR:
|
|
|
|
/* We only enable MSR filtering; any other exit is bogus */
|
|
|
|
assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
|
|
|
|
ret = kvm_handle_rdmsr(cpu, run);
|
|
|
|
break;
|
|
|
|
case KVM_EXIT_X86_WRMSR:
|
|
|
|
/* We only enable MSR filtering; any other exit is bogus */
|
|
|
|
assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
|
|
|
|
ret = kvm_handle_wrmsr(cpu, run);
|
|
|
|
break;
|
2018-06-13 17:14:31 +03:00
|
|
|
#ifdef CONFIG_XEN_EMU
|
|
|
|
case KVM_EXIT_XEN:
|
|
|
|
ret = kvm_xen_handle_exit(cpu, &run->xen);
|
|
|
|
break;
|
|
|
|
#endif
|
2011-03-15 14:26:29 +03:00
|
|
|
default:
|
|
|
|
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
|
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-10-31 09:57:49 +04:00
|
|
|
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
|
2010-05-10 12:21:34 +04:00
|
|
|
{
|
2012-10-31 09:57:49 +04:00
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
|
2013-05-01 15:45:44 +04:00
|
|
|
kvm_cpu_synchronize_state(cs);
|
2010-12-27 18:19:29 +03:00
|
|
|
return !(env->cr[0] & CR0_PE_MASK) ||
|
|
|
|
((env->segs[R_CS].selector & 3) != 3);
|
2010-05-10 12:21:34 +04:00
|
|
|
}
|
2011-10-15 13:49:47 +04:00
|
|
|
|
|
|
|
void kvm_arch_init_irq_routing(KVMState *s)
|
|
|
|
{
|
2012-07-26 18:35:14 +04:00
|
|
|
/* We know at this point that we're using the in-kernel
|
2012-07-26 18:35:15 +04:00
|
|
|
* irqchip, so we can use irqfds, and on x86 we know
|
2012-07-26 18:35:16 +04:00
|
|
|
* we can use msi via irqfd and GSI routing.
|
2012-07-26 18:35:14 +04:00
|
|
|
*/
|
2012-07-26 18:35:15 +04:00
|
|
|
kvm_msi_via_irqfd_allowed = true;
|
2012-07-26 18:35:16 +04:00
|
|
|
kvm_gsi_routing_allowed = true;
|
2015-12-17 19:16:08 +03:00
|
|
|
|
|
|
|
if (kvm_irqchip_is_split()) {
|
2022-02-22 17:11:16 +03:00
|
|
|
KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
|
2015-12-17 19:16:08 +03:00
|
|
|
int i;
|
|
|
|
|
|
|
|
/* If the ioapic is in QEMU and the lapics are in KVM, reserve
|
|
|
|
MSI routes for signaling interrupts to the local apics. */
|
|
|
|
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
|
2022-02-22 17:11:16 +03:00
|
|
|
if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) {
|
2015-12-17 19:16:08 +03:00
|
|
|
error_report("Could not enable split IRQ mode.");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
2022-02-22 17:11:16 +03:00
|
|
|
kvm_irqchip_commit_route_changes(&c);
|
2015-12-17 19:16:08 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-13 13:17:12 +03:00
|
|
|
int kvm_arch_irqchip_create(KVMState *s)
|
2015-12-17 19:16:08 +03:00
|
|
|
{
|
|
|
|
int ret;
|
2019-11-13 13:17:12 +03:00
|
|
|
if (kvm_kernel_irqchip_split()) {
|
2015-12-17 19:16:08 +03:00
|
|
|
ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
|
|
|
|
if (ret) {
|
2016-08-03 14:37:51 +03:00
|
|
|
error_report("Could not enable split irqchip mode: %s",
|
2015-12-17 19:16:08 +03:00
|
|
|
strerror(-ret));
|
|
|
|
exit(1);
|
|
|
|
} else {
|
|
|
|
DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
|
|
|
|
kvm_split_irqchip = true;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
2011-10-15 13:49:47 +04:00
|
|
|
}
|
2012-08-27 10:28:40 +04:00
|
|
|
|
2020-10-05 17:18:19 +03:00
|
|
|
uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
|
|
|
|
{
|
|
|
|
CPUX86State *env;
|
|
|
|
uint64_t ext_id;
|
|
|
|
|
|
|
|
if (!first_cpu) {
|
|
|
|
return address;
|
|
|
|
}
|
|
|
|
env = &X86_CPU(first_cpu)->env;
|
|
|
|
if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
|
|
|
|
return address;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the remappable format bit is set, or the upper bits are
|
|
|
|
* already set in address_hi, or the low extended bits aren't
|
|
|
|
* there anyway, do nothing.
|
|
|
|
*/
|
|
|
|
ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
|
|
|
|
if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
|
|
|
|
return address;
|
|
|
|
}
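    /*
     * Shift the extended destination ID bits out of the low address word and
     * into the high bits (<< 35) used by the KVM_FEATURE_MSI_EXT_DEST_ID
     * paravirtual extension.
     */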
|
|
|
|
|
|
|
|
address &= ~ext_id;
|
|
|
|
address |= ext_id << 35;
|
|
|
|
return address;
|
|
|
|
}
|
|
|
|
|
2015-01-09 11:04:40 +03:00
|
|
|
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
|
2015-10-15 16:44:52 +03:00
|
|
|
uint64_t address, uint32_t data, PCIDevice *dev)
|
2015-01-09 11:04:40 +03:00
|
|
|
{
|
2016-07-14 08:56:25 +03:00
|
|
|
X86IOMMUState *iommu = x86_iommu_get_default();
|
|
|
|
|
|
|
|
if (iommu) {
|
2020-08-25 22:20:42 +03:00
|
|
|
X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
|
2016-07-14 08:56:25 +03:00
|
|
|
|
2020-10-05 17:18:19 +03:00
|
|
|
if (class->int_remap) {
|
|
|
|
int ret;
|
|
|
|
MSIMessage src, dst;
|
2018-08-27 11:47:51 +03:00
|
|
|
|
2020-10-05 17:18:19 +03:00
|
|
|
src.address = route->u.msi.address_hi;
|
|
|
|
src.address <<= VTD_MSI_ADDR_HI_SHIFT;
|
|
|
|
src.address |= route->u.msi.address_lo;
|
|
|
|
src.data = route->u.msi.data;
|
2016-07-14 08:56:25 +03:00
|
|
|
|
2020-10-05 17:18:19 +03:00
|
|
|
ret = class->int_remap(iommu, &src, &dst, dev ? \
|
|
|
|
pci_requester_id(dev) : \
|
|
|
|
X86_IOMMU_SID_INVALID);
|
|
|
|
if (ret) {
|
|
|
|
trace_kvm_x86_fixup_msi_error(route->gsi);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2023-07-14 14:16:12 +03:00
|
|
|
* Handle an untranslated compatibility format interrupt with an
|
2020-10-05 17:18:19 +03:00
|
|
|
* extended destination ID in the low bits 11-5. */
|
|
|
|
dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
|
2016-07-14 08:56:25 +03:00
|
|
|
|
2020-10-05 17:18:19 +03:00
|
|
|
route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
|
|
|
|
route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
|
|
|
|
route->u.msi.data = dst.data;
|
|
|
|
return 0;
|
|
|
|
}
|
2016-07-14 08:56:25 +03:00
|
|
|
}
|
|
|
|
|
2023-01-14 02:35:46 +03:00
|
|
|
#ifdef CONFIG_XEN_EMU
|
|
|
|
if (xen_mode == XEN_EMULATE) {
|
|
|
|
int handled = xen_evtchn_translate_pirq_msi(route, address, data);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If it was a PIRQ and successfully routed (handled == 0) or it was
|
|
|
|
* an error (handled < 0), return. If it wasn't a PIRQ, keep going.
|
|
|
|
*/
|
|
|
|
if (handled <= 0) {
|
|
|
|
return handled;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-10-05 17:18:19 +03:00
|
|
|
address = kvm_swizzle_msi_ext_dest_id(address);
|
|
|
|
route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
|
|
|
|
route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
|
2015-01-09 11:04:40 +03:00
|
|
|
return 0;
|
|
|
|
}
|
2015-06-02 16:56:23 +03:00
|
|
|
|
2016-07-14 08:56:31 +03:00
|
|
|
typedef struct MSIRouteEntry MSIRouteEntry;
|
|
|
|
|
|
|
|
struct MSIRouteEntry {
|
|
|
|
PCIDevice *dev; /* Device pointer */
|
|
|
|
int vector; /* MSI/MSIX vector index */
|
|
|
|
int virq; /* Virtual IRQ index */
|
|
|
|
QLIST_ENTRY(MSIRouteEntry) list;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* List of used GSI routes */
|
|
|
|
static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
|
|
|
|
QLIST_HEAD_INITIALIZER(msi_route_list);
|
|
|
|
|
2023-01-14 02:35:46 +03:00
|
|
|
void kvm_update_msi_routes_all(void *private, bool global,
|
|
|
|
uint32_t index, uint32_t mask)
|
2016-07-14 08:56:32 +03:00
|
|
|
{
|
2019-01-16 06:08:15 +03:00
|
|
|
int cnt = 0, vector;
|
2016-07-14 08:56:32 +03:00
|
|
|
MSIRouteEntry *entry;
|
|
|
|
MSIMessage msg;
|
2017-05-09 09:00:44 +03:00
|
|
|
PCIDevice *dev;
|
|
|
|
|
2016-07-14 08:56:32 +03:00
|
|
|
/* TODO: explicit route update */
|
|
|
|
QLIST_FOREACH(entry, &msi_route_list, list) {
|
|
|
|
cnt++;
|
2019-01-16 06:08:15 +03:00
|
|
|
vector = entry->vector;
|
2017-05-09 09:00:44 +03:00
|
|
|
dev = entry->dev;
|
2019-01-16 06:08:15 +03:00
|
|
|
if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
|
|
|
|
msg = msix_get_message(dev, vector);
|
|
|
|
} else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
|
|
|
|
msg = msi_get_message(dev, vector);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Either MSI/MSIX is disabled for the device, or the
|
|
|
|
* specific message was masked out. Skip this one.
|
|
|
|
*/
|
2017-05-09 09:00:44 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
|
2016-07-14 08:56:32 +03:00
|
|
|
}
|
2016-07-14 08:56:33 +03:00
|
|
|
kvm_irqchip_commit_routes(kvm_state);
|
2016-07-14 08:56:32 +03:00
|
|
|
trace_kvm_x86_update_msi_routes(cnt);
|
|
|
|
}
|
|
|
|
|
2016-07-14 08:56:31 +03:00
|
|
|
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
|
|
|
|
int vector, PCIDevice *dev)
|
|
|
|
{
|
2016-07-14 08:56:32 +03:00
|
|
|
static bool notify_list_inited = false;
|
2016-07-14 08:56:31 +03:00
|
|
|
MSIRouteEntry *entry;
|
|
|
|
|
|
|
|
if (!dev) {
|
|
|
|
/* These are (possibly) IOAPIC routes only used for split
|
|
|
|
* kernel irqchip mode, whereas we only keep track of routes for
|
|
|
|
* PCI devices. */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
entry = g_new0(MSIRouteEntry, 1);
|
|
|
|
entry->dev = dev;
|
|
|
|
entry->vector = vector;
|
|
|
|
entry->virq = route->gsi;
|
|
|
|
QLIST_INSERT_HEAD(&msi_route_list, entry, list);
|
|
|
|
|
|
|
|
trace_kvm_x86_add_msi_route(route->gsi);
|
2016-07-14 08:56:32 +03:00
|
|
|
|
|
|
|
if (!notify_list_inited) {
|
|
|
|
/* The first time we add a route, register ourselves on the
|
|
|
|
* IOMMU's IEC notify list if needed. */
|
|
|
|
X86IOMMUState *iommu = x86_iommu_get_default();
|
|
|
|
if (iommu) {
|
|
|
|
x86_iommu_iec_register_notifier(iommu,
|
|
|
|
kvm_update_msi_routes_all,
|
|
|
|
NULL);
|
|
|
|
}
|
|
|
|
notify_list_inited = true;
|
|
|
|
}
|
2016-07-14 08:56:31 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int kvm_arch_release_virq_post(int virq)
|
|
|
|
{
|
|
|
|
MSIRouteEntry *entry, *next;
|
|
|
|
QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
|
|
|
|
if (entry->virq == virq) {
|
|
|
|
trace_kvm_x86_remove_msi_route(virq);
|
|
|
|
QLIST_REMOVE(entry, list);
|
2017-12-25 05:47:04 +03:00
|
|
|
g_free(entry);
|
2016-07-14 08:56:31 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2015-01-09 11:04:40 +03:00
|
|
|
return 0;
|
|
|
|
}
|
2015-06-02 16:56:23 +03:00
|
|
|
|
|
|
|
int kvm_arch_msi_data_to_gsi(uint32_t data)
|
|
|
|
{
|
|
|
|
abort();
|
|
|
|
}
|
2020-06-30 16:49:27 +03:00
|
|
|
|
|
|
|
bool kvm_has_waitpkg(void)
|
|
|
|
{
|
|
|
|
return has_msr_umwait;
|
|
|
|
}
|
2021-01-26 20:36:47 +03:00
|
|
|
|
|
|
|
bool kvm_arch_cpu_check_are_resettable(void)
|
|
|
|
{
|
|
|
|
return !sev_es_enabled();
|
|
|
|
}
|
2022-02-17 09:04:29 +03:00
|
|
|
|
|
|
|
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
|
|
|
|
|
|
|
|
void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
|
|
|
|
{
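    /*
     * Dynamically enabled XSAVE components (e.g. AMX tile data) must be
     * requested from the kernel with arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM)
     * before they can be exposed to the guest; do that for every requested
     * and supported bit in the mask.
     */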
|
|
|
|
KVMState *s = kvm_state;
|
|
|
|
uint64_t supported;
|
|
|
|
|
|
|
|
mask &= XSTATE_DYNAMIC_MASK;
|
|
|
|
if (!mask) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
|
|
|
|
* ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
|
|
|
|
* about them already because they are not supported features.
|
|
|
|
*/
|
|
|
|
supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
|
|
|
|
supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
|
|
|
|
mask &= supported;
|
|
|
|
|
|
|
|
while (mask) {
|
|
|
|
int bit = ctz64(mask);
|
|
|
|
int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
|
|
|
|
if (rc) {
|
|
|
|
/*
|
|
|
|
* Older kernel versions (<5.17) do not support
|
|
|
|
* ARCH_REQ_XCOMP_GUEST_PERM, but also do not return
|
|
|
|
* any dynamic feature from kvm_arch_get_supported_cpuid.
|
|
|
|
*/
|
|
|
|
warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
|
|
|
|
"for feature bit %d", bit);
|
|
|
|
}
|
|
|
|
mask &= ~BIT_ULL(bit);
|
|
|
|
}
|
|
|
|
}
|
2022-09-29 10:20:12 +03:00
|
|
|
|
2022-09-29 10:20:14 +03:00
|
|
|
static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
return s->notify_vmexit;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
|
|
|
|
if (s->fd != -1) {
|
|
|
|
error_setg(errp, "Cannot set properties after the accelerator has been initialized");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->notify_vmexit = value;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
uint32_t value = s->notify_window;
|
|
|
|
|
|
|
|
visit_type_uint32(v, name, &value, errp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
uint32_t value;
|
|
|
|
|
|
|
|
if (s->fd != -1) {
|
|
|
|
error_setg(errp, "Cannot set properties after the accelerator has been initialized");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-11-21 11:50:53 +03:00
|
|
|
if (!visit_type_uint32(v, name, &value, errp)) {
|
2022-09-29 10:20:14 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->notify_window = value;
|
|
|
|
}
|
|
|
|
|
2022-12-03 20:51:13 +03:00
|
|
|
static void kvm_arch_get_xen_version(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
uint32_t value = s->xen_version;
|
|
|
|
|
|
|
|
visit_type_uint32(v, name, &value, errp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void kvm_arch_set_xen_version(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
Error *error = NULL;
|
|
|
|
uint32_t value;
|
|
|
|
|
|
|
|
visit_type_uint32(v, name, &value, &error);
|
|
|
|
if (error) {
|
|
|
|
error_propagate(errp, error);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->xen_version = value;
|
|
|
|
if (value && xen_mode == XEN_DISABLED) {
|
|
|
|
xen_mode = XEN_EMULATE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-12-16 19:27:00 +03:00
|
|
|
static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
uint16_t value = s->xen_gnttab_max_frames;
|
|
|
|
|
|
|
|
visit_type_uint16(v, name, &value, errp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
Error *error = NULL;
|
|
|
|
uint16_t value;
|
|
|
|
|
|
|
|
visit_type_uint16(v, name, &value, &error);
|
|
|
|
if (error) {
|
|
|
|
error_propagate(errp, error);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->xen_gnttab_max_frames = value;
|
|
|
|
}
|
|
|
|
|
2023-01-18 17:36:23 +03:00
|
|
|
static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
uint16_t value = s->xen_evtchn_max_pirq;
|
|
|
|
|
|
|
|
visit_type_uint16(v, name, &value, errp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
KVMState *s = KVM_STATE(obj);
|
|
|
|
Error *error = NULL;
|
|
|
|
uint16_t value;
|
|
|
|
|
|
|
|
visit_type_uint16(v, name, &value, &error);
|
|
|
|
if (error) {
|
|
|
|
error_propagate(errp, error);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->xen_evtchn_max_pirq = value;
|
|
|
|
}
|
|
|
|
|
2022-09-29 10:20:12 +03:00
|
|
|
void kvm_arch_accel_class_init(ObjectClass *oc)
|
|
|
|
{
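    /*
     * These are accelerator properties, i.e. they can be set on the command
     * line along the lines of (illustrative values):
     *   -accel kvm,notify-window=0,xen-version=0x4000a
     */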
|
2022-09-29 10:20:14 +03:00
|
|
|
object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption",
|
|
|
|
&NotifyVmexitOption_lookup,
|
|
|
|
kvm_arch_get_notify_vmexit,
|
|
|
|
kvm_arch_set_notify_vmexit);
|
|
|
|
object_class_property_set_description(oc, "notify-vmexit",
|
|
|
|
"Enable notify VM exit");
|
|
|
|
|
|
|
|
object_class_property_add(oc, "notify-window", "uint32",
|
|
|
|
kvm_arch_get_notify_window,
|
|
|
|
kvm_arch_set_notify_window,
|
|
|
|
NULL, NULL);
|
|
|
|
object_class_property_set_description(oc, "notify-window",
|
|
|
|
"Clock cycles without an event window "
|
|
|
|
"after which a notification VM exit occurs");
|
2022-12-03 20:51:13 +03:00
|
|
|
|
|
|
|
object_class_property_add(oc, "xen-version", "uint32",
|
|
|
|
kvm_arch_get_xen_version,
|
|
|
|
kvm_arch_set_xen_version,
|
|
|
|
NULL, NULL);
|
|
|
|
object_class_property_set_description(oc, "xen-version",
|
|
|
|
"Xen version to be emulated "
|
|
|
|
"(in XENVER_version form "
|
|
|
|
"e.g. 0x4000a for 4.10)");
|
2022-12-16 19:27:00 +03:00
|
|
|
|
|
|
|
object_class_property_add(oc, "xen-gnttab-max-frames", "uint16",
|
|
|
|
kvm_arch_get_xen_gnttab_max_frames,
|
|
|
|
kvm_arch_set_xen_gnttab_max_frames,
|
|
|
|
NULL, NULL);
|
|
|
|
object_class_property_set_description(oc, "xen-gnttab-max-frames",
|
|
|
|
"Maximum number of grant table frames");
|
2023-01-18 17:36:23 +03:00
|
|
|
|
|
|
|
object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16",
|
|
|
|
kvm_arch_get_xen_evtchn_max_pirq,
|
|
|
|
kvm_arch_set_xen_evtchn_max_pirq,
|
|
|
|
NULL, NULL);
|
|
|
|
object_class_property_set_description(oc, "xen-evtchn-max-pirq",
|
|
|
|
"Maximum number of Xen PIRQs");
|
2022-09-29 10:20:12 +03:00
|
|
|
}
|
2022-08-25 05:52:46 +03:00
|
|
|
|
|
|
|
void kvm_set_max_apic_id(uint32_t max_apic_id)
|
|
|
|
{
|
|
|
|
kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id);
|
|
|
|
}
|