/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "hyperv.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"

#include "exec/ioport.h"
#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "migration/migration.h"
#include "exec/memattrs.h"
#include "trace.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_xss;

static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;

static bool has_msr_mcg_ext_ctl;

/* Cache for KVM_GET_SUPPORTED_CPUID: the ioctl is assumed to be stateless
 * (it runs before XCR0 and IA32_XSS have guest values), so its result can
 * be cached instead of trapping into the kernel on every query. */
static struct kvm_cpuid2 *cpuid_cache;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}
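
/*
 * A note on MEMORIZE() below (explanatory, not from the original sources):
 * it relies on the GCC/clang statement-expression extension and expands to
 * a "return" from the *calling* function, so it may only be used in a
 * function whose return type matches _result, paired with a file-scope
 * cache variable.  A hypothetical caller mirrors kvm_enable_x2apic():
 *
 *     static bool probe_cache;
 *     bool probe(void)
 *     {
 *         return MEMORIZE(do_expensive_probe(), probe_cache);
 *     }
 */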
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}
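
/*
 * Read the guest's MSR_IA32_TSC with a single-entry KVM_GET_MSRS call.
 * While the VM is stopped the TSC cannot change, so env->tsc_valid caches
 * the value until the next run-state transition (cpu_update_state() below
 * clears the flag when the VM starts running again).
 */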
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}
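
/*
 * Probe KVM for the supported-CPUID table: the kernel fails the ioctl with
 * E2BIG when the buffer is too small, and a success that fills the buffer
 * completely is conservatively treated as truncated as well.  The caller,
 * get_supported_cpuid(), doubles the buffer size until everything fits.
 */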
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}
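
/*
 * Return the feature bits KVM can supply for a given CPUID leaf/register:
 * start from KVM_GET_SUPPORTED_CPUID, then apply the QEMU-side fixups below.
 */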
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
                kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
    }

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}

/*
 * Once the host kernel has marked a page as hardware-poisoned, any further
 * access raises SIGBUS.  A guest may reboot after such an error, but the
 * backing host page stays poisoned, so remember every poisoned guest page
 * here and remap it (allocating a fresh host page) on reset; see
 * kvm_unpoison_all() below.
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}
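
/*
 * Inject a machine-check event for the guest page at @paddr.  The event is
 * broadcast to every VCPU when the guest CPU model advertises MCA broadcast
 * semantics, except when the guest has enabled LMCE, in which case it is
 * delivered only to the affected VCPU.
 */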
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}
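
/*
 * SIGBUS handler for memory errors that hit guest RAM: record the poisoned
 * page so it can be remapped on reset, then forward the error to the guest
 * as an MCE.  An error in QEMU's own memory is fatal unless it is merely
 * action-optional (BUS_MCEERR_AO).
 */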
int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr == RAM_ADDR_INVALID ||
            !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    X86CPU *cpu = X86_CPU(first_cpu);

    if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        hwaddr paddr;

        /* Hope we are lucky for AO MCE */
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr == RAM_ADDR_INVALID ||
            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
                                                addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
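
/*
 * On kernels without VCPU-event support, a pending machine-check exception
 * cannot be injected through the usual exception path; convert it back into
 * a KVM_X86_SET_MCE call using the first MCE bank that holds a valid status.
 */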
static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_hypercall_available(X86CPU *cpu)
{
    return cpu->hyperv_vapic ||
           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
}

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           (hyperv_hypercall_available(cpu) ||
            cpu->hyperv_time ||
            cpu->hyperv_relaxed_timing ||
            cpu->hyperv_crash ||
            cpu->hyperv_reset ||
            cpu->hyperv_vpindex ||
            cpu->hyperv_runtime ||
            cpu->hyperv_synic ||
            cpu->hyperv_stimer);
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r;

    if (!env->tsc_khz) {
        return 0;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;
    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                       -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            error_report("warning: TSC frequency mismatch between "
                         "VM (%" PRId64 " kHz) and host (%d kHz), "
                         "and TSC scaling unavailable",
                         env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}
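
/*
 * Translate the cpu->hyperv_* properties into HYPERV_CPUID_FEATURES bits
 * (FEAT_HYPERV_EAX/EBX/EDX), failing when the kernel lacks the required
 * MSRs or capabilities.
 */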
static int hyperv_handle_properties(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cpu->hyperv_time &&
            kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
        cpu->hyperv_time = false;
    }

    if (cpu->hyperv_relaxed_timing) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
    }
    if (cpu->hyperv_vapic) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
    }
    if (cpu->hyperv_time) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= 0x200;
    }
    if (cpu->hyperv_crash && has_msr_hv_crash) {
        env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
    }
    env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
    if (cpu->hyperv_reset && has_msr_hv_reset) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE;
    }
    if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE;
    }
    if (cpu->hyperv_runtime && has_msr_hv_runtime) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
    }
    if (cpu->hyperv_synic) {
        int sint;

        if (!has_msr_hv_synic ||
            kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
            fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
            return -ENOSYS;
        }

        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE;
        env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
        for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
            env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
        }
    }
    if (cpu->hyperv_stimer) {
        if (!has_msr_hv_stimer) {
            fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE;
    }
    return 0;
}

static Error *invtsc_mig_blocker;
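
/* Fixed capacity of the cpuid_data table built by kvm_arch_init_vcpu()
 * below; overflowing it is a programming error and aborts QEMU. */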
#define KVM_MAX_CPUID_ENTRIES  100

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } QEMU_PACKED cpuid_data;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    if (hyperv_enabled(cpu)) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
        if (!cpu->hyperv_vendor_id) {
            memcpy(signature, "Microsoft Hv", 12);
        } else {
            size_t len = strlen(cpu->hyperv_vendor_id);

            if (len > 12) {
                error_report("hv-vendor-id truncated to 12 characters");
                len = 12;
            }
            memset(signature, 0, 12);
            memcpy(signature, cpu->hyperv_vendor_id, len);
        }
        c->eax = HYPERV_CPUID_MIN;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_INTERFACE;
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_FEATURES;
        r = hyperv_handle_properties(cs);
        if (r) {
            return r;
        }
        c->eax = env->features[FEAT_HYPERV_EAX];
        c->ebx = env->features[FEAT_HYPERV_EBX];
        c->edx = env->features[FEAT_HYPERV_EDX];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = cpu->hyperv_spinlock_attempts;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
    }
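
    /*
     * Enumerate the basic CPUID leaves below: leaf 2 is "stateful" (each
     * read returns the next chunk of data), and leaves 4, 0xb and 0xd take
     * an index in ECX, so each of those expands into multiple table entries.
     */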
|
2013-02-20 06:27:20 +04:00
|
|
|
|
2009-04-18 00:50:54 +04:00
|
|
|
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
for (i = 0; i <= limit; i++) {
|
2013-01-28 15:49:26 +04:00
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "unsupported level value: 0x%x\n", limit);
|
|
|
|
abort();
|
|
|
|
}
|
2010-01-13 16:25:06 +03:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
2009-02-09 18:50:31 +03:00
|
|
|
|
|
|
|
switch (i) {
|
2009-02-09 18:50:36 +03:00
|
|
|
case 2: {
|
|
|
|
/* Keep reading function 2 till all the input is received */
|
|
|
|
int times;
|
|
|
|
|
|
|
|
c->function = i;
|
2009-04-18 00:50:54 +04:00
|
|
|
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
|
|
|
|
KVM_CPUID_FLAG_STATE_READ_NEXT;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
|
|
|
times = c->eax & 0xff;
|
2009-02-09 18:50:36 +03:00
|
|
|
|
|
|
|
for (j = 1; j < times; ++j) {
|
2013-01-28 15:49:26 +04:00
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "cpuid_data is full, no space for "
|
|
|
|
"cpuid(eax:2):eax & 0xf = 0x%x\n", times);
|
|
|
|
abort();
|
|
|
|
}
|
2009-04-18 00:50:54 +04:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
2009-02-09 18:50:36 +03:00
|
|
|
c->function = i;
|
2009-04-18 00:50:54 +04:00
|
|
|
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
2009-02-09 18:50:36 +03:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2009-02-09 18:50:31 +03:00
|
|
|
case 4:
|
|
|
|
case 0xb:
|
|
|
|
case 0xd:
|
|
|
|
for (j = 0; ; j++) {
|
2011-06-10 17:56:28 +04:00
|
|
|
if (i == 0xd && j == 64) {
|
|
|
|
break;
|
|
|
|
}
|
2009-02-09 18:50:31 +03:00
|
|
|
c->function = i;
|
|
|
|
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
|
|
|
c->index = j;
|
2009-04-18 00:50:54 +04:00
|
|
|
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
2009-02-09 18:50:31 +03:00
|
|
|
|
2010-12-27 18:19:29 +03:00
|
|
|
if (i == 4 && c->eax == 0) {
|
2009-02-09 18:50:31 +03:00
|
|
|
break;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
|
|
|
if (i == 0xb && !(c->ecx & 0xff00)) {
|
2009-02-09 18:50:31 +03:00
|
|
|
break;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
|
|
|
if (i == 0xd && c->eax == 0) {
|
2011-06-10 17:56:28 +04:00
|
|
|
continue;
|
2010-12-27 18:19:29 +03:00
|
|
|
}
|
2013-01-28 15:49:26 +04:00
|
|
|
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
|
|
|
fprintf(stderr, "cpuid_data is full, no space for "
|
|
|
|
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
|
|
|
abort();
|
|
|
|
}
|
2009-04-18 00:50:54 +04:00
|
|
|
c = &cpuid_data.entries[cpuid_i++];
|
2009-02-09 18:50:31 +03:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
c->function = i;
|
2009-04-18 00:50:54 +04:00
|
|
|
c->flags = 0;
|
|
|
|
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
2009-02-09 18:50:31 +03:00
|
|
|
break;
|
|
|
|
}
|
2008-11-05 19:29:27 +03:00
|
|
|
}
|
2013-07-25 19:05:22 +04:00
|
|
|
|
|
|
|
if (limit >= 0x0a) {
|
|
|
|
uint32_t ver;
|
|
|
|
|
|
|
|
cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
|
|
|
|
if ((ver & 0xff) > 0) {
|
|
|
|
has_msr_architectural_pmu = true;
|
|
|
|
num_architectural_pmu_counters = (ver & 0xff00) >> 8;
|
|
|
|
|
|
|
|
/* Shouldn't be more than 32, since that's the number of bits
|
|
|
|
* available in EBX to tell us _which_ counters are available.
|
|
|
|
* Play it safe.
|
|
|
|
*/
|
|
|
|
if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
|
|
|
|
num_architectural_pmu_counters = MAX_GP_COUNTERS;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID leaves only if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;
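
    /*
     * Enable machine-check handling only if both the guest CPU model
     * and the host kernel advertise it.  In env->mcg_cap the low byte
     * (MCG_CAP_BANKS_MASK) holds the bank count, e.g. a low byte of
     * 0x0a means 10 banks, while the upper bits hold capability flags
     * such as MCG_LMCE_P.  The checks below clamp the guest's view to
     * what KVM actually supports.
     */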
    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
                         unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }

    if (!env->user_tsc_khz) {
        if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
            invtsc_mig_blocker == NULL) {
            /* for migration */
            error_setg(&invtsc_mig_blocker,
                       "State blocked by non-migratable CPU device"
                       " (invtsc flag)");
            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
            if (local_err) {
                error_report_err(local_err);
                error_free(invtsc_mig_blocker);
                goto fail;
            }
            /* for savevm */
            vmstate_x86_cpu.unmigratable = 1;
        }
    }

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        goto fail;
    }

    /* The vcpu's TSC frequency is either specified by the user, or it
     * follows the value used by KVM if the former is not present.  In the
     * latter case, we query it from KVM and record it in env->tsc_khz,
     * so that the vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }
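
    /*
     * The VMware-compatible pseudo-leaf 0x40000010 reports clock
     * frequencies to the guest without requiring calibration: EAX is
     * the TSC frequency in kHz and EBX the (virtual) APIC bus frequency
     * in kHz.  E.g. a 2.5 GHz TSC shows up as EAX = 2500000.
     */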
    if (cpu->vmware_cpuid_freq
        /* Guests depend on 0x40000000 to detect this feature, so only expose
         * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
        && cpu->expose_kvm
        && kvm_base == KVM_CPUID_SIGNATURE
        /* TSC clock must be stable and known for this feature. */
        && ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
            || env->user_tsc_khz != 0)
        && env->tsc_khz != 0) {

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | 0x10;
        c->eax = env->tsc_khz;
        /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
         * APIC_BUS_CYCLE_NS */
        c->ebx = 1000000;
        c->ecx = c->edx = 0;

        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        goto fail;
    }

    if (has_xsave) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }
    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    return 0;

 fail:
    migrate_del_blocker(invtsc_mig_blocker);
    return r;
}
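
/*
 * Reset the architectural state that KVM does not reset for us.  XCR0's
 * architectural reset value is 1 (only x87 state enabled), and the
 * pending exception/interrupt slots use -1 as the "none" marker.  With
 * an in-kernel irqchip only the bootstrap processor comes up runnable;
 * APs stay uninitialized until they receive INIT/SIPI.
 */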
void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state.  */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}
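
/*
 * KVM_GET_MSR_INDEX_LIST follows the usual two-call sizing pattern: a
 * first call with nmsrs == 0 fails with -E2BIG but fills in the required
 * nmsrs, and a second call with a suitably sized buffer retrieves the
 * actual index list.  Roughly (variable names here are illustrative):
 *
 *     struct kvm_msr_list probe = { .nmsrs = 0 };
 *     kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &probe);  // -E2BIG, sets nmsrs
 *     list = g_malloc0(sizeof(*list) +
 *                      probe.nmsrs * sizeof(list->indices[0]));
 *     list->nmsrs = probe.nmsrs;
 *     kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, list);    // fills indices[]
 *
 * The function below additionally puts a 1K floor on the allocation to
 * protect against old kernels that wrote past the provided buffer.
 */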
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
                    has_msr_tsc_aux = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
                    has_msr_tsc_adjust = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
                    has_msr_smbase = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
                    has_msr_bndcfgs = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
                    has_msr_xss = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
                    has_msr_hv_crash = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
                    has_msr_hv_reset = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
                    has_msr_hv_vpindex = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
                    has_msr_hv_runtime = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
                    has_msr_hv_synic = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
                    has_msr_hv_stimer = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}
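
/*
 * SMRAM handling: SMM code must see a physical address space in which
 * the SMRAM region overlays normal RAM.  This is modelled as a separate
 * AddressSpace built from two overlapping subregions: an alias of the
 * regular system memory at priority 0, and the machine's /machine/smram
 * region (if present) on top of it at a higher priority.  The extra
 * KVMMemoryListener registers that address space with KVM as slot
 * address space 1, the SMM space.
 */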
static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container... */
    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* ... with two regions inside: normal system memory with low
     * priority, and...
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* ... SMRAM with higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

#ifdef KVM_CAP_XSAVE
    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = machine_kvm_shadow_mem(ms);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }
    return 0;
}

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
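
/*
 * QEMU keeps segment attributes packed in SegmentCache.flags using the
 * DESC_* masks and shifts, mirroring the layout of the second descriptor
 * dword, while KVM's struct kvm_segment keeps each attribute in its own
 * field.  set_seg()/get_seg() translate between the two.  For instance,
 * a typical flat 32-bit code segment carries DESC_P_MASK, DESC_S_MASK
 * and DESC_B_MASK plus type 0xb in the DESC_TYPE bits.
 */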
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    if (rhs->unusable) {
        lhs->flags = 0;
    } else {
        lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                     (rhs->present * DESC_P_MASK) |
                     (rhs->dpl << DESC_DPL_SHIFT) |
                     (rhs->db << DESC_B_SHIFT) |
                     (rhs->s * DESC_S_MASK) |
                     (rhs->l << DESC_L_SHIFT) |
                     (rhs->g * DESC_G_MASK) |
                     (rhs->avl * DESC_AVL_MASK);
    }
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416
#define XSAVE_PKRU        672

#define XSAVE_BYTE_OFFSET(word_offset) \
    ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))

#define ASSERT_OFFSET(word_offset, field) \
    QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
                      offsetof(X86XSaveArea, field))

ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
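
/*
 * The XSAVE_* constants above are offsets in 32-bit words into struct
 * kvm_xsave's region[], matching the hardware XSAVE layout; the
 * ASSERT_OFFSETs check at build time that QEMU's X86XSaveArea uses the
 * same layout.  For example, XSAVE_YMMH_SPACE is word 144, i.e. byte
 * offset 144 * 4 = 576, which is where the AVX high halves live in the
 * standard (non-compacted) XSAVE format.
 */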
static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    int i;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->legacy.fcw = cwd;
    xsave->legacy.fsw = swd;
    xsave->legacy.ftw = twd;
    xsave->legacy.fpop = env->fpop;
    xsave->legacy.fpip = env->fpip;
    xsave->legacy.fpdp = env->fpdp;
    memcpy(&xsave->legacy.fpregs, env->fpregs,
            sizeof env->fpregs);
    xsave->legacy.mxcsr = env->mxcsr;
    xsave->header.xstate_bv = env->xstate_bv;
    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
            sizeof env->bnd_regs);
    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
        stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
        stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
        stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
        stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
    }

#ifdef TARGET_X86_64
    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
            16 * sizeof env->xmm_regs[16]);
    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
#endif
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}

static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}

static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}
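
/*
 * MSR accesses are batched: callers reset the per-vCPU buffer, append
 * any number of (index, value) pairs with kvm_msr_entry_add(), and then
 * issue a single KVM_SET_MSRS/KVM_GET_MSRS ioctl.  The ioctl returns the
 * number of entries processed, so a fully successful write satisfies
 * ret == nmsrs.  Typical use:
 *
 *     kvm_msr_buf_reset(cpu);
 *     kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
 *     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
 *
 * kvm_put_one_msr() below wraps exactly this single-entry case.
 */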
static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
{
    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, index, value);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}

void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
    int ret;

    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
    assert(ret == 1);
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
                          cpu->env.msr_ia32_feature_control);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    int i;
    int ret;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
    }
#endif
    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
        }
        if (has_msr_architectural_pmu) {
            /* Stop the counter.  */
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);

            /* Set the counter values.  */
            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
                              env->msr_global_status);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                              env->msr_global_ovf_ctrl);

            /* Now start the PMU.  */
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
                              env->msr_fixed_ctr_ctrl);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                              env->msr_global_ctrl);
        }
        if (has_msr_hv_hypercall) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
                              env->msr_hv_guest_os_id);
            kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
                              env->msr_hv_hypercall);
        }
        if (cpu->hyperv_vapic) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (cpu->hyperv_time) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);

            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
                              HV_X64_MSR_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
        }
        if (cpu->hyperv_synic) {
            int j;

            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
                              env->msr_hv_synic_control);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
                              env->msr_hv_synic_version);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
                              env->msr_hv_synic_evt_page);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
                              env->msr_hv_synic_msg_page);

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
                                  env->msr_hv_synic_sint[j]);
            }
        }
        if (has_msr_hv_stimer) {
            int j;

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
                                  env->msr_hv_stimer_config[j]);
            }

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
                                  env->msr_hv_stimer_count[j]);
            }
        }
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);

            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /* The CPU GPs if we write to a bit above the physical limit of
                 * the host CPU (and KVM emulates that)
                 */
                uint64_t mask = env->mtrr_var[i].mask;
                mask &= phys_mask;

                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
                                  env->mtrr_var[i].base);
                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
            }
        }

        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control. */
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }
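
    /*
     * KVM_SET_MSRS returns the number of MSR entries it consumed; a
     * short count (ret >= 0 but ret != nmsrs) would mean the kernel
     * rejected an MSR we believe it advertised, which is a programming
     * error rather than a runtime condition, hence the assert below.
     */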
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    return 0;
}

static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->kvm_xsave_buf;
    int ret, i;
    uint16_t cwd, swd, twd;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = xsave->legacy.fcw;
    swd = xsave->legacy.fsw;
    twd = xsave->legacy.ftw;
    env->fpop = xsave->legacy.fpop;
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->fpip = xsave->legacy.fpip;
    env->fpdp = xsave->legacy.fpdp;
    env->mxcsr = xsave->legacy.mxcsr;
    memcpy(env->fpregs, &xsave->legacy.fpregs,
            sizeof env->fpregs);
    env->xstate_bv = xsave->header.xstate_bv;
    memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
            sizeof env->bnd_regs);
    env->bndcs_regs = xsave->bndcsr_state.bndcsr;
    memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
        env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
        env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
    }

#ifdef TARGET_X86_64
    memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
           16 * sizeof env->xmm_regs[16]);
    memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
#endif
    return 0;
}

static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
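
    /*
     * QEMU's cached hflags mirror state that actually lives in the
     * control and segment registers just read back, so recompute the
     * derived bits (CPL from SS.DPL, CS/SS operand size, long-mode
     * activity, address-size handling, ...) from them rather than
     * trusting the stale cache.
     */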

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;

    return 0;
}

static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;
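
    /* First pass: queue the index of every MSR we want to read into the
     * per-vCPU kvm_msr_buf; the data values passed here are ignored. */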
    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }

    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
    }
#endif
    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (has_msr_architectural_pmu) {
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (cpu->hyperv_vapic) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (cpu->hyperv_time) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (cpu->hyperv_synic) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }
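
    /* Second pass: one KVM_GET_MSRS ioctl reads every queued MSR; on
     * success it returns the number of entries the kernel filled in. */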
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0: must be zero
     * b  11   : valid bit
     * c n-1..12: actual mask bits
     * d  51..n: reserved must be zero
     * e  63..52: reserved must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52.  We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
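        /* "default" sits mid-switch: it catches the machine-check bank
         * MSRs, whose indices form a range rather than fixed values. */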
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SVERSION:
            env->msr_hv_synic_version = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                                msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        }
    }

    return 0;
}

static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
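    /* With an in-kernel irqchip the HLT state lives in KVM; mirror it
     * into cs->halted so QEMU's view stays consistent. */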
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;
    events.exception.pad = 0;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
    events.nmi.pad = 0;

    events.sipi_vector = env->sipi_vector;
    events.flags = 0;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        events.flags |= KVM_VCPUEVENT_VALID_SMM;
    }

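    /* The KVM_VCPUEVENT_VALID_* flags select which optional fields
     * KVM_SET_VCPU_EVENTS consumes; NMI-pending and SIPI state are only
     * forced into the kernel on reset or full-state writes. */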
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
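    /* DR4 and DR5 are architectural aliases of DR6 and DR7. */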
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
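
    /* "level" scales the amount of state pushed to KVM, from runtime-only
     * up through reset and full (migration) state. */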
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }
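
    /* With a userspace PIC the code below reads and writes interrupt
     * controller and APIC state, so it must run under the BQL. */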
    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
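
    /* A non-zero return (the halted flag) keeps the vCPU out of KVM_RUN. */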
    return cs->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}
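
/* Software breakpoints are planted by replacing the byte at the breakpoint
 * address with the one-byte INT3 opcode (0xcc); the original byte is kept
 * in bp->saved_insn so removal can restore it. */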
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;
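
/* Look up a registered hardware breakpoint by address and type;
 * len == -1 acts as a wildcard for the length. */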
static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
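        /* DR6 bit 14 (BS) is set for single-step traps. */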
        if (arch_info->dr6 & (1 << 14)) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
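        /* DR7 0x0600: GE (bit 9) plus the always-one reserved bit 10;
         * per-breakpoint enable, type and length bits are OR'd in below. */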
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
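
/* Treat an emulation failure as fatal only if it happened in real mode or
 * in privileged (CPL != 3) code; failures in guest user mode do not stop
 * the VM. */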
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;
    if (machine_kernel_irqchip_split(ms)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}
|
2012-08-27 10:28:40 +04:00
|
|
|
|
|
|
|
/* Classic KVM device assignment interface. Will remain x86 only. */
|
|
|
|
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
|
|
|
|
uint32_t flags, uint32_t *dev_id)
|
|
|
|
{
|
|
|
|
struct kvm_assigned_pci_dev dev_data = {
|
|
|
|
.segnr = dev_addr->domain,
|
|
|
|
.busnr = dev_addr->bus,
|
|
|
|
.devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
|
|
|
|
.flags = flags,
|
|
|
|
};
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
dev_data.assigned_dev_id =
|
|
|
|
(dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
|
|
|
|
|
|
|
|
ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
*dev_id = dev_data.assigned_dev_id;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
|
|
|
|
{
|
|
|
|
struct kvm_assigned_pci_dev dev_data = {
|
|
|
|
.assigned_dev_id = dev_id,
|
|
|
|
};
|
|
|
|
|
|
|
|
return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
|
|
|
|
uint32_t irq_type, uint32_t guest_irq)
|
|
|
|
{
|
|
|
|
struct kvm_assigned_irq assigned_irq = {
|
|
|
|
.assigned_dev_id = dev_id,
|
|
|
|
.guest_irq = guest_irq,
|
|
|
|
.flags = irq_type,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
|
|
|
|
return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
|
|
|
|
} else {
|
|
|
|
return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
|
|
|
|
uint32_t guest_irq)
|
|
|
|
{
|
|
|
|
uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
|
|
|
|
(use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
|
|
|
|
|
|
|
|
return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
|
|
|
|
}
|
|
|
|
|
|
|
|
int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
|
|
|
|
{
|
|
|
|
struct kvm_assigned_pci_dev dev_data = {
|
|
|
|
.assigned_dev_id = dev_id,
|
|
|
|
.flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
|
|
|
|
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
                                     (use_host_msi ?
                                      KVM_DEV_IRQ_HOST_MSI :
                                      KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter: if the ioctl
     * is implemented, the NULL argument makes it fail with -EFAULT
     * while copying in the request; if it is not, we get a different
     * error code. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}
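
/* A typical MSI-X assignment flow with this interface, as a sketch
 * (error handling omitted; "addr", "nr_vectors" and "virq" are
 * placeholders supplied by the caller):
 *
 *   uint32_t dev_id;
 *   kvm_device_pci_assign(s, &addr, 0, &dev_id);
 *   kvm_device_msix_init_vectors(s, dev_id, nr_vectors);
 *   for (i = 0; i < nr_vectors; i++) {
 *       kvm_device_msix_set_vector(s, dev_id, i, virq[i]);
 *   }
 *   kvm_device_msix_assign(s, dev_id);
 */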

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        /* Reassemble the 64-bit MSI address from the route entry and
         * let the IOMMU interrupt-remap the message. */
        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst,
                               dev ? pci_requester_id(dev) :
                               X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}
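
/* Example (assuming VTD_MSI_ADDR_HI_SHIFT == 32): a remapped message
 * with dst.address == 0xfee01004 is stored back as address_hi == 0x0
 * and address_lo == 0xfee01004, i.e. still inside the 0xFEExxxxx LAPIC
 * MMIO window. */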

typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);

static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0;
    MSIRouteEntry *entry;
    MSIMessage msg;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        msg = pci_get_msi_message(entry->dev, entry->vector);
        kvm_irqchip_update_msi_route(kvm_state, entry->virq,
                                     msg, entry->dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}
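
/* kvm_update_msi_routes_all() is registered below as an IEC notifier:
 * whenever the guest invalidates the IOMMU's interrupt entry cache,
 * every tracked route is re-translated and the kernel routing table is
 * committed again, keeping remapped MSIs in sync with the new IRTE
 * contents. */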

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* Routes without a device are (most likely) IOAPIC routes,
         * used only in split kernel irqchip mode; we only track PCI
         * devices here. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* On the first route we add, register ourselves on the
         * IOMMU's IEC notify list if an IOMMU is present. */
        X86IOMMUState *iommu = x86_iommu_get_default();

        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;

    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);  /* unlinking alone would leak the entry */
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    /* On x86 the MSI data payload does not encode a GSI; this hook is
     * only meaningful on targets where it does, so reaching it here is
     * a bug. */
    abort();
}