/*
 * QEMU KVM support -- x86 specific functions.
 *
 * Copyright (c) 2012 Linaro Limited
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

|
#ifndef QEMU_KVM_I386_H
#define QEMU_KVM_I386_H

#include "sysemu/kvm.h"

#ifdef CONFIG_KVM

/*
 * The PIT, PIC and IOAPIC are emulated in the kernel only when the
 * full in-kernel irqchip is in use; with a split irqchip (or no
 * in-kernel irqchip at all) these evaluate to false.
 */
#define kvm_pit_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
#define kvm_pic_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
#define kvm_ioapic_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())

#else

/* Without KVM support there is never an in-kernel PIT/PIC/IOAPIC. */
#define kvm_pit_in_kernel() 0
#define kvm_pic_in_kernel() 0
#define kvm_ioapic_in_kernel() 0

#endif  /* CONFIG_KVM */

bool kvm_has_smm(void);
bool kvm_enable_x2apic(void);
bool kvm_hv_vpindex_settable(void);

bool kvm_enable_sgx_provisioning(KVMState *s);
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp);

void kvm_arch_reset_vcpu(X86CPU *cs);
void kvm_arch_after_reset_vcpu(X86CPU *cpu);
void kvm_arch_do_init_vcpu(X86CPU *cs);

uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
                                      uint32_t index, int reg);
uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index);

void kvm_set_max_apic_id(uint32_t max_apic_id);
void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);

#ifdef CONFIG_KVM

bool kvm_has_adjust_clock(void);
bool kvm_has_adjust_clock_stable(void);
/*
 * Whether the kernel supports KVM_CAP_EXCEPTION_PAYLOAD; required for
 * migration only when the vCPU may have enabled VMX (CR4.VMXE set, or
 * the vCPU is in SMM mode) and an exception is pending/injected in
 * guest mode.
 */
bool kvm_has_exception_payload(void);
void kvm_synchronize_all_tsc(void);

void kvm_put_apicbase(X86CPU *cpu, uint64_t value);

bool kvm_has_x2apic_api(void);
bool kvm_has_waitpkg(void);

uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address);
void kvm_update_msi_routes_all(void *private, bool global,
                               uint32_t index, uint32_t mask);

typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val);
|
|
|
|
typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val);
|
|
|
|
typedef struct kvm_msr_handlers {
|
|
|
|
uint32_t msr;
|
|
|
|
QEMURDMSRHandler *rdmsr;
|
|
|
|
QEMUWRMSRHandler *wrmsr;
|
|
|
|
} KVMMSRHandlers;

bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
                    QEMUWRMSRHandler *wrmsr);

#endif /* CONFIG_KVM */

#endif /* QEMU_KVM_I386_H */