qemu/target/i386/kvm_i386.h

/*
 * QEMU KVM support -- x86 specific functions.
 *
 * Copyright (c) 2012 Linaro Limited
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_KVM_I386_H
#define QEMU_KVM_I386_H

#include "sysemu/kvm.h"

#define kvm_apic_in_kernel() (kvm_irqchip_in_kernel())

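/*
 * With a split irqchip, KVM emulates only the local APIC; the PIT, PIC
 * and IOAPIC stay in QEMU, so they count as in-kernel only when the
 * irqchip is fully in-kernel.
 */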
#ifdef CONFIG_KVM

#define kvm_pit_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
#define kvm_pic_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
#define kvm_ioapic_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())

#else

#define kvm_pit_in_kernel() 0
#define kvm_pic_in_kernel() 0
#define kvm_ioapic_in_kernel() 0

#endif /* CONFIG_KVM */

bool kvm_allows_irq0_override(void);
bool kvm_has_smm(void);
bool kvm_has_adjust_clock_stable(void);
void kvm_synchronize_all_tsc(void);
void kvm_arch_reset_vcpu(X86CPU *cs);
void kvm_arch_do_init_vcpu(X86CPU *cs);
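
/* Helpers for the legacy KVM PCI device assignment interface. */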
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id);
int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id);

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id,
                           bool use_host_msi, uint32_t guest_irq);
int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked);
int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi);

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq);
int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id);

bool kvm_device_msix_supported(KVMState *s);
int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors);
int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq);
int kvm_device_msix_assign(KVMState *s, uint32_t dev_id);
int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id);

void kvm_put_apicbase(X86CPU *cpu, uint64_t value);

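/*
 * Enabling KVM's x2APIC API allows 32-bit interrupt destination IDs,
 * which intel_iommu needs for EIM; without it, destinations are clamped
 * to 8 bits and interrupts can be lost or delivered to the wrong VCPU.
 */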
bool kvm_enable_x2apic(void);
bool kvm_has_x2apic_api(void);

bool kvm_hv_vpindex_settable(void);

#endif