/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef INTEL_IOMMU_H
#define INTEL_IOMMU_H

#include "hw/i386/x86-iommu.h"
#include "qemu/iova-tree.h"
#include "qom/object.h"

#define TYPE_INTEL_IOMMU_DEVICE "intel-iommu"
OBJECT_DECLARE_SIMPLE_TYPE(IntelIOMMUState, INTEL_IOMMU_DEVICE)

#define TYPE_INTEL_IOMMU_MEMORY_REGION "intel-iommu-iommu-memory-region"

/* DMAR Hardware Unit Definition address (IOMMU unit) */
#define Q35_HOST_BRIDGE_IOMMU_ADDR  0xfed90000ULL

#define VTD_PCI_BUS_MAX             256
#define VTD_PCI_SLOT_MAX            32
#define VTD_PCI_FUNC_MAX            8
#define VTD_PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
#define VTD_PCI_FUNC(devfn)         ((devfn) & 0x07)
#define VTD_SID_TO_BUS(sid)         (((sid) >> 8) & 0xff)
#define VTD_SID_TO_DEVFN(sid)       ((sid) & 0xff)
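
/*
 * Example (derived from the macros above): a source-id packs bus and
 * devfn, and devfn packs slot and function as (slot << 3) | func.
 * For sid 0x012a:
 *   VTD_SID_TO_BUS(0x012a)   == 0x01
 *   VTD_SID_TO_DEVFN(0x012a) == 0x2a
 *   VTD_PCI_SLOT(0x2a)       == 5   (0x2a >> 3)
 *   VTD_PCI_FUNC(0x2a)       == 2   (0x2a & 0x7)
 */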

#define DMAR_REG_SIZE               0x230
#define VTD_HOST_AW_39BIT           39
#define VTD_HOST_AW_48BIT           48
#define VTD_HOST_ADDRESS_WIDTH      VTD_HOST_AW_39BIT
#define VTD_HAW_MASK(aw)            ((1ULL << (aw)) - 1)
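
/*
 * Example: with the default 39-bit width,
 * VTD_HAW_MASK(VTD_HOST_AW_39BIT) == 0x7fffffffffULL, i.e. the mask
 * covering guest physical addresses below 512GiB.
 */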

#define DMAR_REPORT_F_INTR          (1)

#define VTD_MSI_ADDR_HI_MASK        (0xffffffff00000000ULL)
#define VTD_MSI_ADDR_HI_SHIFT       (32)
#define VTD_MSI_ADDR_LO_MASK        (0x00000000ffffffffULL)

typedef struct VTDContextEntry VTDContextEntry;
typedef struct VTDContextCacheEntry VTDContextCacheEntry;
typedef struct VTDAddressSpace VTDAddressSpace;
typedef struct VTDIOTLBEntry VTDIOTLBEntry;
typedef struct VTDBus VTDBus;
typedef union VTD_IR_TableEntry VTD_IR_TableEntry;
typedef union VTD_IR_MSIAddress VTD_IR_MSIAddress;
typedef struct VTDPASIDDirEntry VTDPASIDDirEntry;
typedef struct VTDPASIDEntry VTDPASIDEntry;

/* Context-Entry */
struct VTDContextEntry {
    union {
        struct {
            uint64_t lo;
            uint64_t hi;
        };
        struct {
            uint64_t val[4];
        };
    };
};

struct VTDContextCacheEntry {
    /* The cache entry is obsolete if
     * context_cache_gen != IntelIOMMUState.context_cache_gen
     */
    uint32_t context_cache_gen;
    struct VTDContextEntry context_entry;
};
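
/*
 * A sketch of how the generation scheme avoids walking every cached
 * entry on a global context-cache invalidation (illustrative only;
 * vtd_cached_ce() is a hypothetical name):
 *
 *   static VTDContextEntry *vtd_cached_ce(IntelIOMMUState *s,
 *                                         VTDAddressSpace *as)
 *   {
 *       VTDContextCacheEntry *cc = &as->context_cache_entry;
 *
 *       if (cc->context_cache_gen == s->context_cache_gen) {
 *           return &cc->context_entry;   hit: entry is current
 *       }
 *       return NULL;   stale: re-fetch from guest memory, then stamp
 *                      cc->context_cache_gen = s->context_cache_gen
 *   }
 *
 * Invalidating every cached entry is then just bumping
 * s->context_cache_gen.
 */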

/* PASID Directory Entry */
struct VTDPASIDDirEntry {
    uint64_t val;
};

/* PASID Table Entry */
struct VTDPASIDEntry {
    uint64_t val[8];
};

struct VTDAddressSpace {
    PCIBus *bus;
    uint8_t devfn;
    AddressSpace as;
    IOMMUMemoryRegion iommu;
    MemoryRegion root;          /* The root container of the device */
    MemoryRegion nodmar;        /* The alias of shared nodmar MR */
    MemoryRegion iommu_ir;      /* Interrupt region: 0xfeeXXXXX */
    IntelIOMMUState *iommu_state;
    VTDContextCacheEntry context_cache_entry;
    QLIST_ENTRY(VTDAddressSpace) next;
    /* Superset of notifier flags that this address space has */
    IOMMUNotifierFlag notifier_flags;
    IOVATree *iova_tree;        /* Traces mapped IOVA ranges */
};

struct VTDBus {
    PCIBus* bus;                /* A reference to the bus to provide translation for */
    /* A table of VTDAddressSpace objects indexed by devfn */
    VTDAddressSpace *dev_as[];
};

struct VTDIOTLBEntry {
    uint64_t gfn;
    uint16_t domain_id;
    uint64_t slpte;
    uint64_t mask;
    uint8_t access_flags;
};

/* VT-d Source-ID Qualifier types */
enum {
    VTD_SQ_FULL = 0x00,     /* Full SID verification */
    VTD_SQ_IGN_3 = 0x01,    /* Ignore bit 3 */
    VTD_SQ_IGN_2_3 = 0x02,  /* Ignore bits 2 & 3 */
    VTD_SQ_IGN_1_3 = 0x03,  /* Ignore bits 1-3 */
    VTD_SQ_MAX,
};

/* VT-d Source Validation Types */
enum {
    VTD_SVT_NONE = 0x00,    /* No validation */
    VTD_SVT_ALL = 0x01,     /* Do full validation */
    VTD_SVT_BUS = 0x02,     /* Validate bus range */
    VTD_SVT_MAX,
};

/* Interrupt Remapping Table Entry Definition */
union VTD_IR_TableEntry {
    struct {
#if HOST_BIG_ENDIAN
        uint32_t __reserved_1:8;     /* Reserved 1 */
        uint32_t vector:8;           /* Interrupt Vector */
        uint32_t irte_mode:1;        /* IRTE Mode */
        uint32_t __reserved_0:3;     /* Reserved 0 */
        uint32_t __avail:4;          /* Available spaces for software */
        uint32_t delivery_mode:3;    /* Delivery Mode */
        uint32_t trigger_mode:1;     /* Trigger Mode */
        uint32_t redir_hint:1;       /* Redirection Hint */
        uint32_t dest_mode:1;        /* Destination Mode */
        uint32_t fault_disable:1;    /* Fault Processing Disable */
        uint32_t present:1;          /* Whether entry present/available */
#else
        uint32_t present:1;          /* Whether entry present/available */
        uint32_t fault_disable:1;    /* Fault Processing Disable */
        uint32_t dest_mode:1;        /* Destination Mode */
        uint32_t redir_hint:1;       /* Redirection Hint */
        uint32_t trigger_mode:1;     /* Trigger Mode */
        uint32_t delivery_mode:3;    /* Delivery Mode */
        uint32_t __avail:4;          /* Available spaces for software */
        uint32_t __reserved_0:3;     /* Reserved 0 */
        uint32_t irte_mode:1;        /* IRTE Mode */
        uint32_t vector:8;           /* Interrupt Vector */
        uint32_t __reserved_1:8;     /* Reserved 1 */
#endif
        uint32_t dest_id;            /* Destination ID */
        uint16_t source_id;          /* Source-ID */
#if HOST_BIG_ENDIAN
        uint64_t __reserved_2:44;    /* Reserved 2 */
        uint64_t sid_vtype:2;        /* Source-ID Validation Type */
        uint64_t sid_q:2;            /* Source-ID Qualifier */
#else
        uint64_t sid_q:2;            /* Source-ID Qualifier */
        uint64_t sid_vtype:2;        /* Source-ID Validation Type */
        uint64_t __reserved_2:44;    /* Reserved 2 */
#endif
    } QEMU_PACKED irte;
    uint64_t data[2];
};
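
/*
 * How sid_vtype and sid_q combine during source-id validation (a sketch
 * of the spec-defined semantics, not a verbatim description of the code):
 *   - VTD_SVT_NONE: the requester's source-id is not checked.
 *   - VTD_SVT_ALL:  the requester's source-id must match irte.source_id,
 *     with sid_q (VTD_SQ_*) selecting low function-number bits that are
 *     ignored in the comparison.
 *   - VTD_SVT_BUS:  the requester's bus number must fall within the
 *     bus range packed into the two bytes of irte.source_id.
 */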

#define VTD_IR_INT_FORMAT_COMPAT    (0) /* Compatible Interrupt */
#define VTD_IR_INT_FORMAT_REMAP     (1) /* Remappable Interrupt */

/* Programming format for MSI/MSI-X addresses */
union VTD_IR_MSIAddress {
    struct {
#if HOST_BIG_ENDIAN
        uint32_t __head:12;          /* Should always be: 0x0fee */
        uint32_t index_l:15;         /* Interrupt index bit 14-0 */
        uint32_t int_mode:1;         /* Interrupt format */
        uint32_t sub_valid:1;        /* SHV: Sub-Handle Valid bit */
        uint32_t index_h:1;          /* Interrupt index bit 15 */
        uint32_t __not_care:2;
#else
        uint32_t __not_care:2;
        uint32_t index_h:1;          /* Interrupt index bit 15 */
        uint32_t sub_valid:1;        /* SHV: Sub-Handle Valid bit */
        uint32_t int_mode:1;         /* Interrupt format */
        uint32_t index_l:15;         /* Interrupt index bit 14-0 */
        uint32_t __head:12;          /* Should always be: 0x0fee */
#endif
    } QEMU_PACKED addr;
    uint32_t data;
};
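
/*
 * The 16-bit interrupt index is scattered across the address: bits 14-0
 * sit in index_l (address bits 19-5) and bit 15 in index_h (address
 * bit 2).  Reassembling it (illustrative):
 *
 *   uint16_t index = msi.addr.index_l | (msi.addr.index_h << 15);
 *
 * e.g. a remappable-format address selecting index 3 with SHV clear
 * reads 0xfee00070 (int_mode set, index_l == 3).
 */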

/* When IR is enabled, all MSI/MSI-X data bits should be zero */
#define VTD_IR_MSI_DATA          (0)

/* The iommu (DMAR) device state struct */
struct IntelIOMMUState {
    X86IOMMUState x86_iommu;
    MemoryRegion csrmem;
    MemoryRegion mr_nodmar;
    MemoryRegion mr_ir;
    MemoryRegion mr_sys_alias;
    uint8_t csr[DMAR_REG_SIZE];     /* register values */
    uint8_t wmask[DMAR_REG_SIZE];   /* R/W bytes */
    uint8_t w1cmask[DMAR_REG_SIZE]; /* RW1C(Write 1 to Clear) bytes */
    uint8_t womask[DMAR_REG_SIZE];  /* WO (write only - read returns 0) */
    uint32_t version;
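
    /*
     * Register writes are filtered through the mask tables above; a
     * minimal sketch of the per-byte update rule (illustrative, not the
     * literal implementation):
     *
     *   new = (old & ~wmask[i]) | (val & wmask[i]);  writable bits take val
     *   new &= ~(val & w1cmask[i]);                  writing 1 clears W1C bits
     */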

    bool caching_mode;              /* RO - is cap CM enabled? */
    bool scalable_mode;             /* RO - is Scalable Mode supported? */
    bool snoop_control;             /* RO - is the SNP field supported? */

    dma_addr_t root;                /* Current root table pointer */
    bool root_scalable;             /* Type of root table (scalable or not) */
    bool dmar_enabled;              /* Set if DMA remapping is enabled */

    uint16_t iq_head;               /* Current invalidation queue head */
    uint16_t iq_tail;               /* Current invalidation queue tail */
    dma_addr_t iq;                  /* Current invalidation queue pointer */
    uint16_t iq_size;               /* IQ Size in number of entries */
    bool iq_dw;                     /* IQ descriptor width 256bit or not */
    bool qi_enabled;                /* Set if the QI is enabled */
    uint8_t iq_last_desc_type;      /* The type of last completed descriptor */

    /* The index of the Fault Recording Register to be used next.
     * Wraps around from N-1 to 0, where N is the number of FRCD_REG.
     */
    uint16_t next_frcd_reg;

    uint64_t cap;                   /* The value of capability reg */
    uint64_t ecap;                  /* The value of extended capability reg */

    uint32_t context_cache_gen;     /* Should be in [1,MAX] */
    GHashTable *iotlb;              /* IOTLB */

    GHashTable *vtd_as_by_busptr;   /* VTDBus objects indexed by PCIBus* reference */
    VTDBus *vtd_as_by_bus_num[VTD_PCI_BUS_MAX]; /* VTDBus objects indexed by bus number */
    /* list of registered notifiers */
    QLIST_HEAD(, VTDAddressSpace) vtd_as_with_notifiers;

    /* interrupt remapping */
    bool intr_enabled;              /* Whether guest enabled IR */
    dma_addr_t intr_root;           /* Interrupt remapping table pointer */
    uint32_t intr_size;             /* Number of IR table entries */
    bool intr_eime;                 /* Extended interrupt mode enabled */
    OnOffAuto intr_eim;             /* Toggle for EIM capability */
    bool buggy_eim;                 /* Force buggy EIM unless eim=off */
    uint8_t aw_bits;                /* Host/IOVA address width (in bits) */
    bool dma_drain;                 /* Whether DMA r/w draining enabled */

    /*
     * Protects IOMMU states in general.  Currently it protects the
     * per-IOMMU IOTLB cache, and context entry cache in VTDAddressSpace.
     */
    QemuMutex iommu_lock;
};

/* Find the VTD Address space associated with the given bus pointer,
 * create a new one if none exists
 */
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn);
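
/*
 * Typical usage (illustrative): the intel-iommu hook registered via
 * pci_setup_iommu() resolves a device's DMA address space with
 *
 *   VTDAddressSpace *vtd_as = vtd_find_add_as(s, bus, devfn);
 *   return &vtd_as->as;
 *
 * Results are cached in vtd_as_by_busptr/dev_as, so repeated calls for
 * the same (bus, devfn) return the same VTDAddressSpace.
 */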

#endif