/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
#include "trace.h"

/*#define DEBUG_INTEL_IOMMU*/
#ifdef DEBUG_INTEL_IOMMU
enum {
    DEBUG_GENERAL, DEBUG_CSR, DEBUG_INV, DEBUG_MMU, DEBUG_FLOG,
    DEBUG_CACHE, DEBUG_IR,
};
#define VTD_DBGBIT(x)   (1 << DEBUG_##x)
static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);

#define VTD_DPRINTF(what, fmt, ...) do { \
    if (vtd_dbgflags & VTD_DBGBIT(what)) { \
        fprintf(stderr, "(vtd)%s: " fmt "\n", __func__, \
                ## __VA_ARGS__); } \
    } while (0)
#else
#define VTD_DPRINTF(what, fmt, ...) do {} while (0)
#endif

static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}

/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}
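
/*
 * Worked example of the masking above (added for clarity, not in the
 * original source): suppose a 32-bit register where bit 0 is read-only
 * (not in wmask), bit 1 is read-write (in wmask) and bit 31 is
 * write-1-to-clear (in w1cmask). With oldval = 0x80000001 and a guest
 * write of val = 0x80000002:
 *   (oldval & ~wmask) keeps the read-only bit 0,
 *   (val & wmask) takes the new value of bit 1,
 *   & ~(w1cmask & val) clears bit 31 because the guest wrote 1 to it,
 * giving a final CSR value of 0x00000003.
 */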

static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}

/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}

static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}

/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}

/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    assert(level != 0);
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
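
/*
 * Added note (not in the original source): with the usual definitions of
 * VTD_PAGE_SHIFT_4K == 12 and VTD_SL_LEVEL_BITS == 9 from
 * intel_iommu_internal.h, the shift works out to 12/21/30/39 for levels
 * 1 through 4, i.e. the region covered by one entry at each level is
 * 4KiB, 2MiB, 1GiB and 512GiB respectively.
 */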

static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
            (((entry->gfn & info->mask) == gfn) ||
             (entry->gfn == gfn_tlb));
}

/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1.
 */
static void vtd_reset_context_cache(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    VTDBus *vtd_bus;
    GHashTableIter bus_it;
    uint32_t devfn_it;

    g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);

    VTD_DPRINTF(CACHE, "global context_cache_gen=1");
    while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    assert(s->iotlb);
    g_hash_table_remove_all(s->iotlb);
}

static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                  uint32_t level)
{
    return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
           ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
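
/*
 * Added note (not in the original source): the IOTLB hash key packs the
 * guest frame number into the low bits and ORs the 16-bit source-id and
 * the paging level above it, at the shift positions given by
 * VTD_IOTLB_SID_SHIFT and VTD_IOTLB_LVL_SHIFT in intel_iommu_internal.h.
 * One key therefore identifies a (gfn, requester, level) triple, which is
 * why vtd_lookup_iotlb() below probes once per possible level.
 */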

static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    VTDIOTLBEntry *entry;
    uint64_t key;
    int level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
                                source_id, level);
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            goto out;
        }
    }

out:
    return entry;
}

static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             bool read_flags, bool write_flags,
                             uint32_t level)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->read_flags = read_flags;
    entry->write_flags = write_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    *key = vtd_get_iotlb_key(gfn, source_id, level);
    g_hash_table_replace(s->iotlb, key, entry);
}

/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32,
                msi.address, msi.data);
    apic_get_class()->send_msi(&msi);
}

/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        VTD_DPRINTF(FLOG, "there are previous interrupt conditions "
                    "to be serviced by software, fault event is not generated "
                    "(FSTS_REG 0x%"PRIx32 ")", pre_fsts);
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        VTD_DPRINTF(FLOG, "Interrupt Mask set, fault event is not generated");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}
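
/*
 * Added note (not in the original source): each fault recording register
 * occupies 16 bytes (hence "index << 4"), and the F bit lives in the
 * upper 64-bit half of the 128-bit register, which is why the helpers
 * above and below add 8 to the base address before testing VTD_FRCD_F.
 */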

/* Update the PPF field of Fault Status Register.
 * Should be called whenever the F field of any fault recording register
 * is changed.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    VTD_DPRINTF(FLOG, "set PPF of FSTS_REG to %d", ppf_mask ? 1 : 0);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}

/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi = 0, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
    VTD_DPRINTF(FLOG, "record to FRCD_REG #%"PRIu16 ": hi 0x%"PRIx64
                ", lo 0x%"PRIx64, index, hi, lo);
}

/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        VTD_DPRINTF(FLOG, "frcd_reg #%d 0x%"PRIx64, i, frcd_reg);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}

/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }
    VTD_DPRINTF(FLOG, "sid 0x%"PRIx16 ", fault %d, addr 0x%"PRIx64
                ", is_write %d", source_id, fault, addr, is_write);
    if (fsts_reg & VTD_FSTS_PFO) {
        VTD_DPRINTF(FLOG, "new fault is not recorded due to "
                    "Primary Fault Overflow");
        return;
    }
    if (vtd_try_collapse_fault(s, source_id)) {
        VTD_DPRINTF(FLOG, "new fault is not recorded due to "
                    "compression of faults");
        return;
    }
    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        VTD_DPRINTF(FLOG, "Primary Fault Overflow and "
                    "new fault is not recorded, set PFO field");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        VTD_DPRINTF(FLOG, "there are pending faults already, "
                    "fault event is not generated");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually causes the PPF to be Set,
         * so generate the fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}

/* Handle Invalidation Queue Errors of the queued invalidation interface. */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
        return;
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static inline bool vtd_root_entry_present(VTDRootEntry *root)
{
    return root->val & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        trace_vtd_re_invalid(re->rsvd, re->val);
        re->val = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->val = le64_to_cpu(re->val);
    return 0;
}

static inline bool vtd_context_entry_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}

static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
                                           VTDContextEntry *ce)
{
    dma_addr_t addr;

    /* we have checked that root entry is present */
    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
        trace_vtd_re_invalid(root->rsvd, root->val);
        return -VTD_FR_CONTEXT_TABLE_INV;
    }
    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    return 0;
}

static inline dma_addr_t vtd_get_slpt_base_from_context(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK;
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of an slpte located at @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}

/* Given an iova and the level of paging structure, return the offset
 * of the current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
           ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}
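
/*
 * Added worked example (not in the original source): for a 4-level table
 * the 9-bit index fields of a 48-bit IOVA are bits 47:39 (level 4),
 * 38:30 (level 3), 29:21 (level 2) and 20:12 (level 1), so
 * vtd_iova_level_offset(0x804020100000, 4) selects top-level entry 0x100.
 */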

/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}

/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_get_level_from_context_entry(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}
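
/*
 * Added note (not in the original source): the AW field encodes the
 * adjusted guest address width. AW == 1 gives a 3-level table (2 + 1)
 * and a 39-bit AGAW (30 + 9); AW == 2 gives a 4-level table and a 48-bit
 * AGAW, matching the SAGAW capability check above.
 */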

static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
{
    uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
    return 1ULL << MIN(ce_agaw, VTD_MGAW);
}

/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(ce) - 1));
}

static const uint64_t vtd_paging_entry_rsvd_field[] = {
    [0] = ~0ULL,
    /* For not large page */
    [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    /* For large page */
    [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
};

static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
        /* Maybe large page */
        return slpte & vtd_paging_entry_rsvd_field[level + 4];
    } else {
        return slpte & vtd_paging_entry_rsvd_field[level];
    }
}
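
/*
 * Added note (not in the original source): indices 1-4 of
 * vtd_paging_entry_rsvd_field[] hold the reserved-bit masks for ordinary
 * entries at levels 1-4, while indices 5-8 (selected via "level + 4")
 * hold the masks used when the PS bit is set. For example, a 2MiB large
 * page at level 2 must have address bits 20:12 zero; together with the
 * always-reserved bit 11 that gives the 0x1ff800 term at index 6.
 */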

/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, can be used for deciding the size of large page.
 */
static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes)
{
    dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
    uint32_t level = vtd_get_level_from_context_entry(ce);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;

    if (!vtd_iova_range_check(iova, ce)) {
        VTD_DPRINTF(GENERAL, "error: iova 0x%"PRIx64 " exceeds limits", iova);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            VTD_DPRINTF(GENERAL, "error: failed to access second-level paging "
                        "entry at level %"PRIu32 " for iova 0x%"PRIx64,
                        level, iova);
            if (level == vtd_get_level_from_context_entry(ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
                        "iova 0x%"PRIx64 " slpte 0x%"PRIx64,
                        (is_write ? "write" : "read"), iova, slpte);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            VTD_DPRINTF(GENERAL, "error: non-zero reserved field in second "
                        "level paging entry level %"PRIu32 " slpte 0x%"PRIx64,
                        level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte);
        level--;
    }
}

typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);

/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: hook func to be called when a page is detected
 * @private: private data to be passed into hook func
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @notify_unmap: whether we should notify invalid entries
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, vtd_page_walk_hook hook_fn,
                               void *private, uint32_t level,
                               bool read, bool write, bool notify_unmap)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEntry entry;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * table entries.
         */
        entry_valid = read_cur | write_cur;

        if (vtd_is_last_slpte(slpte, level)) {
            entry.target_as = &address_space_memory;
            entry.iova = iova & subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            entry.translated_addr = vtd_get_slpte_addr(slpte);
            entry.addr_mask = ~subpage_mask;
            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            if (!entry_valid && !notify_unmap) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
                                    entry.addr_mask, entry.perm);
            if (hook_fn) {
                ret = hook_fn(&entry, private);
                if (ret < 0) {
                    return ret;
                }
            }
        } else {
            if (!entry_valid) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
                                      MIN(iova_next, end), hook_fn, private,
                                      level - 1, read_cur, write_cur,
                                      notify_unmap);
            if (ret < 0) {
                return ret;
            }
        }

next:
        iova = iova_next;
    }

    return 0;
}

/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: the hook to be called for each detected area
 * @private: private data for the hook function
 */
static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
                         vtd_page_walk_hook hook_fn, void *private,
                         bool notify_unmap)
{
    dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
    uint32_t level = vtd_get_level_from_context_entry(ce);

    if (!vtd_iova_range_check(start, ce)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(end, ce)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(ce);
    }

    return vtd_page_walk_level(addr, start, end, hook_fn, private,
                               level, true, true, notify_unmap);
}
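
/*
 * Illustrative sketch (not part of the original source): a trivial hook
 * that just counts valid mappings in a range could be plugged in as
 *
 *     static int count_hook(IOMMUTLBEntry *entry, void *private)
 *     {
 *         (*(uint64_t *)private)++;
 *         return 0;
 *     }
 *
 *     uint64_t n = 0;
 *     vtd_page_walk(&ce, 0, 1ULL << 39, count_hook, &n, false);
 *
 * vtd_page_invalidate_notify_hook() below is the real in-tree user.
 */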

/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(&re)) {
        /* Not error - it's okay we don't have root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
        trace_vtd_re_invalid(re.rsvd, re.val);
        return -VTD_FR_ROOT_ENTRY_RSVD;
    }

    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_context_entry_present(ce)) {
        /* Not error - it's okay we don't have context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
               (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }
    /* Check if the programming of context-entry is valid */
    if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    } else {
        switch (ce->lo & VTD_CONTEXT_ENTRY_TT) {
        case VTD_CONTEXT_TT_MULTI_LEVEL:
            /* fall through */
        case VTD_CONTEXT_TT_DEV_IOTLB:
            break;
        default:
            trace_vtd_ce_invalid(ce->hi, ce->lo);
            return -VTD_FR_CONTEXT_ENTRY_INV;
        }
    }
    return 0;
}

static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}
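
/*
 * Added note (not in the original source): the source-id follows the PCI
 * requester-id layout, bus[15:8] | device[7:3] | function[2:0]. For
 * example, bus 0x01, slot 2, function 0 (devfn 0x10) yields sid 0x0110.
 */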

static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};

/* To see if a fault condition is "qualified", which is reported to software
 * only if the FPD field in the context-entry used to process the faulting
 * request is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}

static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}

/* Map dev to context-entry then do a paging-structures walk to do an IOMMU
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combination of device and function numbers
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contains the addr to be translated and result
 */
static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
    uint64_t slpte, page_mask;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    /* Try to fetch slpte from the IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                 iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        reads = iotlb_entry->read_flags;
        writes = iotlb_entry->write_flags;
        page_mask = iotlb_entry->mask;
        goto out;
    }
    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                trace_vtd_fault_disabled();
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            return;
        }
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
                               &reads, &writes);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            trace_vtd_fault_disabled();
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        return;
    }

    page_mask = vtd_slpt_level_page_mask(level);
    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     reads, writes, level);
out:
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
}
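
/*
 * Added note (not in the original source): the final perm encoding above
 * matches the IOMMUAccessFlags values (IOMMU_NONE = 0, IOMMU_RO = 1,
 * IOMMU_WO = 2, IOMMU_RW = 3), so "(writes ? 2 : 0) + (reads ? 1 : 0)" is
 * equivalent to the IOMMU_ACCESS_FLAG(reads, writes) form used by the
 * page walker earlier in this file.
 */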

static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root_extended = s->root & VTD_RTADDR_RTT;
    s->root &= VTD_RTADDR_ADDR_MASK;

    VTD_DPRINTF(CSR, "root_table addr 0x%"PRIx64 " %s", s->root,
                (s->root_extended ? "(extended)" : ""));
}

static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
                               uint32_t index, uint32_t mask)
{
    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
}

static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
    uint64_t value = 0;
    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
    s->intr_root = value & VTD_IRTA_ADDR_MASK;
    s->intr_eime = value & VTD_IRTA_EIME;

    /* Notify global invalidation */
    vtd_iec_notify_all(s, true, 0, 0);

    VTD_DPRINTF(CSR, "int remap table addr 0x%"PRIx64 " size %"PRIu32,
                s->intr_root, s->intr_size);
}

static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        memory_region_iommu_replay_all(&node->vtd_as->iommu);
    }
}

static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_cc_global();
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache(s);
    }
    /*
     * From VT-d spec 6.5.2.1, a global context entry invalidation
     * should be followed by an IOTLB global invalidation, so we should
     * be safe even without this replay. However, let's replay the
     * region as well to be safer, and revisit this when the VT-d
     * emulation code needs finer tuning.
     */
    vtd_iommu_replay_all(s);
}

/* Find the VTD address space currently associated with a given bus number */
static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
{
    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
    if (!vtd_bus) {
        /* Iterate over the registered buses to find the one which
         * currently holds this bus number, and update the bus_num
         * lookup table.
         */
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
            if (pci_bus_num(vtd_bus->bus) == bus_num) {
                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
                return vtd_bus;
            }
        }
    }
    return vtd_bus;
}

/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    uint16_t mask;
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    uint8_t bus_n, devfn;
    uint16_t devfn_it;

    trace_vtd_inv_desc_cc_devices(source_id, func_mask);

    switch (func_mask & 3) {
    case 0:
        mask = 0;   /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4;   /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6;   /* Mask bit 2:1 in the SID field */
        break;
    case 3:
        mask = 7;   /* Mask bit 2:0 in the SID field */
        break;
    }
    mask = ~mask;

    bus_n = VTD_SID_TO_BUS(source_id);
    vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
    if (vtd_bus) {
        devfn = VTD_SID_TO_DEVFN(source_id);
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
                trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                             VTD_PCI_FUNC(devfn_it));
                vtd_as->context_cache_entry.context_cache_gen = 0;
                /*
                 * So a device is moving out of (or moving into) a
                 * domain, a replay() suits here to notify all the
                 * IOMMU_NOTIFIER_MAP registers about this change.
                 * This is harmless even if we have no such notifier
                 * registered - the IOMMU notification framework will
                 * skip MAP notifications if that happened.
                 */
                memory_region_iommu_replay_all(&vtd_as->iommu);
            }
        }
    }
}
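
/*
 * Added worked example (not in the original source): with func_mask & 3
 * == 3 the low three SID bits (the PCI function number) are ignored, so
 * an invalidation for sid 0x0110 (bus 1, devfn 0x10) clears the cached
 * context entries of devfn 0x10 through 0x17, i.e. all eight functions
 * of slot 2.
 */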

/* Context-cache invalidation
 * Returns the Context Actual Invalidation Granularity.
 * @val: the content of the CCMD_REG
 */
static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
{
    uint64_t caig;
    uint64_t type = val & VTD_CCMD_CIRG_MASK;

    switch (type) {
    case VTD_CCMD_DOMAIN_INVL:
        VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
                    (uint16_t)VTD_CCMD_DID(val));
        /* Fall through */
    case VTD_CCMD_GLOBAL_INVL:
        VTD_DPRINTF(INV, "global invalidation");
        caig = VTD_CCMD_GLOBAL_INVL_A;
        vtd_context_global_invalidate(s);
        break;

    case VTD_CCMD_DEVICE_INVL:
        caig = VTD_CCMD_DEVICE_INVL_A;
        vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: invalid granularity");
        caig = 0;
    }
    return caig;
}

static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_iotlb_reset("global invalidation received");
    vtd_reset_iotlb(s);
    vtd_iommu_replay_all(s);
}

static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    VTDAddressSpace *vtd_as;

    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                &domain_id);

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                      vtd_as->devfn, &ce) &&
            domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            memory_region_iommu_replay_all(&vtd_as->iommu);
        }
    }
}

static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
                                           void *private)
{
    memory_region_notify_iommu((MemoryRegion *)private, *entry);
    return 0;
}

static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                             uint16_t domain_id, hwaddr addr,
                                             uint8_t am)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    int ret;

    QLIST_FOREACH(node, &(s->notifiers_list), next) {
        VTDAddressSpace *vtd_as = node->vtd_as;
        ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                       vtd_as->devfn, &ce);
        if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
                          vtd_page_invalidate_notify_hook,
                          (void *)&vtd_as->iommu, true);
        }
    }
}

static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                      hwaddr addr, uint8_t am)
{
    VTDIOTLBPageInvInfo info;

    assert(am <= VTD_MAMV);
    info.domain_id = domain_id;
    info.addr = addr;
    info.mask = ~((1 << am) - 1);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
    vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
}
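
/*
 * Added note (not in the original source): "am" is the VT-d address-mask
 * encoding, so an invalidation with am == 2 covers 1 << 2 == 4 contiguous
 * 4KiB pages (16KiB) aligned to that size, which is why the notify path
 * above walks [addr, addr + (1 << am) * VTD_PAGE_SIZE).
 */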

/* Flush IOTLB
 * Returns the IOTLB Actual Invalidation Granularity.
 * @val: the content of the IOTLB_REG
 */
static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
{
    uint64_t iaig;
    uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
    uint16_t domain_id;
    hwaddr addr;
    uint8_t am;

    switch (type) {
    case VTD_TLB_GLOBAL_FLUSH:
        VTD_DPRINTF(INV, "global invalidation");
        iaig = VTD_TLB_GLOBAL_FLUSH_A;
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_TLB_DSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
                    domain_id);
        iaig = VTD_TLB_DSI_FLUSH_A;
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_TLB_PSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
        am = VTD_IVA_AM(addr);
        addr = VTD_IVA_ADDR(addr);
        VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
                    " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
        if (am > VTD_MAMV) {
            VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
                        "%"PRIu8, (uint8_t)VTD_MAMV);
            iaig = 0;
            break;
        }
        iaig = VTD_TLB_PSI_FLUSH_A;
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: invalid granularity");
        iaig = 0;
    }
    return iaig;
}

static inline bool vtd_queued_inv_enable_check(IntelIOMMUState *s)
{
    return s->iq_tail == 0;
}

static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
{
    return s->qi_enabled && (s->iq_tail == s->iq_head) &&
           (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
}

static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
{
    uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);

    VTD_DPRINTF(INV, "Queued Invalidation Enable %s", (en ? "on" : "off"));
    if (en) {
        if (vtd_queued_inv_enable_check(s)) {
            s->iq = iqa_val & VTD_IQA_IQA_MASK;
            /* 2^(x+8) entries */
            s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
            s->qi_enabled = true;
            VTD_DPRINTF(INV, "DMAR_IQA_REG 0x%"PRIx64, iqa_val);
            VTD_DPRINTF(INV, "Invalidation Queue addr 0x%"PRIx64 " size %d",
                        s->iq, s->iq_size);
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
        } else {
            VTD_DPRINTF(GENERAL, "error: can't enable Queued Invalidation: "
                        "tail %"PRIu16, s->iq_tail);
        }
    } else {
        if (vtd_queued_inv_disable_check(s)) {
            /* disable Queued Invalidation */
            vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
            s->iq_head = 0;
            s->qi_enabled = false;
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
        } else {
            VTD_DPRINTF(GENERAL, "error: can't disable Queued Invalidation: "
                        "head %"PRIu16 ", tail %"PRIu16
                        ", last_descriptor %"PRIu8,
                        s->iq_head, s->iq_tail, s->iq_last_desc_type);
        }
    }
}
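
/*
 * Added worked example (not in the original source): with the Queue Size
 * field QS == 0 the invalidation queue holds 1 << (0 + 8) == 256
 * descriptors; at 16 bytes per descriptor that is exactly one 4KiB page,
 * the minimum queue size in the VT-d spec.
 */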

/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
    VTD_DPRINTF(CSR, "set Root Table Pointer");

    vtd_root_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
}

/* Set Interrupt Remap Table Pointer */
static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
{
    VTD_DPRINTF(CSR, "set Interrupt Remap Table Pointer");

    vtd_interrupt_remap_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
}

static void vtd_switch_address_space(VTDAddressSpace *as)
{
    assert(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   as->iommu_state->dmar_enabled);

    /* Turn off first then on the other */
    if (as->iommu_state->dmar_enabled) {
        memory_region_set_enabled(&as->sys_alias, false);
        memory_region_set_enabled(&as->iommu, true);
    } else {
        memory_region_set_enabled(&as->iommu, false);
        memory_region_set_enabled(&as->sys_alias, true);
    }
}

static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    GHashTableIter iter;
    VTDBus *vtd_bus;
    int i;

    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
            if (!vtd_bus->dev_as[i]) {
                continue;
            }
            vtd_switch_address_space(vtd_bus->dev_as[i]);
        }
    }
}

/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
    if (s->dmar_enabled == en) {
        return;
    }

    VTD_DPRINTF(CSR, "Translation Enable %s", (en ? "on" : "off"));

    if (en) {
        s->dmar_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
    } else {
        s->dmar_enabled = false;

        /* Clear the index of Fault Recording Register */
        s->next_frcd_reg = 0;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
    }

    vtd_switch_address_space_all(s);
}

/* Handle Interrupt Remap Enable/Disable */
static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
{
    VTD_DPRINTF(CSR, "Interrupt Remap Enable %s", (en ? "on" : "off"));

    if (en) {
        s->intr_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
    } else {
        s->intr_enabled = false;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
    }
}

/* Handle write to Global Command Register */
static void vtd_handle_gcmd_write(IntelIOMMUState *s)
{
    uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
    uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
    uint32_t changed = status ^ val;

    VTD_DPRINTF(CSR, "value 0x%"PRIx32 " status 0x%"PRIx32, val, status);
    if (changed & VTD_GCMD_TE) {
        /* Translation enable/disable */
        vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
    }
    if (val & VTD_GCMD_SRTP) {
        /* Set/update the root-table pointer */
        vtd_handle_gcmd_srtp(s);
    }
    if (changed & VTD_GCMD_QIE) {
        /* Queued Invalidation Enable */
        vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
    }
    if (val & VTD_GCMD_SIRTP) {
        /* Set/update the interrupt remapping root-table pointer */
        vtd_handle_gcmd_sirtp(s);
    }
    if (changed & VTD_GCMD_IRE) {
        /* Interrupt remap enable/disable */
        vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
    }
}

/* Handle write to Context Command Register */
static void vtd_handle_ccmd_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);

    /* Context-cache invalidation request */
    if (val & VTD_CCMD_ICC) {
        if (s->qi_enabled) {
            VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
                        "should not use register-based invalidation");
            return;
        }
        ret = vtd_context_cache_invalidate(s, val);
        /* Invalidation completed: clear ICC and report the actual
         * invalidation granularity back to the guest. */
        vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
                                      ret);
        VTD_DPRINTF(INV, "CCMD_REG write-back val: 0x%"PRIx64, ret);
    }
}

/* Handle write to IOTLB Invalidation Register */
static void vtd_handle_iotlb_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);

    /* IOTLB invalidation request */
    if (val & VTD_TLB_IVT) {
        if (s->qi_enabled) {
            VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
                        "should not use register-based invalidation");
            return;
        }
        ret = vtd_iotlb_flush(s, val);
        /* Invalidation completed: clear IVT and report the actual
         * invalidation granularity back to the guest. */
        vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
                                      VTD_TLB_FLUSH_GRANU_MASK_A, ret);
        VTD_DPRINTF(INV, "IOTLB_REG write-back val: 0x%"PRIx64, ret);
    }
}

/* Fetch an Invalidation Descriptor from the Invalidation Queue */
static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
                             VTDInvDesc *inv_desc)
{
    dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
    if (dma_memory_read(&address_space_memory, addr, inv_desc,
                        sizeof(*inv_desc))) {
        VTD_DPRINTF(GENERAL, "error: fail to fetch Invalidation Descriptor "
                    "base_addr 0x%"PRIx64 " offset %"PRIu32, base_addr, offset);
        inv_desc->lo = 0;
        inv_desc->hi = 0;

        return false;
    }
    inv_desc->lo = le64_to_cpu(inv_desc->lo);
    inv_desc->hi = le64_to_cpu(inv_desc->hi);
    return true;
}

static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
        (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
        /* Status Write */
        uint32_t status_data = (uint32_t)(inv_desc->lo >>
                                          VTD_INV_DESC_WAIT_DATA_SHIFT);

        assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));

        /* FIXME: need to be masked with HAW? */
        dma_addr_t status_addr = inv_desc->hi;
        trace_vtd_inv_desc_wait_sw(status_addr, status_data);
        status_data = cpu_to_le32(status_data);
        if (dma_memory_write(&address_space_memory, status_addr, &status_data,
                             sizeof(status_data))) {
            trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
            return false;
        }
    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
        /* Interrupt flag */
        vtd_generate_completion_event(s);
    } else {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
{
    uint16_t sid, fmask;

    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
    case VTD_INV_DESC_CC_DOMAIN:
        trace_vtd_inv_desc_cc_domain(
            (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
        /* Fall through */
    case VTD_INV_DESC_CC_GLOBAL:
        vtd_context_global_invalidate(s);
        break;

    case VTD_INV_DESC_CC_DEVICE:
        sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
        fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
        vtd_context_device_invalidate(s, sid, fmask);
        break;

    default:
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        trace_vtd_inv_desc_iotlb_global();
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        trace_vtd_inv_desc_iotlb_domain(domain_id);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);
        if (am > VTD_MAMV) {
            trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
                                     VTDInvDesc *inv_desc)
{
    VTD_DPRINTF(INV, "inv ir glob %d index %d mask %d",
                inv_desc->iec.granularity,
                inv_desc->iec.index,
                inv_desc->iec.index_mask);

    vtd_iec_notify_all(s, !inv_desc->iec.granularity,
                       inv_desc->iec.index,
                       inv_desc->iec.index_mask);
    return true;
}

static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                          VTDInvDesc *inv_desc)
{
    VTDAddressSpace *vtd_dev_as;
    IOMMUTLBEntry entry;
    struct VTDBus *vtd_bus;
    hwaddr addr;
    uint64_t sz;
    uint16_t sid;
    uint8_t devfn;
    bool size;
    uint8_t bus_num;

    addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
    devfn = sid & 0xff;
    bus_num = sid >> 8;
    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);

    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
        VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Device "
                    "IOTLB Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
                    inv_desc->hi, inv_desc->lo);
        return false;
    }

    vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
    if (!vtd_bus) {
        goto done;
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];
    if (!vtd_dev_as) {
        goto done;
    }

    /* According to ATS spec table 2.4:
     * S = 0, bits 15:12 = xxxx     range size: 4K
     * S = 1, bits 15:12 = xxx0     range size: 8K
     * S = 1, bits 15:12 = xx01     range size: 16K
     * S = 1, bits 15:12 = x011     range size: 32K
     * S = 1, bits 15:12 = 0111     range size: 64K
     * ...
     */
    if (size) {
        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
        addr &= ~(sz - 1);
    } else {
        sz = VTD_PAGE_SIZE;
    }

    entry.target_as = &vtd_dev_as->as;
    entry.addr_mask = sz - 1;
    entry.iova = addr;
    entry.perm = IOMMU_NONE;
    entry.translated_addr = 0;
    memory_region_notify_iommu(&vtd_dev_as->iommu, entry);

done:
    return true;
}

static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    VTD_DPRINTF(INV, "iq head %"PRIu16, s->iq_head);
    if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }
    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_WAIT:
        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IEC:
        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_DEVICE:
        VTD_DPRINTF(INV, "Device IOTLB Invalidation Descriptor hi 0x%"PRIx64
                    " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    default:
        trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
        return false;
    }
    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}

/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    VTD_DPRINTF(INV, "fetch Invalidation Descriptors");
    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        VTD_DPRINTF(GENERAL, "error: iq_tail is %"PRIu16
                    " while iq_size is %"PRIu16, s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
                         VTD_IQH_QH_MASK);
    }
}

/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    s->iq_tail = VTD_IQT_QT(val);
    VTD_DPRINTF(INV, "set iq tail %"PRIu16, s->iq_tail);
    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}

static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        VTD_DPRINTF(FLOG, "all pending interrupt conditions serviced, clear "
                    "IP field of FECTL_REG");
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}

static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        VTD_DPRINTF(FLOG, "IM field is cleared, generate "
                    "fault event interrupt");
    }
}

static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
        VTD_DPRINTF(INV, "pending completion interrupt condition serviced, "
                    "clear IP field of IECTL_REG");
    }
}

static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
        VTD_DPRINTF(INV, "IM field is cleared, generate "
                    "invalidation event interrupt");
    }
}

static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    if (addr + size > DMAR_REG_SIZE) {
        VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
                    ", got 0x%"PRIx64 " %d",
                    (uint64_t)DMAR_REG_SIZE, addr, size);
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            val = s->root & ((1ULL << 32) - 1);
        } else {
            val = s->root;
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = s->root >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }
    VTD_DPRINTF(CSR, "addr 0x%"PRIx64 " size %d val 0x%"PRIx64,
                addr, size, val);
    return val;
}

static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    if (addr + size > DMAR_REG_SIZE) {
        VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
                    ", got 0x%"PRIx64 " %d",
                    (uint64_t)DMAR_REG_SIZE, addr, size);
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        VTD_DPRINTF(CSR, "DMAR_GCMD_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        VTD_DPRINTF(CSR, "DMAR_CCMD_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        VTD_DPRINTF(CSR, "DMAR_CCMD_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        VTD_DPRINTF(INV, "DMAR_IOTLB_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IOTLB_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        VTD_DPRINTF(INV, "DMAR_IVA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IVA_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        VTD_DPRINTF(FLOG, "DMAR_FSTS_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        VTD_DPRINTF(FLOG, "DMAR_FECTL_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEDATA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEUADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        VTD_DPRINTF(CSR, "DMAR_PMEN_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        VTD_DPRINTF(CSR, "DMAR_RTADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        VTD_DPRINTF(CSR, "DMAR_RTADDR_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        VTD_DPRINTF(INV, "DMAR_IQT_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IQT_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* 19:63 of IQT_REG is RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        VTD_DPRINTF(INV, "DMAR_IQA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IQA_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IQA_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        VTD_DPRINTF(INV, "DMAR_ICS_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        VTD_DPRINTF(INV, "DMAR_IECTL_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        VTD_DPRINTF(INV, "DMAR_IEDATA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        VTD_DPRINTF(INV, "DMAR_IEADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        VTD_DPRINTF(INV, "DMAR_IEUADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_0 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_1 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_2 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_3 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        VTD_DPRINTF(IR, "DMAR_IRTA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        VTD_DPRINTF(IR, "DMAR_IRTA_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: unhandled reg write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}

static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
                                         bool is_write)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if (!s->dmar_enabled) {
        /* DMAR disabled, passthrough, use 4k-page */
        ret.iova = addr & VTD_PAGE_MASK_4K;
        ret.translated_addr = addr & VTD_PAGE_MASK_4K;
        ret.addr_mask = ~VTD_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    }

    vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, addr,
                           is_write, &ret);
    VTD_DPRINTF(MMU,
                "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
                " iova 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
                VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
                vtd_as->devfn, addr, ret.translated_addr);
    return ret;
}

static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IntelIOMMUNotifierNode *node = NULL;
    IntelIOMMUNotifierNode *next_node = NULL;

    if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
        error_report("We need to set cache_mode=1 for intel-iommu to enable "
                     "device assignment with IOMMU protection.");
        exit(1);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        node = g_malloc0(sizeof(*node));
        node->vtd_as = vtd_as;
        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
        return;
    }

    /* update notifier node with new flags */
    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
        if (node->vtd_as == vtd_as) {
            if (new == IOMMU_NOTIFIER_NONE) {
                QLIST_REMOVE(node, next);
                g_free(node);
            }
            return;
        }
    }
}

static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_BOOL(root_extended, IntelIOMMUState),
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};

static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
    DEFINE_PROP_END_OF_LIST(),
};

/* Read IRTE entry with specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                        VTD_IR_TableEntry *entry, uint16_t sid)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
        {0xffff, 0xfffb, 0xfff9, 0xfff8};
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr, entry,
                        sizeof(*entry))) {
        VTD_DPRINTF(GENERAL, "error: fail to access IR root at 0x%"PRIx64
                    " + %"PRIu16, iommu->intr_root, index);
        return -VTD_FR_IR_ROOT_INVAL;
    }

    if (!entry->irte.present) {
        VTD_DPRINTF(GENERAL, "error: present flag not set in IRTE"
                    " entry index %u value 0x%"PRIx64 " 0x%"PRIx64,
                    index, le64_to_cpu(entry->data[1]),
                    le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_ENTRY_P;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        VTD_DPRINTF(GENERAL, "error: IRTE entry index %"PRIu16
                    " reserved fields non-zero: 0x%"PRIx64 " 0x%"PRIx64,
                    index, le64_to_cpu(entry->data[1]),
                    le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_IRTE_RSVD;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = le32_to_cpu(entry->irte.source_id);
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            VTD_DPRINTF(IR, "No SID validation for IRTE index %d", index);
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                VTD_DPRINTF(GENERAL, "SID validation for IRTE index "
                            "%d failed (reqid 0x%04x sid 0x%04x)", index,
                            sid, source_id);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                VTD_DPRINTF(GENERAL, "SID validation for IRTE index %d "
                            "failed (bus %d outside %d-%d)", index, bus,
                            bus_min, bus_max);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        default:
            VTD_DPRINTF(GENERAL, "Invalid SVT bits (0x%x) in IRTE index "
                        "%d", entry->irte.sid_vtype, index);
            /* Take this as verification failure. */
            return -VTD_FR_IR_SID_ERR;
            break;
        }
    }

    return 0;
}

/* Fetch IRQ information of specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                             VTDIrq *irq, uint16_t sid)
{
    VTD_IR_TableEntry irte = {};
    int ret = 0;

    ret = vtd_irte_get(iommu, index, &irte, sid);
    if (ret) {
        return ret;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = le32_to_cpu(irte.irte.dest_id);
    if (!iommu->intr_eime) {
#define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
#define  VTD_IR_APIC_DEST_SHIFT        (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
            VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    VTD_DPRINTF(IR, "remapping interrupt index %d: trig:%u,vec:%u,"
                "deliver:%u,dest:%u,dest_mode:%u", index,
                irq->trigger_mode, irq->vector, irq->delivery_mode,
                irq->dest, irq->dest_mode);

    return 0;
}

/* Generate one MSI message from VTDIrq info */
static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
{
    VTD_MSIMessage msg = {};

    /* Generate address bits */
    msg.dest_mode = irq->dest_mode;
    msg.redir_hint = irq->redir_hint;
    msg.dest = irq->dest;
    msg.__addr_hi = irq->dest & 0xffffff00;
    msg.__addr_head = cpu_to_le32(0xfee);
    /* Keep this from original MSI address bits */
    msg.__not_used = irq->msi_addr_last_bits;

    /* Generate data bits */
    msg.vector = irq->vector;
    msg.delivery_mode = irq->delivery_mode;
    msg.level = 1;
    msg.trigger_mode = irq->trigger_mode;

    msg_out->address = msg.msi_addr;
    msg_out->data = msg.msi_data;
}

/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid)
{
    int ret = 0;
    VTD_IR_MSIAddress addr;
    uint16_t index;
    VTDIrq irq = {};

    assert(origin && translated);

    if (!iommu || !iommu->intr_enabled) {
        goto do_not_translate;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        VTD_DPRINTF(GENERAL, "error: MSI addr high 32 bits nonzero"
                    " during interrupt remapping: 0x%"PRIx32,
                    (uint32_t)((origin->address & VTD_MSI_ADDR_HI_MASK) >> \
                    VTD_MSI_ADDR_HI_SHIFT));
        return -VTD_FR_IR_REQ_RSVD;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        VTD_DPRINTF(GENERAL, "error: MSI addr low 32 bits invalid: "
                    "0x%"PRIx32, addr.data);
        return -VTD_FR_IR_REQ_RSVD;
    }

    /* This is compatible mode. */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        goto do_not_translate;
    }

    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);

#define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
#define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    ret = vtd_remap_irq_get(iommu, index, &irq, sid);
    if (ret) {
        return ret;
    }

    if (addr.addr.sub_valid) {
        VTD_DPRINTF(IR, "received MSI interrupt");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            VTD_DPRINTF(GENERAL, "error: MSI data bits non-zero for "
                        "interrupt remappable entry: 0x%"PRIx32,
                        origin->data);
            return -VTD_FR_IR_REQ_RSVD;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        VTD_DPRINTF(IR, "received IOAPIC interrupt");
        /* IOAPIC entry vector should be aligned with IRTE vector
         * (see vt-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            VTD_DPRINTF(GENERAL, "IOAPIC vector inconsistent: "
                        "entry: %d, IRTE: %d, index: %d",
                        vector, irq.vector, index);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE.
         * (see vt-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            VTD_DPRINTF(GENERAL, "IOAPIC trigger mode inconsistent: "
                        "entry: %u, IRTE: %u, index: %d",
                        trigger_mode, irq.trigger_mode, index);
        }
    }

    /*
     * We'd better keep the last two bits, assuming that the guest OS
     * might modify them. Keeping them does not hurt after all.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate VTDIrq to MSI message */
    vtd_generate_msi_message(&irq, translated);

    VTD_DPRINTF(IR, "mapping MSI 0x%"PRIx64":0x%"PRIx32 " -> "
                "0x%"PRIx64":0x%"PRIx32, origin->address, origin->data,
                translated->address, translated->data);
    return 0;

do_not_translate:
    memcpy(translated, origin, sizeof(*origin));
    return 0;
}

static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid);
}
2016-07-14 08:56:22 +03:00
|
|
|
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
|
|
|
|
uint64_t *data, unsigned size,
|
|
|
|
MemTxAttrs attrs)
|
|
|
|
{
|
|
|
|
return MEMTX_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
|
|
|
|
uint64_t value, unsigned size,
|
|
|
|
MemTxAttrs attrs)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
2016-07-21 18:42:42 +03:00
|
|
|
MSIMessage from = {}, to = {};
|
2016-07-14 08:56:29 +03:00
|
|
|
uint16_t sid = X86_IOMMU_SID_INVALID;
|
2016-07-14 08:56:22 +03:00
|
|
|
|
|
|
|
from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
|
|
|
|
from.data = (uint32_t) value;
|
|
|
|
|
2016-07-14 08:56:29 +03:00
|
|
|
if (!attrs.unspecified) {
|
|
|
|
/* We have explicit Source ID */
|
|
|
|
sid = attrs.requester_id;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
|
2016-07-14 08:56:22 +03:00
|
|
|
if (ret) {
|
|
|
|
/* TODO: report error */
|
|
|
|
VTD_DPRINTF(GENERAL, "int remap fail for addr 0x%"PRIx64
|
|
|
|
" data 0x%"PRIx32, from.address, from.data);
|
|
|
|
/* Drop this interrupt */
|
|
|
|
return MEMTX_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
VTD_DPRINTF(IR, "delivering MSI 0x%"PRIx64":0x%"PRIx32
|
|
|
|
" for device sid 0x%04x",
|
|
|
|
to.address, to.data, sid);
|
|
|
|
|
2016-10-10 18:28:44 +03:00
|
|
|
apic_get_class()->send_msi(&to);
|
2016-07-14 08:56:22 +03:00
|
|
|
|
|
|
|
return MEMTX_OK;
|
|
|
|
}

static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
{
    uintptr_t key = (uintptr_t)bus;
    VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
    VTDAddressSpace *vtd_dev_as;
    char name[128];

    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));
        *new_key = (uintptr_t)bus;
        /* No corresponding free() */
        vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
                            X86_IOMMU_PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];

    if (!vtd_dev_as) {
        snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
        vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;

        /*
         * Memory region relationships look like this (address ranges
         * show only the lower 32 bits to keep them short):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------+
         * | vtd_root        | 00000000-ffffffff |        0 |
         * | intel_iommu     | 00000000-ffffffff |        1 |
         * | vtd_sys_alias   | 00000000-ffffffff |        1 |
         * | intel_iommu_ir  | fee00000-feefffff |       64 |
         * |-----------------+-------------------+----------|
         *
         * We enable/disable DMAR by switching enablement for
         * vtd_sys_alias and intel_iommu regions. IR region is always
         * enabled.
         */
        memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
                                 &s->iommu_ops, "intel_iommu_dmar",
                                 UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
                                 "vtd_sys_alias", get_system_memory(),
                                 0, memory_region_size(get_system_memory()));
        memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
                              &vtd_mem_ir_ops, s, "intel_iommu_ir",
                              VTD_INTERRUPT_ADDR_SIZE);
        memory_region_init(&vtd_dev_as->root, OBJECT(s),
                           "vtd_root", UINT64_MAX);
        memory_region_add_subregion_overlap(&vtd_dev_as->root,
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 64);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->sys_alias, 1);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->iommu, 1);
        vtd_switch_address_space(vtd_dev_as);
    }
    return vtd_dev_as;
}
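
/*
 * Typical caller (a sketch; the actual PCI IOMMU hook lives elsewhere in
 * this file): the PCI layer's address-space callback is expected to
 * resolve a device's DMA address space lazily through this function,
 * roughly:
 *
 *     VTDAddressSpace *as = vtd_find_add_as(s, bus, devfn);
 *     return &as->as;
 *
 * so a VTDAddressSpace and its memory regions are only allocated the
 * first time a given bus/devfn is seen.
 */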

/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    IOMMUTLBEntry entry;
    hwaddr size;
    hwaddr start = n->start;
    hwaddr end = n->end;

    /*
     * Note: all the code in this function assumes that IOVA bits are
     * no more than VTD_MGAW bits (which is restricted by the VT-d
     * spec), otherwise we would need to consider overflow of 64 bits.
     */

    if (end > VTD_ADDRESS_SIZE) {
        /*
         * Don't need to unmap regions that are bigger than the whole
         * VT-d supported address space size
         */
        end = VTD_ADDRESS_SIZE;
    }

    assert(start <= end);
    size = end - start;

    if (ctpop64(size) != 1) {
        /*
         * This size cannot form a correct mask. Enlarge it to the
         * next power of two so that it can.
         */
        int n = 64 - clz64(size);
        if (n > VTD_MGAW) {
            /* should not happen, but in case it happens, limit it */
            n = VTD_MGAW;
        }
        size = 1ULL << n;
    }

    entry.target_as = &address_space_memory;
    /* Adjust iova for the size */
    entry.iova = n->start & ~(size - 1);
    /* This field is meaningless for unmap */
    entry.translated_addr = 0;
    entry.perm = IOMMU_NONE;
    entry.addr_mask = size - 1;

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             entry.iova, size);

    memory_region_notify_one(n, &entry);
}

static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}

static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
{
    memory_region_notify_one((IOMMUNotifier *)private, entry);
    return 0;
}

static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;

    /*
     * The replay can be triggered by either an invalidation or a newly
     * created entry. No matter what, we release existing mappings first
     * (which means flushing caches for UNMAP-only notifiers).
     */
    vtd_address_space_unmap(vtd_as, n);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
                                  ce.hi, ce.lo);
        vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }

    return;
}
2014-08-16 09:55:38 +04:00
|
|
|
/* Do the initialization. It will also be called when reset, so pay
|
|
|
|
* attention when adding new initialization stuff.
|
|
|
|
*/
|
|
|
|
static void vtd_init(IntelIOMMUState *s)
|
|
|
|
{
|
2016-07-14 08:56:16 +03:00
|
|
|
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
|
|
|
|
|
2014-08-16 09:55:38 +04:00
|
|
|
memset(s->csr, 0, DMAR_REG_SIZE);
|
|
|
|
memset(s->wmask, 0, DMAR_REG_SIZE);
|
|
|
|
memset(s->w1cmask, 0, DMAR_REG_SIZE);
|
|
|
|
memset(s->womask, 0, DMAR_REG_SIZE);
|
|
|
|
|
|
|
|
s->iommu_ops.translate = vtd_iommu_translate;
|
2016-09-23 08:02:27 +03:00
|
|
|
s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
|
2017-04-07 13:59:13 +03:00
|
|
|
s->iommu_ops.replay = vtd_iommu_replay;
|
2014-08-16 09:55:38 +04:00
|
|
|
s->root = 0;
|
|
|
|
s->root_extended = false;
|
|
|
|
s->dmar_enabled = false;
|
|
|
|
s->iq_head = 0;
|
|
|
|
s->iq_tail = 0;
|
|
|
|
s->iq = 0;
|
|
|
|
s->iq_size = 0;
|
|
|
|
s->qi_enabled = false;
|
|
|
|
s->iq_last_desc_type = VTD_INV_DESC_NONE;
|
|
|
|
s->next_frcd_reg = 0;
|
|
|
|
s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
|
2016-01-14 08:47:24 +03:00
|
|
|
VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
|
2014-08-16 09:55:42 +04:00
|
|
|
s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
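
    /*
     * Guest-side sketch (hypothetical pseudo-code, not QEMU API): these
     * values surface through DMAR_CAP_REG/DMAR_ECAP_REG below, and a
     * driver gates features on them, e.g. it enables queued
     * invalidation only when the QI bit is advertised:
     *
     *     if (readq(iommu_base + DMAR_ECAP_REG) & VTD_ECAP_QI) {
     *         ... program IQA/IQT, then set GCMD.QIE ...
     *     }
     */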

    if (x86_iommu->intr_supported) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }
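
    /*
     * Usage note (option name as defined by vtd_properties): caching
     * mode is enabled with "-device intel-iommu,caching-mode=on". With
     * CAP_CM set the guest must invalidate on every mapping change,
     * which is what lets the emulation observe those changes and feed
     * the notifiers above.
     */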

    vtd_reset_context_cache(s);
    vtd_reset_iotlb(s);

    /* Define registers with default values and bit semantics */
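    /*
     * Reading aid (inferred from the vtd_define_* helpers earlier in
     * this file): the arguments are (state, offset, reset value,
     * writable mask, write-1-to-clear mask). So DMAR_GSTS_REG below is
     * read-only to the guest, while bit 0 of DMAR_ICS_REG is cleared by
     * writing 1 to it.
     */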
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations that report the PLMR and PHMR
     * fields as Clear in the CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}

/* Should not reset address_spaces on reset, because devices will still use
 * the address space they got at first (they won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    VTD_DPRINTF(GENERAL, "");
    vtd_init(s);

    /*
     * When the device is reset, throw away all mappings and external
     * caches.
     */
    vtd_address_space_unmap_all(s);
}

static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn);
    return &vtd_as->as;
}
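
/*
 * Wiring sketch (see vtd_realize below): pci_setup_iommu() installs
 * this hook, so each BDF gets its own VTDAddressSpace and ordinary DMA
 * helpers route through it, e.g.
 *
 *     AddressSpace *as = pci_device_iommu_address_space(pci_dev);
 *     dma_memory_read(as, addr, buf, len);
 */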

static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    /* Currently Intel IOMMU IR only supports "kernel-irqchip={off|split}" */
    if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        error_setg(errp, "Intel Interrupt Remapping cannot work with "
                         "kernel-irqchip=on, please use 'split|off'.");
        return false;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
                      && x86_iommu->intr_supported ?
                                              ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (!kvm_irqchip_in_kernel()) {
            error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
            return false;
        }
        if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    return true;
}
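
/*
 * Decision summary (derived from the checks above): with intremap=on,
 * eim=auto resolves to on only under a split/in-kernel irqchip, and
 * eim=on additionally requires KVM's X2APIC_API capability. E.g.
 *
 *     -machine q35,accel=kvm,kernel-irqchip=split \
 *         -device intel-iommu,intremap=on
 *
 * yields eim=on on a recent KVM, while a TCG guest falls back to
 * eim=off.
 */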

static void vtd_realize(DeviceState *dev, Error **errp)
{
    PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
    PCIBus *bus = pcms->bus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);

    VTD_DPRINTF(GENERAL, "");
    x86_iommu->type = TYPE_INTEL;

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->notifiers_list);
    memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash,
                                                vtd_uint64_equal,
                                                g_free, g_free);
    vtd_init(s);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
    pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under root PCI bus. */
    pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
}
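
/*
 * Typical invocation (Q35 is assumed, matching the fixed
 * Q35_HOST_BRIDGE_IOMMU_ADDR mapping above):
 *
 *     qemu-system-x86_64 -machine q35,accel=kvm,kernel-irqchip=split \
 *         -device intel-iommu,intremap=on
 */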

static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);

    dc->reset = vtd_reset;
    dc->vmsd = &vtd_vmstate;
    dc->props = vtd_properties;
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
}

static const TypeInfo vtd_info = {
    .name          = TYPE_INTEL_IOMMU_DEVICE,
    .parent        = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init    = vtd_class_init,
};

static void vtd_register_types(void)
{
    VTD_DPRINTF(GENERAL, "");
    type_register_static(&vtd_info);
}

type_init(vtd_register_types)