intel_iommu: Fix unexpected unmaps during global unmap

This is a replacement for Yan Zhao's patch:

https://www.mail-archive.com/qemu-devel@nongnu.org/msg625340.html

vtd_address_space_unmap() will do proper page mask alignment to make
sure each IOTLB message will have correct masks for notification
messages (2^N-1), but sometimes it can be expanded to even supersede
the registered range.  That could lead to unexpected UNMAP of already
mapped regions in some other notifiers.

Instead of doing mindless expansion of the start address and address
mask, we split the range into smaller ones and guarantee that each
small range will have correct masks (2^N-1) and at the same time we
should also try our best to generate as few IOTLB messages as
possible.

Reported-by: Yan Zhao <yan.y.zhao@intel.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
Message-Id: <20190624091811.30412-3-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Peter Xu 2019-06-24 17:18:11 +08:00 committed by Paolo Bonzini
parent d6d10793dc
commit 9a4bb8391f

View File

@ -3363,11 +3363,28 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
     return vtd_dev_as;
 }
/*
 * Compute the size of the largest naturally-aligned chunk that starts
 * at @start, fits within @size bytes, and never exceeds the 2^@gaw
 * address-width limit.  The result is always a power of two, so
 * (result - 1) forms a valid IOTLB address mask.
 */
static uint64_t get_naturally_aligned_size(uint64_t start,
                                           uint64_t size, int gaw)
{
    uint64_t limit = 1ULL << gaw;
    /*
     * (start & -start) isolates the lowest set bit of @start, i.e. the
     * largest alignment @start itself permits; start == 0 permits any.
     */
    uint64_t align = start ? (start & -start) : limit;

    align = MIN(align, limit);
    size = MIN(size, limit);

    if (align <= size) {
        /* The alignment of @start is the binding constraint. */
        return align;
    }

    /* Otherwise take the largest power of two not exceeding @size. */
    return 1ULL << (63 - clz64(size));
}
 /* Unmap the whole range in the notifier's scope. */
 static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
 {
-    IOMMUTLBEntry entry;
-    hwaddr size;
+    hwaddr size, remain;
     hwaddr start = n->start;
     hwaddr end = n->end;
     IntelIOMMUState *s = as->iommu_state;
@ -3388,39 +3405,37 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
     }

     assert(start <= end);
-    size = end - start;
+    size = remain = end - start + 1;

-    if (ctpop64(size) != 1) {
-        /*
-         * This size cannot format a correct mask. Let's enlarge it to
-         * suite the minimum available mask.
-         */
-        int n = 64 - clz64(size);
-        if (n > s->aw_bits) {
-            /* should not happen, but in case it happens, limit it */
-            n = s->aw_bits;
-        }
-        size = 1ULL << n;
-    }
+    while (remain >= VTD_PAGE_SIZE) {
+        IOMMUTLBEntry entry;
+        uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits);
+
+        assert(mask);
+
+        entry.iova = start;
+        entry.addr_mask = mask - 1;
+        entry.target_as = &address_space_memory;
+        entry.perm = IOMMU_NONE;
+        /* This field is meaningless for unmap */
+        entry.translated_addr = 0;
+
+        memory_region_notify_one(n, &entry);
+
+        start += mask;
+        remain -= mask;
+    }

-    entry.target_as = &address_space_memory;
-    /* Adjust iova for the size */
-    entry.iova = n->start & ~(size - 1);
-    /* This field is meaningless for unmap */
-    entry.translated_addr = 0;
-    entry.perm = IOMMU_NONE;
-    entry.addr_mask = size - 1;
+    assert(!remain);

     trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                              VTD_PCI_SLOT(as->devfn),
                              VTD_PCI_FUNC(as->devfn),
-                             entry.iova, size);
+                             n->start, size);

-    map.iova = entry.iova;
-    map.size = entry.addr_mask;
+    map.iova = n->start;
+    map.size = size;

     iova_tree_remove(as->iova_tree, &map);
-
-    memory_region_notify_one(n, &entry);
 }
 static void vtd_address_space_unmap_all(IntelIOMMUState *s)