intel_iommu: Fix unexpected unmaps during global unmap
This is a replacement for Yan Zhao's patch: https://www.mail-archive.com/qemu-devel@nongnu.org/msg625340.html vtd_address_space_unmap() will do proper page mask alignment to make sure each IOTLB message will have correct masks for notification messages (2^N-1), but sometimes it can be expanded to even supersede the registered range. That could lead to unexpected UNMAP of already mapped regions in some other notifiers. Instead of doing a mindless expansion of the start address and address mask, we split the range into smaller ones and guarantee that each small range will have correct masks (2^N-1), and at the same time we should also try our best to generate as few IOTLB messages as possible. Reported-by: Yan Zhao <yan.y.zhao@intel.com> Signed-off-by: Peter Xu <peterx@redhat.com> Reviewed-by: Eric Auger <eric.auger@redhat.com> Tested-by: Yan Zhao <yan.y.zhao@intel.com> Message-Id: <20190624091811.30412-3-peterx@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
d6d10793dc
commit
9a4bb8391f
@ -3363,11 +3363,28 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
|
||||
return vtd_dev_as;
|
||||
}
|
||||
|
||||
/*
 * Return the size of the largest naturally-aligned, power-of-two chunk
 * that starts at @start and does not exceed @size, capped by the IOMMU
 * address width @gaw.  Used to split an arbitrary [start, start+size)
 * range into pieces whose (size - 1) is a valid IOTLB address mask.
 */
static uint64_t get_naturally_aligned_size(uint64_t start,
                                           uint64_t size, int gaw)
{
    /* Largest chunk the guest address width permits. */
    uint64_t max_mask = 1ULL << gaw;
    /*
     * Lowest set bit of @start is its natural alignment; a start of 0
     * is aligned to everything, so use the address-width cap instead.
     */
    uint64_t alignment = start ? start & -start : max_mask;

    if (alignment > max_mask) {
        alignment = max_mask;
    }
    if (size > max_mask) {
        size = max_mask;
    }

    if (alignment <= size) {
        /* The alignment of @start is the limiting factor. */
        return alignment;
    }

    /*
     * @size is the limiting factor: find the largest power of two that
     * does not exceed it.  size <= max_mask here, so start the scan at
     * max_mask and shift down.
     */
    uint64_t chunk = max_mask;
    while (chunk > size) {
        chunk >>= 1;
    }
    return chunk;
}
|
||||
|
||||
/* Unmap the whole range in the notifier's scope. */
|
||||
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
|
||||
{
|
||||
IOMMUTLBEntry entry;
|
||||
hwaddr size;
|
||||
hwaddr size, remain;
|
||||
hwaddr start = n->start;
|
||||
hwaddr end = n->end;
|
||||
IntelIOMMUState *s = as->iommu_state;
|
||||
@ -3388,39 +3405,37 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
|
||||
}
|
||||
|
||||
assert(start <= end);
|
||||
size = end - start;
|
||||
size = remain = end - start + 1;
|
||||
|
||||
if (ctpop64(size) != 1) {
|
||||
/*
|
||||
* This size cannot format a correct mask. Let's enlarge it to
|
||||
* suite the minimum available mask.
|
||||
*/
|
||||
int n = 64 - clz64(size);
|
||||
if (n > s->aw_bits) {
|
||||
/* should not happen, but in case it happens, limit it */
|
||||
n = s->aw_bits;
|
||||
}
|
||||
size = 1ULL << n;
|
||||
}
|
||||
while (remain >= VTD_PAGE_SIZE) {
|
||||
IOMMUTLBEntry entry;
|
||||
uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits);
|
||||
|
||||
assert(mask);
|
||||
|
||||
entry.iova = start;
|
||||
entry.addr_mask = mask - 1;
|
||||
entry.target_as = &address_space_memory;
|
||||
/* Adjust iova for the size */
|
||||
entry.iova = n->start & ~(size - 1);
|
||||
entry.perm = IOMMU_NONE;
|
||||
/* This field is meaningless for unmap */
|
||||
entry.translated_addr = 0;
|
||||
entry.perm = IOMMU_NONE;
|
||||
entry.addr_mask = size - 1;
|
||||
|
||||
memory_region_notify_one(n, &entry);
|
||||
|
||||
start += mask;
|
||||
remain -= mask;
|
||||
}
|
||||
|
||||
assert(!remain);
|
||||
|
||||
trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
|
||||
VTD_PCI_SLOT(as->devfn),
|
||||
VTD_PCI_FUNC(as->devfn),
|
||||
entry.iova, size);
|
||||
n->start, size);
|
||||
|
||||
map.iova = entry.iova;
|
||||
map.size = entry.addr_mask;
|
||||
map.iova = n->start;
|
||||
map.size = size;
|
||||
iova_tree_remove(as->iova_tree, &map);
|
||||
|
||||
memory_region_notify_one(n, &entry);
|
||||
}
|
||||
|
||||
static void vtd_address_space_unmap_all(IntelIOMMUState *s)
|
||||
|
Loading…
Reference in New Issue
Block a user