memory: Add reporting of supported page sizes
Every IOMMU has some granularity which MemoryRegionIOMMUOps::translate uses
when translating, but this information is not available outside the translate
context for various checks. This adds a get_min_page_size callback to
MemoryRegionIOMMUOps and a wrapper for it so IOMMU users (such as VFIO) can
know the minimum actual page size supported by an IOMMU. As an IOMMU MR
represents a guest IOMMU, this uses TARGET_PAGE_SIZE as the fallback.

This removes vfio_container_granularity() and uses the new helper in
memory_region_iommu_replay() when replaying IOMMU mappings on an added IOMMU
memory region.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
[dwg: Removed an unnecessary calculation]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
commit f682e9c244
parent f0278900d3
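For reference, a minimal usage sketch (not part of this patch) of how an IOMMU user such as VFIO might call the new wrapper: query the guest IOMMU's minimum page size, then register a notifier and replay existing mappings with the new two-argument-plus-flag signature. The helper name attach_iommu_notifier() and the log line are illustrative assumptions; only the memory API calls come from this commit.

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "exec/memory.h"

/*
 * Hypothetical helper, for illustration only: hook a notifier up to a
 * guest IOMMU memory region.  memory_region_iommu_replay() now derives
 * the replay granularity itself via memory_region_iommu_get_min_page_size(),
 * so callers no longer pass a granularity argument.
 */
static void attach_iommu_notifier(MemoryRegion *iommu_mr, Notifier *n)
{
    uint64_t pgsize = memory_region_iommu_get_min_page_size(iommu_mr);

    /* Falls back to TARGET_PAGE_SIZE when the IOMMU does not report one. */
    qemu_log("guest IOMMU min page size: 0x%" PRIx64 "\n", pgsize);

    memory_region_register_iommu_notifier(iommu_mr, n);
    memory_region_iommu_replay(iommu_mr, n, false);
}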
hw/ppc/spapr_iommu.c
@@ -149,6 +149,13 @@ static void spapr_tce_table_pre_save(void *opaque)
                                tcet->bus_offset, tcet->page_shift);
 }
 
+static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu)
+{
+    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
+
+    return 1ULL << tcet->page_shift;
+}
+
 static int spapr_tce_table_post_load(void *opaque, int version_id)
 {
     sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
@@ -228,6 +235,7 @@ static const VMStateDescription vmstate_spapr_tce_table = {
 
 static MemoryRegionIOMMUOps spapr_iommu_ops = {
     .translate = spapr_tce_translate_iommu,
+    .get_min_page_size = spapr_tce_get_min_page_size,
 };
 
 static int spapr_tce_table_realize(DeviceState *dev)
hw/vfio/common.c
@@ -321,11 +321,6 @@ out:
     rcu_read_unlock();
 }
 
-static hwaddr vfio_container_granularity(VFIOContainer *container)
-{
-    return (hwaddr)1 << ctz64(container->iova_pgsizes);
-}
-
 static void vfio_listener_region_add(MemoryListener *listener,
                                      MemoryRegionSection *section)
 {
@@ -392,9 +387,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
 
         memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
-        memory_region_iommu_replay(giommu->iommu, &giommu->n,
-                                   vfio_container_granularity(container),
-                                   false);
+        memory_region_iommu_replay(giommu->iommu, &giommu->n, false);
 
         return;
     }
include/exec/memory.h
@@ -151,6 +151,8 @@ typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
 struct MemoryRegionIOMMUOps {
     /* Return a TLB entry that contains a given address. */
     IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
+    /* Returns minimum supported page size */
+    uint64_t (*get_min_page_size)(MemoryRegion *iommu);
 };
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
@@ -572,6 +574,16 @@ static inline bool memory_region_is_iommu(MemoryRegion *mr)
 }
 
 
+/**
+ * memory_region_iommu_get_min_page_size: get minimum supported page size
+ * for an iommu
+ *
+ * Returns minimum supported page size for an iommu.
+ *
+ * @mr: the memory region being queried
+ */
+uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr);
+
 /**
  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
  *
@@ -596,16 +608,15 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
 
 /**
  * memory_region_iommu_replay: replay existing IOMMU translations to
- * a notifier
+ * a notifier with the minimum page granularity returned by
+ * mr->iommu_ops->get_page_size().
  *
  * @mr: the memory region to observe
  * @n: the notifier to which to replay iommu mappings
- * @granularity: Minimum page granularity to replay notifications for
  * @is_write: Whether to treat the replay as a translate "write"
  *     through the iommu
  */
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
-                                hwaddr granularity, bool is_write);
+void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
 
 /**
  * memory_region_unregister_iommu_notifier: unregister a notifier for
memory.c
@@ -1502,12 +1502,22 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
     notifier_list_add(&mr->iommu_notify, n);
 }
 
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
-                                hwaddr granularity, bool is_write)
+uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
 {
-    hwaddr addr;
+    assert(memory_region_is_iommu(mr));
+    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
+        return mr->iommu_ops->get_min_page_size(mr);
+    }
+    return TARGET_PAGE_SIZE;
+}
+
+void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
+{
+    hwaddr addr, granularity;
     IOMMUTLBEntry iotlb;
 
+    granularity = memory_region_iommu_get_min_page_size(mr);
+
     for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
         iotlb = mr->iommu_ops->translate(mr, addr, is_write);
         if (iotlb.perm != IOMMU_NONE) {