vfio: move the implementation of vfio_get_xlat_addr() to memory.c
- Move the implementation of vfio_get_xlat_addr() to softmmu/memory.c and rename it to memory_get_xlat_addr(), so the function can be reused by other devices, such as vDPA devices.
- Add a new vfio_get_xlat_addr() in vfio/common.c that calls the shared helper and additionally checks whether the memory is backed by a RAM discard manager, so the device can emit its own warning.

Signed-off-by: Cindy Lu <lulu@redhat.com>
Message-Id: <20221031031020.1405111-2-lulu@redhat.com>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent be3afe8151
commit baa44bce87
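Since the commit message motivates the move by letting non-vfio devices (such as vDPA backends) reuse the translation helper, here is a minimal, hedged sketch of how such a consumer might call the relocated memory_get_xlat_addr(). The notifier and the dev_dma_map() call are hypothetical placeholders for illustration, not code from this commit:

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "exec/memory.h"   /* memory_get_xlat_addr() declaration added by this commit */

/* Placeholder for the device's own DMA mapping primitive (hypothetical). */
static void dev_dma_map(hwaddr iova, hwaddr size, void *vaddr, bool read_only);

/* Hypothetical IOMMU map notifier for a non-vfio backend (illustrative only). */
static void example_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    void *vaddr;
    bool read_only;

    /* memory_get_xlat_addr() is documented as called with the RCU read lock held. */
    RCU_READ_LOCK_GUARD();

    if ((iotlb->perm & IOMMU_RW) == IOMMU_NONE) {
        return; /* unmap events would be handled separately */
    }

    /* ram_addr and mr_has_discard_manager are optional; pass NULL when unused. */
    if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
        return;
    }

    dev_dma_map(iotlb->iova, iotlb->addr_mask + 1, vaddr, read_only);
}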
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -578,45 +578,11 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
 static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                                ram_addr_t *ram_addr, bool *read_only)
 {
-    MemoryRegion *mr;
-    hwaddr xlat;
-    hwaddr len = iotlb->addr_mask + 1;
-    bool writable = iotlb->perm & IOMMU_WO;
-
-    /*
-     * The IOMMU TLB entry we have just covers translation through
-     * this IOMMU to its immediate target. We need to translate
-     * it the rest of the way through to memory.
-     */
-    mr = address_space_translate(&address_space_memory,
-                                 iotlb->translated_addr,
-                                 &xlat, &len, writable,
-                                 MEMTXATTRS_UNSPECIFIED);
-    if (!memory_region_is_ram(mr)) {
-        error_report("iommu map to non memory area %"HWADDR_PRIx"",
-                     xlat);
-        return false;
-    } else if (memory_region_has_ram_discard_manager(mr)) {
-        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
-        MemoryRegionSection tmp = {
-            .mr = mr,
-            .offset_within_region = xlat,
-            .size = int128_make64(len),
-        };
-
-        /*
-         * Malicious VMs can map memory into the IOMMU, which is expected
-         * to remain discarded. vfio will pin all pages, populating memory.
-         * Disallow that. vmstate priorities make sure any RamDiscardManager
-         * were already restored before IOMMUs are restored.
-         */
-        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
-            error_report("iommu map to discarded memory (e.g., unplugged via"
-                         " virtio-mem): %"HWADDR_PRIx"",
-                         iotlb->translated_addr);
-            return false;
-        }
+    bool ret, mr_has_discard_manager;
 
+    ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
+                               &mr_has_discard_manager);
+    if (ret && mr_has_discard_manager) {
         /*
          * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
          * pages will remain pinned inside vfio until unmapped, resulting in a
@@ -635,29 +601,7 @@ static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          " intended via an IOMMU. It's possible to mitigate "
                          " by setting/adjusting RLIMIT_MEMLOCK.");
     }
-
-    /*
-     * Translation truncates length to the IOMMU page size,
-     * check that it did not truncate too much.
-     */
-    if (len & iotlb->addr_mask) {
-        error_report("iommu has granularity incompatible with target AS");
-        return false;
-    }
-
-    if (vaddr) {
-        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
-    }
-
-    if (ram_addr) {
-        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
-    }
-
-    if (read_only) {
-        *read_only = !writable || mr->readonly;
-    }
-
-    return true;
+    return ret;
 }
 
 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -713,6 +713,10 @@ void ram_discard_manager_register_listener(RamDiscardManager *rdm,
 void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                              RamDiscardListener *rdl);
 
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+                          ram_addr_t *ram_addr, bool *read_only,
+                          bool *mr_has_discard_manager);
+
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
 
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -33,6 +33,7 @@
 #include "qemu/accel.h"
 #include "hw/boards.h"
 #include "migration/vmstate.h"
+#include "exec/address-spaces.h"
 
 //#define DEBUG_UNASSIGNED
 
@@ -2121,6 +2122,77 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
     rdmc->unregister_listener(rdm, rdl);
 }
 
+/* Called with rcu_read_lock held. */
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+                          ram_addr_t *ram_addr, bool *read_only,
+                          bool *mr_has_discard_manager)
+{
+    MemoryRegion *mr;
+    hwaddr xlat;
+    hwaddr len = iotlb->addr_mask + 1;
+    bool writable = iotlb->perm & IOMMU_WO;
+
+    if (mr_has_discard_manager) {
+        *mr_has_discard_manager = false;
+    }
+    /*
+     * The IOMMU TLB entry we have just covers translation through
+     * this IOMMU to its immediate target. We need to translate
+     * it the rest of the way through to memory.
+     */
+    mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
+                                 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
+    if (!memory_region_is_ram(mr)) {
+        error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat);
+        return false;
+    } else if (memory_region_has_ram_discard_manager(mr)) {
+        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
+        MemoryRegionSection tmp = {
+            .mr = mr,
+            .offset_within_region = xlat,
+            .size = int128_make64(len),
+        };
+        if (mr_has_discard_manager) {
+            *mr_has_discard_manager = true;
+        }
+        /*
+         * Malicious VMs can map memory into the IOMMU, which is expected
+         * to remain discarded. vfio will pin all pages, populating memory.
+         * Disallow that. vmstate priorities make sure any RamDiscardManager
+         * were already restored before IOMMUs are restored.
+         */
+        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
+            error_report("iommu map to discarded memory (e.g., unplugged via"
+                         " virtio-mem): %" HWADDR_PRIx "",
+                         iotlb->translated_addr);
+            return false;
+        }
+    }
+
+    /*
+     * Translation truncates length to the IOMMU page size,
+     * check that it did not truncate too much.
+     */
+    if (len & iotlb->addr_mask) {
+        error_report("iommu has granularity incompatible with target AS");
+        return false;
+    }
+
+    if (vaddr) {
+        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
+    }
+
+    if (ram_addr) {
+        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
+    }
+
+    if (read_only) {
+        *read_only = !writable || mr->readonly;
+    }
+
+    return true;
+}
+
 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
 {
     uint8_t mask = 1 << client;
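The commit message notes that keeping the discard-manager check in the device wrapper lets each device emit its own warning. A hedged sketch of what that could look like for another backend follows; the function name and warning text are illustrative and not part of this commit:

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "exec/memory.h"

/* Illustrative wrapper mirroring vfio_get_xlat_addr() for a hypothetical backend. */
static bool example_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                                  bool *read_only)
{
    bool ret, mr_has_discard_manager;

    ret = memory_get_xlat_addr(iotlb, vaddr, NULL, read_only,
                               &mr_has_discard_manager);
    if (ret && mr_has_discard_manager) {
        /* Device-specific policy: warn once instead of failing the map. */
        warn_report_once("example backend: mapping memory that is subject to "
                         "coordinated discarding (e.g., virtio-mem)");
    }
    return ret;
}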