vfio: Add vfio_listener_log_sync to mark dirty pages
vfio_listener_log_sync gets the list of dirty pages from the container using the VFIO_IOMMU_GET_DIRTY_BITMAP ioctl and marks those pages dirty when all devices are stopped and saving state. Return early for the RAM block section of a mapped MMIO region.

Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
Reviewed-by: Neo Jia <cjia@nvidia.com>
[aw: fix error_report types, fix cpu_physical_memory_set_dirty_lebitmap() cast]
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
commit b6dd6504e3
parent e663f51683

 hw/vfio/common.c     | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++
 hw/vfio/trace-events |   1 +
 2 files changed, 117 insertions(+)
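For orientation before the diff: the new vfio_get_dirty_bitmap() helper below sizes its bitmap as one bit per TARGET_PAGE_SIZE page, rounded up to whole 64-bit words. Here is a minimal standalone sketch of that arithmetic, assuming a 4 KiB target page; the names PAGE_SHIFT, PAGE_SIZE and dirty_bitmap_bytes are illustrative only, not QEMU's.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                      /* assumed 4 KiB TARGET_PAGE_SIZE */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

/*
 * Bytes needed for a dirty bitmap covering range_size bytes of guest memory:
 * one bit per page, rounded up to a multiple of 64 bits, then converted to
 * bytes. This mirrors ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
 * BITS_PER_BYTE in the hunk below.
 */
static uint64_t dirty_bitmap_bytes(uint64_t range_size)
{
    uint64_t pages = (range_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    uint64_t bits  = (pages + 63) & ~UINT64_C(63);
    return bits / 8;
}

int main(void)
{
    /* A 1 GiB range -> 262144 pages -> 32768-byte bitmap. */
    printf("%" PRIu64 "\n", dirty_bitmap_bytes(UINT64_C(1) << 30));
    return 0;
}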
hw/vfio/common.c
@@ -29,6 +29,7 @@
 #include "hw/vfio/vfio.h"
 #include "exec/address-spaces.h"
 #include "exec/memory.h"
+#include "exec/ram_addr.h"
 #include "hw/hw.h"
 #include "qemu/error-report.h"
 #include "qemu/main-loop.h"
@@ -37,6 +38,7 @@
 #include "sysemu/reset.h"
 #include "trace.h"
 #include "qapi/error.h"
+#include "migration/migration.h"
 
 VFIOGroupList vfio_group_list =
     QLIST_HEAD_INITIALIZER(vfio_group_list);
@@ -286,6 +288,39 @@ const MemoryRegionOps vfio_region_ops = {
     },
 };
 
+/*
+ * Device state interfaces
+ */
+
+static bool vfio_devices_all_stopped_and_saving(VFIOContainer *container)
+{
+    VFIOGroup *group;
+    VFIODevice *vbasedev;
+    MigrationState *ms = migrate_get_current();
+
+    if (!migration_is_setup_or_active(ms->state)) {
+        return false;
+    }
+
+    QLIST_FOREACH(group, &container->group_list, container_next) {
+        QLIST_FOREACH(vbasedev, &group->device_list, next) {
+            VFIOMigration *migration = vbasedev->migration;
+
+            if (!migration) {
+                return false;
+            }
+
+            if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
+                !(migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
+                continue;
+            } else {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
 /*
  * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
  */
@@ -812,9 +847,90 @@ static void vfio_listener_region_del(MemoryListener *listener,
     }
 }
 
+static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
+                                 uint64_t size, ram_addr_t ram_addr)
+{
+    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
+    struct vfio_iommu_type1_dirty_bitmap_get *range;
+    uint64_t pages;
+    int ret;
+
+    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
+
+    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
+    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
+    range->iova = iova;
+    range->size = size;
+
+    /*
+     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
+     * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap's pgsize to
+     * TARGET_PAGE_SIZE.
+     */
+    range->bitmap.pgsize = TARGET_PAGE_SIZE;
+
+    pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS;
+    range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
+                         BITS_PER_BYTE;
+    range->bitmap.data = g_try_malloc0(range->bitmap.size);
+    if (!range->bitmap.data) {
+        ret = -ENOMEM;
+        goto err_out;
+    }
+
+    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
+    if (ret) {
+        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
+                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
+                     (uint64_t)range->size, errno);
+        goto err_out;
+    }
+
+    cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
+                                           ram_addr, pages);
+
+    trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
+                                range->bitmap.size, ram_addr);
+err_out:
+    g_free(range->bitmap.data);
+    g_free(dbitmap);
+
+    return ret;
+}
+
+static int vfio_sync_dirty_bitmap(VFIOContainer *container,
+                                  MemoryRegionSection *section)
+{
+    ram_addr_t ram_addr;
+
+    ram_addr = memory_region_get_ram_addr(section->mr) +
+               section->offset_within_region;
+
+    return vfio_get_dirty_bitmap(container,
+                   TARGET_PAGE_ALIGN(section->offset_within_address_space),
+                   int128_get64(section->size), ram_addr);
+}
+
+static void vfio_listerner_log_sync(MemoryListener *listener,
+        MemoryRegionSection *section)
+{
+    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+
+    if (vfio_listener_skipped_section(section) ||
+        !container->dirty_pages_supported) {
+        return;
+    }
+
+    if (vfio_devices_all_stopped_and_saving(container)) {
+        vfio_sync_dirty_bitmap(container, section);
+    }
+}
+
 static const MemoryListener vfio_memory_listener = {
     .region_add = vfio_listener_region_add,
     .region_del = vfio_listener_region_del,
+    .log_sync = vfio_listerner_log_sync,
 };
 
 static void vfio_listener_release(VFIOContainer *container)
hw/vfio/trace-events
@@ -163,3 +163,4 @@ vfio_load_device_config_state(const char *name) " (%s)"
 vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
 vfio_load_state_device_data(const char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64
 vfio_load_cleanup(const char *name) " (%s)"
+vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64
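For reference, the VFIO_IOMMU_DIRTY_PAGES ioctl issued by vfio_get_dirty_bitmap() takes a vfio_iommu_type1_dirty_bitmap header whose data[] payload is a vfio_iommu_type1_dirty_bitmap_get when the GET_BITMAP flag is set. The layout below is a rough sketch paraphrased from linux/vfio.h, not a verbatim copy; the kernel header is authoritative.

/*
 * Paraphrased sketch of the dirty-tracking UAPI from <linux/vfio.h>,
 * shown only for orientation.
 */
#include <linux/types.h>

struct vfio_bitmap {
    __u64  pgsize;   /* page size (bytes) that each bitmap bit represents */
    __u64  size;     /* size of the bitmap buffer, in bytes */
    __u64 *data;     /* userspace pointer to the bitmap */
};

struct vfio_iommu_type1_dirty_bitmap {
    __u32  argsz;
    __u32  flags;
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START      (1 << 0)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP       (1 << 1)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
    __u8   data[];   /* flag-specific payload; a ..._get struct for GET_BITMAP */
};

struct vfio_iommu_type1_dirty_bitmap_get {
    __u64  iova;     /* start IOVA of the range to query */
    __u64  size;     /* length of the range, in bytes */
    struct vfio_bitmap bitmap;
};

As the diff above shows, the helper allocates the header and payload together, points bitmap.data at a g_try_malloc0()'d buffer sized as described earlier, and passes the whole structure to ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap).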