migration: sync all address spaces
Migrating a VM during reboot sometimes results in differences between the source and destination in the SMRAM area. This is because migration_bitmap_sync() only fetches from KVM the dirty log of address_space_memory; SMRAM memory slots are ignored, so modifications to SMRAM are not sent to the destination.

Reported-by: He Rongguang <herongguang.he@huawei.com>
Reviewed-by: He Rongguang <herongguang.he@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
cc9d8a3b2c
commit
9c1f8f4493
@@ -1188,12 +1188,11 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
|
|||||||
hwaddr addr, uint64_t size);
|
hwaddr addr, uint64_t size);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
|
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
|
||||||
*
|
*
|
||||||
* Synchronizes the dirty page log for an entire address space.
|
* Synchronizes the dirty page log for all address spaces.
|
||||||
* @as: the address space that contains the memory being synchronized
|
|
||||||
*/
|
*/
|
||||||
void address_space_sync_dirty_bitmap(AddressSpace *as);
|
void memory_global_dirty_log_sync(void);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* memory_region_transaction_begin: Start a transaction.
|
* memory_region_transaction_begin: Start a transaction.
|
||||||
|
40
memory.c
40
memory.c
@@ -158,14 +158,10 @@ static bool memory_listener_match(MemoryListener *listener,
|
|||||||
|
|
||||||
/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        /* Build the section once, then dispatch to the listener. */    \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(callback, dir, &mrs, ##_args);             \
    } while (0)
||||||
|
|
||||||
struct CoalescedMemoryRange {
|
struct CoalescedMemoryRange {
|
||||||
AddrRange addr;
|
AddrRange addr;
|
||||||
@@ -245,6 +241,19 @@ typedef struct AddressSpaceOps AddressSpaceOps;
|
|||||||
#define FOR_EACH_FLAT_RANGE(var, view) \
|
#define FOR_EACH_FLAT_RANGE(var, view) \
|
||||||
for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
|
for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
|
||||||
|
|
||||||
|
static inline MemoryRegionSection
|
||||||
|
section_from_flat_range(FlatRange *fr, AddressSpace *as)
|
||||||
|
{
|
||||||
|
return (MemoryRegionSection) {
|
||||||
|
.mr = fr->mr,
|
||||||
|
.address_space = as,
|
||||||
|
.offset_within_region = fr->offset_in_region,
|
||||||
|
.size = fr->addr.size,
|
||||||
|
.offset_within_address_space = int128_get64(fr->addr.start),
|
||||||
|
.readonly = fr->readonly,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
static bool flatrange_equal(FlatRange *a, FlatRange *b)
|
static bool flatrange_equal(FlatRange *a, FlatRange *b)
|
||||||
{
|
{
|
||||||
return a->mr == b->mr
|
return a->mr == b->mr
|
||||||
@@ -2156,17 +2165,28 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr)
|
|||||||
return mr && mr != container;
|
return mr && mr != container;
|
||||||
}
|
}
|
||||||
|
|
||||||
void address_space_sync_dirty_bitmap(AddressSpace *as)
|
void memory_global_dirty_log_sync(void)
|
||||||
{
|
{
|
||||||
|
MemoryListener *listener;
|
||||||
|
AddressSpace *as;
|
||||||
FlatView *view;
|
FlatView *view;
|
||||||
FlatRange *fr;
|
FlatRange *fr;
|
||||||
|
|
||||||
|
QTAILQ_FOREACH(listener, &memory_listeners, link) {
|
||||||
|
if (!listener->log_sync) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
/* Global listeners are being phased out. */
|
||||||
|
assert(listener->address_space_filter);
|
||||||
|
as = listener->address_space_filter;
|
||||||
view = address_space_get_flatview(as);
|
view = address_space_get_flatview(as);
|
||||||
FOR_EACH_FLAT_RANGE(fr, view) {
|
FOR_EACH_FLAT_RANGE(fr, view) {
|
||||||
MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
|
MemoryRegionSection mrs = section_from_flat_range(fr, as);
|
||||||
|
listener->log_sync(listener, &mrs);
|
||||||
}
|
}
|
||||||
flatview_unref(view);
|
flatview_unref(view);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void memory_global_dirty_log_start(void)
|
void memory_global_dirty_log_start(void)
|
||||||
{
|
{
|
||||||
|
@@ -626,7 +626,7 @@ static void migration_bitmap_sync(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
trace_migration_bitmap_sync_start();
|
trace_migration_bitmap_sync_start();
|
||||||
address_space_sync_dirty_bitmap(&address_space_memory);
|
memory_global_dirty_log_sync();
|
||||||
|
|
||||||
qemu_mutex_lock(&migration_bitmap_mutex);
|
qemu_mutex_lock(&migration_bitmap_mutex);
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
|
Loading…
Reference in New Issue
Block a user