migration/multifd: Allow multifd sync without flush
Separate the multifd sync from flushing the client data to the channels. These two operations are closely related but not strictly necessary to be executed together.

The multifd sync is intrinsic to how multifd works. The multiple channels operate independently and may finish IO out of order in relation to each other. This applies also between the source and destination QEMU.

Flushing the data that is left in the client-owned data structures (e.g. MultiFDPages_t) prior to sync is usually the right thing to do, but that is particular to how the ram migration is implemented with several passes over dirty data.

Make these two routines separate, allowing future code to call the sync by itself if needed. This also allows the usage of multifd_ram_send to be isolated to ram code.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
parent a71ef5c7f3
commit a0c78d815c
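At a glance, the split looks like the sketch below, condensed from the multifd.c hunks in the diff that follows (not the verbatim code; the flushing of pages still queued in the client-owned structures is elided):

    int multifd_ram_flush_and_sync(void)
    {
        if (!migrate_multifd()) {
            return 0;
        }

        /* flush of any pages still queued in MultiFDPages_t elided here */

        /* the sync is now a separate routine that can be called on its own */
        return multifd_send_sync_main();
    }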
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -914,11 +914,8 @@ static int multifd_zero_copy_flush(QIOChannel *c)
     return ret;
 }
 
-int multifd_send_sync_main(void)
+int multifd_ram_flush_and_sync(void)
 {
-    int i;
-    bool flush_zero_copy;
-
     if (!migrate_multifd()) {
         return 0;
     }
@@ -930,6 +927,14 @@ int multifd_send_sync_main(void)
         }
     }
 
+    return multifd_send_sync_main();
+}
+
+int multifd_send_sync_main(void)
+{
+    int i;
+    bool flush_zero_copy;
+
     flush_zero_copy = migrate_zero_copy_send();
 
     for (i = 0; i < migrate_multifd_channels(); i++) {
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -270,4 +270,5 @@ static inline uint32_t multifd_ram_page_count(void)
 
 void multifd_ram_save_setup(void);
 void multifd_ram_save_cleanup(void);
+int multifd_ram_flush_and_sync(void);
 #endif
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1326,7 +1326,7 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
                 (!migrate_multifd_flush_after_each_section() ||
                  migrate_mapped_ram())) {
                 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
-                int ret = multifd_send_sync_main();
+                int ret = multifd_ram_flush_and_sync();
                 if (ret < 0) {
                     return ret;
                 }
@@ -3066,7 +3066,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
     }
 
     bql_unlock();
-    ret = multifd_send_sync_main();
+    ret = multifd_ram_flush_and_sync();
     bql_lock();
     if (ret < 0) {
         error_setg(errp, "%s: multifd synchronization failed", __func__);
@@ -3213,7 +3213,7 @@ out:
         && migration_is_setup_or_active()) {
         if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
             !migrate_mapped_ram()) {
-            ret = multifd_send_sync_main();
+            ret = multifd_ram_flush_and_sync();
            if (ret < 0) {
                 return ret;
             }
@@ -3285,7 +3285,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         }
     }
 
-    ret = multifd_send_sync_main();
+    ret = multifd_ram_flush_and_sync();
     if (ret < 0) {
         return ret;
     }