Finish non-postcopiable iterative devices before package
Where we have iterable, but non-postcopiable devices (e.g. htab or block
migration), complete them before forming the 'package' but with the CPUs
stopped. This stops them filling up the package.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
commit 1c0d249ddf
parent 80e60c6e1c
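For orientation, here is a minimal, hedged sketch of the two-pass completion the diff below introduces: a first "iterable only" pass flushes the non-postcopiable iterative devices before the package is built, and a later pass writes the remaining device state. This is plain C with invented names (device_t, complete_precopy, the device table); none of it is the real QEMU API.

/*
 * Sketch only: stand-ins for QEMU's savevm handlers, illustrating how an
 * iterable_only completion pass lets non-postcopiable iterative devices
 * finish before the postcopy package is formed.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    const char *name;
    bool iterative;     /* has an iterative "complete precopy" step */
    bool postcopiable;  /* can keep sending its data during postcopy */
} device_t;

static device_t devices[] = {
    { "ram",     true,  true  },  /* postcopiable: completed later */
    { "htab",    true,  false },  /* iterative but not postcopiable */
    { "block",   true,  false },
    { "devices", false, false },  /* plain (non-iterative) device state */
};

/* Mirrors the shape of qemu_savevm_state_complete_precopy(f, iterable_only). */
static void complete_precopy(bool in_postcopy, bool iterable_only)
{
    for (size_t i = 0; i < sizeof(devices) / sizeof(devices[0]); i++) {
        device_t *d = &devices[i];

        /* Skip non-iterative entries, devices that will complete during
         * postcopy, and (when not in the iterable-only pass) everything
         * iterative, since that was already flushed earlier. */
        if (!d->iterative ||
            (in_postcopy && d->postcopiable) ||
            (in_postcopy && !iterable_only)) {
            continue;
        }
        printf("completing iterative device: %s\n", d->name);
    }

    if (iterable_only) {
        return;  /* the remaining device state is written later */
    }
    printf("writing remaining (non-iterative) device state\n");
}

int main(void)
{
    bool in_postcopy = true;

    /* Before forming the package, CPUs stopped: flush the non-postcopiable
     * iterative devices so they do not fill up the package. */
    complete_precopy(in_postcopy, true);

    /* Later, into the package: complete the rest of the device state. */
    complete_precopy(in_postcopy, false);
    return 0;
}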
@@ -112,7 +112,7 @@ void qemu_savevm_state_header(QEMUFile *f);
 int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
 void qemu_savevm_state_cleanup(void);
 void qemu_savevm_state_complete_postcopy(QEMUFile *f);
-void qemu_savevm_state_complete_precopy(QEMUFile *f);
+void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only);
 void qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size,
                                uint64_t *res_non_postcopiable,
                                uint64_t *res_postcopiable);
@@ -1428,6 +1428,12 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
         goto fail;
     }
 
+    /*
+     * Cause any non-postcopiable, but iterative devices to
+     * send out their final data.
+     */
+    qemu_savevm_state_complete_precopy(ms->file, true);
+
     /*
      * in Finish migrate and with the io-lock held everything should
      * be quiet, but we've potentially still got dirty pages and we
@@ -1471,7 +1477,7 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
      */
     qemu_savevm_send_postcopy_listen(fb);
 
-    qemu_savevm_state_complete_precopy(fb);
+    qemu_savevm_state_complete_precopy(fb, false);
     qemu_savevm_send_ping(fb, 3);
 
     qemu_savevm_send_postcopy_run(fb);
@@ -1538,7 +1544,7 @@ static void migration_completion(MigrationState *s, int current_active_state,
         ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
         if (ret >= 0) {
             qemu_file_set_rate_limit(s->file, INT64_MAX);
-            qemu_savevm_state_complete_precopy(s->file);
+            qemu_savevm_state_complete_precopy(s->file, false);
         }
     }
     qemu_mutex_unlock_iothread();
@@ -1026,7 +1026,7 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
     qemu_fflush(f);
 }
 
-void qemu_savevm_state_complete_precopy(QEMUFile *f)
+void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
 {
     QJSON *vmdesc;
     int vmdesc_len;
@@ -1041,9 +1041,11 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f)
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
         if (!se->ops ||
             (in_postcopy && se->ops->save_live_complete_postcopy) ||
+            (in_postcopy && !iterable_only) ||
             !se->ops->save_live_complete_precopy) {
             continue;
         }
+
         if (se->ops && se->ops->is_active) {
             if (!se->ops->is_active(se->opaque)) {
                 continue;
@@ -1062,6 +1064,10 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f)
         }
     }
 
+    if (iterable_only) {
+        return;
+    }
+
     vmdesc = qjson_new();
     json_prop_int(vmdesc, "page_size", TARGET_PAGE_SIZE);
     json_start_array(vmdesc, "devices");
@@ -1176,7 +1182,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
 
     ret = qemu_file_get_error(f);
     if (ret == 0) {
-        qemu_savevm_state_complete_precopy(f);
+        qemu_savevm_state_complete_precopy(f, false);
         ret = qemu_file_get_error(f);
     }
     qemu_savevm_state_cleanup();