migration: Add tracepoints for downtime checkpoints
This patch is inspired by Joao Martins' patch here:

https://lore.kernel.org/r/20230926161841.98464-1-joao.m.martins@oracle.com

Add tracepoints for major downtime checkpoints on both src and dst.  They
share the same tracepoint with a string showing its stage.

Besides the checkpoints in the previous patch, this patch also adds
destination checkpoints.

On src, we have these checkpoints added:

  - src-downtime-start: right before vm stops on src
  - src-vm-stopped: after vm is fully stopped
  - src-iterable-saved: after all iterables saved (END sections)
  - src-non-iterable-saved: after all non-iterable saved (FULL sections)
  - src-downtime-end: migration fully completed

On dst, we have these checkpoints added:

  - dst-precopy-loadvm-completed: after loadvm all done for precopy
  - dst-precopy-bh-*: record BH steps to resume VM for precopy
  - dst-postcopy-bh-*: record BH steps to resume VM for postcopy

On the dst side, we don't have a good way to trace the total time consumed
by iterable or non-iterable sections for now.  We could mark it by the
first time a FULL / END section is received, but rather than that let's
just rely on the other tracepoints added for vmstates to back up the
information.

With this patch, one can enable the "vmstate_downtime*" tracepoints and
that will enable all the tracepoints necessary for downtime measurements.

Drop the loadvm_postcopy_handle_run_bh() tracepoint alongside, because it
serves the same purpose, which was only for postcopy.  We then have a
unified prefix for all downtime-relevant tracepoints.

Co-developed-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231030163346.765724-6-peterx@redhat.com>
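[Editor's usage sketch, not part of the commit: QEMU's --trace option
accepts glob patterns, so the "vmstate_downtime*" pattern mentioned above
can be passed directly at startup; the incoming URI below is a placeholder
for a real migration setup.]

    # Enable all downtime checkpoint/vmstate tracepoints on the
    # destination side; requires a QEMU build with a trace backend.
    qemu-system-x86_64 --trace "vmstate_downtime*" -incoming tcp:0:4444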
commit 3e5f3bcdc2
parent 93bdf888fa
migration/migration.c
@@ -103,6 +103,7 @@ static int close_return_path_on_source(MigrationState *s);
 
 static void migration_downtime_start(MigrationState *s)
 {
+    trace_vmstate_downtime_checkpoint("src-downtime-start");
     s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 }
 
@@ -117,6 +118,8 @@ static void migration_downtime_end(MigrationState *s)
     if (!s->downtime) {
         s->downtime = now - s->downtime_start;
     }
+
+    trace_vmstate_downtime_checkpoint("src-downtime-end");
 }
 
 static bool migration_needs_multiple_sockets(void)
@@ -151,7 +154,11 @@ static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
 
 int migration_stop_vm(RunState state)
 {
-    return vm_stop_force_state(state);
+    int ret = vm_stop_force_state(state);
+
+    trace_vmstate_downtime_checkpoint("src-vm-stopped");
+
+    return ret;
 }
 
 void migration_object_init(void)
@@ -495,6 +502,8 @@ static void process_incoming_migration_bh(void *opaque)
     Error *local_err = NULL;
     MigrationIncomingState *mis = opaque;
 
+    trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter");
+
     /* If capability late_block_activate is set:
      * Only fire up the block code now if we're going to restart the
      * VM, else 'cont' will do it.
@@ -520,6 +529,8 @@ static void process_incoming_migration_bh(void *opaque)
      */
     qemu_announce_self(&mis->announce_timer, migrate_announce_params());
 
+    trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced");
+
     multifd_load_shutdown();
 
     dirty_bitmap_mig_before_vm_start();
@@ -537,6 +548,7 @@ static void process_incoming_migration_bh(void *opaque)
     } else {
         runstate_set(global_state_get_runstate());
     }
+    trace_vmstate_downtime_checkpoint("dst-precopy-bh-vm-started");
     /*
      * This must happen after any state changes since as soon as an external
      * observer sees this event they might start to prod at the VM assuming
@@ -571,6 +583,8 @@ process_incoming_migration_co(void *opaque)
     ret = qemu_loadvm_state(mis->from_src_file);
     mis->loadvm_co = NULL;
 
+    trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed");
+
     ps = postcopy_state_get();
     trace_process_incoming_migration_co_end(ret, ps);
     if (ps != POSTCOPY_INCOMING_NONE) {
migration/savevm.c
@@ -1526,6 +1526,8 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
                                     end_ts_each - start_ts_each);
     }
 
+    trace_vmstate_downtime_checkpoint("src-iterable-saved");
+
     return 0;
 }
 
@@ -1592,6 +1594,8 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
     json_writer_free(vmdesc);
     ms->vmdesc = NULL;
 
+    trace_vmstate_downtime_checkpoint("src-non-iterable-saved");
+
     return 0;
 }
 
@@ -2133,18 +2137,18 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
     Error *local_err = NULL;
     MigrationIncomingState *mis = opaque;
 
-    trace_loadvm_postcopy_handle_run_bh("enter");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-enter");
 
     /* TODO we should move all of this lot into postcopy_ram.c or a shared code
      * in migration.c
      */
     cpu_synchronize_all_post_init();
 
-    trace_loadvm_postcopy_handle_run_bh("after cpu sync");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cpu-synced");
 
     qemu_announce_self(&mis->announce_timer, migrate_announce_params());
 
-    trace_loadvm_postcopy_handle_run_bh("after announce");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-announced");
 
     /* Make sure all file formats throw away their mutable metadata.
      * If we get an error here, just don't restart the VM yet. */
@@ -2155,7 +2159,7 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
         autostart = false;
     }
 
-    trace_loadvm_postcopy_handle_run_bh("after invalidate cache");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated");
 
     dirty_bitmap_mig_before_vm_start();
 
@@ -2169,7 +2173,7 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
 
     qemu_bh_delete(mis->bh);
 
-    trace_loadvm_postcopy_handle_run_bh("return");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-vm-started");
 }
 
 /* After all discards we can start running and asking for pages */
migration/trace-events
@@ -17,7 +17,6 @@ loadvm_handle_recv_bitmap(char *s) "%s"
 loadvm_postcopy_handle_advise(void) ""
 loadvm_postcopy_handle_listen(const char *str) "%s"
 loadvm_postcopy_handle_run(void) ""
-loadvm_postcopy_handle_run_bh(const char *str) "%s"
 loadvm_postcopy_handle_resume(void) ""
 loadvm_postcopy_ram_handle_discard(void) ""
 loadvm_postcopy_ram_handle_discard_end(void) ""
@@ -50,6 +49,7 @@ vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s"
 vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s"
 vmstate_downtime_save(const char *type, const char *idstr, uint32_t instance_id, int64_t downtime) "type=%s idstr=%s instance_id=%d downtime=%"PRIi64
 vmstate_downtime_load(const char *type, const char *idstr, uint32_t instance_id, int64_t downtime) "type=%s idstr=%s instance_id=%d downtime=%"PRIi64
+vmstate_downtime_checkpoint(const char *checkpoint) "%s"
 postcopy_pause_incoming(void) ""
 postcopy_pause_incoming_continued(void) ""
 postcopy_page_req_sync(void *host_addr) "sync page req %p"