Migration pull request
Merge tag 'migration-next-pull-request' of https://gitlab.com/peterx/qemu into staging

Migration pull request

- Fabiano's fixed-ram patches (1-5 only)
- Peter's cleanups on multifd tls IOC referencing
- Steve's cpr patches for vfio (migration patches only)
- Fabiano's fix on mbps stats racing with COMPLETE state
- Fabiano's fix on return path thread hang

# -----BEGIN PGP SIGNATURE-----
#
# iIcEABYKADAWIQS5GE3CDMRX2s990ak7X8zN86vXBgUCZd7AbhIccGV0ZXJ4QHJl
# ZGhhdC5jb20ACgkQO1/MzfOr1wbg0gDyA3Vg3pIqCJ+u+hLZ+QKxY/pnu8Y5kF+E
# HK2IdslQUQD+OX4ATUnl+CGMiVX9fjs1fKx0Z0Qetq8gC1YJF13yuA0=
# =P2QF
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 28 Feb 2024 05:11:10 GMT
# gpg: using EDDSA key B9184DC20CC457DACF7DD1A93B5FCCCDF3ABD706
# gpg: issuer "peterx@redhat.com"
# gpg: Good signature from "Peter Xu <xzpeter@gmail.com>" [marginal]
# gpg: aka "Peter Xu <peterx@redhat.com>" [marginal]
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg: It is not certain that the signature belongs to the owner.
# Primary key fingerprint: B918 4DC2 0CC4 57DA CF7D D1A9 3B5F CCCD F3AB D706

* tag 'migration-next-pull-request' of https://gitlab.com/peterx/qemu: (25 commits)
  migration: Use migrate_has_error() in close_return_path_on_source()
  migration: Join the return path thread before releasing to_dst_file
  migration: Fix qmp_query_migrate mbps value
  migration: options incompatible with cpr
  migration: update cpr-reboot description
  migration: stop vm for cpr
  migration: notifier error checking
  migration: refactor migrate_fd_connect failures
  migration: per-mode notifiers
  migration: MigrationNotifyFunc
  migration: remove postcopy_after_devices
  migration: MigrationEvent for notifiers
  migration: convert to NotifierWithReturn
  migration: remove error from notifier data
  notify: pass error to notifier with return
  migration/multifd: Drop unnecessary helper to destroy IOC
  migration/multifd: Cleanup outgoing_args in state destroy
  migration/multifd: Make multifd_channel_connect() return void
  migration/multifd: Drop registered_yank
  migration/multifd: Cleanup TLS iochannel referencing
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit c0c6a0e352
@@ -41,6 +41,10 @@ over any transport.
- exec migration: do the migration using the stdin/stdout through a process.
- fd migration: do the migration using a file descriptor that is
passed to QEMU. QEMU doesn't care how this file descriptor is opened.
- file migration: do the migration using a file that is passed to QEMU
by path. A file offset option is supported to allow a management
application to add its own metadata to the start of the file without
QEMU interference.

In addition, support is included for migration using RDMA, which
transports the page data using ``RDMA``, where the hardware takes care of
@@ -3504,7 +3504,7 @@ out:
return !err;
}

static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s)
static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationEvent *e)
{
bool should_be_hidden;
Error *err = NULL;
@@ -3516,7 +3516,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s)

should_be_hidden = qatomic_read(&n->failover_primary_hidden);

if (migration_in_setup(s) && !should_be_hidden) {
if (e->type == MIG_EVENT_PRECOPY_SETUP && !should_be_hidden) {
if (failover_unplug_primary(n, dev)) {
vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev);
qapi_event_send_unplug_primary(dev->id);
@@ -3524,7 +3524,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s)
} else {
warn_report("couldn't unplug primary device");
}
} else if (migration_has_failed(s)) {
} else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
/* We already unplugged the device let's plug it back */
if (!failover_replug_primary(n, dev, &err)) {
if (err) {
@@ -3534,11 +3534,12 @@ static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s)
}
}

static void virtio_net_migration_state_notifier(Notifier *notifier, void *data)
static int virtio_net_migration_state_notifier(NotifierWithReturn *notifier,
MigrationEvent *e, Error **errp)
{
MigrationState *s = data;
VirtIONet *n = container_of(notifier, VirtIONet, migration_state);
virtio_net_handle_migration_primary(n, s);
virtio_net_handle_migration_primary(n, e);
return 0;
}

static bool failover_hide_primary_device(DeviceListener *listener,
@@ -754,22 +754,19 @@ static void vfio_vmstate_change(void *opaque, bool running, RunState state)
mig_state_to_str(new_state));
}

static void vfio_migration_state_notifier(Notifier *notifier, void *data)
static int vfio_migration_state_notifier(NotifierWithReturn *notifier,
MigrationEvent *e, Error **errp)
{
MigrationState *s = data;
VFIOMigration *migration = container_of(notifier, VFIOMigration,
migration_state);
VFIODevice *vbasedev = migration->vbasedev;

trace_vfio_migration_state_notifier(vbasedev->name,
MigrationStatus_str(s->state));
trace_vfio_migration_state_notifier(vbasedev->name, e->type);

switch (s->state) {
case MIGRATION_STATUS_CANCELLING:
case MIGRATION_STATUS_CANCELLED:
case MIGRATION_STATUS_FAILED:
if (e->type == MIG_EVENT_PRECOPY_FAILED) {
vfio_migration_set_state_or_reset(vbasedev, VFIO_DEVICE_STATE_RUNNING);
}
return 0;
}

static void vfio_migration_free(VFIODevice *vbasedev)
@@ -153,7 +153,7 @@ vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size 0x%"PRIx64" ret %d"
vfio_migration_realize(const char *name) " (%s)"
vfio_migration_set_state(const char *name, const char *state) " (%s) state %s"
vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s"
vfio_migration_state_notifier(const char *name, int state) " (%s) state %d"
vfio_save_block(const char *name, int data_size) " (%s) data_size %d"
vfio_save_cleanup(const char *name) " (%s)"
vfio_save_complete_precopy(const char *name, int ret) " (%s) ret %d"
@@ -2084,7 +2084,7 @@ static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
}

static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
void *opaque)
void *opaque, Error **errp)
{
struct PostcopyNotifyData *pnd = opaque;
struct vhost_user *u = container_of(notifier, struct vhost_user,
@@ -2096,20 +2096,20 @@ static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
/* TODO: Get the device name into this error somehow */
error_setg(pnd->errp,
error_setg(errp,
"vhost-user backend not capable of postcopy");
return -ENOENT;
}
break;

case POSTCOPY_NOTIFY_INBOUND_ADVISE:
return vhost_user_postcopy_advise(dev, pnd->errp);
return vhost_user_postcopy_advise(dev, errp);

case POSTCOPY_NOTIFY_INBOUND_LISTEN:
return vhost_user_postcopy_listen(dev, pnd->errp);
return vhost_user_postcopy_listen(dev, errp);

case POSTCOPY_NOTIFY_INBOUND_END:
return vhost_user_postcopy_end(dev, pnd->errp);
return vhost_user_postcopy_end(dev, errp);

default:
/* We ignore notifications we don't know */
@@ -633,7 +633,8 @@ static void virtio_balloon_free_page_done(VirtIOBalloon *s)
}

static int
virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data)
virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data,
Error **errp)
{
VirtIOBalloon *dev = container_of(n, VirtIOBalloon, free_page_hint_notify);
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -62,7 +62,7 @@ typedef struct VFIORegion {
typedef struct VFIOMigration {
struct VFIODevice *vbasedev;
VMChangeStateEntry *vm_state;
Notifier migration_state;
NotifierWithReturn migration_state;
uint32_t device_state;
int data_fd;
void *data_buffer;
@@ -221,7 +221,7 @@ struct VirtIONet {
DeviceListener primary_listener;
QDict *primary_opts;
bool primary_opts_from_json;
Notifier migration_state;
NotifierWithReturn migration_state;
VirtioNetRssData rss_data;
struct NetRxPkt *rx_pkt;
struct EBPFRSSContext ebpf_rss;
@@ -31,7 +31,6 @@ typedef enum PrecopyNotifyReason {

typedef struct PrecopyNotifyData {
enum PrecopyNotifyReason reason;
Error **errp;
} PrecopyNotifyData;

void precopy_infrastructure_init(void);
@@ -61,15 +60,51 @@ void migration_object_init(void);
void migration_shutdown(void);
bool migration_is_idle(void);
bool migration_is_active(MigrationState *);
void migration_add_notifier(Notifier *notify,
void (*func)(Notifier *notifier, void *data));
void migration_remove_notifier(Notifier *notify);
void migration_call_notifiers(MigrationState *s);
bool migrate_mode_is_cpr(MigrationState *);

typedef enum MigrationEventType {
MIG_EVENT_PRECOPY_SETUP,
MIG_EVENT_PRECOPY_DONE,
MIG_EVENT_PRECOPY_FAILED,
MIG_EVENT_MAX
} MigrationEventType;

typedef struct MigrationEvent {
MigrationEventType type;
} MigrationEvent;

/*
* A MigrationNotifyFunc may return an error code and an Error object,
* but only when @e->type is MIG_EVENT_PRECOPY_SETUP. The code is an int
* to allow for different failure modes and recovery actions.
*/
typedef int (*MigrationNotifyFunc)(NotifierWithReturn *notify,
MigrationEvent *e, Error **errp);

/*
* Register the notifier @notify to be called when a migration event occurs
* for MIG_MODE_NORMAL, as specified by the MigrationEvent passed to @func.
* Notifiers may receive events in any of the following orders:
* - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_DONE
* - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_FAILED
* - MIG_EVENT_PRECOPY_FAILED
*/
void migration_add_notifier(NotifierWithReturn *notify,
MigrationNotifyFunc func);

/*
* Same as migration_add_notifier, but applies to be specified @mode.
*/
void migration_add_notifier_mode(NotifierWithReturn *notify,
MigrationNotifyFunc func, MigMode mode);

void migration_remove_notifier(NotifierWithReturn *notify);
int migration_call_notifiers(MigrationState *s, MigrationEventType type,
Error **errp);
bool migration_in_setup(MigrationState *);
bool migration_has_finished(MigrationState *);
bool migration_has_failed(MigrationState *);
/* ...and after the device transmission */
bool migration_in_postcopy_after_devices(MigrationState *);
/* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */
bool migration_in_incoming_postcopy(void);
/* True if incoming migration entered POSTCOPY_INCOMING_ADVISE */
@@ -45,12 +45,16 @@ bool notifier_list_empty(NotifierList *list);
/* Same as Notifier but allows .notify() to return errors */
typedef struct NotifierWithReturn NotifierWithReturn;

/* Return int to allow for different failure modes and recovery actions */
typedef int (*NotifierWithReturnFunc)(NotifierWithReturn *notifier, void *data,
Error **errp);

struct NotifierWithReturn {
/**
* Return 0 on success (next notifier will be invoked), otherwise
* notifier_with_return_list_notify() will stop and return the value.
*/
int (*notify)(NotifierWithReturn *notifier, void *data);
NotifierWithReturnFunc notify;
QLIST_ENTRY(NotifierWithReturn) node;
};

@@ -69,6 +73,6 @@ void notifier_with_return_list_add(NotifierWithReturnList *list,
void notifier_with_return_remove(NotifierWithReturn *notifier);

int notifier_with_return_list_notify(NotifierWithReturnList *list,
void *data);
void *data, Error **errp);

#endif
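
At the notify.h level, the contract shown above is that each NotifierWithReturnFunc now also receives an Error **, and notifier_with_return_list_notify() stops at the first callback that returns non-zero, propagating both the code and the error. A minimal sketch of the generic mechanism, independent of migration (the example_* names are illustrative only, not from this series):

#include "qemu/osdep.h"
#include "qemu/notify.h"
#include "qapi/error.h"

static NotifierWithReturnList example_list =
    NOTIFIER_WITH_RETURN_LIST_INITIALIZER(example_list);

static NotifierWithReturn example_notifier;

static int example_cb(NotifierWithReturn *notifier, void *data, Error **errp)
{
    if (!data) {
        error_setg(errp, "example: missing payload");
        return -EINVAL;   /* stops the walk; caller sees this value */
    }
    return 0;             /* 0 lets the next notifier in the list run */
}

static int example_register_and_notify(void *payload, Error **errp)
{
    example_notifier.notify = example_cb;
    notifier_with_return_list_add(&example_list, &example_notifier);

    /* Returns 0 if every callback returned 0, else the first failure code. */
    return notifier_with_return_list_notify(&example_list, payload, errp);
}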
@@ -69,8 +69,13 @@
#include "qemu/sockets.h"
#include "sysemu/kvm.h"

static NotifierList migration_state_notifiers =
NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
#define NOTIFIER_ELEM_INIT(array, elem) \
[elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem])

static NotifierWithReturnList migration_state_notifiers[] = {
NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL),
NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT),
};

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
@@ -102,6 +107,7 @@ static int migration_maybe_pause(MigrationState *s,
int new_state);
static void migrate_fd_cancel(MigrationState *s);
static bool close_return_path_on_source(MigrationState *s);
static void migration_completion_end(MigrationState *s);

static void migration_downtime_start(MigrationState *s)
{
@@ -162,11 +168,19 @@ static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
return (a > b) - (a < b);
}

int migration_stop_vm(RunState state)
static int migration_stop_vm(MigrationState *s, RunState state)
{
int ret = vm_stop_force_state(state);
int ret;

migration_downtime_start(s);

s->vm_old_state = runstate_get();
global_state_store();

ret = vm_stop_force_state(state);

trace_vmstate_downtime_checkpoint("src-vm-stopped");
trace_migration_completion_vm_stop(ret);

return ret;
}
@@ -1319,6 +1333,8 @@ void migrate_set_state(int *state, int old_state, int new_state)

static void migrate_fd_cleanup(MigrationState *s)
{
MigrationEventType type;

g_free(s->hostname);
s->hostname = NULL;
json_writer_free(s->vmdesc);
@@ -1326,6 +1342,8 @@ static void migrate_fd_cleanup(MigrationState *s)

qemu_savevm_state_cleanup();

close_return_path_on_source(s);

if (s->to_dst_file) {
QEMUFile *tmp;

@@ -1350,12 +1368,6 @@ static void migrate_fd_cleanup(MigrationState *s)
qemu_fclose(tmp);
}

/*
* We already cleaned up to_dst_file, so errors from the return
* path might be due to that, ignore them.
*/
close_return_path_on_source(s);

assert(!migration_is_active(s));

if (s->state == MIGRATION_STATUS_CANCELLING) {
@@ -1367,7 +1379,9 @@ static void migrate_fd_cleanup(MigrationState *s)
/* It is used on info migrate. We can't free it */
error_report_err(error_copy(s->error));
}
migration_call_notifiers(s);
type = migration_has_failed(s) ? MIG_EVENT_PRECOPY_FAILED :
MIG_EVENT_PRECOPY_DONE;
migration_call_notifiers(s, type, NULL);
block_cleanup_parameters();
yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}
@@ -1459,24 +1473,39 @@ static void migrate_fd_cancel(MigrationState *s)
}
}

void migration_add_notifier(Notifier *notify,
void (*func)(Notifier *notifier, void *data))
void migration_add_notifier_mode(NotifierWithReturn *notify,
MigrationNotifyFunc func, MigMode mode)
{
notify->notify = func;
notifier_list_add(&migration_state_notifiers, notify);
notify->notify = (NotifierWithReturnFunc)func;
notifier_with_return_list_add(&migration_state_notifiers[mode], notify);
}

void migration_remove_notifier(Notifier *notify)
void migration_add_notifier(NotifierWithReturn *notify,
MigrationNotifyFunc func)
{
migration_add_notifier_mode(notify, func, MIG_MODE_NORMAL);
}

void migration_remove_notifier(NotifierWithReturn *notify)
{
if (notify->notify) {
notifier_remove(notify);
notifier_with_return_remove(notify);
notify->notify = NULL;
}
}

void migration_call_notifiers(MigrationState *s)
int migration_call_notifiers(MigrationState *s, MigrationEventType type,
Error **errp)
{
notifier_list_notify(&migration_state_notifiers, s);
MigMode mode = s->parameters.mode;
MigrationEvent e;
int ret;

e.type = type;
ret = notifier_with_return_list_notify(&migration_state_notifiers[mode],
&e, errp);
assert(!ret || type == MIG_EVENT_PRECOPY_SETUP);
return ret;
}

bool migration_in_setup(MigrationState *s)
@@ -1520,11 +1549,6 @@ bool migration_postcopy_is_alive(int state)
}
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_in_incoming_postcopy(void)
{
PostcopyState ps = postcopy_state_get();
@@ -1583,6 +1607,11 @@ bool migration_is_active(MigrationState *s)
s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migrate_mode_is_cpr(MigrationState *s)
{
return s->parameters.mode == MIG_MODE_CPR_REBOOT;
}

int migrate_init(MigrationState *s, Error **errp)
{
int ret;
@@ -1606,7 +1635,6 @@ int migrate_init(MigrationState *s, Error **errp)
s->expected_downtime = 0;
s->setup_time = 0;
s->start_postcopy = false;
s->postcopy_after_devices = false;
s->migration_thread_running = false;
error_free(s->error);
s->error = NULL;
@@ -1922,6 +1950,23 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
return false;
}

if (migrate_mode_is_cpr(s)) {
const char *conflict = NULL;

if (migrate_postcopy()) {
conflict = "postcopy";
} else if (migrate_background_snapshot()) {
conflict = "background snapshot";
} else if (migrate_colo()) {
conflict = "COLO";
}

if (conflict) {
error_setg(errp, "Cannot use %s with CPR", conflict);
return false;
}
}

if (blk || blk_inc) {
if (migrate_colo()) {
error_setg(errp, "No disk migration is required in COLO mode");
@@ -2384,8 +2429,7 @@ static bool close_return_path_on_source(MigrationState *ms)
* cause it to unblock if it's stuck waiting for the destination.
*/
WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
if (ms->to_dst_file && ms->rp_state.from_dst_file &&
qemu_file_get_error(ms->to_dst_file)) {
if (migrate_has_error(ms) && ms->rp_state.from_dst_file) {
qemu_file_shutdown(ms->rp_state.from_dst_file);
}
}
@@ -2436,10 +2480,7 @@ static int postcopy_start(MigrationState *ms, Error **errp)
bql_lock();
trace_postcopy_start_set_run();

migration_downtime_start(ms);

global_state_store();
ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE);
ret = migration_stop_vm(ms, RUN_STATE_FINISH_MIGRATE);
if (ret < 0) {
goto fail;
}
@@ -2536,8 +2577,7 @@ static int postcopy_start(MigrationState *ms, Error **errp)
* at the transition to postcopy and after the device state; in particular
* spice needs to trigger a transition now
*/
ms->postcopy_after_devices = true;
migration_call_notifiers(ms);
migration_call_notifiers(ms, MIG_EVENT_PRECOPY_DONE, NULL);

migration_downtime_end(ms);

@@ -2557,11 +2597,10 @@ static int postcopy_start(MigrationState *ms, Error **errp)

ret = qemu_file_get_error(ms->to_dst_file);
if (ret) {
error_setg(errp, "postcopy_start: Migration stream errored");
migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
MIGRATION_STATUS_FAILED);
error_setg_errno(errp, -ret, "postcopy_start: Migration stream error");
bql_lock();
goto fail;
}

trace_postcopy_preempt_enabled(migrate_postcopy_preempt());

return ret;
@@ -2582,6 +2621,7 @@ fail:
error_report_err(local_err);
}
}
migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL);
bql_unlock();
return -1;
}
@@ -2635,16 +2675,13 @@ static int migration_completion_precopy(MigrationState *s,
int ret;

bql_lock();
migration_downtime_start(s);

s->vm_old_state = runstate_get();
global_state_store();

ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE);
trace_migration_completion_vm_stop(ret);
if (!migrate_mode_is_cpr(s)) {
ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE);
if (ret < 0) {
goto out_unlock;
}
}

ret = migration_maybe_pause(s, current_active_state,
MIGRATION_STATUS_DEVICE);
@@ -2746,8 +2783,7 @@ static void migration_completion(MigrationState *s)
migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_COLO);
} else {
migrate_set_state(&s->state, current_active_state,
MIGRATION_STATUS_COMPLETED);
migration_completion_end(s);
}

return;
@@ -2784,8 +2820,7 @@ static void bg_migration_completion(MigrationState *s)
goto fail;
}

migrate_set_state(&s->state, current_active_state,
MIGRATION_STATUS_COMPLETED);
migration_completion_end(s);
return;

fail:
@@ -2874,6 +2909,13 @@ static MigThrError postcopy_pause(MigrationState *s)
while (true) {
QEMUFile *file;

/*
* We're already pausing, so ignore any errors on the return
* path and just wait for the thread to finish. It will be
* re-created when we resume.
*/
close_return_path_on_source(s);

/*
* Current channel is possibly broken. Release it. Note that this is
* guaranteed even without lock because to_dst_file should only be
@@ -2893,13 +2935,6 @@ static MigThrError postcopy_pause(MigrationState *s)
qemu_file_shutdown(file);
qemu_fclose(file);

/*
* We're already pausing, so ignore any errors on the return
* path and just wait for the thread to finish. It will be
* re-created when we resume.
*/
close_return_path_on_source(s);

migrate_set_state(&s->state, s->state,
MIGRATION_STATUS_POSTCOPY_PAUSED);
@@ -2987,18 +3022,28 @@ static MigThrError migration_detect_error(MigrationState *s)
}
}

static void migration_calculate_complete(MigrationState *s)
static void migration_completion_end(MigrationState *s)
{
uint64_t bytes = migration_transferred_bytes();
int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
int64_t transfer_time;

/*
* Take the BQL here so that query-migrate on the QMP thread sees:
* - atomic update of s->total_time and s->mbps;
* - correct ordering of s->mbps update vs. s->state;
*/
bql_lock();
migration_downtime_end(s);
s->total_time = end_time - s->start_time;
transfer_time = s->total_time - s->setup_time;
if (transfer_time) {
s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
}

migrate_set_state(&s->state, s->state,
MIGRATION_STATUS_COMPLETED);
bql_unlock();
}

static void update_iteration_initial_status(MigrationState *s)
@@ -3145,7 +3190,6 @@ static void migration_iteration_finish(MigrationState *s)
bql_lock();
switch (s->state) {
case MIGRATION_STATUS_COMPLETED:
migration_calculate_complete(s);
runstate_set(RUN_STATE_POSTMIGRATE);
break;
case MIGRATION_STATUS_COLO:
@@ -3189,9 +3233,6 @@ static void bg_migration_iteration_finish(MigrationState *s)
bql_lock();
switch (s->state) {
case MIGRATION_STATUS_COMPLETED:
migration_calculate_complete(s);
break;

case MIGRATION_STATUS_ACTIVE:
case MIGRATION_STATUS_FAILED:
case MIGRATION_STATUS_CANCELLED:
@@ -3483,15 +3524,10 @@ static void *bg_migration_thread(void *opaque)
s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

trace_migration_thread_setup_complete();
migration_downtime_start(s);

bql_lock();

s->vm_old_state = runstate_get();

global_state_store();
/* Forcibly stop VM before saving state of vCPUs and devices */
if (migration_stop_vm(RUN_STATE_PAUSED)) {
if (migration_stop_vm(s, RUN_STATE_PAUSED)) {
goto fail;
}
/*
@@ -3567,6 +3603,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
Error *local_err = NULL;
uint64_t rate_limit;
bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
int ret;

/*
* If there's a previous error, free it and prepare for another one.
@@ -3601,7 +3638,9 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
rate_limit = migrate_max_bandwidth();

/* Notify before starting migration thread */
migration_call_notifiers(s);
if (migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP, &local_err)) {
goto fail;
}
}

migration_rate_set(rate_limit);
@@ -3615,11 +3654,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
if (migrate_postcopy_ram() || migrate_return_path()) {
if (open_return_path_on_source(s)) {
error_setg(&local_err, "Unable to open return-path for postcopy");
migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
migrate_set_error(s, local_err);
error_report_err(local_err);
migrate_fd_cleanup(s);
return;
goto fail;
}
}

@@ -3640,6 +3675,14 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
return;
}

if (migrate_mode_is_cpr(s)) {
ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE);
if (ret < 0) {
error_setg(&local_err, "migration_stop_vm failed, error %d", -ret);
goto fail;
}
}

if (migrate_background_snapshot()) {
qemu_thread_create(&s->thread, "bg_snapshot",
bg_migration_thread, s, QEMU_THREAD_JOINABLE);
@@ -3648,6 +3691,13 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
migration_thread, s, QEMU_THREAD_JOINABLE);
}
s->migration_thread_running = true;
return;

fail:
migrate_set_error(s, local_err);
migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
error_report_err(local_err);
migrate_fd_cleanup(s);
}

static void migration_class_init(ObjectClass *klass, void *data)
@@ -348,8 +348,6 @@ struct MigrationState {

/* Flag set once the migration has been asked to enter postcopy */
bool start_postcopy;
/* Flag set after postcopy has sent the device state */
bool postcopy_after_devices;

/* Flag set once the migration thread is running (and needs joining) */
bool migration_thread_running;
@@ -543,6 +541,4 @@ int migration_rp_wait(MigrationState *s);
*/
void migration_rp_kick(MigrationState *s);

int migration_stop_vm(RunState state);

#endif
@@ -79,6 +79,19 @@ struct {
MultiFDMethods *ops;
} *multifd_send_state;

struct {
MultiFDRecvParams *params;
/* number of created threads */
int count;
/* syncs main thread and channels */
QemuSemaphore sem_sync;
/* global number of generated multifd packets */
uint64_t packet_num;
int exiting;
/* multifd ops */
MultiFDMethods *ops;
} *multifd_recv_state;

/* Multifd without compression */

/**
@@ -440,6 +453,11 @@ static bool multifd_send_should_exit(void)
return qatomic_read(&multifd_send_state->exiting);
}

static bool multifd_recv_should_exit(void)
{
return qatomic_read(&multifd_recv_state->exiting);
}

/*
* The migration thread can wait on either of the two semaphores. This
* function can be used to kick the main thread out of waiting on either of
@@ -641,18 +659,13 @@ static void multifd_send_terminate_threads(void)
}
}

static int multifd_send_channel_destroy(QIOChannel *send)
{
return socket_send_channel_destroy(send);
}

static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
{
if (p->registered_yank) {
if (p->c) {
migration_ioc_unregister_yank(p->c);
}
multifd_send_channel_destroy(p->c);
object_unref(OBJECT(p->c));
p->c = NULL;
}
qemu_sem_destroy(&p->sem);
qemu_sem_destroy(&p->sem_sync);
g_free(p->name);
@@ -671,6 +684,7 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)

static void multifd_send_cleanup_state(void)
{
socket_cleanup_outgoing_migration();
qemu_sem_destroy(&multifd_send_state->channels_created);
qemu_sem_destroy(&multifd_send_state->channels_ready);
g_free(multifd_send_state->params);
@@ -873,16 +887,22 @@ out:

static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque);

typedef struct {
MultiFDSendParams *p;
QIOChannelTLS *tioc;
} MultiFDTLSThreadArgs;

static void *multifd_tls_handshake_thread(void *opaque)
{
MultiFDSendParams *p = opaque;
QIOChannelTLS *tioc = QIO_CHANNEL_TLS(p->c);
MultiFDTLSThreadArgs *args = opaque;

qio_channel_tls_handshake(tioc,
qio_channel_tls_handshake(args->tioc,
multifd_new_send_channel_async,
p,
args->p,
NULL,
NULL);
g_free(args);

return NULL;
}
@@ -892,6 +912,7 @@ static bool multifd_tls_channel_connect(MultiFDSendParams *p,
{
MigrationState *s = migrate_get_current();
const char *hostname = s->hostname;
MultiFDTLSThreadArgs *args;
QIOChannelTLS *tioc;

tioc = migration_tls_client_create(ioc, hostname, errp);
@@ -906,29 +927,29 @@ static bool multifd_tls_channel_connect(MultiFDSendParams *p,
object_unref(OBJECT(ioc));
trace_multifd_tls_outgoing_handshake_start(ioc, tioc, hostname);
qio_channel_set_name(QIO_CHANNEL(tioc), "multifd-tls-outgoing");
p->c = QIO_CHANNEL(tioc);

args = g_new0(MultiFDTLSThreadArgs, 1);
args->tioc = tioc;
args->p = p;

p->tls_thread_created = true;
qemu_thread_create(&p->tls_thread, "multifd-tls-handshake-worker",
multifd_tls_handshake_thread, p,
multifd_tls_handshake_thread, args,
QEMU_THREAD_JOINABLE);
return true;
}

static bool multifd_channel_connect(MultiFDSendParams *p,
QIOChannel *ioc,
Error **errp)
static void multifd_channel_connect(MultiFDSendParams *p, QIOChannel *ioc)
{
qio_channel_set_delay(ioc, false);

migration_ioc_register_yank(ioc);
p->registered_yank = true;
/* Setup p->c only if the channel is completely setup */
p->c = ioc;

p->thread_created = true;
qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
QEMU_THREAD_JOINABLE);
return true;
}

/*
@@ -960,7 +981,8 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
return;
}
} else {
ret = multifd_channel_connect(p, ioc, &local_err);
multifd_channel_connect(p, ioc);
ret = true;
}

out:
@@ -976,14 +998,12 @@ out:

trace_multifd_new_send_channel_async_error(p->id, local_err);
multifd_send_set_error(local_err);
if (!p->c) {
/*
* If no channel has been created, drop the initial
* reference. Otherwise cleanup happens at
* multifd_send_channel_destroy()
* For error cases (TLS or non-TLS), IO channel is always freed here
* rather than when cleanup multifd: since p->c is not set, multifd
* cleanup code doesn't even know its existence.
*/
object_unref(OBJECT(ioc));
}
error_free(local_err);
}
@@ -1063,24 +1083,16 @@ bool multifd_send_setup(void)
return true;
}

struct {
MultiFDRecvParams *params;
/* number of created threads */
int count;
/* syncs main thread and channels */
QemuSemaphore sem_sync;
/* global number of generated multifd packets */
uint64_t packet_num;
/* multifd ops */
MultiFDMethods *ops;
} *multifd_recv_state;

static void multifd_recv_terminate_threads(Error *err)
{
int i;

trace_multifd_recv_terminate_threads(err != NULL);

if (qatomic_xchg(&multifd_recv_state->exiting, 1)) {
return;
}

if (err) {
MigrationState *s = migrate_get_current();
migrate_set_error(s, err);
@@ -1094,8 +1106,12 @@ static void multifd_recv_terminate_threads(Error *err)
for (i = 0; i < migrate_multifd_channels(); i++) {
MultiFDRecvParams *p = &multifd_recv_state->params[i];

qemu_mutex_lock(&p->mutex);
p->quit = true;
/*
* multifd_recv_thread may hung at MULTIFD_FLAG_SYNC handle code,
* however try to wakeup it without harm in cleanup phase.
*/
qemu_sem_post(&p->sem_sync);

/*
* We could arrive here for two reasons:
* - normal quit, i.e. everything went fine, just finished
@@ -1105,7 +1121,6 @@ static void multifd_recv_terminate_threads(Error *err)
if (p->c) {
qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
}
qemu_mutex_unlock(&p->mutex);
}
}

@@ -1155,12 +1170,6 @@ void multifd_recv_cleanup(void)
for (i = 0; i < migrate_multifd_channels(); i++) {
MultiFDRecvParams *p = &multifd_recv_state->params[i];

/*
* multifd_recv_thread may hung at MULTIFD_FLAG_SYNC handle code,
* however try to wakeup it without harm in cleanup phase.
*/
qemu_sem_post(&p->sem_sync);

if (p->thread_created) {
qemu_thread_join(&p->thread);
}
@@ -1210,7 +1219,7 @@ static void *multifd_recv_thread(void *opaque)
while (true) {
uint32_t flags;

if (p->quit) {
if (multifd_recv_should_exit()) {
break;
}

@@ -1274,6 +1283,7 @@ int multifd_recv_setup(Error **errp)
multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
qatomic_set(&multifd_recv_state->count, 0);
qatomic_set(&multifd_recv_state->exiting, 0);
qemu_sem_init(&multifd_recv_state->sem_sync, 0);
multifd_recv_state->ops = multifd_ops[migrate_multifd_compression()];

@@ -1282,7 +1292,6 @@ int multifd_recv_setup(Error **errp)

qemu_mutex_init(&p->mutex);
qemu_sem_init(&p->sem_sync, 0);
p->quit = false;
p->id = i;
p->packet_len = sizeof(MultiFDPacket_t)
+ sizeof(uint64_t) * page_count;
@@ -78,8 +78,6 @@ typedef struct {
bool tls_thread_created;
/* communication channel */
QIOChannel *c;
/* is the yank function registered */
bool registered_yank;
/* packet allocated len */
uint32_t packet_len;
/* guest page size */
@@ -77,10 +77,9 @@ int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
struct PostcopyNotifyData pnd;
pnd.reason = reason;
pnd.errp = errp;

return notifier_with_return_list_notify(&postcopy_notifier_list,
&pnd);
&pnd, errp);
}

/*
@@ -128,7 +128,6 @@ enum PostcopyNotifyReason {

struct PostcopyNotifyData {
enum PostcopyNotifyReason reason;
Error **errp;
};

void postcopy_add_notifier(NotifierWithReturn *nn);
@@ -426,9 +426,8 @@ int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
PrecopyNotifyData pnd;
pnd.reason = reason;
pnd.errp = errp;

return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
return notifier_with_return_list_notify(&precopy_notifier_list, &pnd, errp);
}

uint64_t ram_bytes_remaining(void)
@@ -60,17 +60,6 @@ QIOChannel *socket_send_channel_create_sync(Error **errp)
return QIO_CHANNEL(sioc);
}

int socket_send_channel_destroy(QIOChannel *send)
{
/* Remove channel */
object_unref(OBJECT(send));
if (outgoing_args.saddr) {
qapi_free_SocketAddress(outgoing_args.saddr);
outgoing_args.saddr = NULL;
}
return 0;
}

struct SocketConnectData {
MigrationState *s;
char *hostname;
@@ -137,6 +126,14 @@ void socket_start_outgoing_migration(MigrationState *s,
NULL);
}

void socket_cleanup_outgoing_migration(void)
{
if (outgoing_args.saddr) {
qapi_free_SocketAddress(outgoing_args.saddr);
outgoing_args.saddr = NULL;
}
}

static void socket_accept_incoming_migration(QIONetListener *listener,
QIOChannelSocket *cioc,
gpointer opaque)
@@ -23,10 +23,11 @@

void socket_send_channel_create(QIOTaskFunc f, void *data);
QIOChannel *socket_send_channel_create_sync(Error **errp);
int socket_send_channel_destroy(QIOChannel *send);

void socket_start_incoming_migration(SocketAddress *saddr, Error **errp);

void socket_start_outgoing_migration(MigrationState *s,
SocketAddress *saddr, Error **errp);
void socket_cleanup_outgoing_migration(void);

#endif
@@ -34,7 +34,7 @@
typedef struct VhostVDPAState {
NetClientState nc;
struct vhost_vdpa vhost_vdpa;
Notifier migration_state;
NotifierWithReturn migration_state;
VHostNetState *vhost_net;

/* Control commands shadow buffers */
@@ -322,17 +322,17 @@ static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
}
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
MigrationEvent *e, Error **errp)
{
MigrationState *migration = data;
VhostVDPAState *s = container_of(notifier, VhostVDPAState,
migration_state);
VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

if (migration_in_setup(migration)) {
if (e->type == MIG_EVENT_PRECOPY_SETUP) {
vhost_vdpa_net_log_global_enable(s, true);
} else if (migration_has_failed(migration)) {
} else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
vhost_vdpa_net_log_global_enable(s, false);
}
return 0;
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
@@ -636,18 +636,29 @@
#
# @normal: the original form of migration. (since 8.2)
#
# @cpr-reboot: The migrate command saves state to a file, allowing one to
# quit qemu, reboot to an updated kernel, and restart an updated
# version of qemu. The caller must specify a migration URI
# that writes to and reads from a file. Unlike normal mode,
# the use of certain local storage options does not block the
# migration, but the caller must not modify guest block devices
# between the quit and restart. To avoid saving guest RAM to the
# file, the memory backend must be shared, and the @x-ignore-shared
# migration capability must be set. Guest RAM must be non-volatile
# across reboot, such as by backing it with a dax device, but this
# is not enforced. The restarted qemu arguments must match those
# used to initially start qemu, plus the -incoming option.
# @cpr-reboot: The migrate command stops the VM and saves state to the URI.
# After quitting qemu, the user resumes by running qemu -incoming.
#
# This mode allows the user to quit qemu, and restart an updated version
# of qemu. The user may even update and reboot the OS before restarting,
# as long as the URI persists across a reboot.
#
# Unlike normal mode, the use of certain local storage options does not
# block the migration, but the user must not modify guest block devices
# between the quit and restart.
#
# This mode supports vfio devices provided the user first puts the guest
# in the suspended runstate, such as by issuing guest-suspend-ram to the
# qemu guest agent.
#
# Best performance is achieved when the memory backend is shared and the
# @x-ignore-shared migration capability is set, but this is not required.
# Further, if the user reboots before restarting such a configuration, the
# shared backend must be be non-volatile across reboot, such as by backing
# it with a dax device.
#
# cpr-reboot may not be used with postcopy, colo, or background-snapshot.
#
# (since 8.2)
##
{ 'enum': 'MigMode',
@@ -2423,7 +2423,7 @@ static void test_migrate_fd_finish_hook(QTestState *from,
qobject_unref(rsp);
}

static void test_migrate_fd_proto(void)
static void test_migrate_precopy_fd_socket(void)
{
MigrateCommon args = {
.listen_uri = "defer",
@@ -2433,6 +2433,45 @@ static void test_migrate_fd_proto(void)
};
test_precopy_common(&args);
}

static void *migrate_precopy_fd_file_start(QTestState *from, QTestState *to)
{
g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
int src_flags = O_CREAT | O_RDWR;
int dst_flags = O_CREAT | O_RDWR;
int fds[2];

fds[0] = open(file, src_flags, 0660);
assert(fds[0] != -1);

fds[1] = open(file, dst_flags, 0660);
assert(fds[1] != -1);


qtest_qmp_fds_assert_success(to, &fds[0], 1,
"{ 'execute': 'getfd',"
" 'arguments': { 'fdname': 'fd-mig' }}");

qtest_qmp_fds_assert_success(from, &fds[1], 1,
"{ 'execute': 'getfd',"
" 'arguments': { 'fdname': 'fd-mig' }}");

close(fds[0]);
close(fds[1]);

return NULL;
}

static void test_migrate_precopy_fd_file(void)
{
MigrateCommon args = {
.listen_uri = "defer",
.connect_uri = "fd:fd-mig",
.start_hook = migrate_precopy_fd_file_start,
.finish_hook = test_migrate_fd_finish_hook
};
test_file_common(&args, true);
}
#endif /* _WIN32 */

static void do_test_validate_uuid(MigrateStart *args, bool should_fail)
@@ -3527,7 +3566,10 @@ int main(int argc, char **argv)

/* migration_test_add("/migration/ignore_shared", test_ignore_shared); */
#ifndef _WIN32
migration_test_add("/migration/fd_proto", test_migrate_fd_proto);
migration_test_add("/migration/precopy/fd/tcp",
test_migrate_precopy_fd_socket);
migration_test_add("/migration/precopy/fd/file",
test_migrate_precopy_fd_file);
#endif
migration_test_add("/migration/validate_uuid", test_validate_uuid);
migration_test_add("/migration/validate_uuid_error",
@@ -42,7 +42,7 @@
/* core bits */

static SpiceServer *spice_server;
static Notifier migration_state;
static NotifierWithReturn migration_state;
static const char *auth = "spice";
static char *auth_passwd;
static time_t auth_expires = TIME_MAX;
@@ -568,24 +568,23 @@ static SpiceInfo *qmp_query_spice_real(Error **errp)
return info;
}

static void migration_state_notifier(Notifier *notifier, void *data)
static int migration_state_notifier(NotifierWithReturn *notifier,
MigrationEvent *e, Error **errp)
{
MigrationState *s = data;

if (!spice_have_target_host) {
return;
return 0;
}

if (migration_in_setup(s)) {
if (e->type == MIG_EVENT_PRECOPY_SETUP) {
spice_server_migrate_start(spice_server);
} else if (migration_has_finished(s) ||
migration_in_postcopy_after_devices(s)) {
} else if (e->type == MIG_EVENT_PRECOPY_DONE) {
spice_server_migrate_end(spice_server, true);
spice_have_target_host = false;
} else if (migration_has_failed(s)) {
} else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
spice_server_migrate_end(spice_server, false);
spice_have_target_host = false;
}
return 0;
}

int qemu_spice_migrate_info(const char *hostname, int port, int tls_port,
@@ -61,13 +61,14 @@ void notifier_with_return_remove(NotifierWithReturn *notifier)
QLIST_REMOVE(notifier, node);
}

int notifier_with_return_list_notify(NotifierWithReturnList *list, void *data)
int notifier_with_return_list_notify(NotifierWithReturnList *list, void *data,
Error **errp)
{
NotifierWithReturn *notifier, *next;
int ret = 0;

QLIST_FOREACH_SAFE(notifier, &list->notifiers, node, next) {
ret = notifier->notify(notifier, data);
ret = notifier->notify(notifier, data, errp);
if (ret != 0) {
break;
}