migration: simplify blockers

Modify migrate_add_blocker and migrate_del_blocker to take an Error **
reason.  This allows migration to own the Error object, so that if an error
occurs in migrate_add_blocker, migration code can free the Error and clear
the client handle, simplifying client code.  It also simplifies the
migrate_del_blocker call site.

In addition, this is a pre-requisite for a proposed future patch that would
add a mode argument to migration requests to support live update, and
maintain a list of blockers for each mode.  A blocker may apply to a single
mode or to multiple modes, and passing Error** will allow one Error object
to be registered for multiple modes.

No functional change.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Tested-by: Michael Galaxy <mgalaxy@akamai.com>
Reviewed-by: Michael Galaxy <mgalaxy@akamai.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <1697634216-84215-1-git-send-email-steven.sistare@oracle.com>
commit c8a7fc5179
parent 04131e0009
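For illustration, here is a minimal sketch of the client pattern this change enables. The MyDevice type and function names are hypothetical; the calls match the updated migration/blocker.h prototypes shown further below.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "migration/blocker.h"

    /* Hypothetical device state used only for this sketch. */
    typedef struct MyDevice {
        Error *migration_blocker;
    } MyDevice;

    static int my_device_block_migration(MyDevice *d, Error **errp)
    {
        error_setg(&d->migration_blocker, "my-device does not support migration");

        /*
         * Pass the address of the handle.  On failure, migrate_add_blocker()
         * frees the Error and sets d->migration_blocker to NULL, so the old
         * error_free()/NULL cleanup in the caller is no longer needed.
         */
        if (migrate_add_blocker(&d->migration_blocker, errp) < 0) {
            return -1;
        }
        return 0;
    }

    static void my_device_unrealize(MyDevice *d)
    {
        /*
         * Frees *migration_blocker and sets it to NULL; does nothing if it
         * is already NULL, so no "if (d->migration_blocker)" guard is needed.
         */
        migrate_del_blocker(&d->migration_blocker);
    }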
@@ -534,11 +534,8 @@ static int tpm_emulator_block_migration(TPMEmulator *tpm_emu)
         error_setg(&tpm_emu->migration_blocker,
                    "Migration disabled: TPM emulator does not support "
                    "migration");
-        if (migrate_add_blocker(tpm_emu->migration_blocker, &err) < 0) {
+        if (migrate_add_blocker(&tpm_emu->migration_blocker, &err) < 0) {
             error_report_err(err);
-            error_free(tpm_emu->migration_blocker);
-            tpm_emu->migration_blocker = NULL;
-
             return -1;
         }
     }
@@ -1016,10 +1013,7 @@ static void tpm_emulator_inst_finalize(Object *obj)
 
     qapi_free_TPMEmulatorOptions(tpm_emu->options);
 
-    if (tpm_emu->migration_blocker) {
-        migrate_del_blocker(tpm_emu->migration_blocker);
-        error_free(tpm_emu->migration_blocker);
-    }
+    migrate_del_blocker(&tpm_emu->migration_blocker);
 
     tpm_sized_buffer_reset(&state_blobs->volatil);
     tpm_sized_buffer_reset(&state_blobs->permanent);
@@ -1369,9 +1369,8 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
                bdrv_get_device_or_node_name(bs));
     bdrv_graph_rdunlock_main_loop();
 
-    ret = migrate_add_blocker(s->migration_blocker, errp);
+    ret = migrate_add_blocker(&s->migration_blocker, errp);
     if (ret < 0) {
         error_setg(errp, "Migration blocker error");
         goto fail;
     }
     qemu_co_mutex_init(&s->lock);
@@ -1406,7 +1405,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
         ret = bdrv_check(bs, &res, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
         if (ret < 0) {
             error_setg_errno(errp, -ret, "Could not repair corrupted image");
-            migrate_del_blocker(s->migration_blocker);
+            migrate_del_blocker(&s->migration_blocker);
             goto fail;
         }
     }
@@ -1423,7 +1422,6 @@ fail:
      */
     parallels_free_used_bitmap(bs);
 
-    error_free(s->migration_blocker);
     g_free(s->bat_dirty_bmap);
     qemu_vfree(s->header);
     return ret;
@@ -1448,8 +1446,7 @@ static void parallels_close(BlockDriverState *bs)
     g_free(s->bat_dirty_bmap);
     qemu_vfree(s->header);
 
-    migrate_del_blocker(s->migration_blocker);
-    error_free(s->migration_blocker);
+    migrate_del_blocker(&s->migration_blocker);
 }
 
 static bool parallels_is_support_dirty_bitmaps(BlockDriverState *bs)
@@ -307,9 +307,8 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
                bdrv_get_device_or_node_name(bs));
     bdrv_graph_rdunlock_main_loop();
 
-    ret = migrate_add_blocker(s->migration_blocker, errp);
+    ret = migrate_add_blocker(&s->migration_blocker, errp);
     if (ret < 0) {
-        error_free(s->migration_blocker);
         goto fail;
     }
 
@@ -802,8 +801,7 @@ static void qcow_close(BlockDriverState *bs)
     g_free(s->cluster_cache);
     g_free(s->cluster_data);
 
-    migrate_del_blocker(s->migration_blocker);
-    error_free(s->migration_blocker);
+    migrate_del_blocker(&s->migration_blocker);
 }
 
 static int coroutine_fn GRAPH_UNLOCKED
@@ -498,9 +498,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
                bdrv_get_device_or_node_name(bs));
     bdrv_graph_rdunlock_main_loop();
 
-    ret = migrate_add_blocker(s->migration_blocker, errp);
+    ret = migrate_add_blocker(&s->migration_blocker, errp);
     if (ret < 0) {
-        error_free(s->migration_blocker);
         goto fail_free_bmap;
     }
 
@@ -988,8 +987,7 @@ static void vdi_close(BlockDriverState *bs)
 
     qemu_vfree(s->bmap);
 
-    migrate_del_blocker(s->migration_blocker);
-    error_free(s->migration_blocker);
+    migrate_del_blocker(&s->migration_blocker);
 }
 
 static int vdi_has_zero_init(BlockDriverState *bs)
@@ -985,8 +985,7 @@ static void vhdx_close(BlockDriverState *bs)
     s->bat = NULL;
     qemu_vfree(s->parent_entries);
     s->parent_entries = NULL;
-    migrate_del_blocker(s->migration_blocker);
-    error_free(s->migration_blocker);
+    migrate_del_blocker(&s->migration_blocker);
     qemu_vfree(s->log.hdr);
     s->log.hdr = NULL;
     vhdx_region_unregister_all(s);
@@ -1097,9 +1096,8 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
     error_setg(&s->migration_blocker, "The vhdx format used by node '%s' "
                "does not support live migration",
                bdrv_get_device_or_node_name(bs));
-    ret = migrate_add_blocker(s->migration_blocker, errp);
+    ret = migrate_add_blocker(&s->migration_blocker, errp);
     if (ret < 0) {
-        error_free(s->migration_blocker);
         goto fail;
     }
 
@@ -1386,9 +1386,8 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags,
     error_setg(&s->migration_blocker, "The vmdk format used by node '%s' "
                "does not support live migration",
                bdrv_get_device_or_node_name(bs));
-    ret = migrate_add_blocker(s->migration_blocker, errp);
+    ret = migrate_add_blocker(&s->migration_blocker, errp);
     if (ret < 0) {
-        error_free(s->migration_blocker);
         goto fail;
     }
 
@@ -2867,8 +2866,7 @@ static void vmdk_close(BlockDriverState *bs)
     vmdk_free_extents(bs);
     g_free(s->create_type);
 
-    migrate_del_blocker(s->migration_blocker);
-    error_free(s->migration_blocker);
+    migrate_del_blocker(&s->migration_blocker);
 }
 
 static int64_t coroutine_fn GRAPH_RDLOCK
@@ -452,9 +452,8 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
                bdrv_get_device_or_node_name(bs));
     bdrv_graph_rdunlock_main_loop();
 
-    ret = migrate_add_blocker(s->migration_blocker, errp);
+    ret = migrate_add_blocker(&s->migration_blocker, errp);
     if (ret < 0) {
-        error_free(s->migration_blocker);
         goto fail;
     }
 
@@ -1190,8 +1189,7 @@ static void vpc_close(BlockDriverState *bs)
     g_free(s->pageentry_u8);
 #endif
 
-    migrate_del_blocker(s->migration_blocker);
-    error_free(s->migration_blocker);
+    migrate_del_blocker(&s->migration_blocker);
 }
 
 static QemuOptsList vpc_create_opts = {
@@ -1268,9 +1268,8 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
                    "The vvfat (rw) format used by node '%s' "
                    "does not support live migration",
                    bdrv_get_device_or_node_name(bs));
-        ret = migrate_add_blocker(s->migration_blocker, errp);
+        ret = migrate_add_blocker(&s->migration_blocker, errp);
         if (ret < 0) {
-            error_free(s->migration_blocker);
             goto fail;
         }
     }
@@ -3239,8 +3238,7 @@ static void vvfat_close(BlockDriverState *bs)
     g_free(s->cluster_buffer);
 
     if (s->qcow) {
-        migrate_del_blocker(s->migration_blocker);
-        error_free(s->migration_blocker);
+        migrate_del_blocker(&s->migration_blocker);
     }
 }
 
@@ -111,7 +111,7 @@ static int dump_cleanup(DumpState *s)
             qemu_mutex_unlock_iothread();
         }
     }
-    migrate_del_blocker(dump_migration_blocker);
+    migrate_del_blocker(&dump_migration_blocker);
 
     return 0;
 }
@@ -2158,7 +2158,7 @@ void qmp_dump_guest_memory(bool paging, const char *file,
      * Allows even for -only-migratable, but forbid migration during the
      * process of dump guest memory.
      */
-    if (migrate_add_blocker_internal(dump_migration_blocker, errp)) {
+    if (migrate_add_blocker_internal(&dump_migration_blocker, errp)) {
         /* Remember to release the fd before passing it over to dump state */
         close(fd);
         return;
hw/9pfs/9p.c
@@ -406,11 +406,7 @@ static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
          * delete the migration blocker. Ideally, this
          * should be hooked to transport close notification
          */
-        if (pdu->s->migration_blocker) {
-            migrate_del_blocker(pdu->s->migration_blocker);
-            error_free(pdu->s->migration_blocker);
-            pdu->s->migration_blocker = NULL;
-        }
+        migrate_del_blocker(&pdu->s->migration_blocker);
     }
     return free_fid(pdu, fidp);
 }
@@ -1505,10 +1501,8 @@ static void coroutine_fn v9fs_attach(void *opaque)
     error_setg(&s->migration_blocker,
                "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
                s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
-    err = migrate_add_blocker(s->migration_blocker, NULL);
+    err = migrate_add_blocker(&s->migration_blocker, NULL);
     if (err < 0) {
-        error_free(s->migration_blocker);
-        s->migration_blocker = NULL;
         clunk_fid(s, fid);
         goto out;
     }
@@ -184,8 +184,7 @@ virtio_gpu_base_device_realize(DeviceState *qdev,
 
     if (virtio_gpu_virgl_enabled(g->conf)) {
         error_setg(&g->migration_blocker, "virgl is not yet migratable");
-        if (migrate_add_blocker(g->migration_blocker, errp) < 0) {
-            error_free(g->migration_blocker);
+        if (migrate_add_blocker(&g->migration_blocker, errp) < 0) {
             return false;
         }
     }
@@ -253,10 +252,7 @@ virtio_gpu_base_device_unrealize(DeviceState *qdev)
 {
     VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
 
-    if (g->migration_blocker) {
-        migrate_del_blocker(g->migration_blocker);
-        error_free(g->migration_blocker);
-    }
+    migrate_del_blocker(&g->migration_blocker);
 }
 
 static void
@@ -516,8 +516,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
         if (!kvm_arm_gic_can_save_restore(s)) {
             error_setg(&s->migration_blocker, "This operating system kernel does "
                                               "not support vGICv2 migration");
-            if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
-                error_free(s->migration_blocker);
+            if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
                 return;
             }
         }
@@ -114,8 +114,7 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
                                GITS_CTLR)) {
         error_setg(&s->migration_blocker, "This operating system kernel "
                    "does not support vITS migration");
-        if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
-            error_free(s->migration_blocker);
+        if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
             return;
         }
     } else {
@@ -878,8 +878,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
                               GICD_CTLR)) {
         error_setg(&s->migration_blocker, "This operating system kernel does "
                                           "not support vGICv3 migration");
-        if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
-            error_free(s->migration_blocker);
+        if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
             return;
         }
     }
@@ -903,8 +903,7 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
     if (!ivshmem_is_master(s)) {
         error_setg(&s->migration_blocker,
                    "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
-        if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
-            error_free(s->migration_blocker);
+        if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
             return;
         }
     }
@@ -922,10 +921,7 @@ static void ivshmem_exit(PCIDevice *dev)
     IVShmemState *s = IVSHMEM_COMMON(dev);
     int i;
 
-    if (s->migration_blocker) {
-        migrate_del_blocker(s->migration_blocker);
-        error_free(s->migration_blocker);
-    }
+    migrate_del_blocker(&s->migration_blocker);
 
     if (memory_region_is_mapped(s->ivshmem_bar2)) {
         if (!s->hostmem) {
@@ -63,7 +63,7 @@ static int kvmppc_svm_init(ConfidentialGuestSupport *cgs, Error **errp)
     /* add migration blocker */
     error_setg(&pef_mig_blocker, "PEF: Migration is not implemented");
     /* NB: This can fail if --only-migratable is used */
-    migrate_add_blocker(pef_mig_blocker, &error_fatal);
+    migrate_add_blocker(&pef_mig_blocker, &error_fatal);
 
     cgs->ready = true;
 
@@ -1761,7 +1761,7 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason)
     /* Signal all vCPUs waiting on this condition */
     qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond);
 
-    migrate_del_blocker(spapr->fwnmi_migration_blocker);
+    migrate_del_blocker(&spapr->fwnmi_migration_blocker);
 }
 
 static void spapr_create_nvram(SpaprMachineState *spapr)
@@ -2937,13 +2937,6 @@ static void spapr_machine_init(MachineState *machine)
         spapr_create_lmb_dr_connectors(spapr);
     }
 
-    if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) {
-        /* Create the error string for live migration blocker */
-        error_setg(&spapr->fwnmi_migration_blocker,
-            "A machine check is being handled during migration. The handler"
-            "may run and log hardware error on the destination");
-    }
-
     if (mc->nvdimm_supported) {
         spapr_create_nvdimm_dr_connectors(spapr);
     }
@@ -920,7 +920,11 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
          * fails when running with -only-migrate. A proper interface to
          * delay migration completion for a bit could avoid that.
          */
-        ret = migrate_add_blocker(spapr->fwnmi_migration_blocker, NULL);
+        error_setg(&spapr->fwnmi_migration_blocker,
+            "A machine check is being handled during migration. The handler"
+            "may run and log hardware error on the destination");
+
+        ret = migrate_add_blocker(&spapr->fwnmi_migration_blocker, NULL);
         if (ret == -EBUSY) {
             warn_report("Received a fwnmi while migration was in progress");
         }
@@ -496,7 +496,7 @@ static void rtas_ibm_nmi_interlock(PowerPCCPU *cpu,
     spapr->fwnmi_machine_check_interlock = -1;
     qemu_cond_signal(&spapr->fwnmi_machine_check_interlock_cond);
     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
-    migrate_del_blocker(spapr->fwnmi_migration_blocker);
+    migrate_del_blocker(&spapr->fwnmi_migration_blocker);
 }
 
 static struct rtas_call {
@@ -107,8 +107,7 @@ static void pci_proxy_dev_realize(PCIDevice *device, Error **errp)
 
     error_setg(&dev->migration_blocker, "%s does not support migration",
                TYPE_PCI_PROXY_DEV);
-    if (migrate_add_blocker(dev->migration_blocker, errp) < 0) {
-        error_free(dev->migration_blocker);
+    if (migrate_add_blocker(&dev->migration_blocker, errp) < 0) {
         object_unref(dev->ioc);
         return;
     }
@@ -134,9 +133,7 @@ static void pci_proxy_dev_exit(PCIDevice *pdev)
         qio_channel_close(dev->ioc, NULL);
     }
 
-    migrate_del_blocker(dev->migration_blocker);
-
-    error_free(dev->migration_blocker);
+    migrate_del_blocker(&dev->migration_blocker);
 
     proxy_memory_listener_deconfigure(&dev->proxy_listener);
 
@@ -332,8 +332,7 @@ static void s390_machine_unprotect(S390CcwMachineState *ms)
         s390_pv_vm_disable();
     }
     ms->pv = false;
-    migrate_del_blocker(pv_mig_blocker);
-    error_free_or_abort(&pv_mig_blocker);
+    migrate_del_blocker(&pv_mig_blocker);
     ram_block_discard_disable(false);
 }
 
@@ -356,11 +355,10 @@ static int s390_machine_protect(S390CcwMachineState *ms)
 
     error_setg(&pv_mig_blocker,
                "protected VMs are currently not migratable.");
-    rc = migrate_add_blocker(pv_mig_blocker, &local_err);
+    rc = migrate_add_blocker(&pv_mig_blocker, &local_err);
     if (rc) {
         ram_block_discard_disable(false);
         error_report_err(local_err);
-        error_free_or_abort(&pv_mig_blocker);
         return rc;
     }
 
@@ -368,8 +366,7 @@ static int s390_machine_protect(S390CcwMachineState *ms)
     rc = s390_pv_vm_enable();
     if (rc) {
         ram_block_discard_disable(false);
-        migrate_del_blocker(pv_mig_blocker);
-        error_free_or_abort(&pv_mig_blocker);
+        migrate_del_blocker(&pv_mig_blocker);
         return rc;
     }
 
@@ -208,7 +208,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
                    "When external environment supports it (Orchestrator migrates "
                    "target SCSI device state or use shared storage over network), "
                    "set 'migratable' property to true to enable migration.");
-        if (migrate_add_blocker(vsc->migration_blocker, errp) < 0) {
+        if (migrate_add_blocker(&vsc->migration_blocker, errp) < 0) {
             goto free_virtio;
         }
     }
@@ -241,10 +241,9 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
 free_vqs:
     g_free(vqs);
     if (!vsc->migratable) {
-        migrate_del_blocker(vsc->migration_blocker);
+        migrate_del_blocker(&vsc->migration_blocker);
     }
 free_virtio:
-    error_free(vsc->migration_blocker);
     virtio_scsi_common_unrealize(dev);
 close_fd:
     if (vhostfd >= 0) {
@@ -260,8 +259,7 @@ static void vhost_scsi_unrealize(DeviceState *dev)
     struct vhost_virtqueue *vqs = vsc->dev.vqs;
 
     if (!vsc->migratable) {
-        migrate_del_blocker(vsc->migration_blocker);
-        error_free(vsc->migration_blocker);
+        migrate_del_blocker(&vsc->migration_blocker);
     }
 
     /* This will stop vhost backend. */
@@ -129,11 +129,7 @@ int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
     error_setg(&multiple_devices_migration_blocker,
               "Multiple VFIO devices migration is supported only if all of "
               "them support P2P migration");
-    ret = migrate_add_blocker(multiple_devices_migration_blocker, errp);
-    if (ret < 0) {
-        error_free(multiple_devices_migration_blocker);
-        multiple_devices_migration_blocker = NULL;
-    }
+    ret = migrate_add_blocker(&multiple_devices_migration_blocker, errp);
 
     return ret;
 }
@@ -145,9 +141,7 @@ void vfio_unblock_multiple_devices_migration(void)
         return;
     }
 
-    migrate_del_blocker(multiple_devices_migration_blocker);
-    error_free(multiple_devices_migration_blocker);
-    multiple_devices_migration_blocker = NULL;
+    migrate_del_blocker(&multiple_devices_migration_blocker);
 }
 
 bool vfio_viommu_preset(VFIODevice *vbasedev)
@@ -891,8 +891,6 @@ static void vfio_migration_deinit(VFIODevice *vbasedev)
 
 static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
 {
-    int ret;
-
     if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
         error_propagate(errp, err);
         return -EINVAL;
@@ -901,13 +899,7 @@ static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
     vbasedev->migration_blocker = error_copy(err);
     error_free(err);
 
-    ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
-    if (ret < 0) {
-        error_free(vbasedev->migration_blocker);
-        vbasedev->migration_blocker = NULL;
-    }
-
-    return ret;
+    return migrate_add_blocker(&vbasedev->migration_blocker, errp);
 }
 
 /* ---------------------------------------------------------------------- */
@@ -994,9 +986,5 @@ void vfio_migration_exit(VFIODevice *vbasedev)
         vfio_migration_deinit(vbasedev);
     }
 
-    if (vbasedev->migration_blocker) {
-        migrate_del_blocker(vbasedev->migration_blocker);
-        error_free(vbasedev->migration_blocker);
-        vbasedev->migration_blocker = NULL;
-    }
+    migrate_del_blocker(&vbasedev->migration_blocker);
 }
@@ -1527,9 +1527,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     }
 
     if (hdev->migration_blocker != NULL) {
-        r = migrate_add_blocker(hdev->migration_blocker, errp);
+        r = migrate_add_blocker(&hdev->migration_blocker, errp);
         if (r < 0) {
-            error_free(hdev->migration_blocker);
             goto fail_busyloop;
         }
     }
@@ -1597,10 +1596,7 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
         memory_listener_unregister(&hdev->memory_listener);
         QLIST_REMOVE(hdev, entry);
     }
-    if (hdev->migration_blocker) {
-        migrate_del_blocker(hdev->migration_blocker);
-        error_free(hdev->migration_blocker);
-    }
+    migrate_del_blocker(&hdev->migration_blocker);
     g_free(hdev->mem);
     g_free(hdev->mem_sections);
     if (hdev->vhost_ops) {
@@ -17,19 +17,23 @@
 /**
  * @migrate_add_blocker - prevent migration from proceeding
  *
- * @reason - an error to be returned whenever migration is attempted
+ * @reasonp - address of an error to be returned whenever migration is attempted
 *
  * @errp - [out] The reason (if any) we cannot block migration right now.
 *
  * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
 */
-int migrate_add_blocker(Error *reason, Error **errp);
+int migrate_add_blocker(Error **reasonp, Error **errp);
 
 /**
  * @migrate_add_blocker_internal - prevent migration from proceeding without
  *                                 only-migrate implications
 *
- * @reason - an error to be returned whenever migration is attempted
+ * @reasonp - address of an error to be returned whenever migration is attempted
 *
  * @errp - [out] The reason (if any) we cannot block migration right now.
 *
@@ -38,14 +42,20 @@ int migrate_add_blocker(Error *reason, Error **errp);
  * Some of the migration blockers can be temporary (e.g., for a few seconds),
  * so it shouldn't need to conflict with "-only-migratable". For those cases,
  * we can call this function rather than @migrate_add_blocker().
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
 */
-int migrate_add_blocker_internal(Error *reason, Error **errp);
+int migrate_add_blocker_internal(Error **reasonp, Error **errp);
 
 /**
- * @migrate_del_blocker - remove a blocking error from migration
+ * @migrate_del_blocker - remove a blocking error from migration and free it.
 *
- * @reason - the error blocking migration
+ * @reasonp - address of the error blocking migration
+ *
+ * This function frees *@reasonp and sets it to NULL.
 */
-void migrate_del_blocker(Error *reason);
+void migrate_del_blocker(Error **reasonp);
 
 #endif
@@ -1465,35 +1465,41 @@ int migrate_init(MigrationState *s, Error **errp)
     return 0;
 }
 
-int migrate_add_blocker_internal(Error *reason, Error **errp)
+int migrate_add_blocker_internal(Error **reasonp, Error **errp)
 {
     /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
     if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
-        error_propagate_prepend(errp, error_copy(reason),
+        error_propagate_prepend(errp, *reasonp,
                                 "disallowing migration blocker "
                                 "(migration/snapshot in progress) for: ");
+        *reasonp = NULL;
         return -EBUSY;
     }
 
-    migration_blockers = g_slist_prepend(migration_blockers, reason);
+    migration_blockers = g_slist_prepend(migration_blockers, *reasonp);
     return 0;
 }
 
-int migrate_add_blocker(Error *reason, Error **errp)
+int migrate_add_blocker(Error **reasonp, Error **errp)
 {
     if (only_migratable) {
-        error_propagate_prepend(errp, error_copy(reason),
+        error_propagate_prepend(errp, *reasonp,
                                 "disallowing migration blocker "
                                 "(--only-migratable) for: ");
+        *reasonp = NULL;
         return -EACCES;
     }
 
-    return migrate_add_blocker_internal(reason, errp);
+    return migrate_add_blocker_internal(reasonp, errp);
 }
 
-void migrate_del_blocker(Error *reason)
+void migrate_del_blocker(Error **reasonp)
 {
-    migration_blockers = g_slist_remove(migration_blockers, reason);
+    if (*reasonp) {
+        migration_blockers = g_slist_remove(migration_blockers, *reasonp);
+        error_free(*reasonp);
+        *reasonp = NULL;
+    }
 }
 
 void qmp_migrate_incoming(const char *uri, Error **errp)
@@ -1,11 +1,11 @@
 #include "qemu/osdep.h"
 #include "migration/blocker.h"
 
-int migrate_add_blocker(Error *reason, Error **errp)
+int migrate_add_blocker(Error **reasonp, Error **errp)
 {
     return 0;
 }
 
-void migrate_del_blocker(Error *reason)
+void migrate_del_blocker(Error **reasonp)
 {
 }
@@ -1603,7 +1603,7 @@ static int hyperv_init_vcpu(X86CPU *cpu)
         error_setg(&hv_passthrough_mig_blocker,
                    "'hv-passthrough' CPU flag prevents migration, use explicit"
                    " set of hv-* flags instead");
-        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
+        ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
         if (ret < 0) {
             error_report_err(local_err);
             return ret;
@@ -1617,7 +1617,7 @@ static int hyperv_init_vcpu(X86CPU *cpu)
                    " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                    " make sure SMT is disabled and/or that vCPUs are properly"
                    " pinned)");
-        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
+        ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
         if (ret < 0) {
             error_report_err(local_err);
             return ret;
@@ -2213,7 +2213,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
             error_setg(&invtsc_mig_blocker,
                        "State blocked by non-migratable CPU device"
                        " (invtsc flag)");
-            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
+            r = migrate_add_blocker(&invtsc_mig_blocker, &local_err);
             if (r < 0) {
                 error_report_err(local_err);
                 return r;
@@ -2271,7 +2271,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
     return 0;
 
  fail:
-    migrate_del_blocker(invtsc_mig_blocker);
+    migrate_del_blocker(&invtsc_mig_blocker);
 
     return r;
 }
@@ -929,9 +929,8 @@ nvmm_init_vcpu(CPUState *cpu)
         error_setg(&nvmm_migration_blocker,
                    "NVMM: Migration not supported");
 
-        if (migrate_add_blocker(nvmm_migration_blocker, &local_error) < 0) {
+        if (migrate_add_blocker(&nvmm_migration_blocker, &local_error) < 0) {
             error_report_err(local_error);
-            error_free(nvmm_migration_blocker);
             return -EINVAL;
         }
     }
@@ -891,7 +891,7 @@ sev_launch_finish(SevGuestState *sev)
     /* add migration blocker */
     error_setg(&sev_mig_blocker,
                "SEV: Migration is not implemented");
-    migrate_add_blocker(sev_mig_blocker, &error_fatal);
+    migrate_add_blocker(&sev_mig_blocker, &error_fatal);
 }
 
 static void
@@ -2160,9 +2160,8 @@ int whpx_init_vcpu(CPUState *cpu)
                    "State blocked due to non-migratable CPUID feature support,"
                    "dirty memory tracking support, and XSAVE/XRSTOR support");
 
-        if (migrate_add_blocker(whpx_migration_blocker, &local_error) < 0) {
+        if (migrate_add_blocker(&whpx_migration_blocker, &local_error) < 0) {
             error_report_err(local_error);
-            error_free(whpx_migration_blocker);
             ret = -EINVAL;
             goto error;
         }
@@ -671,7 +671,7 @@ static void vdagent_chr_open(Chardev *chr,
     return;
 #endif
 
-    if (migrate_add_blocker(vd->migration_blocker, errp) != 0) {
+    if (migrate_add_blocker(&vd->migration_blocker, errp) != 0) {
         return;
     }
 
@@ -924,13 +924,12 @@ static void vdagent_chr_fini(Object *obj)
 {
     VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(obj);
 
-    migrate_del_blocker(vd->migration_blocker);
+    migrate_del_blocker(&vd->migration_blocker);
     vdagent_disconnect(vd);
     if (vd->mouse_hs) {
         qemu_input_handler_unregister(vd->mouse_hs);
     }
     buffer_free(&vd->outbuf);
-    error_free(vd->migration_blocker);
 }
 
 static const TypeInfo vdagent_chr_type_info = {