# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the
#     target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied per second by the guest
#     (since 1.3)
#
# @mbps: throughput in megabits/sec.  (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy phase
#     (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages.  This is
#     between 0 and @dirty-sync-count * @multifd-channels.  (since
#     7.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int',
           'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache misses
#
# @cache-miss-rate: rate of cache misses (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress
#     data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode.  (since
#     2.5)
#
# @postcopy-paused: during postcopy but paused.  (since 3.0)
#
# @postcopy-recover-setup: setup phase for a postcopy recovery
#     process, preparing for a recovery phase to start.  (since 9.1)
#
# @postcopy-recover: trying to recover from a paused postcopy.  (since
#     3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during the migration process.
#
# @colo: VM is in the process of fault tolerance; the VM cannot enter
#     this state unless the colo capability is enabled for migration.
#     (since 2.8)
#
# @pre-switchover: Paused before device serialisation.  (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is
#     enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed.  (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover-setup',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: amount of bytes transferred to the target VM by VFIO
#     devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated.
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed'.  (since 1.2)
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly; total
#     downtime in milliseconds for the guest.  (since 1.3)
#
# @expected-downtime: only present while migration is active; expected
#     downtime in milliseconds for the guest in the last walk of the
#     dirty bitmap.  (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued.  This is
#     designed to provide an accounting of any activities (such as
#     RDMA pinning) which may be expensive, but do not actually occur
#     during the iterative migration rounds themselves.  (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge.  This is only present when
#     auto-converge has started throttling guest cpus.  (Since 2.7)
#
# @error-desc: the human readable error description string.  Clients
#     should not attempt to parse the error strings.  (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPUs were blocked during
#     postcopy live migration.  This is only present when the
#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled.  (Since 3.0)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration
#     is supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is
#     blocked.  Present and non-empty when migration is blocked.
#     (since 6.0)
#
# @dirty-limit-throttle-time-per-round: Maximum throttle time (in
#     microseconds) of virtual CPUs in each dirty ring full round,
#     which shows how MigrationCapability dirty-limit affects the
#     guest during live migration.  (Since 8.1)
#
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
#     (in microseconds) for each dirty ring full round.  The value
#     equals the dirty ring memory size divided by the average dirty
#     page rate of the virtual CPU, which can be used to observe the
#     average memory load of the virtual CPU indirectly.  Note that
#     zero means the guest doesn't dirty memory.  (Since 8.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*socket-address': ['SocketAddress'],
           '*dirty-limit-throttle-time-per-round': 'uint64',
           '*dirty-limit-ring-full-time': 'uint64'} }

##
# @query-migrate:
#
# Returns information about the current migration process.  If
# migration is active there will be another json-object with RAM
# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# .. qmp-example::
#     :title: Before the first migration
#
#     -> { "execute": "query-migrate" }
#     <- { "return": {} }
#
# .. qmp-example::
#     :title: Migration is done and has succeeded
#
#     -> { "execute": "query-migrate" }
#     <- { "return": {
#             "status": "completed",
#             "total-time":12345,
#             "setup-time":12345,
#             "downtime":12345,
#             "ram":{
#               "transferred":123,
#               "remaining":123,
#               "total":246,
#               "duplicate":123,
#               "normal":123,
#               "normal-bytes":123456,
#               "dirty-sync-count":15
#             }
#          }
#        }
#
# .. qmp-example::
#     :title: Migration is done and has failed
#
#     -> { "execute": "query-migrate" }
#     <- { "return": { "status": "failed" } }
#
# .. qmp-example::
#     :title: Migration is being performed
#
#     -> { "execute": "query-migrate" }
#     <- {
#          "return":{
#             "status":"active",
#             "total-time":12345,
#             "setup-time":12345,
#             "expected-downtime":12345,
#             "ram":{
#                "transferred":123,
#                "remaining":123,
#                "total":246,
#                "duplicate":123,
#                "normal":123,
#                "normal-bytes":123456,
#                "dirty-sync-count":15
#             }
#          }
#        }
#
# .. qmp-example::
#     :title: Migration is being performed and XBZRLE is active
#
#     -> { "execute": "query-migrate" }
#     <- {
#          "return":{
#             "status":"active",
#             "total-time":12345,
#             "setup-time":12345,
#             "expected-downtime":12345,
#             "ram":{
#                "total":1057024,
#                "remaining":1053304,
#                "transferred":3720,
#                "duplicate":10,
#                "normal":3333,
#                "normal-bytes":3412992,
#                "dirty-sync-count":15
#             },
#             "xbzrle-cache":{
#                "cache-size":67108864,
#                "bytes":20971520,
#                "pages":2444343,
#                "cache-miss":2244,
#                "cache-miss-rate":0.123,
#                "encoding-rate":80.1,
#                "overflow":34434
#             }
#          }
#        }
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding).  This feature allows us to minimize migration traffic
#     for certain workloads, by sending compressed differences of the
#     pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory
#     footprint is mlock()'d on demand or all at once.  Refer to
#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently.  This essentially saves 1MB of zeroes per block on
#     the wire.  Enabling requires source and target VM to support
#     this feature.  To enable it is sufficient to enable the
#     capability on the source VM.  The feature is disabled by
#     default.  (since 1.6)
#
# @events: generate events for each migration state change (since 2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down
#     the guest to speed up convergence of RAM migration.  (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of
#     RAM has been migrated, pulling the remaining pages along as
#     needed.  The capability must have the same setting on both
#     source and target or migration will not even start.  NOTE: If
#     the migration fails during postcopy the VM will fail.  (since
#     2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the
#     VM on the primary side will be migrated continuously to the VM
#     on the secondary side; this process is called COarse-Grain LOck
#     Stepping (COLO) for Non-stop Service.  (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on
#     the source during postcopy-ram migration.  (since 2.9)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy.  (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
#     serialising device state and before disabling block IO (since
#     2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration.  (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     that is accessible on the destination machine.  (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same.  (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts.  The VM RAM is saved while the VM is running.
#     (since 6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration.  When true, enables a zero-copy mechanism for sending
#     memory pages, if host supports it.  Requires that QEMU be
#     permitted to use locked memory for guest RAM pages.  (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt precopy stream, so postcopy
#     requests will be handled faster.  This is a performance feature
#     and should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# @switchover-ack: If enabled, migration will not stop the source VM
#     and complete the migration until an ACK is received from the
#     destination that it's OK to do so.  Exactly when this ACK is
#     sent depends on the migrated devices that use this feature.  For
#     example, a device can use it to make sure some of its data is
#     sent and loaded in the destination before doing switchover.
#     This can reduce downtime if devices that support this capability
#     are present.  'return-path' capability must be enabled to use
#     it.  (since 8.1)
#
# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
#     keep their dirty page rate within @vcpu-dirty-limit.  This can
#     improve responsiveness of large guests during live migration,
#     and can result in more stable read performance.  Requires KVM
#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
#
# @mapped-ram: Migrate using fixed offsets in the migration file for
#     each RAM page.  Requires a migration URI that supports seeking,
#     such as a file.  (since 9.0)
#
# Features:
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# @deprecated: Member @zero-blocks is deprecated as being part of
#     block migration, which was already removed.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge',
           { 'name': 'zero-blocks', 'features': [ 'deprecated' ] },
           'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
           'dirty-limit', 'mapped-ram'] }

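# Usage sketch (illustrative, not generated documentation): a
# capability such as @postcopy-ram is toggled with the
# migrate-set-capabilities command defined below, and must be set the
# same way on both source and destination before migration starts:
#
#     -> { "execute": "migrate-set-capabilities", "arguments":
#          { "capabilities": [
#              { "capability": "postcopy-ram", "state": true } ] } }
#     <- { "return": {} }
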
##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# .. qmp-example::
#
#     -> { "execute": "migrate-set-capabilities", "arguments":
#          { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
#     <- { "return": {} }
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# .. qmp-example::
#
#     -> { "execute": "query-migrate-capabilities" }
#     <- { "return": [
#             {"state": false, "capability": "xbzrle"},
#             {"state": false, "capability": "rdma-pin-all"},
#             {"state": false, "capability": "auto-converge"},
#             {"state": false, "capability": "zero-blocks"},
#             {"state": true, "capability": "events"},
#             {"state": false, "capability": "postcopy-ram"},
#             {"state": false, "capability": "x-colo"}
#          ]}
##
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
#
# @zlib: use zlib compression method.
#
# @zstd: use zstd compression method.
#
# @qatzip: use qatzip compression method.  (Since 9.2)
#
# @qpl: use qpl compression method.  Query Processing Library (qpl)
#     is based on the deflate compression algorithm and uses the
#     Intel In-Memory Analytics Accelerator (IAA) for accelerated
#     compression and decompression.  (Since 9.1)
#
# @uadk: use UADK library compression method.  (Since 9.1)
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
  'prefix': 'MULTIFD_COMPRESSION',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' },
            { 'name': 'qatzip', 'if': 'CONFIG_QATZIP'},
            { 'name': 'qpl', 'if': 'CONFIG_QPL' },
            { 'name': 'uadk', 'if': 'CONFIG_UADK' } ] }

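# Usage sketch (illustrative, not generated documentation): a multifd
# compression method is selected through the @multifd-compression
# migration parameter described below; the value shown here assumes a
# QEMU built with CONFIG_ZSTD and the multifd capability enabled:
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "multifd-compression": "zstd" } }
#     <- { "return": {} }
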
##
# @MigMode:
#
# @normal: the original form of migration.  (since 8.2)
#
# @cpr-reboot: The migrate command stops the VM and saves state to
#     the URI.  After quitting QEMU, the user resumes by running
#     QEMU -incoming.
#
#     This mode allows the user to quit QEMU, optionally update and
#     reboot the OS, and restart QEMU.  If the user reboots, the URI
#     must persist across the reboot, such as by using a file.
#
#     Unlike normal mode, the use of certain local storage options
#     does not block the migration, but the user must not modify the
#     contents of guest block devices between the quit and restart.
#
#     This mode supports VFIO devices provided the user first puts
#     the guest in the suspended runstate, such as by issuing
#     guest-suspend-ram to the QEMU guest agent.
#
#     Best performance is achieved when the memory backend is shared
#     and the @x-ignore-shared migration capability is set, but this
#     is not required.  Further, if the user reboots before restarting
#     such a configuration, the shared memory must persist across the
#     reboot, such as by backing it with a dax device.
#
#     @cpr-reboot may not be used with postcopy, background-snapshot,
#     or COLO.
#
#     (since 8.2)
##
{ 'enum': 'MigMode',
  'data': [ 'normal', 'cpr-reboot' ] }

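# Usage sketch (illustrative, not generated documentation), following
# the @cpr-reboot description above: the mode is selected with the
# @mode migration parameter and state is then saved to a file URI.
# The file path is a placeholder:
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "mode": "cpr-reboot" } }
#     <- { "return": {} }
#     -> { "execute": "migrate",
#          "arguments": { "uri": "file:/var/tmp/vm.state" } }
#     <- { "return": {} }
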
##
# @ZeroPageDetection:
#
# @none: Do not perform zero page checking.
#
# @legacy: Perform zero page checking in main migration thread.
#
# @multifd: Perform zero page checking in multifd sender thread if
#     multifd migration is enabled, else in the main migration thread
#     as for @legacy.
#
# Since: 9.0
##
{ 'enum': 'ZeroPageDetection',
  'data': [ 'none', 'legacy', 'multifd' ] }

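# Usage sketch (illustrative, not generated documentation): the
# detection method is chosen through the @zero-page-detection
# migration parameter described below, for example to fall back to
# the legacy behaviour:
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "zero-page-detection": "legacy" } }
#     <- { "return": {} }
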
##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent or
#     transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap.  (since
#     6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the node
#     name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }

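# Usage sketch (illustrative, not generated documentation): these
# structs are consumed by the @block-bitmap-mapping migration
# parameter described below.  Node and bitmap names here are
# placeholders:
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "block-bitmap-mapping": [
#              { "node-name": "disk0", "alias": "main-disk",
#                "bitmaps": [ { "name": "bitmap0",
#                               "alias": "bitmap-alias0" } ] } ] } }
#     <- { "return": {} }
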
##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as a
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
#     stage.  At the tail stage of throttling, the Guest is very
#     sensitive to CPU percentage while the @cpu-throttle-increment
#     is usually excessive at that point.  If this parameter is true,
#     we will compute the ideal CPU percentage used by the Guest,
#     which may exactly make the dirty rate match the dirty rate
#     threshold.  Then we will choose a smaller throttle increment
#     between the one specified by @cpu-throttle-increment and the
#     one generated by the ideal CPU percentage.  Therefore, it is
#     compatible with traditional throttling, while the throttle
#     increment won't be excessive at the tail stage.  The default
#     value is false.  (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.  Setting this to a
#     non-empty string enables TLS for all migrations.  An empty
#     string means that QEMU will use plain text mode for migration,
#     rather than TLS.  (Since 2.7)
#
# @tls-hostname: migration target's hostname for validating the
#     server's x509 certificate identity.  If empty, QEMU will use the
#     hostname from the migration URI, if any.  A non-empty value is
#     required when using x509 based TLS credentials and the migration
#     URI does not include a hostname, such as fd: or exec: based
#     migration.  (Since 2.7)
#
#     Note: empty value works only since 2.9.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: to set the available bandwidth that
#     migration can use during switchover phase.  NOTE!  This does not
#     limit the bandwidth during switchover, but only for calculations
#     when making decisions to switchover.  By default, this value is
#     zero, which means QEMU will estimate the bandwidth
#     automatically.  This can be set when the estimated value is not
#     accurate, while the user is able to guarantee such bandwidth is
#     available when switching over.  When specified correctly, this
#     can make the switchover decision much more accurate.
#     (Since 8.2)
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @downtime-limit: set maximum tolerated downtime for migration.
|
|
|
|
# maximum downtime in milliseconds (Since 2.8)
|
|
|
|
#
|
|
|
|
# @x-checkpoint-delay: The delay time (in ms) between two COLO
|
|
|
|
# checkpoints in periodic mode. (Since 2.8)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2019-02-06 15:54:06 +03:00
|
|
|
# @multifd-channels: Number of channels used to migrate data in
|
2023-04-28 13:54:29 +03:00
|
|
|
# parallel. This is the same number that the number of sockets
|
|
|
|
# used for migration. The default value is 2 (since 4.0)
|
2016-01-15 10:56:17 +03:00
|
|
|
#
|
2017-10-05 22:30:10 +03:00
|
|
|
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
|
2023-04-28 13:54:29 +03:00
|
|
|
# needs to be a multiple of the target page size and a power of 2
|
|
|
|
# (Since 2.11)
|
2017-10-05 22:30:10 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @max-postcopy-bandwidth: Background transfer bandwidth during
|
|
|
|
# postcopy. Defaults to 0 (unlimited). In bytes per second.
|
#     (Since 3.0)
#
# @max-cpu-throttle: maximum CPU throttle percentage. Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use. Defaults to
#     none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means the best compression ratio, which will
#     consume more CPU. Defaults to 1. (Since 5.0)
#
# @multifd-qatzip-level: Set the compression level to be used in live
#     migration. The level is an integer between 1 and 9, where 1
#     means the best compression speed, and 9 means the best
#     compression ratio, which will consume more CPU. Defaults to 1.
#     (Since 9.2)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means the best compression ratio, which will
#     consume more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration. Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored. On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed. By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise. (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of the
#     dirty limit during live migration. Should be in the range 1 to
#     1000ms. Defaults to 1000ms. (Since 8.1)
#
# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
#     migration. Defaults to 1. (Since 8.1)
#
# @mode: Migration mode. See description in @MigMode. Default is
#     'normal'. (Since 8.2)
#
# @zero-page-detection: Whether and how to detect zero pages. See
#     description in @ZeroPageDetection. Default is 'multifd'.
#     (since 9.0)
#
# @direct-io: Open migration files with O_DIRECT when possible. This
#     only has effect if the @mapped-ram capability is enabled.
#     (Since 9.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'avail-switchover-bandwidth', 'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'multifd-qatzip-level',
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit',
           'mode',
           'zero-page-detection',
           'direct-io'] }

##
# @MigrateSetParameters:
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as a
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated. The
#     default value is 20. (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10. (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
#     stage of throttling. At the tail stage, the guest is very
#     sensitive to the CPU throttle percentage, and
#     @cpu-throttle-increment is usually excessive there. If this
#     parameter is true, we will compute the ideal CPU percentage
#     used by the guest, which may exactly make the dirty rate match
#     the dirty rate threshold. Then we will choose the smaller
#     throttle increment between the one specified by
#     @cpu-throttle-increment and the one generated by the ideal CPU
#     percentage. Therefore, it is compatible with traditional
#     throttling, while the throttle increment won't be excessive at
#     the tail stage. The default value is false. (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel. On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint. Setting this to a
#     non-empty string enables TLS for all migrations. An empty
#     string means that QEMU will use plain text mode for migration,
#     rather than TLS. This is the default. (Since 2.7)
#
# @tls-hostname: migration target's hostname for validating the
#     server's x509 certificate identity. If empty, QEMU will use the
#     hostname from the migration URI, if any. A non-empty value is
#     required when using x509 based TLS credentials and the migration
#     URI does not include a hostname, such as fd: or exec: based
#     migration. (Since 2.7)
#
#     Note: empty value works only since 2.9.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access. (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: The available bandwidth that migration
#     can use during the switchover phase. NOTE! This does not limit
#     the bandwidth during switchover; it is only used in the
#     calculations that decide when to switch over. By default, this
#     value is zero, which means QEMU will estimate the bandwidth
#     automatically. This can be set when the estimated value is not
#     accurate, while the user is able to guarantee such bandwidth is
#     available when switching over. When specified correctly, this
#     can make the switchover decision much more accurate.
#     (Since 8.2)
#
# @downtime-limit: set maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode. (Since 2.8)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same as the number of sockets used for
#     migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy. Defaults to 0 (unlimited). In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum CPU throttle percentage. Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use. Defaults to
#     none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means the best compression ratio, which will
#     consume more CPU. Defaults to 1. (Since 5.0)
#
# @multifd-qatzip-level: Set the compression level to be used in live
#     migration. The level is an integer between 1 and 9, where 1
#     means the best compression speed, and 9 means the best
#     compression ratio, which will consume more CPU. Defaults to 1.
#     (Since 9.2)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means the best compression ratio, which will
#     consume more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration. Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored. On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed. By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise. (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of the
#     dirty limit during live migration. Should be in the range 1 to
#     1000ms. Defaults to 1000ms. (Since 8.1)
#
# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
#     migration. Defaults to 1. (Since 8.1)
#
# @mode: Migration mode. See description in @MigMode. Default is
#     'normal'. (Since 8.2)
#
# @zero-page-detection: Whether and how to detect zero pages. See
#     description in @ZeroPageDetection. Default is 'multifd'.
#     (since 9.0)
#
# @direct-io: Open migration files with O_DIRECT when possible. This
#     only has effect if the @mapped-ram capability is enabled.
#     (Since 9.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# TODO: either fuse back into MigrationParameters, or make
#     MigrationParameters members mandatory
#
# Since: 2.4
##
{ 'struct': 'MigrateSetParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-qatzip-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode',
            '*zero-page-detection': 'ZeroPageDetection',
            '*direct-io': 'bool' } }

##
# @migrate-set-parameters:
#
# Set various migration parameters.
#
# Since: 2.4
#
# .. qmp-example::
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "multifd-channels": 5 } }
#     <- { "return": {} }
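#
# A second, purely illustrative example (the values are arbitrary and
# only demonstrate the call shape): capping migration bandwidth at
# 640 MiB/s and tolerating at most 500 ms of downtime. Other members
# of @MigrateSetParameters, such as @tls-creds or
# @multifd-compression, are set in the same way.
#
# .. qmp-example::
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "max-bandwidth": 671088640,
#                         "downtime-limit": 500 } }
#     <- { "return": {} }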
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }

##
# @MigrationParameters:
#
# The optional members aren't actually optional.
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as a
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.
#     (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
#     stage of throttling. At the tail stage, the guest is very
#     sensitive to the CPU throttle percentage, and
#     @cpu-throttle-increment is usually excessive there. If this
#     parameter is true, we will compute the ideal CPU percentage
#     used by the guest, which may exactly make the dirty rate match
#     the dirty rate threshold. Then we will choose the smaller
#     throttle increment between the one specified by
#     @cpu-throttle-increment and the one generated by the ideal CPU
#     percentage. Therefore, it is compatible with traditional
#     throttling, while the throttle increment won't be excessive at
#     the tail stage. The default value is false. (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel. On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint. An empty string
#     means that QEMU will use plain text mode for migration, rather
#     than TLS. (Since 2.7)
#
#     Note: 2.8 omits empty @tls-creds instead.
#
# @tls-hostname: migration target's hostname for validating the
#     server's x509 certificate identity. If empty, QEMU will use the
#     hostname from the migration URI, if any. (Since 2.7)
#
#     Note: 2.8 omits empty @tls-hostname instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: The available bandwidth that migration
#     can use during the switchover phase. NOTE! This does not limit
#     the bandwidth during switchover; it is only used in the
#     calculations that decide when to switch over. By default, this
#     value is zero, which means QEMU will estimate the bandwidth
#     automatically. This can be set when the estimated value is not
#     accurate, while the user is able to guarantee such bandwidth is
#     available when switching over. When specified correctly, this
#     can make the switchover decision much more accurate.
#     (Since 8.2)
#
# @downtime-limit: set maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same as the number of sockets used for
#     migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy. Defaults to 0 (unlimited). In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum CPU throttle percentage. Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use. Defaults to
#     none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means the best compression ratio, which will
#     consume more CPU. Defaults to 1. (Since 5.0)
#
# @multifd-qatzip-level: Set the compression level to be used in live
#     migration. The level is an integer between 1 and 9, where 1
#     means the best compression speed, and 9 means the best
#     compression ratio, which will consume more CPU. Defaults to 1.
#     (Since 9.2)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means the best compression ratio, which will
#     consume more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration. Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored. On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed. By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise. (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of the
#     dirty limit during live migration. Should be in the range 1 to
#     1000ms. Defaults to 1000ms. (Since 8.1)
#
# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
#     migration. Defaults to 1. (Since 8.1)
#
# @mode: Migration mode. See description in @MigMode. Default is
#     'normal'. (Since 8.2)
#
# @zero-page-detection: Whether and how to detect zero pages. See
#     description in @ZeroPageDetection. Default is 'multifd'.
#     (since 9.0)
#
# @direct-io: Open migration files with O_DIRECT when possible. This
#     only has effect if the @mapped-ram capability is enabled.
#     (Since 9.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'struct': 'MigrationParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'str',
            '*tls-hostname': 'str',
            '*tls-authz': 'str',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-qatzip-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode',
            '*zero-page-detection': 'ZeroPageDetection',
            '*direct-io': 'bool' } }

##
# @query-migrate-parameters:
#
# Returns information about the current migration parameters
#
# Returns: @MigrationParameters
#
# Since: 2.4
#
# .. qmp-example::
#
#     -> { "execute": "query-migrate-parameters" }
#     <- { "return": {
#              "multifd-channels": 2,
#              "cpu-throttle-increment": 10,
#              "cpu-throttle-initial": 20,
#              "max-bandwidth": 33554432,
#              "downtime-limit": 300
#          } }
##
{ 'command': 'query-migrate-parameters',
  'returns': 'MigrationParameters' }

##
# @migrate-start-postcopy:
#
# Follow-up to a migration command to switch the migration to postcopy
# mode. The postcopy-ram capability must be set on both source and
# destination before the original migration command.
#
# Since: 2.5
#
# .. qmp-example::
#
#     -> { "execute": "migrate-start-postcopy" }
#     <- { "return": {} }
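#
# An illustrative end-to-end sequence (the destination URI is a
# placeholder): enable the postcopy-ram capability, start the
# migration, then switch to postcopy once the precopy phase has been
# running for a while.
#
# .. qmp-example::
#
#     -> { "execute": "migrate-set-capabilities",
#          "arguments": { "capabilities": [
#              { "capability": "postcopy-ram", "state": true } ] } }
#     <- { "return": {} }
#     -> { "execute": "migrate",
#          "arguments": { "uri": "tcp:192.168.0.20:4446" } }
#     <- { "return": {} }
#     -> { "execute": "migrate-start-postcopy" }
#     <- { "return": {} }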
##
{ 'command': 'migrate-start-postcopy' }

##
# @MIGRATION:
#
# Emitted when a migration event happens
#
# @status: @MigrationStatus describing the current migration status.
#
# Since: 2.4
#
# .. qmp-example::
#
#     <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
#         "event": "MIGRATION",
#         "data": {"status": "completed"} }
##
{ 'event': 'MIGRATION',
  'data': {'status': 'MigrationStatus'}}

##
# @MIGRATION_PASS:
#
# Emitted from the source side of a migration at the start of each
# pass (when it syncs the dirty bitmap)
#
# @pass: An incrementing count (starting at 1 on the first pass)
#
# Since: 2.6
#
# .. qmp-example::
#
#     <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
#          "event": "MIGRATION_PASS", "data": {"pass": 2} }
##
{ 'event': 'MIGRATION_PASS',
  'data': { 'pass': 'int' } }

##
# @COLOMessage:
#
# The message transmission between Primary side and Secondary side.
#
# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
#
# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
#     checkpointing
#
# @checkpoint-reply: SVM gets PVM's checkpoint request
#
# @vmstate-send: VM's state will be sent by PVM.
#
# @vmstate-size: The total size of VMstate.
#
# @vmstate-received: VM's state has been received by SVM.
#
# @vmstate-loaded: VM's state has been loaded by SVM.
#
# Since: 2.8
##
{ 'enum': 'COLOMessage',
  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
            'vmstate-send', 'vmstate-size', 'vmstate-received',
            'vmstate-loaded' ] }
|
|
|
|
|
|
|
|
##
# @COLOMode:
#
# The current COLO mode.
#
# @none: COLO is disabled.
#
# @primary: COLO node in primary side.
#
# @secondary: COLO node in secondary side.
#
# Since: 2.8
##
{ 'enum': 'COLOMode',
  'data': [ 'none', 'primary', 'secondary'] }

##
# @FailoverStatus:
#
# An enumeration of COLO failover status
#
# @none: no failover has ever happened
#
# @require: a failover request was received but has not been handled
#     yet
#
# @active: in the process of doing failover
#
# @completed: the failover process has completed
#
# @relaunch: restart the failover process, from 'none' -> 'completed'
#     (Since 2.9)
#
# Since: 2.8
##
{ 'enum': 'FailoverStatus',
  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }

##
# @COLO_EXIT:
#
# Emitted when the VM leaves COLO mode, either because an error
# happened or at the request of the user.
#
# @mode: report COLO mode when COLO exited.
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
#
# .. qmp-example::
#
#     <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
#          "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
##
{ 'event': 'COLO_EXIT',
  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }

##
# @COLOExitReason:
#
# The reason for a COLO exit.
#
# @none: failover has never happened. This state does not occur in
#     the COLO_EXIT event, and is only visible in the result of
#     query-colo-status.
#
# @request: COLO exit is due to an external request.
#
# @error: COLO exit is due to an internal error.
#
# @processing: COLO is currently handling a failover (since 4.0).
#
# Since: 3.1
##
{ 'enum': 'COLOExitReason',
  'data': [ 'none', 'request', 'error', 'processing' ] }

##
# @x-colo-lost-heartbeat:
#
# Tell QEMU that the heartbeat has been lost and request it to perform
# takeover procedures. If this command is sent to the PVM, the Primary
# side will exit COLO mode. If sent to the Secondary, the Secondary
# side will run failover work and then take over server operation to
# become the service VM.
#
# Features:
#
# @unstable: This command is experimental.
#
# Since: 2.8
#
# .. qmp-example::
#
#     -> { "execute": "x-colo-lost-heartbeat" }
#     <- { "return": {} }
##
{ 'command': 'x-colo-lost-heartbeat',
  'features': [ 'unstable' ],
  'if': 'CONFIG_REPLICATION' }

##
# @migrate_cancel:
#
# Cancel the currently executing migration process.
#
# .. note:: This command succeeds even if there is no migration
#    process running.
#
# Since: 0.14
#
# .. qmp-example::
#
#     -> { "execute": "migrate_cancel" }
#     <- { "return": {} }
##
{ 'command': 'migrate_cancel' }

##
# @migrate-continue:
#
# Continue migration when it's in a paused state.
#
# @state: The state the migration is currently expected to be in
#
# Since: 2.11
#
# .. qmp-example::
#
#     -> { "execute": "migrate-continue", "arguments":
#          { "state": "pre-switchover" } }
#     <- { "return": {} }
##
{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }

##
# @MigrationAddressType:
#
# The migration stream transport mechanisms.
#
# @socket: Migrate via socket.
#
# @exec: Direct the migration stream to another process.
#
# @rdma: Migrate via RDMA.
#
# @file: Direct the migration stream to a file.
#
# Since: 8.2
##
{ 'enum': 'MigrationAddressType',
  'data': [ 'socket', 'exec', 'rdma', 'file' ] }

##
# @FileMigrationArgs:
#
# @filename: The file to receive the migration stream
#
# @offset: The file offset where the migration stream will start
#
# Since: 8.2
##
{ 'struct': 'FileMigrationArgs',
  'data': { 'filename': 'str',
            'offset': 'uint64' } }

##
# @MigrationExecCommand:
#
# @args: command (list head) and arguments to execute.
#
# Since: 8.2
##
{ 'struct': 'MigrationExecCommand',
  'data': {'args': [ 'str' ] } }

##
# @MigrationAddress:
#
# Migration endpoint configuration.
#
# @transport: The migration stream transport mechanism
#
# Since: 8.2
##
{ 'union': 'MigrationAddress',
  'base': { 'transport': 'MigrationAddressType'},
  'discriminator': 'transport',
  'data': {
    'socket': 'SocketAddress',
    'exec': 'MigrationExecCommand',
    'rdma': 'InetSocketAddress',
    'file': 'FileMigrationArgs' } }

##
# @MigrationChannelType:
#
# The migration channel-type request options.
#
# @main: Main outbound migration channel.
#
# Since: 8.1
##
{ 'enum': 'MigrationChannelType',
  'data': [ 'main' ] }

##
# @MigrationChannel:
#
# Migration stream channel parameters.
#
# @channel-type: Channel type for transferring packet information.
#
# @addr: Migration endpoint configuration on destination interface.
#
# Since: 8.1
##
{ 'struct': 'MigrationChannel',
  'data': {
      'channel-type': 'MigrationChannelType',
      'addr': 'MigrationAddress' } }

##
# @migrate:
#
# Migrates the currently running guest to another Virtual Machine.
#
# @uri: the Uniform Resource Identifier of the destination VM
#
# @channels: list of migration stream channels with each stream in the
#     list connected to a destination interface endpoint.
#
# @detach: this argument exists only for compatibility reasons and is
#     ignored by QEMU
#
# @resume: resume one paused migration, default "off". (since 3.0)
#
# Since: 0.14
#
# .. admonition:: Notes
#
#     1. The 'query-migrate' command should be used to check
#        migration's progress and final result (this information is
#        provided by the 'status' member).
#
#     2. All boolean arguments default to false.
#
#     3. The user Monitor's "detach" argument is invalid in QMP and
#        should not be used.
#
#     4. The uri argument should have the Uniform Resource Identifier
#        of the default destination VM. This connection will be bound
#        to the default network.
#
#     5. For now, the number of migration streams is restricted to
#        one, i.e. the number of items in the 'channels' list is just
#        1.
#
#     6. The 'uri' and 'channels' arguments are mutually exclusive;
#        exactly one of the two should be present.
#
# .. qmp-example::
#
#     -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "socket",
#                                        "type": "inet",
#                                        "host": "10.12.34.9",
#                                        "port": "1050" } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "exec",
#                                        "args": [ "/bin/nc", "-p", "6000",
#                                                  "/some/sock" ] } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "rdma",
#                                        "host": "10.12.34.9",
#                                        "port": "1050" } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "file",
#                                        "filename": "/tmp/migfile",
#                                        "offset": "0x1000" } } ] } }
#     <- { "return": {} }
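#
# .. qmp-example::
#    :annotated:
#
#    A sketch of resuming a paused postcopy migration with @resume;
#    the destination URI below is illustrative::
#
#     -> { "execute": "migrate",
#          "arguments": { "uri": "tcp:192.168.1.200:12345",
#                         "resume": true } }
#     <- { "return": {} }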
##
{ 'command': 'migrate',
  'data': {'*uri': 'str',
           '*channels': [ 'MigrationChannel' ],
           '*detach': 'bool', '*resume': 'bool' } }

##
# @migrate-incoming:
#
# Start an incoming migration. QEMU must have been started with
# -incoming defer.
#
# @uri: The Uniform Resource Identifier identifying the source or
#     address to listen on
#
# @channels: list of migration stream channels with each stream in the
#     list connected to a destination interface endpoint.
#
# @exit-on-error: Exit on incoming migration failure. Default true.
#     When set to false, the failure triggers a MIGRATION event, and
#     error details can be retrieved with query-migrate.
#     (since 9.1)
#
# Since: 2.3
#
# .. admonition:: Notes
#
#     1. It's a bad idea to use a string for the uri, but it needs to
#        stay compatible with -incoming and the format of the uri is
#        already exposed to libvirt.
#
#     2. QEMU must be started with -incoming defer to allow
#        migrate-incoming to be used.
#
#     3. The uri format is the same as for -incoming
#
#     4. For now, the number of migration streams is restricted to
#        one, i.e. the number of items in the 'channels' list is just
#        1.
#
#     5. The 'uri' and 'channels' arguments are mutually exclusive;
#        exactly one of the two should be present.
#
# .. qmp-example::
#
#     -> { "execute": "migrate-incoming",
#          "arguments": { "uri": "tcp:0:4446" } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate-incoming",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "socket",
#                                        "type": "inet",
#                                        "host": "10.12.34.9",
#                                        "port": "1050" } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate-incoming",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "exec",
#                                        "args": [ "/bin/nc", "-p", "6000",
#                                                  "/some/sock" ] } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate-incoming",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "rdma",
#                                        "host": "10.12.34.9",
#                                        "port": "1050" } } ] } }
#     <- { "return": {} }
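#
# .. qmp-example::
#    :annotated:
#
#    A sketch of keeping QEMU running after an incoming migration
#    failure so that the error can be inspected with query-migrate;
#    the URI below is illustrative::
#
#     -> { "execute": "migrate-incoming",
#          "arguments": { "uri": "tcp:0:4446",
#                         "exit-on-error": false } }
#     <- { "return": {} }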
##
{ 'command': 'migrate-incoming',
  'data': {'*uri': 'str',
           '*channels': [ 'MigrationChannel' ],
           '*exit-on-error': 'bool' } }

##
# @xen-save-devices-state:
#
# Save the state of all devices to file. The RAM and the block
# devices of the VM are not saved by this command.
#
# @filename: the file to save the state of the devices to as binary
#     data. See xen-save-devices-state.txt for a description of the
#     binary format.
#
# @live: Optional argument to ask QEMU to treat this command as part
#     of a live migration. Defaults to true. (since 2.11)
#
# Since: 1.1
#
# .. qmp-example::
#
#     -> { "execute": "xen-save-devices-state",
#          "arguments": { "filename": "/tmp/save" } }
#     <- { "return": {} }
##
{ 'command': 'xen-save-devices-state',
  'data': {'filename': 'str', '*live': 'bool' } }

##
# @xen-set-global-dirty-log:
#
# Enable or disable the global dirty log mode.
#
# @enable: true to enable, false to disable.
#
# Since: 1.3
#
# .. qmp-example::
#
#     -> { "execute": "xen-set-global-dirty-log",
#          "arguments": { "enable": true } }
#     <- { "return": {} }
##
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }

##
# @xen-load-devices-state:
#
# Load the state of all devices from file. The RAM and the block
# devices of the VM are not loaded by this command.
#
# @filename: the file to load the state of the devices from as binary
#     data. See xen-save-devices-state.txt for a description of the
#     binary format.
#
# Since: 2.7
#
# .. qmp-example::
#
#     -> { "execute": "xen-load-devices-state",
#          "arguments": { "filename": "/tmp/resume" } }
#     <- { "return": {} }
##
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }

##
# @xen-set-replication:
#
# Enable or disable replication.
#
# @enable: true to enable, false to disable.
#
# @primary: true for primary or false for secondary.
#
# @failover: true to do failover, false to stop. Cannot be specified
#     if 'enable' is true. Default value is false.
#
# .. qmp-example::
#
#     -> { "execute": "xen-set-replication",
#          "arguments": {"enable": true, "primary": false} }
#     <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-set-replication',
  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
  'if': 'CONFIG_REPLICATION' }

##
# @ReplicationStatus:
#
# The result format for 'query-xen-replication-status'.
#
# @error: true if an error happened, false if replication is normal.
#
# @desc: the human-readable error description string, when @error is
#     'true'.
#
# Since: 2.9
##
{ 'struct': 'ReplicationStatus',
  'data': { 'error': 'bool', '*desc': 'str' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-xen-replication-status:
#
# Query replication status while the VM is running.
#
# Returns: A @ReplicationStatus object showing the status.
#
# .. qmp-example::
#
#     -> { "execute": "query-xen-replication-status" }
#     <- { "return": { "error": false } }
#
# Since: 2.9
##
{ 'command': 'query-xen-replication-status',
  'returns': 'ReplicationStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @xen-colo-do-checkpoint:
#
# Xen uses this command to notify replication to trigger a checkpoint.
#
# .. qmp-example::
#
#     -> { "execute": "xen-colo-do-checkpoint" }
#     <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-colo-do-checkpoint',
  'if': 'CONFIG_REPLICATION' }

##
# @COLOStatus:
#
# The result format for 'query-colo-status'.
#
# @mode: COLO running mode. If COLO is running, this field will
#     return 'primary' or 'secondary'.
#
# @last-mode: COLO last running mode. If COLO is running, this field
#     will return the same value as the @mode field; after failover,
#     this field can be used to get the last COLO mode. (since 4.0)
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
##
{ 'struct': 'COLOStatus',
  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
            'reason': 'COLOExitReason' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-colo-status:
#
# Query COLO status while the VM is running.
#
# Returns: A @COLOStatus object showing the status.
#
# .. qmp-example::
#
#     -> { "execute": "query-colo-status" }
#     <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
#
# Since: 3.1
##
{ 'command': 'query-colo-status',
  'returns': 'COLOStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @migrate-recover:
#
# Provide a recovery migration stream URI.
#
# @uri: the URI to be used for the recovery of the migration stream.
#
# .. qmp-example::
#
#     -> { "execute": "migrate-recover",
#          "arguments": { "uri": "tcp:192.168.1.200:12345" } }
#     <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-recover',
  'data': { 'uri': 'str' },
  'allow-oob': true }

##
# @migrate-pause:
#
# Pause a migration. Currently it only supports postcopy.
#
# .. qmp-example::
#
#     -> { "execute": "migrate-pause" }
#     <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-pause', 'allow-oob': true }

##
# @UNPLUG_PRIMARY:
#
# Emitted from the source side of a migration when the migration state
# is WAIT_UNPLUG. The device was unplugged by the guest operating
# system. Device resources in QEMU are kept on standby to be able to
# re-plug it in case of migration failure.
#
# @device-id: QEMU device id of the unplugged device
#
# Since: 4.2
#
# .. qmp-example::
#
#     <- { "event": "UNPLUG_PRIMARY",
#          "data": { "device-id": "hostdev0" },
#          "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
##
{ 'event': 'UNPLUG_PRIMARY',
  'data': { 'device-id': 'str' } }

##
# @DirtyRateVcpu:
#
# Dirty page rate of a vCPU.
#
# @id: vcpu index.
#
# @dirty-rate: dirty page rate.
#
# Since: 6.2
##
{ 'struct': 'DirtyRateVcpu',
  'data': { 'id': 'int', 'dirty-rate': 'int64' } }

##
# @DirtyRateStatus:
#
# Dirty page rate measurement status.
#
# @unstarted: measuring thread has not been started yet
#
# @measuring: measuring thread is running
#
# @measured: dirty page rate is measured and the results are available
#
# Since: 5.2
##
{ 'enum': 'DirtyRateStatus',
  'data': [ 'unstarted', 'measuring', 'measured'] }

##
# @DirtyRateMeasureMode:
#
# Method used to measure dirty page rate. Differences between
# available methods are explained in @calc-dirty-rate.
#
# @page-sampling: use page sampling
#
# @dirty-ring: use dirty ring
#
# @dirty-bitmap: use dirty bitmap
#
# Since: 6.2
##
{ 'enum': 'DirtyRateMeasureMode',
  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }

##
# @TimeUnit:
#
# Specifies the unit in which a time-related value is specified.
#
# @second: value is in seconds
#
# @millisecond: value is in milliseconds
#
# Since: 8.2
##
{ 'enum': 'TimeUnit',
  'data': ['second', 'millisecond'] }

##
# @DirtyRateInfo:
#
# Information about measured dirty page rate.
#
# @dirty-rate: an estimate of the dirty page rate of the VM in units
#     of MiB/s. Value is present only when @status is 'measured'.
#
# @status: current status of dirty page rate measurements
#
# @start-time: start time of the calculation, in units of seconds
#
# @calc-time: time period for which dirty page rate was measured,
#     expressed and rounded down to @calc-time-unit.
#
# @calc-time-unit: time unit of @calc-time (Since 8.2)
#
# @sample-pages: number of sampled pages per GiB of guest memory.
#     Valid only in page-sampling mode (Since 6.1)
#
# @mode: mode that was used to measure dirty page rate (Since 6.2)
#
# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
#     specified (Since 6.2)
#
# Since: 5.2
##
{ 'struct': 'DirtyRateInfo',
  'data': {'*dirty-rate': 'int64',
           'status': 'DirtyRateStatus',
           'start-time': 'int64',
           'calc-time': 'int64',
           'calc-time-unit': 'TimeUnit',
           'sample-pages': 'uint64',
           'mode': 'DirtyRateMeasureMode',
           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }

##
# @calc-dirty-rate:
#
# Start measuring dirty page rate of the VM. Results can be retrieved
# with @query-dirty-rate after measurements are completed.
#
# Dirty page rate is the number of pages changed in a given time
# period expressed in MiB/s. The following methods of calculation are
# available:
#
# 1. In page sampling mode, a random subset of pages are selected and
#    hashed twice: once at the beginning of measurement time period,
#    and once again at the end. If two hashes for some page are
#    different, the page is counted as changed. Since this method
#    relies on sampling and hashing, calculated dirty page rate is
#    only an estimate of its true value. Increasing @sample-pages
#    improves estimation quality at the cost of higher computational
#    overhead.
#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages) and counts page
#    faults. Information about modified pages is collected into a
#    bitmap, where each bit corresponds to one guest page. This mode
#    requires that KVM accelerator property "dirty-ring-size" is *not*
#    set.
#
# 3. Dirty ring mode is similar to dirty bitmap mode, but the
#    information about modified pages is collected into a ring buffer.
#    This mode tracks page modifications for each vCPU separately. It
#    requires that KVM accelerator property "dirty-ring-size" is set.
#
# @calc-time: time period for which dirty page rate is calculated. By
#     default it is specified in seconds, but the unit can be set
#     explicitly with @calc-time-unit. Note that larger @calc-time
#     values will typically result in smaller dirty page rates because
#     page dirtying is a one-time event. Once some page is counted as
#     dirty during @calc-time period, further writes to this page will
#     not increase dirty page rate anymore.
#
# @calc-time-unit: time unit in which @calc-time is specified. By
#     default it is seconds. (Since 8.2)
#
# @sample-pages: number of sampled pages per each GiB of guest memory.
#     Default value is 512. For 4KiB guest pages this corresponds to a
#     sampling ratio of 0.2%. This argument is used only in page
#     sampling mode. (Since 6.1)
#
# @mode: mechanism for tracking dirty pages. Default value is
#     'page-sampling'. Others are 'dirty-bitmap' and 'dirty-ring'.
#     (Since 6.1)
#
# Since: 5.2
#
# .. qmp-example::
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#         "sample-pages": 512} }
#     <- { "return": {} }
#
# .. qmp-example::
#    :annotated:
#
#    Measure dirty rate using dirty bitmap for 500 milliseconds::
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
#         "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
#
#     <- { "return": {} }
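#
# .. qmp-example::
#    :annotated:
#
#    A sketch of measuring the dirty rate with the dirty ring; this
#    assumes the KVM accelerator property "dirty-ring-size" is set::
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#         "mode": "dirty-ring"} }
#     <- { "return": {} }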
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*calc-time-unit': 'TimeUnit',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }

##
# @query-dirty-rate:
#
# Query results of the most recent invocation of @calc-dirty-rate.
#
# @calc-time-unit: time unit in which to report calculation time.
#     By default it is reported in seconds. (Since 8.2)
#
# Since: 5.2
#
# .. qmp-example::
#    :title: Measurement is in progress
#
#     <- {"status": "measuring", "sample-pages": 512,
#         "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#         "calc-time-unit": "second"}
#
# .. qmp-example::
#    :title: Measurement has been completed
#
#     <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
#         "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#         "calc-time-unit": "second"}
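#
# .. qmp-example::
#    :annotated:
#
#    A sketch of asking for the calculation time to be reported in
#    milliseconds::
#
#     -> {"execute": "query-dirty-rate",
#         "arguments": {"calc-time-unit": "millisecond"} }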
##
{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
  'returns': 'DirtyRateInfo' }

##
# @DirtyLimitInfo:
#
# Dirty page rate limit information of a virtual CPU.
#
# @cpu-index: index of a virtual CPU.
#
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
#     CPU, 0 means unlimited.
#
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
#
# Since: 7.1
##
{ 'struct': 'DirtyLimitInfo',
  'data': { 'cpu-index': 'int',
            'limit-rate': 'uint64',
            'current-rate': 'uint64' } }

##
# @set-vcpu-dirty-limit:
#
# Set the upper limit of dirty page rate for virtual CPUs.
#
# Requires KVM with accelerator property "dirty-ring-size" set. A
# virtual CPU's dirty page rate is a measure of its memory load. To
# observe dirty page rates, use @calc-dirty-rate.
#
# @cpu-index: index of a virtual CPU, default is all.
#
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
#
# Since: 7.1
#
# .. qmp-example::
#
#     -> {"execute": "set-vcpu-dirty-limit",
#         "arguments": { "dirty-rate": 200,
#                        "cpu-index": 1 } }
#     <- { "return": {} }
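#
# .. qmp-example::
#    :annotated:
#
#    A sketch of limiting all virtual CPUs by omitting @cpu-index; the
#    rate value is illustrative::
#
#     -> {"execute": "set-vcpu-dirty-limit",
#         "arguments": { "dirty-rate": 200 } }
#     <- { "return": {} }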
##
{ 'command': 'set-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int',
            'dirty-rate': 'uint64' } }

##
# @cancel-vcpu-dirty-limit:
#
# Cancel the upper limit of dirty page rate for virtual CPUs.
#
# Cancel the dirty page rate limit for the vCPU which has been set
# with the set-vcpu-dirty-limit command. Note that this command
# requires support from the dirty ring, the same as
# "set-vcpu-dirty-limit".
#
# @cpu-index: index of a virtual CPU, default is all.
#
# Since: 7.1
#
# .. qmp-example::
#
#     -> {"execute": "cancel-vcpu-dirty-limit",
#         "arguments": { "cpu-index": 1 } }
#     <- { "return": {} }
##
{ 'command': 'cancel-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int'} }

##
# @query-vcpu-dirty-limit:
#
# Returns information about virtual CPU dirty page rate limits, if
# any.
#
# Since: 7.1
#
# .. qmp-example::
#
#     -> {"execute": "query-vcpu-dirty-limit"}
#     <- {"return": [
#         { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
#         { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
##
{ 'command': 'query-vcpu-dirty-limit',
  'returns': [ 'DirtyLimitInfo' ] }

##
# @MigrationThreadInfo:
#
# Information about migration threads
#
# @name: the name of the migration thread
#
# @thread-id: ID of the underlying host thread
#
# Since: 7.2
##
{ 'struct': 'MigrationThreadInfo',
  'data': {'name': 'str',
           'thread-id': 'int'} }

##
# @query-migrationthreads:
#
# Returns information about migration threads
#
# Features:
#
# @deprecated: This command is deprecated with no replacement yet.
#
# Returns: @MigrationThreadInfo
#
# Since: 7.2
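#
# .. qmp-example::
#    :annotated:
#
#    A sketch of querying the migration threads; the thread name and
#    thread id in the response are illustrative::
#
#     -> { "execute": "query-migrationthreads" }
#     <- { "return": [ { "name": "live_migration", "thread-id": 12345 } ] }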
##
{ 'command': 'query-migrationthreads',
  'returns': ['MigrationThreadInfo'],
  'features': ['deprecated'] }

##
# @snapshot-save:
#
# Save a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to create
#
# @vmstate: block device node name to save vmstate to
#
# @devices: list of block device node names to save a snapshot to
#
# Applications should not assume that the snapshot save is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs may be stopped during the time
# it takes to save the snapshot. A future version of QEMU may ensure
# CPUs are executing continuously.
#
# It is strongly recommended that @devices contain all writable block
# device nodes if a consistent snapshot is required.
#
# If @tag already exists, an error will be reported.
#
# .. qmp-example::
#
# -> { "execute": "snapshot-save",
#      "arguments": {
#         "job-id": "snapsave0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapsave0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapsave0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-save",
#                 "id": "snapsave0"}]}
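#
# If the job fails, the concluded job carries the failure reason in
# its @error member. The reply below is only an illustration; the
# exact error message depends on what went wrong:
#
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-save",
#                 "id": "snapsave0",
#                 "error": "Device 'disk1' is writable but does not support snapshots"}]}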
#
# Since: 6.0
##
{ 'command': 'snapshot-save',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-load:
#
# Load a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to load.
#
# @vmstate: block device node name to load vmstate from
#
# @devices: list of block device node names to load a snapshot from
#
# Applications should not assume that the snapshot load is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs will be stopped during the
# time it takes to load the snapshot.
#
# It is strongly recommended that @devices contain all writable block
# device nodes that may have changed since the original @snapshot-save
# command execution.
#
# .. qmp-example::
#
# -> { "execute": "snapshot-load",
#      "arguments": {
#         "job-id": "snapload0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapload0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapload0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-load",
#                 "id": "snapload0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-load',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-delete:
#
# Delete a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to delete.
#
# @devices: list of block device node names to delete a snapshot from
#
# Applications should not assume that the snapshot delete is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# .. qmp-example::
#
# -> { "execute": "snapshot-delete",
#      "arguments": {
#         "job-id": "snapdelete0",
#         "tag": "my-snap",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapdelete0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-delete",
#                 "id": "snapdelete0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-delete',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'devices': ['str'] } }