2017-08-24 22:14:01 +03:00
|
|
|
# -*- Mode: Python -*-
|
2020-07-29 21:50:24 +03:00
|
|
|
# vim: filetype=python
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
|
|
|
|
##
|
|
|
|
# = Migration
|
|
|
|
##
|
|
|
|
|
|
|
|
{ 'include': 'common.json' }
|
2019-02-27 13:51:27 +03:00
|
|
|
{ 'include': 'sockets.json' }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @MigrationStats:
|
|
|
|
#
|
|
|
|
# Detailed migration status.
|
|
|
|
#
|
|
|
|
# @transferred: amount of bytes already transferred to the target VM
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @remaining: amount of bytes remaining to be transferred to the
|
|
|
|
# target VM
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @total: total amount of bytes involved in the migration process
|
|
|
|
#
|
|
|
|
# @duplicate: number of duplicate (zero) pages (since 1.2)
|
|
|
|
#
|
2023-06-12 22:33:39 +03:00
|
|
|
# @skipped: number of skipped zero pages. Always zero, only provided for
|
|
|
|
# compatibility (since 1.5)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @normal: number of normal pages (since 1.2)
|
|
|
|
#
|
|
|
|
# @normal-bytes: number of normal bytes sent (since 1.2)
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @dirty-pages-rate: number of pages dirtied by second by the guest
|
|
|
|
# (since 1.3)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @mbps: throughput in megabits/sec. (since 1.6)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @dirty-sync-count: number of times that dirty ram was synchronized
|
|
|
|
# (since 2.1)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @postcopy-requests: The number of page requests received from the
|
|
|
|
# destination (since 2.7)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @page-size: The number of bytes per page for the various page-based
|
2023-04-28 13:54:29 +03:00
|
|
|
# statistics (since 2.10)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2018-06-26 16:20:11 +03:00
|
|
|
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
|
|
|
|
#
|
2019-01-11 09:37:30 +03:00
|
|
|
# @pages-per-second: the number of memory pages transferred per second
|
2023-04-28 13:54:29 +03:00
|
|
|
# (Since 4.0)
|
2019-01-11 09:37:30 +03:00
|
|
|
#
|
2021-12-21 12:34:41 +03:00
|
|
|
# @precopy-bytes: The number of bytes sent in the pre-copy phase
|
2023-04-28 13:54:29 +03:00
|
|
|
# (since 7.0).
|
2021-12-21 12:34:41 +03:00
|
|
|
#
|
|
|
|
# @downtime-bytes: The number of bytes sent while the guest is paused
|
2023-04-28 13:54:29 +03:00
|
|
|
# (since 7.0).
|
2021-12-21 12:34:41 +03:00
|
|
|
#
|
|
|
|
# @postcopy-bytes: The number of bytes sent during the post-copy phase
|
2023-04-28 13:54:29 +03:00
|
|
|
# (since 7.0).
|
|
|
|
#
|
|
|
|
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
|
|
|
|
# synchronization could not avoid copying dirty pages. This is
|
|
|
|
# between 0 and @dirty-sync-count * @multifd-channels. (since
|
|
|
|
# 7.1)
|
2021-12-21 12:34:41 +03:00
|
|
|
#
|
2023-06-12 22:33:39 +03:00
|
|
|
# Features:
|
|
|
|
#
|
|
|
|
# @deprecated: Member @skipped is always zero since 1.5.3
|
|
|
|
#
|
2020-11-18 09:41:58 +03:00
|
|
|
# Since: 0.14
|
2023-06-12 22:33:39 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
{ 'struct': 'MigrationStats',
|
|
|
|
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
|
2023-06-12 22:33:39 +03:00
|
|
|
'duplicate': 'int',
|
2023-10-13 13:47:27 +03:00
|
|
|
'skipped': { 'type': 'int', 'features': [ 'deprecated' ] },
|
2023-06-12 22:33:39 +03:00
|
|
|
'normal': 'int',
|
2023-06-12 22:16:04 +03:00
|
|
|
'normal-bytes': 'int', 'dirty-pages-rate': 'int',
|
|
|
|
'mbps': 'number', 'dirty-sync-count': 'int',
|
|
|
|
'postcopy-requests': 'int', 'page-size': 'int',
|
|
|
|
'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
|
|
|
|
'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
|
|
|
|
'postcopy-bytes': 'uint64',
|
|
|
|
'dirty-sync-missed-zero-copy': 'uint64' } }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @XBZRLECacheStats:
|
|
|
|
#
|
|
|
|
# Detailed XBZRLE migration cache statistics
|
|
|
|
#
|
|
|
|
# @cache-size: XBZRLE cache size
|
|
|
|
#
|
|
|
|
# @bytes: amount of bytes already transferred to the target VM
|
|
|
|
#
|
|
|
|
# @pages: amount of pages transferred to the target VM
|
|
|
|
#
|
|
|
|
# @cache-miss: number of cache miss
|
|
|
|
#
|
|
|
|
# @cache-miss-rate: rate of cache miss (since 2.1)
|
|
|
|
#
|
2020-04-30 03:59:35 +03:00
|
|
|
# @encoding-rate: rate of encoded bytes (since 5.1)
|
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @overflow: number of overflows
|
|
|
|
#
|
|
|
|
# Since: 1.2
|
|
|
|
##
|
|
|
|
{ 'struct': 'XBZRLECacheStats',
|
2021-02-02 17:17:32 +03:00
|
|
|
'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
|
2017-08-24 22:14:01 +03:00
|
|
|
'cache-miss': 'int', 'cache-miss-rate': 'number',
|
2020-04-30 03:59:35 +03:00
|
|
|
'encoding-rate': 'number', 'overflow': 'int' } }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
2018-09-06 10:01:00 +03:00
|
|
|
##
|
|
|
|
# @CompressionStats:
|
|
|
|
#
|
|
|
|
# Detailed migration compression statistics
|
|
|
|
#
|
|
|
|
# @pages: amount of pages compressed and transferred to the target VM
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @busy: count of times that no free thread was available to compress
|
|
|
|
# data
|
2018-09-06 10:01:00 +03:00
|
|
|
#
|
|
|
|
# @busy-rate: rate of thread busy
|
|
|
|
#
|
|
|
|
# @compressed-size: amount of bytes after compression
|
|
|
|
#
|
|
|
|
# @compression-rate: rate of compressed size
|
|
|
|
#
|
|
|
|
# Since: 3.1
|
|
|
|
##
|
|
|
|
{ 'struct': 'CompressionStats',
|
|
|
|
'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
|
2020-02-13 20:56:27 +03:00
|
|
|
'compressed-size': 'int', 'compression-rate': 'number' } }
|
2018-09-06 10:01:00 +03:00
|
|
|
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
# @MigrationStatus:
|
|
|
|
#
|
|
|
|
# An enumeration of migration status.
|
|
|
|
#
|
|
|
|
# @none: no migration has ever happened.
|
|
|
|
#
|
|
|
|
# @setup: migration process has been initiated.
|
|
|
|
#
|
|
|
|
# @cancelling: in the process of cancelling migration.
|
|
|
|
#
|
|
|
|
# @cancelled: cancelling migration is finished.
|
|
|
|
#
|
|
|
|
# @active: in the process of doing migration.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @postcopy-active: like active, but now in postcopy mode. (since
|
|
|
|
# 2.5)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @postcopy-paused: during postcopy but paused. (since 3.0)
|
2018-05-02 13:47:18 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @postcopy-recover: trying to recover from a paused postcopy. (since
|
|
|
|
# 3.0)
|
2018-05-02 13:47:25 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @completed: migration is finished.
|
|
|
|
#
|
|
|
|
# @failed: some error occurred during migration process.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @colo: VM is in the process of fault tolerance, VM can not get into
|
|
|
|
# this state unless colo capability is enabled for migration.
|
|
|
|
# (since 2.8)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @pre-switchover: Paused before device serialisation. (since 2.11)
|
2017-10-20 12:05:51 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @device: During device serialisation when pause-before-switchover is
|
|
|
|
# enabled (since 2.11)
|
2017-10-20 12:05:51 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @wait-unplug: wait for device unplug request by guest OS to be
|
|
|
|
# completed. (since 4.2)
|
2019-10-29 14:49:02 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# Since: 2.3
|
|
|
|
##
|
|
|
|
{ 'enum': 'MigrationStatus',
|
|
|
|
'data': [ 'none', 'setup', 'cancelling', 'cancelled',
|
2018-05-02 13:47:18 +03:00
|
|
|
'active', 'postcopy-active', 'postcopy-paused',
|
2018-05-02 13:47:25 +03:00
|
|
|
'postcopy-recover', 'completed', 'failed', 'colo',
|
2019-10-29 14:49:02 +03:00
|
|
|
'pre-switchover', 'device', 'wait-unplug' ] }
|
2020-10-26 12:36:27 +03:00
|
|
|
##
|
|
|
|
# @VfioStats:
|
|
|
|
#
|
|
|
|
# Detailed VFIO devices migration statistics
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @transferred: amount of bytes transferred to the target VM by VFIO
|
|
|
|
# devices
|
2020-10-26 12:36:27 +03:00
|
|
|
#
|
|
|
|
# Since: 5.2
|
|
|
|
##
|
|
|
|
{ 'struct': 'VfioStats',
|
|
|
|
'data': {'transferred': 'int' } }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @MigrationInfo:
|
|
|
|
#
|
|
|
|
# Information about current migration process.
|
|
|
|
#
|
|
|
|
# @status: @MigrationStatus describing the current migration status.
|
2023-04-28 13:54:29 +03:00
|
|
|
# If this field is not returned, no migration process has been
|
|
|
|
# initiated
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @ram: @MigrationStats containing detailed migration status, only
|
|
|
|
#     returned if status is 'active' or 'completed' (since 1.2)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @disk: @MigrationStats containing detailed disk migration status,
|
|
|
|
# only returned if status is 'active' and it is a block migration
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
|
2023-04-28 13:54:29 +03:00
|
|
|
# migration statistics, only returned if XBZRLE feature is on and
|
|
|
|
# status is 'active' or 'completed' (since 1.2)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @total-time: total amount of milliseconds since migration started.
|
2023-04-28 13:54:29 +03:00
|
|
|
# If migration has ended, it returns the total migration time.
|
|
|
|
# (since 1.2)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @downtime: only present when migration finishes correctly: total
|
|
|
|
# downtime in milliseconds for the guest. (since 1.3)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @expected-downtime: only present while migration is active: expected
|
|
|
|
# downtime in milliseconds for the guest in last walk of the dirty
|
|
|
|
# bitmap. (since 1.3)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2020-02-13 20:56:34 +03:00
|
|
|
# @setup-time: amount of setup time in milliseconds *before* the
|
2023-04-28 13:54:29 +03:00
|
|
|
# iterations begin but *after* the QMP command is issued. This is
|
|
|
|
# designed to provide an accounting of any activities (such as
|
|
|
|
# RDMA pinning) which may be expensive, but do not actually occur
|
|
|
|
# during the iterative migration rounds themselves. (since 1.6)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @cpu-throttle-percentage: percentage of time guest cpus are being
|
2023-04-28 13:54:29 +03:00
|
|
|
# throttled during auto-converge. This is only present when
|
|
|
|
# auto-converge has started throttling guest cpus. (Since 2.7)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-10-05 01:02:31 +03:00
|
|
|
# @error-desc: the human readable error description string. Clients
|
|
|
|
# should not attempt to parse the error strings. (Since 2.7)
|
2018-03-22 21:17:27 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @postcopy-blocktime: total time when all vCPU were blocked during
|
|
|
|
# postcopy live migration. This is only present when the
|
|
|
|
# postcopy-blocktime migration capability is enabled. (Since 3.0)
|
2018-03-22 21:17:27 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
|
|
|
|
# This is only present when the postcopy-blocktime migration
|
|
|
|
# capability is enabled. (Since 3.0)
|
2018-03-22 21:17:27 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @compression: migration compression statistics, only returned if
|
|
|
|
# compression feature is on and status is 'active' or 'completed'
|
|
|
|
# (Since 3.1)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @socket-address: Only used for tcp, to know what the real port is
|
|
|
|
# (Since 4.0)
|
2019-02-27 13:51:27 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @vfio: @VfioStats containing detailed VFIO devices migration
|
|
|
|
# statistics, only returned if VFIO device is present, migration
|
|
|
|
# is supported by all VFIO devices and status is 'active' or
|
|
|
|
# 'completed' (since 5.2)
|
2020-10-26 12:36:27 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @blocked-reasons: A list of reasons an outgoing migration is
|
|
|
|
# blocked. Present and non-empty when migration is blocked.
|
|
|
|
# (since 6.0)
|
2021-04-20 08:19:06 +03:00
|
|
|
#
|
2023-07-28 12:38:07 +03:00
|
|
|
# @dirty-limit-throttle-time-per-round: Maximum throttle time
|
|
|
|
# (in microseconds) of virtual CPUs each dirty ring full round,
|
|
|
|
# which shows how MigrationCapability dirty-limit affects the
|
|
|
|
# guest during live migration. (Since 8.1)
|
|
|
|
#
|
|
|
|
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
|
|
|
|
# (in microseconds) for each dirty ring full round. The value
|
|
|
|
# equals the dirty ring memory size divided by the average dirty
|
|
|
|
# page rate of the virtual CPU, which can be used to observe the
|
|
|
|
# average memory load of the virtual CPU indirectly. Note that
|
|
|
|
# zero means guest doesn't dirty memory. (Since 8.1)
|
2023-06-07 19:21:58 +03:00
|
|
|
#
|
2020-11-18 09:41:58 +03:00
|
|
|
# Since: 0.14
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
{ 'struct': 'MigrationInfo',
|
|
|
|
'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
|
|
|
|
'*disk': 'MigrationStats',
|
2020-10-26 12:36:27 +03:00
|
|
|
'*vfio': 'VfioStats',
|
2017-08-24 22:14:01 +03:00
|
|
|
'*xbzrle-cache': 'XBZRLECacheStats',
|
|
|
|
'*total-time': 'int',
|
|
|
|
'*expected-downtime': 'int',
|
|
|
|
'*downtime': 'int',
|
|
|
|
'*setup-time': 'int',
|
|
|
|
'*cpu-throttle-percentage': 'int',
|
2018-03-22 21:17:27 +03:00
|
|
|
'*error-desc': 'str',
|
2021-02-02 16:55:21 +03:00
|
|
|
'*blocked-reasons': ['str'],
|
2023-06-12 22:16:04 +03:00
|
|
|
'*postcopy-blocktime': 'uint32',
|
2018-09-06 10:01:00 +03:00
|
|
|
'*postcopy-vcpu-blocktime': ['uint32'],
|
2019-02-27 13:51:27 +03:00
|
|
|
'*compression': 'CompressionStats',
|
2023-06-07 19:21:58 +03:00
|
|
|
'*socket-address': ['SocketAddress'],
|
|
|
|
'*dirty-limit-throttle-time-per-round': 'uint64',
|
|
|
|
'*dirty-limit-ring-full-time': 'uint64'} }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @query-migrate:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Returns information about current migration process. If migration
|
2017-08-24 22:14:01 +03:00
|
|
|
# is active there will be another json-object with RAM migration
|
|
|
|
# status and if block migration is active another one with block
|
|
|
|
# migration status.
|
|
|
|
#
|
|
|
|
# Returns: @MigrationInfo
|
|
|
|
#
|
2020-11-18 09:41:58 +03:00
|
|
|
# Since: 0.14
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-25 09:42:14 +03:00
|
|
|
# Examples:
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# 1. Before the first migration
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-migrate" }
|
|
|
|
# <- { "return": {} }
|
|
|
|
#
|
|
|
|
# 2. Migration is done and has succeeded
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-migrate" }
|
|
|
|
# <- { "return": {
|
|
|
|
# "status": "completed",
|
2018-08-21 15:39:26 +03:00
|
|
|
# "total-time":12345,
|
|
|
|
# "setup-time":12345,
|
|
|
|
# "downtime":12345,
|
2017-08-24 22:14:01 +03:00
|
|
|
# "ram":{
|
|
|
|
# "transferred":123,
|
|
|
|
# "remaining":123,
|
|
|
|
# "total":246,
|
|
|
|
# "duplicate":123,
|
|
|
|
# "normal":123,
|
|
|
|
# "normal-bytes":123456,
|
|
|
|
# "dirty-sync-count":15
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
#
|
|
|
|
# 3. Migration is done and has failed
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-migrate" }
|
|
|
|
# <- { "return": { "status": "failed" } }
|
|
|
|
#
|
|
|
|
# 4. Migration is being performed and is not a block migration:
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-migrate" }
|
|
|
|
# <- {
|
|
|
|
# "return":{
|
|
|
|
# "status":"active",
|
2018-08-21 15:39:26 +03:00
|
|
|
# "total-time":12345,
|
|
|
|
# "setup-time":12345,
|
|
|
|
# "expected-downtime":12345,
|
2017-08-24 22:14:01 +03:00
|
|
|
# "ram":{
|
|
|
|
# "transferred":123,
|
|
|
|
# "remaining":123,
|
|
|
|
# "total":246,
|
|
|
|
# "duplicate":123,
|
|
|
|
# "normal":123,
|
|
|
|
# "normal-bytes":123456,
|
|
|
|
# "dirty-sync-count":15
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
#
|
|
|
|
# 5. Migration is being performed and is a block migration:
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-migrate" }
|
|
|
|
# <- {
|
|
|
|
# "return":{
|
|
|
|
# "status":"active",
|
2018-08-21 15:39:26 +03:00
|
|
|
# "total-time":12345,
|
|
|
|
# "setup-time":12345,
|
|
|
|
# "expected-downtime":12345,
|
2017-08-24 22:14:01 +03:00
|
|
|
# "ram":{
|
|
|
|
# "total":1057024,
|
|
|
|
# "remaining":1053304,
|
|
|
|
# "transferred":3720,
|
|
|
|
# "duplicate":123,
|
|
|
|
# "normal":123,
|
|
|
|
# "normal-bytes":123456,
|
|
|
|
# "dirty-sync-count":15
|
|
|
|
# },
|
|
|
|
# "disk":{
|
|
|
|
# "total":20971520,
|
|
|
|
# "remaining":20880384,
|
|
|
|
# "transferred":91136
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
#
|
|
|
|
# 6. Migration is being performed and XBZRLE is active:
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-migrate" }
|
|
|
|
# <- {
|
|
|
|
# "return":{
|
|
|
|
# "status":"active",
|
2018-08-21 15:39:26 +03:00
|
|
|
# "total-time":12345,
|
|
|
|
# "setup-time":12345,
|
|
|
|
# "expected-downtime":12345,
|
2017-08-24 22:14:01 +03:00
|
|
|
# "ram":{
|
|
|
|
# "total":1057024,
|
|
|
|
# "remaining":1053304,
|
|
|
|
# "transferred":3720,
|
|
|
|
# "duplicate":10,
|
|
|
|
# "normal":3333,
|
|
|
|
# "normal-bytes":3412992,
|
|
|
|
# "dirty-sync-count":15
|
|
|
|
# },
|
|
|
|
# "xbzrle-cache":{
|
|
|
|
# "cache-size":67108864,
|
|
|
|
# "bytes":20971520,
|
|
|
|
# "pages":2444343,
|
|
|
|
# "cache-miss":2244,
|
|
|
|
# "cache-miss-rate":0.123,
|
2020-04-30 03:59:35 +03:00
|
|
|
# "encoding-rate":80.1,
|
2017-08-24 22:14:01 +03:00
|
|
|
# "overflow":34434
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
##
|
|
|
|
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @MigrationCapability:
|
|
|
|
#
|
|
|
|
# Migration capabilities enumeration
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
|
|
|
|
# Encoding). This feature allows us to minimize migration traffic
|
|
|
|
# for certain work loads, by sending compressed difference of the
|
|
|
|
# pages
|
|
|
|
#
|
|
|
|
# @rdma-pin-all: Controls whether or not the entire VM memory
|
|
|
|
# footprint is mlock()'d on demand or all at once. Refer to
|
|
|
|
# docs/rdma.txt for usage. Disabled by default. (since 2.0)
|
|
|
|
#
|
|
|
|
# @zero-blocks: During storage migration encode blocks of zeroes
|
|
|
|
# efficiently. This essentially saves 1MB of zeroes per block on
|
|
|
|
# the wire. Enabling requires source and target VM to support
|
|
|
|
# this feature. To enable it is sufficient to enable the
|
|
|
|
# capability on the source VM. The feature is disabled by default.
|
|
|
|
# (since 1.6)
|
|
|
|
#
|
|
|
|
# @compress: Use multiple compression threads to accelerate live
|
|
|
|
# migration. This feature can help to reduce the migration
|
|
|
|
# traffic, by sending compressed pages. Please note that if
|
|
|
|
# compress and xbzrle are both on, compress only takes effect in
|
|
|
|
# the ram bulk stage, after that, it will be disabled and only
|
|
|
|
# xbzrle takes effect, this can help to minimize migration
|
2023-10-13 13:47:27 +03:00
|
|
|
# traffic. The feature is disabled by default. (since 2.4)
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
2023-10-13 13:47:27 +03:00
|
|
|
# @events: generate events for each migration state change (since 2.4)
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
|
|
|
# @auto-converge: If enabled, QEMU will automatically throttle down
|
|
|
|
# the guest to speed up convergence of RAM migration. (since 1.6)
|
|
|
|
#
|
|
|
|
# @postcopy-ram: Start executing on the migration target before all of
|
|
|
|
# RAM has been migrated, pulling the remaining pages along as
|
|
|
|
# needed. The capacity must have the same setting on both source
|
|
|
|
# and target or migration will not even start. NOTE: If the
|
|
|
|
# migration fails during postcopy the VM will fail. (since 2.6)
|
|
|
|
#
|
|
|
|
# @x-colo: If enabled, migration will never end, and the state of the
|
|
|
|
# VM on the primary side will be migrated continuously to the VM
|
|
|
|
# on secondary side, this process is called COarse-Grain LOck
|
|
|
|
# Stepping (COLO) for Non-stop Service. (since 2.8)
|
|
|
|
#
|
|
|
|
# @release-ram: if enabled, qemu will free the migrated ram pages on
|
|
|
|
# the source during postcopy-ram migration. (since 2.9)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @block: If enabled, QEMU will also migrate the contents of all block
|
2023-04-28 13:54:29 +03:00
|
|
|
# devices. Default is disabled. A possible alternative uses
|
|
|
|
# mirror jobs to a builtin NBD server on the destination, which
|
|
|
|
# offers more flexibility. (Since 2.10)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @return-path: If enabled, migration will use the return path even
|
2023-04-28 13:54:29 +03:00
|
|
|
# for precopy. (since 2.10)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @pause-before-switchover: Pause outgoing migration before
|
|
|
|
# serialising device state and before disabling block IO (since
|
|
|
|
# 2.11)
|
2017-10-20 12:05:50 +03:00
|
|
|
#
|
2019-02-06 15:54:06 +03:00
|
|
|
# @multifd: Use more than one fd for migration (since 4.0)
|
2016-01-14 14:23:00 +03:00
|
|
|
#
|
2018-03-13 22:34:00 +03:00
|
|
|
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
|
2023-04-28 13:54:29 +03:00
|
|
|
# (since 2.12)
|
2018-03-13 22:34:00 +03:00
|
|
|
#
|
2018-03-22 21:17:22 +03:00
|
|
|
# @postcopy-blocktime: Calculate downtime for postcopy live migration
|
2023-04-28 13:54:29 +03:00
|
|
|
# (since 3.0)
|
2018-03-22 21:17:22 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @late-block-activate: If enabled, the destination will not activate
|
|
|
|
# block devices (and thus take locks) immediately at the end of
|
|
|
|
# migration. (since 3.0)
|
2018-04-16 20:09:30 +03:00
|
|
|
#
|
2023-07-20 10:16:09 +03:00
|
|
|
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
|
|
|
|
# that is accessible on the destination machine. (since 4.0)
|
2019-02-15 20:45:45 +03:00
|
|
|
#
|
2019-09-03 19:22:44 +03:00
|
|
|
# @validate-uuid: Send the UUID of the source to allow the destination
|
2023-04-28 13:54:29 +03:00
|
|
|
# to ensure it is the same. (since 4.2)
|
|
|
|
#
|
|
|
|
# @background-snapshot: If enabled, the migration stream will be a
|
|
|
|
# snapshot of the VM exactly at the point when the migration
|
|
|
|
# procedure starts. The VM RAM is saved with running VM. (since
|
|
|
|
# 6.0)
|
|
|
|
#
|
|
|
|
# @zero-copy-send: Controls behavior on sending memory pages on
|
|
|
|
# migration. When true, enables a zero-copy mechanism for sending
|
|
|
|
# memory pages, if host supports it. Requires that QEMU be
|
|
|
|
# permitted to use locked memory for guest RAM pages. (since 7.1)
|
|
|
|
#
|
|
|
|
# @postcopy-preempt: If enabled, the migration process will allow
|
|
|
|
# postcopy requests to preempt precopy stream, so postcopy
|
|
|
|
# requests will be handled faster. This is a performance feature
|
|
|
|
# and should not affect the correctness of postcopy migration.
|
|
|
|
# (since 7.1)
|
2022-06-20 08:39:45 +03:00
|
|
|
#
|
2023-06-21 14:11:54 +03:00
|
|
|
# @switchover-ack: If enabled, migration will not stop the source VM
|
|
|
|
# and complete the migration until an ACK is received from the
|
|
|
|
# destination that it's OK to do so. Exactly when this ACK is
|
2023-07-20 10:16:09 +03:00
|
|
|
# sent depends on the migrated devices that use this feature. For
|
|
|
|
# example, a device can use it to make sure some of its data is
|
|
|
|
# sent and loaded in the destination before doing switchover.
|
2023-06-21 14:11:54 +03:00
|
|
|
# This can reduce downtime if devices that support this capability
|
|
|
|
# are present. 'return-path' capability must be enabled to use
|
|
|
|
# it. (since 8.1)
|
|
|
|
#
|
2023-07-28 18:10:40 +03:00
|
|
|
# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
|
|
|
|
# keep their dirty page rate within @vcpu-dirty-limit. This can
|
|
|
|
# improve responsiveness of large guests during live migration,
|
|
|
|
# and can result in more stable read performance. Requires KVM
|
|
|
|
# with accelerator property "dirty-ring-size" set. (Since 8.1)
|
2023-06-07 18:30:50 +03:00
|
|
|
#
|
2021-10-28 13:25:13 +03:00
|
|
|
# Features:
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
2021-10-28 13:25:13 +03:00
|
|
|
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
|
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# Since: 1.2
|
|
|
|
##
|
|
|
|
{ 'enum': 'MigrationCapability',
|
|
|
|
'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
|
2021-10-28 13:25:13 +03:00
|
|
|
'compress', 'events', 'postcopy-ram',
|
|
|
|
{ 'name': 'x-colo', 'features': [ 'unstable' ] },
|
|
|
|
'release-ram',
|
2019-02-06 15:54:06 +03:00
|
|
|
'block', 'return-path', 'pause-before-switchover', 'multifd',
|
2019-02-15 20:45:45 +03:00
|
|
|
'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
|
2021-10-28 13:25:13 +03:00
|
|
|
{ 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
|
2022-06-20 08:39:45 +03:00
|
|
|
'validate-uuid', 'background-snapshot',
|
2023-06-07 18:30:50 +03:00
|
|
|
'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
|
|
|
|
'dirty-limit'] }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @MigrationCapabilityStatus:
|
|
|
|
#
|
|
|
|
# Migration capability information
|
|
|
|
#
|
|
|
|
# @capability: capability enum
|
|
|
|
#
|
|
|
|
# @state: capability state bool
|
|
|
|
#
|
|
|
|
# Since: 1.2
|
|
|
|
##
|
|
|
|
{ 'struct': 'MigrationCapabilityStatus',
|
2023-06-12 22:16:04 +03:00
|
|
|
'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @migrate-set-capabilities:
|
|
|
|
#
|
|
|
|
# Enable/Disable the following migration capabilities (like xbzrle)
|
|
|
|
#
|
|
|
|
# @capabilities: json array of capability modifications to make
|
|
|
|
#
|
|
|
|
# Since: 1.2
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate-set-capabilities" , "arguments":
|
|
|
|
# { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
|
2023-04-25 09:42:14 +03:00
|
|
|
# <- { "return": {} }
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
{ 'command': 'migrate-set-capabilities',
|
|
|
|
'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @query-migrate-capabilities:
|
|
|
|
#
|
|
|
|
# Returns information about the current migration capabilities status
|
|
|
|
#
|
2023-04-25 09:42:10 +03:00
|
|
|
# Returns: @MigrationCapabilityStatus
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Since: 1.2
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-migrate-capabilities" }
|
|
|
|
# <- { "return": [
|
|
|
|
# {"state": false, "capability": "xbzrle"},
|
|
|
|
# {"state": false, "capability": "rdma-pin-all"},
|
|
|
|
# {"state": false, "capability": "auto-converge"},
|
|
|
|
# {"state": false, "capability": "zero-blocks"},
|
|
|
|
# {"state": false, "capability": "compress"},
|
|
|
|
# {"state": true, "capability": "events"},
|
|
|
|
# {"state": false, "capability": "postcopy-ram"},
|
|
|
|
# {"state": false, "capability": "x-colo"}
|
|
|
|
# ]}
|
|
|
|
##
|
|
|
|
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus'] }
|
|
|
|
|
2019-01-16 12:35:55 +03:00
|
|
|
##
|
|
|
|
# @MultiFDCompression:
|
|
|
|
#
|
|
|
|
# An enumeration of multifd compression methods.
|
|
|
|
#
|
|
|
|
# @none: no compression.
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
2019-01-04 17:30:06 +03:00
|
|
|
# @zlib: use zlib compression method.
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
2019-12-13 15:47:14 +03:00
|
|
|
# @zstd: use zstd compression method.
|
2019-01-16 12:35:55 +03:00
|
|
|
#
|
|
|
|
# Since: 5.0
|
|
|
|
##
|
|
|
|
{ 'enum': 'MultiFDCompression',
|
2019-12-13 15:47:14 +03:00
|
|
|
'data': [ 'none', 'zlib',
|
2021-08-04 11:31:05 +03:00
|
|
|
{ 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
|
2019-01-16 12:35:55 +03:00
|
|
|
|
2021-02-12 20:34:24 +03:00
|
|
|
##
|
|
|
|
# @BitmapMigrationBitmapAliasTransform:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @persistent: If present, the bitmap will be made persistent or
|
|
|
|
# transient depending on this parameter.
|
2021-02-12 20:34:24 +03:00
|
|
|
#
|
|
|
|
# Since: 6.0
|
|
|
|
##
|
|
|
|
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
|
|
|
|
'data': {
|
|
|
|
'*persistent': 'bool'
|
|
|
|
} }
|
|
|
|
|
2020-08-20 18:07:23 +03:00
|
|
|
##
|
|
|
|
# @BitmapMigrationBitmapAlias:
|
|
|
|
#
|
|
|
|
# @name: The name of the bitmap.
|
|
|
|
#
|
|
|
|
# @alias: An alias name for migration (for example the bitmap name on
|
2023-04-28 13:54:29 +03:00
|
|
|
# the opposite site).
|
2020-08-20 18:07:23 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @transform: Allows the modification of the migrated bitmap. (since
|
|
|
|
# 6.0)
|
2021-02-12 20:34:24 +03:00
|
|
|
#
|
2020-08-20 18:07:23 +03:00
|
|
|
# Since: 5.2
|
|
|
|
##
|
|
|
|
{ 'struct': 'BitmapMigrationBitmapAlias',
|
|
|
|
'data': {
|
|
|
|
'name': 'str',
|
2021-02-12 20:34:24 +03:00
|
|
|
'alias': 'str',
|
|
|
|
'*transform': 'BitmapMigrationBitmapAliasTransform'
|
2020-08-20 18:07:23 +03:00
|
|
|
} }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @BitmapMigrationNodeAlias:
|
|
|
|
#
|
|
|
|
# Maps a block node name and the bitmaps it has to aliases for dirty
|
|
|
|
# bitmap migration.
|
|
|
|
#
|
|
|
|
# @node-name: A block node name.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @alias: An alias block node name for migration (for example the node
|
|
|
|
# name on the opposite site).
|
2020-08-20 18:07:23 +03:00
|
|
|
#
|
|
|
|
# @bitmaps: Mappings for the bitmaps on this node.
|
|
|
|
#
|
|
|
|
# Since: 5.2
|
|
|
|
##
|
|
|
|
{ 'struct': 'BitmapMigrationNodeAlias',
|
|
|
|
'data': {
|
|
|
|
'node-name': 'str',
|
|
|
|
'alias': 'str',
|
|
|
|
'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
|
|
|
|
} }
|
|
|
|
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
# @MigrationParameter:
|
|
|
|
#
|
|
|
|
# Migration parameters enumeration
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-initial: Initial delay (in milliseconds) before sending
|
|
|
|
# the first announce (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-max: Maximum delay (in milliseconds) between packets in
|
|
|
|
# the announcement (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-rounds: Number of self-announce packets sent after
|
|
|
|
# migration (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-step: Increase in delay (in milliseconds) between
|
|
|
|
# subsequent packets in the announcement (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @compress-level: Set the compression level to be used in live
|
|
|
|
# migration, the compression level is an integer between 0 and 9,
|
|
|
|
# where 0 means no compression, 1 means the best compression
|
|
|
|
# speed, and 9 means best compression ratio which will consume
|
|
|
|
# more CPU.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @compress-threads: Set compression thread count to be used in live
|
|
|
|
# migration, the compression thread count is an integer between 1
|
|
|
|
# and 255.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @compress-wait-thread: Controls behavior when all compression
|
|
|
|
# threads are currently busy. If true (default), wait for a free
|
|
|
|
# compression thread to become available; otherwise, send the page
|
|
|
|
# uncompressed. (Since 3.1)
|
2018-08-21 11:10:20 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @decompress-threads: Set decompression thread count to be used in
|
|
|
|
# live migration, the decompression thread count is an integer
|
|
|
|
# between 1 and 255. Usually, decompression is at least 4 times as
|
|
|
|
# fast as compression, so setting the decompress-threads to
|
|
|
|
# about 1/4 of compress-threads is adequate.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
|
|
|
|
# bytes_xfer_period to trigger throttling. It is expressed as
|
|
|
|
# percentage. The default value is 50. (Since 5.0)
|
2020-02-24 05:31:42 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @cpu-throttle-initial: Initial percentage of time guest cpus are
|
|
|
|
# throttled when migration auto-converge is activated. The
|
|
|
|
# default value is 20. (Since 2.7)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @cpu-throttle-increment: throttle percentage increase each time
|
2023-04-28 13:54:29 +03:00
|
|
|
# auto-converge detects that migration is not making progress.
|
|
|
|
# The default value is 10. (Since 2.7)
|
|
|
|
#
|
|
|
|
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.  At
|
|
|
|
# the tail stage of throttling, the Guest is very sensitive to CPU
|
|
|
|
# percentage while the @cpu-throttle-increment is excessive
|
|
|
|
# usually at tail stage. If this parameter is true, we will
|
|
|
|
# compute the ideal CPU percentage used by the Guest, which may
|
|
|
|
# exactly make the dirty rate match the dirty rate threshold.
|
|
|
|
# Then we will choose a smaller throttle increment between the one
|
|
|
|
# specified by @cpu-throttle-increment and the one generated by
|
|
|
|
# ideal CPU percentage. Therefore, it is compatible to
|
|
|
|
# traditional throttling, meanwhile the throttle increment won't
|
|
|
|
# be excessive at tail stage. The default value is false. (Since
|
|
|
|
# 5.1)
|
|
|
|
#
|
|
|
|
# @tls-creds: ID of the 'tls-creds' object that provides credentials
|
|
|
|
# for establishing a TLS connection over the migration data
|
|
|
|
# channel. On the outgoing side of the migration, the credentials
|
|
|
|
# must be for a 'client' endpoint, while for the incoming side the
|
|
|
|
# credentials must be for a 'server' endpoint. Setting this will
|
|
|
|
# enable TLS for all migrations. The default is unset, resulting
|
|
|
|
# in unsecured migration at the QEMU level. (Since 2.7)
|
|
|
|
#
|
|
|
|
# @tls-hostname: hostname of the target host for the migration. This
|
|
|
|
# is required when using x509 based TLS credentials and the
|
|
|
|
# migration URI does not already include a hostname. For example
|
|
|
|
# if using fd: or exec: based migration, the hostname must be
|
|
|
|
# provided so that the server's x509 certificate identity can be
|
|
|
|
# validated. (Since 2.7)
|
|
|
|
#
|
|
|
|
# @tls-authz: ID of the 'authz' object subclass that provides access
|
|
|
|
# control checking of the TLS x509 certificate distinguished name.
|
|
|
|
# This object is only resolved at time of use, so can be deleted
|
|
|
|
# and recreated on the fly while the migration server is active.
|
|
|
|
# If missing, it will default to denying access (Since 4.0)
|
|
|
|
#
|
|
|
|
# @max-bandwidth: to set maximum speed for migration. maximum speed
|
|
|
|
# in bytes per second. (Since 2.8)
|
|
|
|
#
|
migration: Allow user to specify available switchover bandwidth
Migration bandwidth is a very important value to live migration. It's
because it's one of the major factors that we'll make decision on when to
switchover to destination in a precopy process.
This value is currently estimated by QEMU during the whole live migration
process by monitoring how fast we were sending the data. This can be the
most accurate bandwidth if in the ideal world, where we're always feeding
unlimited data to the migration channel, and then it'll be limited to the
bandwidth that is available.
However in reality it may be very different, e.g., over a 10Gbps network we
can see query-migrate showing migration bandwidth of only a few tens of
MB/s just because there are plenty of other things the migration thread
might be doing. For example, the migration thread can be busy scanning
zero pages, or it can be fetching dirty bitmap from other external dirty
sources (like vhost or KVM). It means we may not be pushing data as much
as possible to migration channel, so the bandwidth estimated from "how many
data we sent in the channel" can be dramatically inaccurate sometimes.
With that, the decision to switchover will be affected, by assuming that we
may not be able to switchover at all with such a low bandwidth, but in
reality we can.
The migration may not even converge at all with the downtime specified,
with that wrong estimation of bandwidth, keeping iterations forever with a
low estimation of bandwidth.
The issue is QEMU itself may not be able to avoid those uncertainties on
measuing the real "available migration bandwidth". At least not something
I can think of so far.
One way to fix this is when the user is fully aware of the available
bandwidth, then we can allow the user to help providing an accurate value.
For example, if the user has a dedicated channel of 10Gbps for migration
for this specific VM, the user can specify this bandwidth so QEMU can
always do the calculation based on this fact, trusting the user as long as
specified. It may not be the exact bandwidth when switching over (in which
case qemu will push migration data as fast as possible), but much better
than QEMU trying to wildly guess, especially when very wrong.
A new parameter "avail-switchover-bandwidth" is introduced just for this.
So when the user specified this parameter, instead of trusting the
estimated value from QEMU itself (based on the QEMUFile send speed), it
trusts the user more by using this value to decide when to switchover,
assuming that we'll have such bandwidth available then.
Note that specifying this value will not throttle the bandwidth for
switchover yet, so QEMU will always use the full bandwidth possible for
sending switchover data, assuming that should always be the most important
way to use the network at that time.
This can resolve issues like "unconvergence migration" which is caused by
hilarious low "migration bandwidth" detected for whatever reason.
Reported-by: Zhiyi Guo <zhguo@redhat.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231010221922.40638-1-peterx@redhat.com>
2023-10-11 01:19:22 +03:00
|
|
|
# @avail-switchover-bandwidth: to set the available bandwidth that
|
|
|
|
# migration can use during switchover phase. NOTE! This does not
|
|
|
|
# limit the bandwidth during switchover, but only for calculations when
|
|
|
|
# making decisions to switchover. By default, this value is zero,
|
|
|
|
# which means QEMU will estimate the bandwidth automatically. This can
|
|
|
|
# be set when the estimated value is not accurate, while the user is
|
|
|
|
# able to guarantee such bandwidth is available when switching over.
|
|
|
|
# When specified correctly, this can make the switchover decision much
|
|
|
|
# more accurate. (Since 8.2)
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @downtime-limit: set maximum tolerated downtime for migration.
|
|
|
|
# maximum downtime in milliseconds (Since 2.8)
|
|
|
|
#
|
|
|
|
# @x-checkpoint-delay: The delay time (in ms) between two COLO
|
|
|
|
# checkpoints in periodic mode. (Since 2.8)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @block-incremental: Affects how much storage is migrated when the
|
2023-04-28 13:54:29 +03:00
|
|
|
# block migration capability is enabled. When false, the entire
|
|
|
|
# storage backing chain is migrated into a flattened image at the
|
|
|
|
# destination; when true, only the active qcow2 layer is migrated
|
|
|
|
# and the destination must already have access to the same backing
|
|
|
|
# chain as was used on the source. (since 2.10)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2019-02-06 15:54:06 +03:00
|
|
|
# @multifd-channels: Number of channels used to migrate data in
|
2023-04-28 13:54:29 +03:00
|
|
|
# parallel. This is the same number as the number of sockets
|
|
|
|
# used for migration. The default value is 2 (since 4.0)
|
2016-01-15 10:56:17 +03:00
|
|
|
#
|
2017-10-05 22:30:10 +03:00
|
|
|
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
|
2023-04-28 13:54:29 +03:00
|
|
|
# needs to be a multiple of the target page size and a power of 2
|
|
|
|
# (Since 2.11)
|
2017-10-05 22:30:10 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @max-postcopy-bandwidth: Background transfer bandwidth during
|
|
|
|
# postcopy. Defaults to 0 (unlimited). In bytes per second.
|
|
|
|
# (Since 3.0)
|
2018-08-01 16:00:20 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99.
|
|
|
|
# (Since 3.1)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @multifd-compression: Which compression method to use. Defaults to
|
|
|
|
# none. (Since 5.0)
|
2019-01-16 12:35:55 +03:00
|
|
|
#
|
2020-01-23 19:08:52 +03:00
|
|
|
# @multifd-zlib-level: Set the compression level to be used in live
|
2023-04-28 13:54:29 +03:00
|
|
|
# migration, the compression level is an integer between 0 and 9,
|
|
|
|
# where 0 means no compression, 1 means the best compression
|
|
|
|
# speed, and 9 means best compression ratio which will consume
|
|
|
|
# more CPU. Defaults to 1. (Since 5.0)
|
2020-01-23 19:08:52 +03:00
|
|
|
#
|
2020-01-23 19:41:36 +03:00
|
|
|
# @multifd-zstd-level: Set the compression level to be used in live
|
2023-04-28 13:54:29 +03:00
|
|
|
# migration, the compression level is an integer between 0 and 20,
|
|
|
|
# where 0 means no compression, 1 means the best compression
|
|
|
|
# speed, and 20 means best compression ratio which will consume
|
|
|
|
# more CPU. Defaults to 1. (Since 5.0)
|
2022-05-13 09:28:33 +03:00
|
|
|
#
|
2020-08-20 18:07:23 +03:00
|
|
|
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
|
2023-04-28 13:54:29 +03:00
|
|
|
# aliases for the purpose of dirty bitmap migration. Such aliases
|
|
|
|
# may for example be the corresponding names on the opposite site.
|
|
|
|
# The mapping must be one-to-one, but not necessarily complete: On
|
|
|
|
# the source, unmapped bitmaps and all bitmaps on unmapped nodes
|
|
|
|
# will be ignored. On the destination, encountering an unmapped
|
|
|
|
# alias in the incoming migration stream will result in a report,
|
|
|
|
# and all further bitmap migration data will then be discarded.
|
|
|
|
# Note that the destination does not know about bitmaps it does
|
|
|
|
# not receive, so there is no limitation or requirement regarding
|
|
|
|
# the number of bitmaps received, or how they are named, or on
|
|
|
|
# which nodes they are placed. By default (when this parameter
|
|
|
|
# has never been set), bitmap names are mapped to themselves.
|
|
|
|
# Nodes are mapped to their block device name if there is one, and
|
|
|
|
# to their node name otherwise. (Since 5.2)
|
2020-08-20 18:07:23 +03:00
|
|
|
#
|
2023-07-28 12:38:07 +03:00
|
|
|
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
|
|
|
|
# limit during live migration. Should be in the range 1 to 1000ms.
|
|
|
|
# Defaults to 1000ms. (Since 8.1)
|
2023-06-07 16:32:59 +03:00
|
|
|
#
|
2023-06-07 17:58:32 +03:00
|
|
|
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
|
2023-07-28 12:38:07 +03:00
|
|
|
# Defaults to 1. (Since 8.1)
|
2023-06-07 17:58:32 +03:00
|
|
|
#
|
2021-10-28 13:25:13 +03:00
|
|
|
# Features:
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
2023-06-07 16:32:59 +03:00
|
|
|
# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
|
2023-07-28 12:38:07 +03:00
|
|
|
# are experimental.
|
2021-10-28 13:25:13 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# Since: 2.4
|
|
|
|
##
|
|
|
|
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'compress-level', 'compress-threads', 'decompress-threads',
           'compress-wait-thread', 'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'avail-switchover-bandwidth', 'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'block-incremental',
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit'] }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @MigrateSetParameters:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-initial: Initial delay (in milliseconds) before sending
|
|
|
|
# the first announce (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-max: Maximum delay (in milliseconds) between packets in
|
|
|
|
# the announcement (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-rounds: Number of self-announce packets sent after
|
|
|
|
# migration (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-step: Increase in delay (in milliseconds) between
|
|
|
|
# subsequent packets in the announcement (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @compress-level: compression level
|
|
|
|
#
|
|
|
|
# @compress-threads: compression thread count
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @compress-wait-thread: Controls behavior when all compression
|
|
|
|
# threads are currently busy. If true (default), wait for a free
|
|
|
|
# compression thread to become available; otherwise, send the page
|
|
|
|
# uncompressed. (Since 3.1)
|
2018-08-21 11:10:20 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @decompress-threads: decompression thread count
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
|
|
|
|
# bytes_xfer_period to trigger throttling. It is expressed as
|
|
|
|
# percentage. The default value is 50. (Since 5.0)
|
2020-02-24 05:31:42 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @cpu-throttle-initial: Initial percentage of time guest cpus are
|
2023-04-28 13:54:29 +03:00
|
|
|
# throttled when migration auto-converge is activated. The
|
|
|
|
# default value is 20. (Since 2.7)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @cpu-throttle-increment: throttle percentage increase each time
|
2023-04-28 13:54:29 +03:00
|
|
|
# auto-converge detects that migration is not making progress.
|
|
|
|
# The default value is 10. (Since 2.7)
|
|
|
|
#
|
|
|
|
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.  At
|
|
|
|
# the tail stage of throttling, the Guest is very sensitive to CPU
|
|
|
|
# percentage while the @cpu-throttle-increment is excessive
|
|
|
|
# usually at tail stage. If this parameter is true, we will
|
|
|
|
# compute the ideal CPU percentage used by the Guest, which may
|
|
|
|
# exactly make the dirty rate match the dirty rate threshold.
|
|
|
|
# Then we will choose a smaller throttle increment between the one
|
|
|
|
# specified by @cpu-throttle-increment and the one generated by
|
|
|
|
# ideal CPU percentage. Therefore, it is compatible to
|
|
|
|
# traditional throttling, meanwhile the throttle increment won't
|
|
|
|
# be excessive at tail stage. The default value is false. (Since
|
|
|
|
# 5.1)
|
2020-04-13 13:15:08 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @tls-creds: ID of the 'tls-creds' object that provides credentials
|
2023-04-28 13:54:29 +03:00
|
|
|
# for establishing a TLS connection over the migration data
|
|
|
|
# channel. On the outgoing side of the migration, the credentials
|
|
|
|
# must be for a 'client' endpoint, while for the incoming side the
|
|
|
|
# credentials must be for a 'server' endpoint. Setting this to a
|
|
|
|
# non-empty string enables TLS for all migrations. An empty
|
|
|
|
# string means that QEMU will use plain text mode for migration,
|
|
|
|
# rather than TLS (Since 2.9) Previously (since 2.7), this was
|
|
|
|
# reported by omitting tls-creds instead.
|
|
|
|
#
|
|
|
|
# @tls-hostname: hostname of the target host for the migration. This
|
|
|
|
# is required when using x509 based TLS credentials and the
|
|
|
|
# migration URI does not already include a hostname. For example
|
|
|
|
# if using fd: or exec: based migration, the hostname must be
|
|
|
|
# provided so that the server's x509 certificate identity can be
|
|
|
|
# validated. (Since 2.7) An empty string means that QEMU will use
|
|
|
|
# the hostname associated with the migration URI, if any. (Since
|
|
|
|
# 2.9) Previously (since 2.7), this was reported by omitting
|
|
|
|
# tls-hostname instead.
|
|
|
|
#
|
|
|
|
# @max-bandwidth: to set maximum speed for migration. maximum speed
|
|
|
|
# in bytes per second. (Since 2.8)
|
|
|
|
#
|
migration: Allow user to specify available switchover bandwidth
Migration bandwidth is a very important value to live migration. It's
because it's one of the major factors that we'll make decision on when to
switchover to destination in a precopy process.
This value is currently estimated by QEMU during the whole live migration
process by monitoring how fast we were sending the data. This can be the
most accurate bandwidth if in the ideal world, where we're always feeding
unlimited data to the migration channel, and then it'll be limited to the
bandwidth that is available.
However in reality it may be very different, e.g., over a 10Gbps network we
can see query-migrate showing migration bandwidth of only a few tens of
MB/s just because there are plenty of other things the migration thread
might be doing. For example, the migration thread can be busy scanning
zero pages, or it can be fetching dirty bitmap from other external dirty
sources (like vhost or KVM). It means we may not be pushing data as much
as possible to migration channel, so the bandwidth estimated from "how many
data we sent in the channel" can be dramatically inaccurate sometimes.
With that, the decision to switchover will be affected, by assuming that we
may not be able to switchover at all with such a low bandwidth, but in
reality we can.
The migration may not even converge at all with the downtime specified,
with that wrong estimation of bandwidth, keeping iterations forever with a
low estimation of bandwidth.
The issue is QEMU itself may not be able to avoid those uncertainties on
measuing the real "available migration bandwidth". At least not something
I can think of so far.
One way to fix this is when the user is fully aware of the available
bandwidth, then we can allow the user to help providing an accurate value.
For example, if the user has a dedicated channel of 10Gbps for migration
for this specific VM, the user can specify this bandwidth so QEMU can
always do the calculation based on this fact, trusting the user as long as
specified. It may not be the exact bandwidth when switching over (in which
case qemu will push migration data as fast as possible), but much better
than QEMU trying to wildly guess, especially when very wrong.
A new parameter "avail-switchover-bandwidth" is introduced just for this.
So when the user specified this parameter, instead of trusting the
estimated value from QEMU itself (based on the QEMUFile send speed), it
trusts the user more by using this value to decide when to switchover,
assuming that we'll have such bandwidth available then.
Note that specifying this value will not throttle the bandwidth for
switchover yet, so QEMU will always use the full bandwidth possible for
sending switchover data, assuming that should always be the most important
way to use the network at that time.
This can resolve issues like "unconvergence migration" which is caused by
hilarious low "migration bandwidth" detected for whatever reason.
Reported-by: Zhiyi Guo <zhguo@redhat.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231010221922.40638-1-peterx@redhat.com>
2023-10-11 01:19:22 +03:00
|
|
|
# @avail-switchover-bandwidth: to set the available bandwidth that
|
|
|
|
# migration can use during switchover phase. NOTE! This does not
|
|
|
|
# limit the bandwidth during switchover, but only for calculations when
|
|
|
|
# making decisions to switchover. By default, this value is zero,
|
|
|
|
# which means QEMU will estimate the bandwidth automatically. This can
|
|
|
|
# be set when the estimated value is not accurate, while the user is
|
|
|
|
# able to guarantee such bandwidth is available when switching over.
|
|
|
|
# When specified correctly, this can make the switchover decision much
|
|
|
|
# more accurate. (Since 8.2)
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @downtime-limit: set maximum tolerated downtime for migration.
|
|
|
|
# maximum downtime in milliseconds (Since 2.8)
|
|
|
|
#
|
|
|
|
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
|
|
|
|
# (Since 2.8)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @block-incremental: Affects how much storage is migrated when the
|
2023-04-28 13:54:29 +03:00
|
|
|
# block migration capability is enabled. When false, the entire
|
|
|
|
# storage backing chain is migrated into a flattened image at the
|
|
|
|
# destination; when true, only the active qcow2 layer is migrated
|
|
|
|
# and the destination must already have access to the same backing
|
|
|
|
# chain as was used on the source. (since 2.10)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2019-02-06 15:54:06 +03:00
|
|
|
# @multifd-channels: Number of channels used to migrate data in
|
2023-04-28 13:54:29 +03:00
|
|
|
# parallel. This is the same number as the number of sockets
|
|
|
|
# used for migration. The default value is 2 (since 4.0)
|
2016-01-15 10:56:17 +03:00
|
|
|
#
|
2017-10-05 22:30:10 +03:00
|
|
|
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
|
2023-04-28 13:54:29 +03:00
|
|
|
# needs to be a multiple of the target page size and a power of 2
|
|
|
|
# (Since 2.11)
|
2018-06-13 13:26:40 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @max-postcopy-bandwidth: Background transfer bandwidth during
|
|
|
|
# postcopy. Defaults to 0 (unlimited). In bytes per second.
|
|
|
|
# (Since 3.0)
|
2018-08-01 16:00:20 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @max-cpu-throttle: maximum cpu throttle percentage. The default
|
|
|
|
# value is 99. (Since 3.1)
|
2018-08-01 16:00:20 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @multifd-compression: Which compression method to use. Defaults to
|
|
|
|
# none. (Since 5.0)
|
2019-01-16 12:35:55 +03:00
|
|
|
#
|
2020-01-23 19:08:52 +03:00
|
|
|
# @multifd-zlib-level: Set the compression level to be used in live
|
2023-04-28 13:54:29 +03:00
|
|
|
# migration, the compression level is an integer between 0 and 9,
|
|
|
|
# where 0 means no compression, 1 means the best compression
|
|
|
|
# speed, and 9 means best compression ratio which will consume
|
|
|
|
# more CPU. Defaults to 1. (Since 5.0)
|
2020-01-23 19:08:52 +03:00
|
|
|
#
|
2020-01-23 19:41:36 +03:00
|
|
|
# @multifd-zstd-level: Set the compression level to be used in live
|
2023-04-28 13:54:29 +03:00
|
|
|
# migration, the compression level is an integer between 0 and 20,
|
|
|
|
# where 0 means no compression, 1 means the best compression
|
|
|
|
# speed, and 20 means best compression ratio which will consume
|
|
|
|
# more CPU. Defaults to 1. (Since 5.0)
|
2020-01-23 19:41:36 +03:00
|
|
|
#
|
2020-08-20 18:07:23 +03:00
|
|
|
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
|
2023-04-28 13:54:29 +03:00
|
|
|
# aliases for the purpose of dirty bitmap migration. Such aliases
|
|
|
|
# may for example be the corresponding names on the opposite site.
|
|
|
|
# The mapping must be one-to-one, but not necessarily complete: On
|
|
|
|
# the source, unmapped bitmaps and all bitmaps on unmapped nodes
|
|
|
|
# will be ignored. On the destination, encountering an unmapped
|
|
|
|
# alias in the incoming migration stream will result in a report,
|
|
|
|
# and all further bitmap migration data will then be discarded.
|
|
|
|
# Note that the destination does not know about bitmaps it does
|
|
|
|
# not receive, so there is no limitation or requirement regarding
|
|
|
|
# the number of bitmaps received, or how they are named, or on
|
|
|
|
# which nodes they are placed. By default (when this parameter
|
|
|
|
# has never been set), bitmap names are mapped to themselves.
|
|
|
|
# Nodes are mapped to their block device name if there is one, and
|
|
|
|
# to their node name otherwise. (Since 5.2)
|
2020-08-20 18:07:23 +03:00
|
|
|
#
|
2023-07-28 12:38:07 +03:00
|
|
|
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
|
|
|
|
# limit during live migration. Should be in the range 1 to 1000ms.
|
|
|
|
# Defaults to 1000ms. (Since 8.1)
|
2023-06-07 16:32:59 +03:00
|
|
|
#
|
2023-06-07 17:58:32 +03:00
|
|
|
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
|
2023-07-28 12:38:07 +03:00
|
|
|
# Defaults to 1. (Since 8.1)
|
2023-06-07 17:58:32 +03:00
|
|
|
#
|
2021-10-28 13:25:13 +03:00
|
|
|
# Features:
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
2023-06-07 16:32:59 +03:00
|
|
|
# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
|
2023-07-28 12:38:07 +03:00
|
|
|
# are experimental.
|
2021-10-28 13:25:13 +03:00
|
|
|
#
|
2023-04-28 13:54:19 +03:00
|
|
|
# TODO: either fuse back into MigrationParameters, or make
|
2023-04-28 13:54:29 +03:00
|
|
|
# MigrationParameters members mandatory
|
2023-04-28 13:54:19 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# Since: 2.4
|
|
|
|
##
|
|
|
|
{ 'struct': 'MigrateSetParameters',
|
2019-02-27 16:24:06 +03:00
|
|
|
'data': { '*announce-initial': 'size',
|
|
|
|
'*announce-max': 'size',
|
|
|
|
'*announce-rounds': 'size',
|
|
|
|
'*announce-step': 'size',
|
2021-02-02 17:17:31 +03:00
|
|
|
'*compress-level': 'uint8',
|
|
|
|
'*compress-threads': 'uint8',
|
2018-08-21 11:10:20 +03:00
|
|
|
'*compress-wait-thread': 'bool',
|
2021-02-02 17:17:31 +03:00
|
|
|
'*decompress-threads': 'uint8',
|
|
|
|
'*throttle-trigger-threshold': 'uint8',
|
|
|
|
'*cpu-throttle-initial': 'uint8',
|
|
|
|
'*cpu-throttle-increment': 'uint8',
|
2020-04-13 13:15:08 +03:00
|
|
|
'*cpu-throttle-tailslow': 'bool',
|
2017-08-24 22:14:01 +03:00
|
|
|
'*tls-creds': 'StrOrNull',
|
|
|
|
'*tls-hostname': 'StrOrNull',
|
migration: add support for a "tls-authz" migration parameter
The QEMU instance that runs as the server for the migration data
transport (ie the target QEMU) needs to be able to configure access
control so it can prevent unauthorized clients initiating an incoming
migration. This adds a new 'tls-authz' migration parameter that is used
to provide the QOM ID of a QAuthZ subclass instance that provides the
access control check. This is checked against the x509 certificate
obtained during the TLS handshake.
For example, when starting a QEMU for incoming migration, it is
possible to give an example identity of the source QEMU that is
intended to be connecting later:
$QEMU \
-monitor stdio \
-incoming defer \
...other args...
(qemu) object_add tls-creds-x509,id=tls0,dir=/home/berrange/qemutls,\
endpoint=server,verify-peer=yes \
(qemu) object_add authz-simple,id=auth0,identity=CN=laptop.example.com,,\
O=Example Org,,L=London,,ST=London,,C=GB \
(qemu) migrate_incoming tcp:localhost:9000
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2019-02-27 17:53:24 +03:00
|
|
|
'*tls-authz': 'StrOrNull',
|
2021-02-02 17:17:31 +03:00
|
|
|
'*max-bandwidth': 'size',
|
migration: Allow user to specify available switchover bandwidth
Migration bandwidth is a very important value to live migration. It's
because it's one of the major factors that we'll make decision on when to
switchover to destination in a precopy process.
This value is currently estimated by QEMU during the whole live migration
process by monitoring how fast we were sending the data. This can be the
most accurate bandwidth if in the ideal world, where we're always feeding
unlimited data to the migration channel, and then it'll be limited to the
bandwidth that is available.
However in reality it may be very different, e.g., over a 10Gbps network we
can see query-migrate showing migration bandwidth of only a few tens of
MB/s just because there are plenty of other things the migration thread
might be doing. For example, the migration thread can be busy scanning
zero pages, or it can be fetching dirty bitmap from other external dirty
sources (like vhost or KVM). It means we may not be pushing data as much
as possible to migration channel, so the bandwidth estimated from "how many
data we sent in the channel" can be dramatically inaccurate sometimes.
With that, the decision to switchover will be affected, by assuming that we
may not be able to switchover at all with such a low bandwidth, but in
reality we can.
The migration may not even converge at all with the downtime specified,
with that wrong estimation of bandwidth, keeping iterations forever with a
low estimation of bandwidth.
The issue is QEMU itself may not be able to avoid those uncertainties on
measuing the real "available migration bandwidth". At least not something
I can think of so far.
One way to fix this is when the user is fully aware of the available
bandwidth, then we can allow the user to help providing an accurate value.
For example, if the user has a dedicated channel of 10Gbps for migration
for this specific VM, the user can specify this bandwidth so QEMU can
always do the calculation based on this fact, trusting the user as long as
specified. It may not be the exact bandwidth when switching over (in which
case qemu will push migration data as fast as possible), but much better
than QEMU trying to wildly guess, especially when very wrong.
A new parameter "avail-switchover-bandwidth" is introduced just for this.
So when the user specified this parameter, instead of trusting the
estimated value from QEMU itself (based on the QEMUFile send speed), it
trusts the user more by using this value to decide when to switchover,
assuming that we'll have such bandwidth available then.
Note that specifying this value will not throttle the bandwidth for
switchover yet, so QEMU will always use the full bandwidth possible for
sending switchover data, assuming that should always be the most important
way to use the network at that time.
This can resolve issues like "unconvergence migration" which is caused by
hilarious low "migration bandwidth" detected for whatever reason.
Reported-by: Zhiyi Guo <zhguo@redhat.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231010221922.40638-1-peterx@redhat.com>
2023-10-11 01:19:22 +03:00
|
|
|
'*avail-switchover-bandwidth': 'size',
|
2021-02-02 17:17:31 +03:00
|
|
|
'*downtime-limit': 'uint64',
|
2021-10-28 13:25:13 +03:00
|
|
|
'*x-checkpoint-delay': { 'type': 'uint32',
|
|
|
|
'features': [ 'unstable' ] },
|
2016-01-15 10:56:17 +03:00
|
|
|
'*block-incremental': 'bool',
|
2021-02-02 17:17:31 +03:00
|
|
|
'*multifd-channels': 'uint8',
|
2018-06-13 13:26:40 +03:00
|
|
|
'*xbzrle-cache-size': 'size',
|
2018-08-01 16:00:20 +03:00
|
|
|
'*max-postcopy-bandwidth': 'size',
|
2021-02-02 17:17:31 +03:00
|
|
|
'*max-cpu-throttle': 'uint8',
|
2020-01-23 19:08:52 +03:00
|
|
|
'*multifd-compression': 'MultiFDCompression',
|
2021-02-02 17:17:31 +03:00
|
|
|
'*multifd-zlib-level': 'uint8',
|
|
|
|
'*multifd-zstd-level': 'uint8',
|
2023-06-07 16:32:59 +03:00
|
|
|
'*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
|
|
|
|
'*x-vcpu-dirty-limit-period': { 'type': 'uint64',
|
2023-06-07 17:58:32 +03:00
|
|
|
'features': [ 'unstable' ] },
|
|
|
|
'*vcpu-dirty-limit': 'uint64'} }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @migrate-set-parameters:
|
|
|
|
#
|
|
|
|
# Set various migration parameters.
|
|
|
|
#
|
|
|
|
# Since: 2.4
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate-set-parameters" ,
|
|
|
|
# "arguments": { "compress-level": 1 } }
|
2023-04-25 09:42:14 +03:00
|
|
|
# <- { "return": {} }
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
{ 'command': 'migrate-set-parameters', 'boxed': true,
|
|
|
|
'data': 'MigrateSetParameters' }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @MigrationParameters:
|
|
|
|
#
|
|
|
|
# The optional members aren't actually optional.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-initial: Initial delay (in milliseconds) before sending
|
|
|
|
# the first announce (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-max: Maximum delay (in milliseconds) between packets in
|
|
|
|
# the announcement (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-rounds: Number of self-announce packets sent after
|
|
|
|
# migration (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @announce-step: Increase in delay (in milliseconds) between
|
|
|
|
# subsequent packets in the announcement (Since 4.0)
|
2019-02-27 16:24:06 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @compress-level: compression level
|
|
|
|
#
|
|
|
|
# @compress-threads: compression thread count
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @compress-wait-thread: Controls behavior when all compression
|
|
|
|
# threads are currently busy. If true (default), wait for a free
|
|
|
|
# compression thread to become available; otherwise, send the page
|
|
|
|
# uncompressed. (Since 3.1)
|
2018-08-21 11:10:20 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @decompress-threads: decompression thread count
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
|
|
|
|
# bytes_xfer_period to trigger throttling. It is expressed as
|
|
|
|
# percentage. The default value is 50. (Since 5.0)
|
2020-02-24 05:31:42 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @cpu-throttle-initial: Initial percentage of time guest cpus are
|
2023-04-28 13:54:29 +03:00
|
|
|
# throttled when migration auto-converge is activated. (Since
|
|
|
|
# 2.7)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @cpu-throttle-increment: throttle percentage increase each time
|
2023-04-28 13:54:29 +03:00
|
|
|
# auto-converge detects that migration is not making progress.
|
|
|
|
# (Since 2.7)
|
|
|
|
#
|
|
|
|
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
|
|
|
|
# the tail stage of throttling, the Guest is very sensitive to CPU
|
|
|
|
# percentage while the @cpu-throttle -increment is excessive
|
|
|
|
# usually at tail stage. If this parameter is true, we will
|
|
|
|
# compute the ideal CPU percentage used by the Guest, which may
|
|
|
|
# exactly make the dirty rate match the dirty rate threshold.
|
|
|
|
# Then we will choose a smaller throttle increment between the one
|
|
|
|
# specified by @cpu-throttle-increment and the one generated by
|
|
|
|
# ideal CPU percentage. Therefore, it is compatible to
|
|
|
|
# traditional throttling, meanwhile the throttle increment won't
|
|
|
|
# be excessive at tail stage. The default value is false. (Since
|
|
|
|
# 5.1)
|
2020-04-13 13:15:08 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# @tls-creds: ID of the 'tls-creds' object that provides credentials
|
2023-04-28 13:54:29 +03:00
|
|
|
# for establishing a TLS connection over the migration data
|
|
|
|
# channel. On the outgoing side of the migration, the credentials
|
|
|
|
# must be for a 'client' endpoint, while for the incoming side the
|
|
|
|
# credentials must be for a 'server' endpoint. An empty string
|
|
|
|
# means that QEMU will use plain text mode for migration, rather
|
|
|
|
# than TLS (Since 2.7) Note: 2.8 reports this by omitting
|
|
|
|
# tls-creds instead.
|
|
|
|
#
|
|
|
|
# @tls-hostname: hostname of the target host for the migration. This
|
|
|
|
# is required when using x509 based TLS credentials and the
|
|
|
|
# migration URI does not already include a hostname. For example
|
|
|
|
# if using fd: or exec: based migration, the hostname must be
|
|
|
|
# provided so that the server's x509 certificate identity can be
|
|
|
|
# validated. (Since 2.7) An empty string means that QEMU will use
|
|
|
|
# the hostname associated with the migration URI, if any. (Since
|
|
|
|
# 2.9) Note: 2.8 reports this by omitting tls-hostname instead.
|
|
|
|
#
|
|
|
|
# @tls-authz: ID of the 'authz' object subclass that provides access
|
|
|
|
# control checking of the TLS x509 certificate distinguished name.
|
|
|
|
# (Since 4.0)
|
|
|
|
#
|
|
|
|
# @max-bandwidth: to set maximum speed for migration. maximum speed
|
|
|
|
# in bytes per second. (Since 2.8)
|
|
|
|
#
|
migration: Allow user to specify available switchover bandwidth
Migration bandwidth is a very important value to live migration. It's
because it's one of the major factors that we'll make decision on when to
switchover to destination in a precopy process.
This value is currently estimated by QEMU during the whole live migration
process by monitoring how fast we were sending the data. This can be the
most accurate bandwidth if in the ideal world, where we're always feeding
unlimited data to the migration channel, and then it'll be limited to the
bandwidth that is available.
However in reality it may be very different, e.g., over a 10Gbps network we
can see query-migrate showing migration bandwidth of only a few tens of
MB/s just because there are plenty of other things the migration thread
might be doing. For example, the migration thread can be busy scanning
zero pages, or it can be fetching dirty bitmap from other external dirty
sources (like vhost or KVM). It means we may not be pushing data as much
as possible to migration channel, so the bandwidth estimated from "how many
data we sent in the channel" can be dramatically inaccurate sometimes.
With that, the decision to switchover will be affected, by assuming that we
may not be able to switchover at all with such a low bandwidth, but in
reality we can.
The migration may not even converge at all with the downtime specified,
with that wrong estimation of bandwidth, keeping iterations forever with a
low estimation of bandwidth.
The issue is QEMU itself may not be able to avoid those uncertainties on
measuing the real "available migration bandwidth". At least not something
I can think of so far.
One way to fix this is when the user is fully aware of the available
bandwidth, then we can allow the user to help providing an accurate value.
For example, if the user has a dedicated channel of 10Gbps for migration
for this specific VM, the user can specify this bandwidth so QEMU can
always do the calculation based on this fact, trusting the user as long as
specified. It may not be the exact bandwidth when switching over (in which
case qemu will push migration data as fast as possible), but much better
than QEMU trying to wildly guess, especially when very wrong.
A new parameter "avail-switchover-bandwidth" is introduced just for this.
So when the user specified this parameter, instead of trusting the
estimated value from QEMU itself (based on the QEMUFile send speed), it
trusts the user more by using this value to decide when to switchover,
assuming that we'll have such bandwidth available then.
Note that specifying this value will not throttle the bandwidth for
switchover yet, so QEMU will always use the full bandwidth possible for
sending switchover data, assuming that should always be the most important
way to use the network at that time.
This can resolve issues like "unconvergence migration" which is caused by
hilarious low "migration bandwidth" detected for whatever reason.
Reported-by: Zhiyi Guo <zhguo@redhat.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231010221922.40638-1-peterx@redhat.com>
2023-10-11 01:19:22 +03:00
|
|
|
# @avail-switchover-bandwidth: to set the available bandwidth that
|
|
|
|
# migration can use during switchover phase. NOTE! This does not
|
|
|
|
# limit the bandwidth during switchover, but only for calculations when
|
|
|
|
# making decisions to switchover. By default, this value is zero,
|
|
|
|
# which means QEMU will estimate the bandwidth automatically. This can
|
|
|
|
# be set when the estimated value is not accurate, while the user is
|
|
|
|
# able to guarantee such bandwidth is available when switching over.
|
|
|
|
# When specified correctly, this can make the switchover decision much
|
|
|
|
# more accurate. (Since 8.2)
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @downtime-limit: set maximum tolerated downtime for migration.
|
|
|
|
# maximum downtime in milliseconds (Since 2.8)
|
|
|
|
#
|
|
|
|
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
|
|
|
|
# (Since 2.8)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @block-incremental: Affects how much storage is migrated when the
|
2023-04-28 13:54:29 +03:00
|
|
|
# block migration capability is enabled. When false, the entire
|
|
|
|
# storage backing chain is migrated into a flattened image at the
|
|
|
|
# destination; when true, only the active qcow2 layer is migrated
|
|
|
|
# and the destination must already have access to the same backing
|
|
|
|
# chain as was used on the source. (since 2.10)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2019-02-06 15:54:06 +03:00
|
|
|
# @multifd-channels: Number of channels used to migrate data in
|
2023-04-28 13:54:29 +03:00
|
|
|
# parallel. This is the same number that the number of sockets
|
|
|
|
# used for migration. The default value is 2 (since 4.0)
|
2016-01-15 10:56:17 +03:00
|
|
|
#
|
2017-10-05 22:30:10 +03:00
|
|
|
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
|
2023-04-28 13:54:29 +03:00
|
|
|
# needs to be a multiple of the target page size and a power of 2
|
|
|
|
# (Since 2.11)
|
2018-06-13 13:26:40 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @max-postcopy-bandwidth: Background transfer bandwidth during
|
|
|
|
# postcopy. Defaults to 0 (unlimited). In bytes per second.
|
|
|
|
# (Since 3.0)
|
2018-08-01 16:00:20 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99.
|
|
|
|
# (Since 3.1)
|
2018-08-01 16:00:20 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @multifd-compression: Which compression method to use. Defaults to
|
|
|
|
# none. (Since 5.0)
|
2019-01-16 12:35:55 +03:00
|
|
|
#
|
2020-01-23 19:08:52 +03:00
|
|
|
# @multifd-zlib-level: Set the compression level to be used in live
|
2023-04-28 13:54:29 +03:00
|
|
|
# migration, the compression level is an integer between 0 and 9,
|
|
|
|
# where 0 means no compression, 1 means the best compression
|
|
|
|
# speed, and 9 means best compression ratio which will consume
|
|
|
|
# more CPU. Defaults to 1. (Since 5.0)
|
2020-01-23 19:08:52 +03:00
|
|
|
#
|
2020-01-23 19:41:36 +03:00
|
|
|
# @multifd-zstd-level: Set the compression level to be used in live
|
2023-04-28 13:54:29 +03:00
|
|
|
# migration, the compression level is an integer between 0 and 20,
|
|
|
|
# where 0 means no compression, 1 means the best compression
|
|
|
|
# speed, and 20 means best compression ratio which will consume
|
|
|
|
# more CPU. Defaults to 1. (Since 5.0)
|
2020-01-23 19:41:36 +03:00
|
|
|
#
|
2020-08-20 18:07:23 +03:00
|
|
|
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
|
2023-04-28 13:54:29 +03:00
|
|
|
# aliases for the purpose of dirty bitmap migration. Such aliases
|
|
|
|
# may for example be the corresponding names on the opposite site.
|
|
|
|
# The mapping must be one-to-one, but not necessarily complete: On
|
|
|
|
# the source, unmapped bitmaps and all bitmaps on unmapped nodes
|
|
|
|
# will be ignored. On the destination, encountering an unmapped
|
|
|
|
# alias in the incoming migration stream will result in a report,
|
|
|
|
# and all further bitmap migration data will then be discarded.
|
|
|
|
# Note that the destination does not know about bitmaps it does
|
|
|
|
# not receive, so there is no limitation or requirement regarding
|
|
|
|
# the number of bitmaps received, or how they are named, or on
|
|
|
|
# which nodes they are placed. By default (when this parameter
|
|
|
|
# has never been set), bitmap names are mapped to themselves.
|
|
|
|
# Nodes are mapped to their block device name if there is one, and
|
|
|
|
# to their node name otherwise. (Since 5.2)
|
2020-08-20 18:07:23 +03:00
|
|
|
#
|
2023-07-28 12:38:07 +03:00
|
|
|
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
|
|
|
|
# limit during live migration. Should be in the range 1 to 1000ms.
|
|
|
|
# Defaults to 1000ms. (Since 8.1)
|
2023-06-07 16:32:59 +03:00
|
|
|
#
|
2023-06-07 17:58:32 +03:00
|
|
|
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
|
2023-07-28 12:38:07 +03:00
|
|
|
# Defaults to 1. (Since 8.1)
|
2023-06-07 17:58:32 +03:00
|
|
|
#
|
2021-10-28 13:25:13 +03:00
|
|
|
# Features:
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
2023-06-07 16:32:59 +03:00
|
|
|
# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
|
2023-07-28 12:38:07 +03:00
|
|
|
# are experimental.
|
2021-10-28 13:25:13 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# Since: 2.4
|
|
|
|
##
|
|
|
|
{ 'struct': 'MigrationParameters',
|
2019-02-27 16:24:06 +03:00
|
|
|
'data': { '*announce-initial': 'size',
|
|
|
|
'*announce-max': 'size',
|
|
|
|
'*announce-rounds': 'size',
|
|
|
|
'*announce-step': 'size',
|
|
|
|
'*compress-level': 'uint8',
|
2017-12-01 15:08:38 +03:00
|
|
|
'*compress-threads': 'uint8',
|
2018-08-21 11:10:20 +03:00
|
|
|
'*compress-wait-thread': 'bool',
|
2017-12-01 15:08:38 +03:00
|
|
|
'*decompress-threads': 'uint8',
|
2020-02-24 05:31:42 +03:00
|
|
|
'*throttle-trigger-threshold': 'uint8',
|
2017-12-01 15:08:38 +03:00
|
|
|
'*cpu-throttle-initial': 'uint8',
|
|
|
|
'*cpu-throttle-increment': 'uint8',
|
2020-04-13 13:15:08 +03:00
|
|
|
'*cpu-throttle-tailslow': 'bool',
|
2017-08-24 22:14:01 +03:00
|
|
|
'*tls-creds': 'str',
|
|
|
|
'*tls-hostname': 'str',
|
migration: add support for a "tls-authz" migration parameter
The QEMU instance that runs as the server for the migration data
transport (ie the target QEMU) needs to be able to configure access
control so it can prevent unauthorized clients initiating an incoming
migration. This adds a new 'tls-authz' migration parameter that is used
to provide the QOM ID of a QAuthZ subclass instance that provides the
access control check. This is checked against the x509 certificate
obtained during the TLS handshake.
For example, when starting a QEMU for incoming migration, it is
possible to give an example identity of the source QEMU that is
intended to be connecting later:
$QEMU \
-monitor stdio \
-incoming defer \
...other args...
(qemu) object_add tls-creds-x509,id=tls0,dir=/home/berrange/qemutls,\
endpoint=server,verify-peer=yes \
(qemu) object_add authz-simple,id=auth0,identity=CN=laptop.example.com,,\
O=Example Org,,L=London,,ST=London,,C=GB \
(qemu) migrate_incoming tcp:localhost:9000
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2019-02-27 17:53:24 +03:00
|
|
|
'*tls-authz': 'str',
|
2017-12-01 15:08:38 +03:00
|
|
|
'*max-bandwidth': 'size',
|
migration: Allow user to specify available switchover bandwidth
Migration bandwidth is a very important value to live migration. It's
because it's one of the major factors that we'll make decision on when to
switchover to destination in a precopy process.
This value is currently estimated by QEMU during the whole live migration
process by monitoring how fast we were sending the data. This can be the
most accurate bandwidth if in the ideal world, where we're always feeding
unlimited data to the migration channel, and then it'll be limited to the
bandwidth that is available.
However in reality it may be very different, e.g., over a 10Gbps network we
can see query-migrate showing migration bandwidth of only a few tens of
MB/s just because there are plenty of other things the migration thread
might be doing. For example, the migration thread can be busy scanning
zero pages, or it can be fetching dirty bitmap from other external dirty
sources (like vhost or KVM). It means we may not be pushing data as much
as possible to migration channel, so the bandwidth estimated from "how many
data we sent in the channel" can be dramatically inaccurate sometimes.
With that, the decision to switchover will be affected, by assuming that we
may not be able to switchover at all with such a low bandwidth, but in
reality we can.
The migration may not even converge at all with the downtime specified,
with that wrong estimation of bandwidth, keeping iterations forever with a
low estimation of bandwidth.
The issue is QEMU itself may not be able to avoid those uncertainties on
measuing the real "available migration bandwidth". At least not something
I can think of so far.
One way to fix this is when the user is fully aware of the available
bandwidth, then we can allow the user to help providing an accurate value.
For example, if the user has a dedicated channel of 10Gbps for migration
for this specific VM, the user can specify this bandwidth so QEMU can
always do the calculation based on this fact, trusting the user as long as
specified. It may not be the exact bandwidth when switching over (in which
case qemu will push migration data as fast as possible), but much better
than QEMU trying to wildly guess, especially when very wrong.
A new parameter "avail-switchover-bandwidth" is introduced just for this.
So when the user specified this parameter, instead of trusting the
estimated value from QEMU itself (based on the QEMUFile send speed), it
trusts the user more by using this value to decide when to switchover,
assuming that we'll have such bandwidth available then.
Note that specifying this value will not throttle the bandwidth for
switchover yet, so QEMU will always use the full bandwidth possible for
sending switchover data, assuming that should always be the most important
way to use the network at that time.
This can resolve issues like "unconvergence migration" which is caused by
hilarious low "migration bandwidth" detected for whatever reason.
Reported-by: Zhiyi Guo <zhguo@redhat.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231010221922.40638-1-peterx@redhat.com>
2023-10-11 01:19:22 +03:00
|
|
|
'*avail-switchover-bandwidth': 'size',
|
2017-12-01 15:08:38 +03:00
|
|
|
'*downtime-limit': 'uint64',
|
2021-10-28 13:25:13 +03:00
|
|
|
'*x-checkpoint-delay': { 'type': 'uint32',
|
|
|
|
'features': [ 'unstable' ] },
|
2021-02-02 17:17:31 +03:00
|
|
|
'*block-incremental': 'bool',
|
2019-02-06 15:54:06 +03:00
|
|
|
'*multifd-channels': 'uint8',
|
2018-06-13 13:26:40 +03:00
|
|
|
'*xbzrle-cache-size': 'size',
|
2020-02-13 20:56:27 +03:00
|
|
|
'*max-postcopy-bandwidth': 'size',
|
2019-01-16 12:35:55 +03:00
|
|
|
'*max-cpu-throttle': 'uint8',
|
2020-01-23 19:08:52 +03:00
|
|
|
'*multifd-compression': 'MultiFDCompression',
|
2020-01-23 19:41:36 +03:00
|
|
|
'*multifd-zlib-level': 'uint8',
|
2020-08-20 18:07:23 +03:00
|
|
|
'*multifd-zstd-level': 'uint8',
|
2023-06-07 16:32:59 +03:00
|
|
|
'*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
|
|
|
|
'*x-vcpu-dirty-limit-period': { 'type': 'uint64',
|
2023-06-07 17:58:32 +03:00
|
|
|
'features': [ 'unstable' ] },
|
|
|
|
'*vcpu-dirty-limit': 'uint64'} }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @query-migrate-parameters:
|
|
|
|
#
|
|
|
|
# Returns information about the current migration parameters
|
|
|
|
#
|
|
|
|
# Returns: @MigrationParameters
|
|
|
|
#
|
|
|
|
# Since: 2.4
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-migrate-parameters" }
|
|
|
|
# <- { "return": {
|
|
|
|
# "decompress-threads": 2,
|
|
|
|
# "cpu-throttle-increment": 10,
|
|
|
|
# "compress-threads": 8,
|
|
|
|
# "compress-level": 1,
|
|
|
|
# "cpu-throttle-initial": 20,
|
|
|
|
# "max-bandwidth": 33554432,
|
|
|
|
# "downtime-limit": 300
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
##
|
|
|
|
{ 'command': 'query-migrate-parameters',
|
|
|
|
'returns': 'MigrationParameters' }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @migrate-start-postcopy:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Followup to a migration command to switch the migration to postcopy
|
|
|
|
# mode. The postcopy-ram capability must be set on both source and
|
|
|
|
# destination before the original migration command.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Since: 2.5
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate-start-postcopy" }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
|
|
|
{ 'command': 'migrate-start-postcopy' }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @MIGRATION:
|
|
|
|
#
|
|
|
|
# Emitted when a migration event happens
|
|
|
|
#
|
|
|
|
# @status: @MigrationStatus describing the current migration status.
|
|
|
|
#
|
|
|
|
# Since: 2.4
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
|
|
|
|
# "event": "MIGRATION",
|
|
|
|
# "data": {"status": "completed"} }
|
|
|
|
##
|
|
|
|
{ 'event': 'MIGRATION',
|
|
|
|
'data': {'status': 'MigrationStatus'}}
|
|
|
|
|
|
|
|
##
|
|
|
|
# @MIGRATION_PASS:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Emitted from the source side of a migration at the start of each
|
|
|
|
# pass (when it syncs the dirty bitmap)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @pass: An incrementing count (starting at 1 on the first pass)
|
|
|
|
#
|
|
|
|
# Since: 2.6
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
2023-04-25 09:42:14 +03:00
|
|
|
# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
|
|
|
|
# "event": "MIGRATION_PASS", "data": {"pass": 2} }
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
{ 'event': 'MIGRATION_PASS',
|
|
|
|
'data': { 'pass': 'int' } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @COLOMessage:
|
|
|
|
#
|
|
|
|
# The message transmission between Primary side and Secondary side.
|
|
|
|
#
|
|
|
|
# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
|
|
|
|
# checkpointing
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @checkpoint-reply: SVM gets PVM's checkpoint request
|
|
|
|
#
|
|
|
|
# @vmstate-send: VM's state will be sent by PVM.
|
|
|
|
#
|
|
|
|
# @vmstate-size: The total size of VMstate.
|
|
|
|
#
|
|
|
|
# @vmstate-received: VM's state has been received by SVM.
|
|
|
|
#
|
|
|
|
# @vmstate-loaded: VM's state has been loaded by SVM.
|
|
|
|
#
|
|
|
|
# Since: 2.8
|
|
|
|
##
|
|
|
|
{ 'enum': 'COLOMessage',
|
|
|
|
'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
|
|
|
|
'vmstate-send', 'vmstate-size', 'vmstate-received',
|
|
|
|
'vmstate-loaded' ] }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @COLOMode:
|
|
|
|
#
|
2018-09-03 07:38:52 +03:00
|
|
|
# The COLO current mode.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2018-09-03 07:38:52 +03:00
|
|
|
# @none: COLO is disabled.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2018-09-03 07:38:52 +03:00
|
|
|
# @primary: COLO node in primary side.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2018-09-03 07:38:52 +03:00
|
|
|
# @secondary: COLO node in slave side.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Since: 2.8
|
|
|
|
##
|
|
|
|
{ 'enum': 'COLOMode',
|
2018-09-03 07:38:52 +03:00
|
|
|
'data': [ 'none', 'primary', 'secondary'] }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @FailoverStatus:
|
|
|
|
#
|
|
|
|
# An enumeration of COLO failover status
|
|
|
|
#
|
|
|
|
# @none: no failover has ever happened
|
|
|
|
#
|
|
|
|
# @require: got failover requirement but not handled
|
|
|
|
#
|
|
|
|
# @active: in the process of doing failover
|
|
|
|
#
|
|
|
|
# @completed: finish the process of failover
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @relaunch: restart the failover process, from 'none' -> 'completed'
|
|
|
|
# (Since 2.9)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Since: 2.8
|
|
|
|
##
|
|
|
|
{ 'enum': 'FailoverStatus',
|
|
|
|
'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
|
|
|
|
|
2018-09-03 07:38:51 +03:00
|
|
|
##
|
|
|
|
# @COLO_EXIT:
|
|
|
|
#
|
|
|
|
# Emitted when VM finishes COLO mode due to some errors happening or
|
|
|
|
# at the request of users.
|
|
|
|
#
|
|
|
|
# @mode: report COLO mode when COLO exited.
|
|
|
|
#
|
|
|
|
# @reason: describes the reason for the COLO exit.
|
|
|
|
#
|
|
|
|
# Since: 3.1
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
|
|
|
|
# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
|
|
|
|
##
|
|
|
|
{ 'event': 'COLO_EXIT',
|
|
|
|
'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @COLOExitReason:
|
|
|
|
#
|
2019-03-22 13:13:31 +03:00
|
|
|
# The reason for a COLO exit.
|
2018-09-03 07:38:51 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @none: failover has never happened. This state does not occur in
|
|
|
|
# the COLO_EXIT event, and is only visible in the result of
|
|
|
|
# query-colo-status.
|
2018-09-03 07:38:51 +03:00
|
|
|
#
|
2019-03-22 13:13:31 +03:00
|
|
|
# @request: COLO exit is due to an external request.
|
2018-09-03 07:38:51 +03:00
|
|
|
#
|
2019-03-22 13:13:31 +03:00
|
|
|
# @error: COLO exit is due to an internal error.
|
|
|
|
#
|
|
|
|
# @processing: COLO is currently handling a failover (since 4.0).
|
2018-09-03 07:38:51 +03:00
|
|
|
#
|
|
|
|
# Since: 3.1
|
|
|
|
##
|
|
|
|
{ 'enum': 'COLOExitReason',
|
2019-03-22 13:13:31 +03:00
|
|
|
'data': [ 'none', 'request', 'error' , 'processing' ] }
|
2018-09-03 07:38:51 +03:00
|
|
|
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
# @x-colo-lost-heartbeat:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Tell qemu that heartbeat is lost, request it to do takeover
|
|
|
|
# procedures. If this command is sent to the PVM, the Primary side
|
|
|
|
# will exit COLO mode. If sent to the Secondary, the Secondary side
|
|
|
|
# will run failover work, then takes over server operation to become
|
|
|
|
# the service VM.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2021-10-28 13:25:13 +03:00
|
|
|
# Features:
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
2021-10-28 13:25:13 +03:00
|
|
|
# @unstable: This command is experimental.
|
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# Since: 2.8
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "x-colo-lost-heartbeat" }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
2021-10-28 13:25:13 +03:00
|
|
|
{ 'command': 'x-colo-lost-heartbeat',
|
2023-04-28 22:49:21 +03:00
|
|
|
'features': [ 'unstable' ],
|
|
|
|
'if': 'CONFIG_REPLICATION' }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @migrate_cancel:
|
|
|
|
#
|
|
|
|
# Cancel the current executing migration process.
|
|
|
|
#
|
|
|
|
# Returns: nothing on success
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Notes: This command succeeds even if there is no migration process
|
|
|
|
# running.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2020-11-18 09:41:58 +03:00
|
|
|
# Since: 0.14
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate_cancel" }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
|
|
|
{ 'command': 'migrate_cancel' }
|
|
|
|
|
2017-10-20 12:05:53 +03:00
|
|
|
##
|
|
|
|
# @migrate-continue:
|
|
|
|
#
|
|
|
|
# Continue migration when it's in a paused state.
|
|
|
|
#
|
|
|
|
# @state: The state the migration is currently expected to be in
|
|
|
|
#
|
|
|
|
# Returns: nothing on success
|
2022-05-03 10:37:32 +03:00
|
|
|
#
|
2017-10-20 12:05:53 +03:00
|
|
|
# Since: 2.11
|
2022-05-03 10:37:32 +03:00
|
|
|
#
|
2017-10-20 12:05:53 +03:00
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate-continue" , "arguments":
|
|
|
|
# { "state": "pre-switchover" } }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
|
|
|
{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
|
|
|
|
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
# @migrate:
|
|
|
|
#
|
|
|
|
# Migrates the current running guest to another Virtual Machine.
|
|
|
|
#
|
|
|
|
# @uri: the Uniform Resource Identifier of the destination VM
|
|
|
|
#
|
|
|
|
# @blk: do block migration (full disk copy)
|
|
|
|
#
|
|
|
|
# @inc: incremental disk copy migration
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @detach: this argument exists only for compatibility reasons and is
|
|
|
|
# ignored by QEMU
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2018-05-22 13:39:56 +03:00
|
|
|
# @resume: resume one paused migration, default "off". (since 3.0)
|
2018-05-02 13:47:23 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# Returns: nothing on success
|
|
|
|
#
|
2020-11-18 09:41:58 +03:00
|
|
|
# Since: 0.14
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Notes:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# 1. The 'query-migrate' command should be used to check migration's
|
|
|
|
# progress and final result (this information is provided by the
|
|
|
|
# 'status' member)
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# 2. All boolean arguments default to false
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# 3. The user Monitor's "detach" argument is invalid in QMP and should
|
|
|
|
# not be used
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
|
|
|
{ 'command': 'migrate',
|
2018-05-02 13:47:23 +03:00
|
|
|
'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
|
|
|
|
'*detach': 'bool', '*resume': 'bool' } }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @migrate-incoming:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Start an incoming migration, the qemu must have been started with
|
|
|
|
# -incoming defer
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @uri: The Uniform Resource Identifier identifying the source or
|
2023-04-28 13:54:29 +03:00
|
|
|
# address to listen on
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Returns: nothing on success
|
|
|
|
#
|
|
|
|
# Since: 2.3
|
|
|
|
#
|
|
|
|
# Notes:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# 1. It's a bad idea to use a string for the uri, but it needs
|
|
|
|
# to stay compatible with -incoming and the format of the uri
|
|
|
|
# is already exposed above libvirt.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# 2. QEMU must be started with -incoming defer to allow
|
|
|
|
# migrate-incoming to be used.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# 3. The uri format is the same as for -incoming
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate-incoming",
|
|
|
|
# "arguments": { "uri": "tcp::4446" } }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
|
|
|
{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @xen-save-devices-state:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Save the state of all devices to file. The RAM and the block
|
|
|
|
# devices of the VM are not saved by this command.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# @filename: the file to save the state of the devices to as binary
|
2023-04-28 13:54:29 +03:00
|
|
|
# data. See xen-save-devices-state.txt for a description of the
|
|
|
|
# binary format.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @live: Optional argument to ask QEMU to treat this command as part
|
|
|
|
# of a live migration. Default to true. (since 2.11)
|
2017-11-16 18:14:19 +03:00
|
|
|
#
|
2017-08-24 22:14:01 +03:00
|
|
|
# Returns: Nothing on success
|
|
|
|
#
|
|
|
|
# Since: 1.1
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "xen-save-devices-state",
|
|
|
|
# "arguments": { "filename": "/tmp/save" } }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
2017-11-16 18:14:19 +03:00
|
|
|
{ 'command': 'xen-save-devices-state',
|
|
|
|
'data': {'filename': 'str', '*live':'bool' } }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
2020-10-12 15:15:36 +03:00
|
|
|
##
|
|
|
|
# @xen-set-global-dirty-log:
|
|
|
|
#
|
|
|
|
# Enable or disable the global dirty log mode.
|
|
|
|
#
|
|
|
|
# @enable: true to enable, false to disable.
|
|
|
|
#
|
|
|
|
# Returns: nothing
|
|
|
|
#
|
|
|
|
# Since: 1.3
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "xen-set-global-dirty-log",
|
|
|
|
# "arguments": { "enable": true } }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
|
|
|
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @xen-load-devices-state:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Load the state of all devices from file. The RAM and the block
|
|
|
|
# devices of the VM are not loaded by this command.
|
2020-10-12 15:15:36 +03:00
|
|
|
#
|
|
|
|
# @filename: the file to load the state of the devices from as binary
|
2023-04-28 13:54:29 +03:00
|
|
|
# data. See xen-save-devices-state.txt for a description of the
|
|
|
|
# binary format.
|
2020-10-12 15:15:36 +03:00
|
|
|
#
|
|
|
|
# Since: 2.7
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "xen-load-devices-state",
|
|
|
|
# "arguments": { "filename": "/tmp/resume" } }
|
|
|
|
# <- { "return": {} }
|
|
|
|
##
|
|
|
|
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
|
|
|
|
|
2017-08-24 22:14:01 +03:00
|
|
|
##
|
|
|
|
# @xen-set-replication:
|
|
|
|
#
|
|
|
|
# Enable or disable replication.
|
|
|
|
#
|
|
|
|
# @enable: true to enable, false to disable.
|
|
|
|
#
|
|
|
|
# @primary: true for primary or false for secondary.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @failover: true to do failover, false to stop. but cannot be
|
|
|
|
# specified if 'enable' is true. default value is false.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Returns: nothing.
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "xen-set-replication",
|
|
|
|
# "arguments": {"enable": true, "primary": false} }
|
|
|
|
# <- { "return": {} }
|
|
|
|
#
|
|
|
|
# Since: 2.9
|
|
|
|
##
|
|
|
|
{ 'command': 'xen-set-replication',
|
2023-06-12 22:16:04 +03:00
|
|
|
'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
|
2021-08-04 11:31:05 +03:00
|
|
|
'if': 'CONFIG_REPLICATION' }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @ReplicationStatus:
|
|
|
|
#
|
|
|
|
# The result format for 'query-xen-replication-status'.
|
|
|
|
#
|
|
|
|
# @error: true if an error happened, false if replication is normal.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @desc: the human readable error description string, when @error is
|
|
|
|
# 'true'.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Since: 2.9
|
|
|
|
##
|
|
|
|
{ 'struct': 'ReplicationStatus',
|
2018-12-13 15:37:24 +03:00
|
|
|
'data': { 'error': 'bool', '*desc': 'str' },
|
2021-08-04 11:31:05 +03:00
|
|
|
'if': 'CONFIG_REPLICATION' }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @query-xen-replication-status:
|
|
|
|
#
|
|
|
|
# Query replication status while the vm is running.
|
|
|
|
#
|
2022-04-20 18:34:07 +03:00
|
|
|
# Returns: A @ReplicationStatus object showing the status.
|
2017-08-24 22:14:01 +03:00
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-xen-replication-status" }
|
|
|
|
# <- { "return": { "error": false } }
|
|
|
|
#
|
|
|
|
# Since: 2.9
|
|
|
|
##
|
|
|
|
{ 'command': 'query-xen-replication-status',
|
2018-12-13 15:37:24 +03:00
|
|
|
'returns': 'ReplicationStatus',
|
2021-08-04 11:31:05 +03:00
|
|
|
'if': 'CONFIG_REPLICATION' }
|
2017-08-24 22:14:01 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @xen-colo-do-checkpoint:
|
|
|
|
#
|
|
|
|
# Xen uses this command to notify replication to trigger a checkpoint.
|
|
|
|
#
|
|
|
|
# Returns: nothing.
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "xen-colo-do-checkpoint" }
|
|
|
|
# <- { "return": {} }
|
|
|
|
#
|
|
|
|
# Since: 2.9
|
|
|
|
##
|
2018-12-13 15:37:24 +03:00
|
|
|
{ 'command': 'xen-colo-do-checkpoint',
|
2021-08-04 11:31:05 +03:00
|
|
|
'if': 'CONFIG_REPLICATION' }
|
2018-05-02 13:47:36 +03:00
|
|
|
|
2018-09-03 07:38:53 +03:00
|
|
|
##
|
|
|
|
# @COLOStatus:
|
|
|
|
#
|
|
|
|
# The result format for 'query-colo-status'.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @mode: COLO running mode. If COLO is running, this field will
|
|
|
|
# return 'primary' or 'secondary'.
|
2018-09-03 07:38:53 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @last-mode: COLO last running mode. If COLO is running, this field
|
|
|
|
# will return same like mode field, after failover we can use this
|
|
|
|
# field to get last colo mode. (since 4.0)
|
2019-03-22 13:13:33 +03:00
|
|
|
#
|
2018-09-03 07:38:53 +03:00
|
|
|
# @reason: describes the reason for the COLO exit.
|
|
|
|
#
|
2018-10-22 19:41:18 +03:00
|
|
|
# Since: 3.1
|
2018-09-03 07:38:53 +03:00
|
|
|
##
|
|
|
|
{ 'struct': 'COLOStatus',
|
2019-04-02 11:55:21 +03:00
|
|
|
'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
|
2023-04-28 22:49:21 +03:00
|
|
|
'reason': 'COLOExitReason' },
|
|
|
|
'if': 'CONFIG_REPLICATION' }
|
2018-09-03 07:38:53 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @query-colo-status:
|
|
|
|
#
|
|
|
|
# Query COLO status while the vm is running.
|
|
|
|
#
|
|
|
|
# Returns: A @COLOStatus object showing the status.
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "query-colo-status" }
|
2022-03-31 22:06:30 +03:00
|
|
|
# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
|
2018-09-03 07:38:53 +03:00
|
|
|
#
|
2018-10-22 19:41:18 +03:00
|
|
|
# Since: 3.1
|
2018-09-03 07:38:53 +03:00
|
|
|
##
|
|
|
|
{ 'command': 'query-colo-status',
|
2023-04-28 22:49:21 +03:00
|
|
|
'returns': 'COLOStatus',
|
|
|
|
'if': 'CONFIG_REPLICATION' }
|
2018-09-03 07:38:53 +03:00
|
|
|
|
2018-05-02 13:47:36 +03:00
|
|
|
##
|
|
|
|
# @migrate-recover:
|
|
|
|
#
|
|
|
|
# Provide a recovery migration stream URI.
|
|
|
|
#
|
|
|
|
# @uri: the URI to be used for the recovery of migration stream.
|
|
|
|
#
|
|
|
|
# Returns: nothing.
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate-recover",
|
|
|
|
# "arguments": { "uri": "tcp:192.168.1.200:12345" } }
|
|
|
|
# <- { "return": {} }
|
|
|
|
#
|
2018-05-22 13:39:56 +03:00
|
|
|
# Since: 3.0
|
2018-05-02 13:47:36 +03:00
|
|
|
##
|
2018-12-08 14:16:04 +03:00
|
|
|
{ 'command': 'migrate-recover',
|
|
|
|
'data': { 'uri': 'str' },
|
2018-05-02 13:47:36 +03:00
|
|
|
'allow-oob': true }
|
2018-05-02 13:47:39 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @migrate-pause:
|
|
|
|
#
|
|
|
|
# Pause a migration. Currently it only supports postcopy.
|
|
|
|
#
|
|
|
|
# Returns: nothing.
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "migrate-pause" }
|
|
|
|
# <- { "return": {} }
|
|
|
|
#
|
2018-05-22 13:39:56 +03:00
|
|
|
# Since: 3.0
|
2018-05-02 13:47:39 +03:00
|
|
|
##
|
|
|
|
{ 'command': 'migrate-pause', 'allow-oob': true }
|
2019-10-29 14:48:59 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @UNPLUG_PRIMARY:
|
|
|
|
#
|
|
|
|
# Emitted from source side of a migration when migration state is
|
2023-04-28 13:54:29 +03:00
|
|
|
# WAIT_UNPLUG. Device was unplugged by guest operating system. Device
|
|
|
|
# resources in QEMU are kept on standby to be able to re-plug it in
|
|
|
|
# case of migration failure.
|
2019-10-29 14:48:59 +03:00
|
|
|
#
|
|
|
|
# @device-id: QEMU device id of the unplugged device
|
|
|
|
#
|
|
|
|
# Since: 4.2
|
|
|
|
#
|
|
|
|
# Example:
|
2022-05-03 10:37:32 +03:00
|
|
|
#
|
2022-03-28 17:05:57 +03:00
|
|
|
# <- { "event": "UNPLUG_PRIMARY",
|
|
|
|
# "data": { "device-id": "hostdev0" },
|
|
|
|
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
|
2019-10-29 14:48:59 +03:00
|
|
|
##
|
|
|
|
{ 'event': 'UNPLUG_PRIMARY',
|
|
|
|
'data': { 'device-id': 'str' } }
|
2020-09-16 09:21:57 +03:00
|
|
|
|
2021-06-29 19:01:20 +03:00
|
|
|
##
|
|
|
|
# @DirtyRateVcpu:
|
|
|
|
#
|
|
|
|
# Dirty rate of vcpu.
|
|
|
|
#
|
|
|
|
# @id: vcpu index.
|
|
|
|
#
|
|
|
|
# @dirty-rate: dirty rate.
|
|
|
|
#
|
2021-11-05 16:01:16 +03:00
|
|
|
# Since: 6.2
|
2021-06-29 19:01:20 +03:00
|
|
|
##
|
|
|
|
{ 'struct': 'DirtyRateVcpu',
|
|
|
|
'data': { 'id': 'int', 'dirty-rate': 'int64' } }
|
|
|
|
|
2020-09-16 09:21:57 +03:00
|
|
|
##
|
|
|
|
# @DirtyRateStatus:
|
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# Dirty page rate measurement status.
|
2020-09-16 09:21:57 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @unstarted: measuring thread has not been started yet
|
2020-09-16 09:21:57 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @measuring: measuring thread is running
|
2020-09-16 09:21:57 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @measured: dirty page rate is measured and the results are available
|
2020-09-16 09:21:57 +03:00
|
|
|
#
|
|
|
|
# Since: 5.2
|
|
|
|
##
|
|
|
|
{ 'enum': 'DirtyRateStatus',
|
|
|
|
'data': [ 'unstarted', 'measuring', 'measured'] }
|
2020-09-16 09:22:06 +03:00
|
|
|
|
2021-06-29 19:01:20 +03:00
|
|
|
##
|
|
|
|
# @DirtyRateMeasureMode:
|
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# Method used to measure dirty page rate. Differences between
|
|
|
|
# available methods are explained in @calc-dirty-rate.
|
2021-06-29 19:01:20 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @page-sampling: use page sampling
|
2021-06-29 19:01:20 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @dirty-ring: use dirty ring
|
2021-07-20 18:19:17 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @dirty-bitmap: use dirty bitmap
|
2021-06-29 19:01:20 +03:00
|
|
|
#
|
2021-11-05 16:01:16 +03:00
|
|
|
# Since: 6.2
|
2021-06-29 19:01:20 +03:00
|
|
|
##
|
|
|
|
{ 'enum': 'DirtyRateMeasureMode',
|
2021-07-20 18:19:17 +03:00
|
|
|
'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
|
2021-06-29 19:01:20 +03:00
|
|
|
|
2023-09-05 10:05:43 +03:00
|
|
|
##
|
|
|
|
# @TimeUnit:
|
|
|
|
#
|
|
|
|
# Specifies unit in which time-related value is specified.
|
|
|
|
#
|
|
|
|
# @second: value is in seconds
|
|
|
|
#
|
|
|
|
# @millisecond: value is in milliseconds
|
|
|
|
#
|
|
|
|
# Since 8.2
|
|
|
|
#
|
|
|
|
##
|
|
|
|
{ 'enum': 'TimeUnit',
|
|
|
|
'data': ['second', 'millisecond'] }
|
|
|
|
|
2020-09-16 09:22:06 +03:00
|
|
|
##
|
|
|
|
# @DirtyRateInfo:
|
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# Information about measured dirty page rate.
|
2020-09-16 09:22:06 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# @dirty-rate: an estimate of the dirty page rate of the VM in units
|
2023-05-23 18:19:56 +03:00
|
|
|
# of MiB/s. Value is present only when @status is 'measured'.
|
2020-09-16 09:22:06 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @status: current status of dirty page rate measurements
|
2020-09-16 09:22:06 +03:00
|
|
|
#
|
|
|
|
# @start-time: start time in units of second for calculation
|
|
|
|
#
|
2023-09-05 10:05:43 +03:00
|
|
|
# @calc-time: time period for which dirty page rate was measured,
|
|
|
|
# expressed and rounded down to @calc-time-unit.
|
|
|
|
#
|
|
|
|
# @calc-time-unit: time unit of @calc-time (Since 8.2)
|
2020-09-16 09:22:06 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @sample-pages: number of sampled pages per GiB of guest memory.
|
|
|
|
# Valid only in page-sampling mode (Since 6.1)
|
2021-06-07 04:11:34 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @mode: mode that was used to measure dirty page rate (Since 6.2)
|
2021-06-29 19:01:23 +03:00
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
|
2023-04-28 13:54:29 +03:00
|
|
|
# specified (Since 6.2)
|
2021-06-29 19:01:23 +03:00
|
|
|
#
|
2020-09-16 09:22:06 +03:00
|
|
|
# Since: 5.2
|
|
|
|
##
|
|
|
|
{ 'struct': 'DirtyRateInfo',
|
migration/dirtyrate: present dirty rate only when querying the rate has completed
Make dirty_rate field optional, present dirty rate only when querying
the rate has completed.
The qmp results is shown as follow:
@unstarted:
{"return":{"status":"unstarted","start-time":0,"calc-time":0},"id":"libvirt-12"}
@measuring:
{"return":{"status":"measuring","start-time":102931,"calc-time":1},"id":"libvirt-85"}
@measured:
{"return":{"status":"measured","dirty-rate":4,"start-time":150146,"calc-time":1},"id":"libvirt-15"}
Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
Reviewed-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <1601350938-128320-3-git-send-email-zhengchuan@huawei.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
2020-09-29 06:42:18 +03:00
|
|
|
'data': {'*dirty-rate': 'int64',
|
2020-09-16 09:22:06 +03:00
|
|
|
'status': 'DirtyRateStatus',
|
|
|
|
'start-time': 'int64',
|
2021-06-07 04:11:34 +03:00
|
|
|
'calc-time': 'int64',
|
2023-09-05 10:05:43 +03:00
|
|
|
'calc-time-unit': 'TimeUnit',
|
2021-06-29 19:01:23 +03:00
|
|
|
'sample-pages': 'uint64',
|
|
|
|
'mode': 'DirtyRateMeasureMode',
|
|
|
|
'*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
|
2020-09-16 09:22:06 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @calc-dirty-rate:
|
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# Start measuring dirty page rate of the VM. Results can be retrieved
|
|
|
|
# with @query-dirty-rate after measurements are completed.
|
|
|
|
#
|
|
|
|
# Dirty page rate is the number of pages changed in a given time
|
|
|
|
# period expressed in MiB/s. The following methods of calculation are
|
|
|
|
# available:
|
|
|
|
#
|
|
|
|
# 1. In page sampling mode, a random subset of pages are selected and
|
|
|
|
# hashed twice: once at the beginning of measurement time period,
|
|
|
|
# and once again at the end. If two hashes for some page are
|
|
|
|
# different, the page is counted as changed. Since this method
|
|
|
|
# relies on sampling and hashing, calculated dirty page rate is
|
|
|
|
# only an estimate of its true value. Increasing @sample-pages
|
|
|
|
# improves estimation quality at the cost of higher computational
|
|
|
|
# overhead.
|
|
|
|
#
|
|
|
|
# 2. Dirty bitmap mode captures writes to memory (for example by
|
|
|
|
# temporarily revoking write access to all pages) and counting page
|
|
|
|
# faults. Information about modified pages is collected into a
|
|
|
|
# bitmap, where each bit corresponds to one guest page. This mode
|
|
|
|
# requires that KVM accelerator property "dirty-ring-size" is *not*
|
|
|
|
# set.
|
|
|
|
#
|
|
|
|
# 3. Dirty ring mode is similar to dirty bitmap mode, but the
|
|
|
|
# information about modified pages is collected into ring buffer.
|
|
|
|
# This mode tracks page modification per each vCPU separately. It
|
|
|
|
# requires that KVM accelerator property "dirty-ring-size" is set.
|
|
|
|
#
|
2023-09-05 10:05:43 +03:00
|
|
|
# @calc-time: time period for which dirty page rate is calculated.
|
|
|
|
# By default it is specified in seconds, but the unit can be set
|
|
|
|
# explicitly with @calc-time-unit. Note that larger @calc-time
|
|
|
|
# values will typically result in smaller dirty page rates because
|
|
|
|
# page dirtying is a one-time event. Once some page is counted
|
|
|
|
# as dirty during @calc-time period, further writes to this page
|
|
|
|
# will not increase dirty page rate anymore.
|
|
|
|
#
|
|
|
|
# @calc-time-unit: time unit in which @calc-time is specified.
|
|
|
|
# By default it is seconds. (Since 8.2)
|
2023-05-23 18:19:56 +03:00
|
|
|
#
|
|
|
|
# @sample-pages: number of sampled pages per each GiB of guest memory.
|
|
|
|
# Default value is 512. For 4KiB guest pages this corresponds to
|
|
|
|
# sampling ratio of 0.2%. This argument is used only in page
|
|
|
|
# sampling mode. (Since 6.1)
|
|
|
|
#
|
|
|
|
# @mode: mechanism for tracking dirty pages. Default value is
|
|
|
|
# 'page-sampling'. Others are 'dirty-bitmap' and 'dirty-ring'.
|
|
|
|
# (Since 6.1)
|
2021-06-29 19:01:23 +03:00
|
|
|
#
|
2020-09-16 09:22:06 +03:00
|
|
|
# Since: 5.2
|
|
|
|
#
|
|
|
|
# Example:
|
2022-05-03 10:37:32 +03:00
|
|
|
#
|
2023-04-25 09:42:14 +03:00
|
|
|
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
|
|
|
|
# 'sample-pages': 512} }
|
|
|
|
# <- { "return": {} }
|
2023-09-05 10:05:43 +03:00
|
|
|
#
|
|
|
|
# Measure dirty rate using dirty bitmap for 500 milliseconds:
|
|
|
|
#
|
|
|
|
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
|
|
|
|
# "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
|
|
|
|
#
|
|
|
|
# <- { "return": {} }
|
2020-09-16 09:22:06 +03:00
|
|
|
##
|
2021-06-07 04:11:34 +03:00
|
|
|
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
|
2023-09-05 10:05:43 +03:00
|
|
|
'*calc-time-unit': 'TimeUnit',
|
2021-06-29 19:01:23 +03:00
|
|
|
'*sample-pages': 'int',
|
|
|
|
'*mode': 'DirtyRateMeasureMode'} }
|
2020-09-16 09:22:06 +03:00
|
|
|
|
|
|
|
##
|
|
|
|
# @query-dirty-rate:
|
|
|
|
#
|
2023-05-23 18:19:56 +03:00
|
|
|
# Query results of the most recent invocation of @calc-dirty-rate.
|
2020-09-16 09:22:06 +03:00
|
|
|
#
|
2023-09-05 10:05:43 +03:00
|
|
|
# @calc-time-unit: time unit in which to report calculation time.
|
|
|
|
# By default it is reported in seconds. (Since 8.2)
|
|
|
|
#
|
2020-09-16 09:22:06 +03:00
|
|
|
# Since: 5.2
|
2023-05-23 18:19:56 +03:00
|
|
|
#
|
|
|
|
# Examples:
|
|
|
|
#
|
|
|
|
# 1. Measurement is in progress:
|
|
|
|
#
|
|
|
|
# <- {"status": "measuring", "sample-pages": 512,
|
2023-09-05 12:18:39 +03:00
|
|
|
# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
|
2023-09-05 10:05:43 +03:00
|
|
|
# "calc-time-unit": "second"}
|
2023-05-23 18:19:56 +03:00
|
|
|
#
|
|
|
|
# 2. Measurement has been completed:
|
|
|
|
#
|
|
|
|
# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
|
2023-09-05 12:18:39 +03:00
|
|
|
# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
|
2023-09-05 10:05:43 +03:00
|
|
|
# "calc-time-unit": "second"}
|
2020-09-16 09:22:06 +03:00
|
|
|
##
|
2023-09-05 10:05:43 +03:00
|
|
|
{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
|
|
|
|
'returns': 'DirtyRateInfo' }
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
|
2022-06-25 20:38:36 +03:00
|
|
|
##
|
|
|
|
# @DirtyLimitInfo:
|
|
|
|
#
|
|
|
|
# Dirty page rate limit information of a virtual CPU.
|
|
|
|
#
|
|
|
|
# @cpu-index: index of a virtual CPU.
|
|
|
|
#
|
|
|
|
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
|
2023-04-28 13:54:29 +03:00
|
|
|
# CPU, 0 means unlimited.
|
2022-06-25 20:38:36 +03:00
|
|
|
#
|
|
|
|
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
|
|
|
|
#
|
|
|
|
# Since: 7.1
|
|
|
|
##
|
|
|
|
{ 'struct': 'DirtyLimitInfo',
|
|
|
|
'data': { 'cpu-index': 'int',
|
|
|
|
'limit-rate': 'uint64',
|
|
|
|
'current-rate': 'uint64' } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @set-vcpu-dirty-limit:
|
|
|
|
#
|
|
|
|
# Set the upper limit of dirty page rate for virtual CPUs.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Requires KVM with accelerator property "dirty-ring-size" set. A
|
|
|
|
# virtual CPU's dirty page rate is a measure of its memory load. To
|
|
|
|
# observe dirty page rates, use @calc-dirty-rate.
|
2022-06-25 20:38:36 +03:00
|
|
|
#
|
|
|
|
# @cpu-index: index of a virtual CPU, default is all.
|
|
|
|
#
|
|
|
|
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
|
|
|
|
#
|
|
|
|
# Since: 7.1
|
|
|
|
#
|
|
|
|
# Example:
|
2023-04-25 09:42:14 +03:00
|
|
|
#
|
|
|
|
# -> {"execute": "set-vcpu-dirty-limit"}
|
|
|
|
# "arguments": { "dirty-rate": 200,
|
|
|
|
# "cpu-index": 1 } }
|
|
|
|
# <- { "return": {} }
|
2022-06-25 20:38:36 +03:00
|
|
|
##
|
|
|
|
{ 'command': 'set-vcpu-dirty-limit',
|
|
|
|
'data': { '*cpu-index': 'int',
|
|
|
|
'dirty-rate': 'uint64' } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @cancel-vcpu-dirty-limit:
|
|
|
|
#
|
|
|
|
# Cancel the upper limit of dirty page rate for virtual CPUs.
|
|
|
|
#
|
|
|
|
# Cancel the dirty page limit for the vCPU which has been set with
|
2023-04-28 13:54:29 +03:00
|
|
|
# set-vcpu-dirty-limit command. Note that this command requires
|
2022-06-25 20:38:36 +03:00
|
|
|
# support from dirty ring, same as the "set-vcpu-dirty-limit".
|
|
|
|
#
|
|
|
|
# @cpu-index: index of a virtual CPU, default is all.
|
|
|
|
#
|
|
|
|
# Since: 7.1
|
|
|
|
#
|
|
|
|
# Example:
|
2023-04-25 09:42:14 +03:00
|
|
|
#
|
|
|
|
# -> {"execute": "cancel-vcpu-dirty-limit"},
|
|
|
|
# "arguments": { "cpu-index": 1 } }
|
|
|
|
# <- { "return": {} }
|
2022-06-25 20:38:36 +03:00
|
|
|
##
|
|
|
|
{ 'command': 'cancel-vcpu-dirty-limit',
|
|
|
|
'data': { '*cpu-index': 'int'} }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @query-vcpu-dirty-limit:
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Returns information about virtual CPU dirty page rate limits, if
|
|
|
|
# any.
|
2022-06-25 20:38:36 +03:00
|
|
|
#
|
|
|
|
# Since: 7.1
|
|
|
|
#
|
|
|
|
# Example:
|
2023-04-25 09:42:14 +03:00
|
|
|
#
|
|
|
|
# -> {"execute": "query-vcpu-dirty-limit"}
|
|
|
|
# <- {"return": [
|
|
|
|
# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
|
|
|
|
# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
|
2022-06-25 20:38:36 +03:00
|
|
|
##
|
|
|
|
{ 'command': 'query-vcpu-dirty-limit',
|
|
|
|
'returns': [ 'DirtyLimitInfo' ] }
|
|
|
|
|
2023-02-03 10:35:18 +03:00
|
|
|
##
|
|
|
|
# @MigrationThreadInfo:
|
|
|
|
#
|
|
|
|
# Information about migrationthreads
|
|
|
|
#
|
|
|
|
# @name: the name of migration thread
|
|
|
|
#
|
|
|
|
# @thread-id: ID of the underlying host thread
|
|
|
|
#
|
|
|
|
# Since: 7.2
|
|
|
|
##
|
|
|
|
{ 'struct': 'MigrationThreadInfo',
|
|
|
|
'data': {'name': 'str',
|
|
|
|
'thread-id': 'int'} }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @query-migrationthreads:
|
|
|
|
#
|
|
|
|
# Returns information of migration threads
|
|
|
|
#
|
|
|
|
# data: migration thread name
|
|
|
|
#
|
2023-04-25 09:42:21 +03:00
|
|
|
# Returns: information about migration threads
|
2023-02-03 10:35:18 +03:00
|
|
|
#
|
|
|
|
# Since: 7.2
|
|
|
|
##
|
|
|
|
{ 'command': 'query-migrationthreads',
|
|
|
|
'returns': ['MigrationThreadInfo'] }
|
|
|
|
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
##
|
|
|
|
# @snapshot-save:
|
|
|
|
#
|
|
|
|
# Save a VM snapshot
|
|
|
|
#
|
|
|
|
# @job-id: identifier for the newly created job
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# @tag: name of the snapshot to create
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# @vmstate: block device node name to save vmstate to
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# @devices: list of block device node names to save a snapshot to
|
|
|
|
#
|
|
|
|
# Applications should not assume that the snapshot save is complete
|
2023-04-28 13:54:29 +03:00
|
|
|
# when this command returns. The job commands / events must be used
|
|
|
|
# to determine completion and to fetch details of any errors that
|
|
|
|
# arise.
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# Note that execution of the guest CPUs may be stopped during the time
|
|
|
|
# it takes to save the snapshot. A future version of QEMU may ensure
|
|
|
|
# CPUs are executing continuously.
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# It is strongly recommended that @devices contain all writable block
|
|
|
|
# device nodes if a consistent snapshot is required.
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
#
|
|
|
|
# If @tag already exists, an error will be reported
|
|
|
|
#
|
|
|
|
# Returns: nothing
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "snapshot-save",
|
2022-02-22 20:01:16 +03:00
|
|
|
# "arguments": {
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "job-id": "snapsave0",
|
|
|
|
# "tag": "my-snap",
|
|
|
|
# "vmstate": "disk0",
|
|
|
|
# "devices": ["disk0", "disk1"]
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
# <- { "return": { } }
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1432121972, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "created", "id": "snapsave0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1432122172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "running", "id": "snapsave0"}}
|
2022-09-01 11:58:40 +03:00
|
|
|
# <- {"event": "STOP",
|
|
|
|
# "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
|
|
|
|
# <- {"event": "RESUME",
|
|
|
|
# "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1432122772, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "waiting", "id": "snapsave0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1432122972, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "pending", "id": "snapsave0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1432123172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "concluded", "id": "snapsave0"}}
|
|
|
|
# -> {"execute": "query-jobs"}
|
|
|
|
# <- {"return": [{"current-progress": 1,
|
|
|
|
# "status": "concluded",
|
|
|
|
# "total-progress": 1,
|
|
|
|
# "type": "snapshot-save",
|
|
|
|
# "id": "snapsave0"}]}
|
|
|
|
#
|
|
|
|
# Since: 6.0
|
|
|
|
##
|
|
|
|
{ 'command': 'snapshot-save',
|
|
|
|
'data': { 'job-id': 'str',
|
|
|
|
'tag': 'str',
|
|
|
|
'vmstate': 'str',
|
|
|
|
'devices': ['str'] } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @snapshot-load:
|
|
|
|
#
|
|
|
|
# Load a VM snapshot
|
|
|
|
#
|
|
|
|
# @job-id: identifier for the newly created job
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# @tag: name of the snapshot to load.
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# @vmstate: block device node name to load vmstate from
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# @devices: list of block device node names to load a snapshot from
|
|
|
|
#
|
|
|
|
# Applications should not assume that the snapshot load is complete
|
2023-04-28 13:54:29 +03:00
|
|
|
# when this command returns. The job commands / events must be used
|
|
|
|
# to determine completion and to fetch details of any errors that
|
|
|
|
# arise.
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
#
|
|
|
|
# Note that execution of the guest CPUs will be stopped during the
|
|
|
|
# time it takes to load the snapshot.
|
|
|
|
#
|
2023-04-28 13:54:29 +03:00
|
|
|
# It is strongly recommended that @devices contain all writable block
|
|
|
|
# device nodes that can have changed since the original @snapshot-save
|
|
|
|
# command execution.
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
#
|
|
|
|
# Returns: nothing
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "snapshot-load",
|
2022-02-22 20:01:16 +03:00
|
|
|
# "arguments": {
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "job-id": "snapload0",
|
|
|
|
# "tag": "my-snap",
|
|
|
|
# "vmstate": "disk0",
|
|
|
|
# "devices": ["disk0", "disk1"]
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
# <- { "return": { } }
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1472124172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "created", "id": "snapload0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1472125172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "running", "id": "snapload0"}}
|
2022-09-01 11:58:40 +03:00
|
|
|
# <- {"event": "STOP",
|
|
|
|
# "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
|
|
|
|
# <- {"event": "RESUME",
|
|
|
|
# "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1472126172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "waiting", "id": "snapload0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1472127172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "pending", "id": "snapload0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1472128172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "concluded", "id": "snapload0"}}
|
|
|
|
# -> {"execute": "query-jobs"}
|
|
|
|
# <- {"return": [{"current-progress": 1,
|
|
|
|
# "status": "concluded",
|
|
|
|
# "total-progress": 1,
|
|
|
|
# "type": "snapshot-load",
|
|
|
|
# "id": "snapload0"}]}
|
|
|
|
#
|
|
|
|
# Since: 6.0
|
|
|
|
##
|
|
|
|
{ 'command': 'snapshot-load',
|
|
|
|
'data': { 'job-id': 'str',
|
|
|
|
'tag': 'str',
|
|
|
|
'vmstate': 'str',
|
|
|
|
'devices': ['str'] } }
|
|
|
|
|
|
|
|
##
|
|
|
|
# @snapshot-delete:
|
|
|
|
#
|
|
|
|
# Delete a VM snapshot
|
|
|
|
#
|
|
|
|
# @job-id: identifier for the newly created job
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# @tag: name of the snapshot to delete.
|
2023-04-28 13:54:29 +03:00
|
|
|
#
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# @devices: list of block device node names to delete a snapshot from
|
|
|
|
#
|
|
|
|
# Applications should not assume that the snapshot delete is complete
|
2023-04-28 13:54:29 +03:00
|
|
|
# when this command returns. The job commands / events must be used
|
|
|
|
# to determine completion and to fetch details of any errors that
|
|
|
|
# arise.
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
#
|
|
|
|
# Returns: nothing
|
|
|
|
#
|
|
|
|
# Example:
|
|
|
|
#
|
|
|
|
# -> { "execute": "snapshot-delete",
|
2022-02-22 20:01:16 +03:00
|
|
|
# "arguments": {
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "job-id": "snapdelete0",
|
|
|
|
# "tag": "my-snap",
|
|
|
|
# "devices": ["disk0", "disk1"]
|
|
|
|
# }
|
|
|
|
# }
|
|
|
|
# <- { "return": { } }
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1442124172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "created", "id": "snapdelete0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1442125172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "running", "id": "snapdelete0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1442126172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "waiting", "id": "snapdelete0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1442127172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "pending", "id": "snapdelete0"}}
|
|
|
|
# <- {"event": "JOB_STATUS_CHANGE",
|
2022-09-01 11:58:40 +03:00
|
|
|
# "timestamp": {"seconds": 1442128172, "microseconds": 744001},
|
migration: introduce snapshot-{save, load, delete} QMP commands
savevm, loadvm and delvm are some of the few HMP commands that have never
been converted to use QMP. The reasons for the lack of conversion are
that they blocked execution of the event thread, and the semantics
around choice of disks were ill-defined.
Despite this downside, however, libvirt and applications using libvirt
have used these commands for as long as QMP has existed, via the
"human-monitor-command" passthrough command. IOW, while it is clearly
desirable to be able to fix the problems, they are not a blocker to
all real world usage.
Meanwhile there is a need for other features which involve adding new
parameters to the commands. This is possible with HMP passthrough, but
it provides no reliable way for apps to introspect features, so using
QAPI modelling is highly desirable.
This patch thus introduces new snapshot-{load,save,delete} commands to
QMP that are intended to replace the old HMP counterparts. The new
commands are given different names, because they will be using the new
QEMU job framework and thus will have diverging behaviour from the HMP
originals. It would thus be misleading to keep the same name.
While this design uses the generic job framework, the current impl is
still blocking. The intention that the blocking problem is fixed later.
None the less applications using these new commands should assume that
they are asynchronous and thus wait for the job status change event to
indicate completion.
In addition to using the job framework, the new commands require the
caller to be explicit about all the block device nodes used in the
snapshot operations, with no built-in default heuristics in use.
Note that the existing "query-named-block-nodes" can be used to query
what snapshots currently exist for block nodes.
Acked-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20210204124834.774401-13-berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: removed tests for now, the output ordering isn't
deterministic
2021-02-04 15:48:34 +03:00
|
|
|
# "data": {"status": "concluded", "id": "snapdelete0"}}
|
|
|
|
# -> {"execute": "query-jobs"}
|
|
|
|
# <- {"return": [{"current-progress": 1,
|
|
|
|
# "status": "concluded",
|
|
|
|
# "total-progress": 1,
|
|
|
|
# "type": "snapshot-delete",
|
|
|
|
# "id": "snapdelete0"}]}
|
|
|
|
#
|
|
|
|
# Since: 6.0
|
|
|
|
##
|
|
|
|
{ 'command': 'snapshot-delete',
|
|
|
|
'data': { 'job-id': 'str',
|
|
|
|
'tag': 'str',
|
|
|
|
'devices': ['str'] } }
|