qemu/qapi/block-core.json

# -*- Mode: Python -*-
# vim: filetype=python
##
# == Block core (VM unrelated)
##
{ 'include': 'common.json' }
{ 'include': 'crypto.json' }
{ 'include': 'job.json' }
{ 'include': 'sockets.json' }
##
# @SnapshotInfo:
#
# @id: unique snapshot id
#
# @name: user chosen name
#
# @vm-state-size: size of the VM state
#
# @date-sec: UTC date of the snapshot in seconds
#
# @date-nsec: fractional part in nanoseconds to be used with date-sec
#
# @vm-clock-sec: VM clock relative to boot in seconds
#
# @vm-clock-nsec: fractional part in nanoseconds to be used with vm-clock-sec
#
# @icount: Current instruction count. Appears when execution record/replay
# is enabled. Used for "time-traveling" to match the moment
# in the recorded execution with the snapshots. This counter may
# be obtained through the @query-replay command (since 5.2)
#
# Since: 1.3
#
##
{ 'struct': 'SnapshotInfo',
'data': { 'id': 'str', 'name': 'str', 'vm-state-size': 'int',
'date-sec': 'int', 'date-nsec': 'int',
'vm-clock-sec': 'int', 'vm-clock-nsec': 'int',
'*icount': 'int' } }
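# For orientation, a SnapshotInfo element as it might appear inside an
# ImageInfo's "snapshots" list on the wire; the values below are purely
# illustrative and mirror the query-block example further down:
#
#   { "id": "1", "name": "snapshot1", "vm-state-size": 0,
#     "date-sec": 10000200, "date-nsec": 12,
#     "vm-clock-sec": 206, "vm-clock-nsec": 30 }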
##
# @ImageInfoSpecificQCow2EncryptionBase:
#
# @format: The encryption format
#
# Since: 2.10
##
{ 'struct': 'ImageInfoSpecificQCow2EncryptionBase',
'data': { 'format': 'BlockdevQcow2EncryptionFormat'}}
##
# @ImageInfoSpecificQCow2Encryption:
#
# Since: 2.10
##
{ 'union': 'ImageInfoSpecificQCow2Encryption',
'base': 'ImageInfoSpecificQCow2EncryptionBase',
'discriminator': 'format',
'data': { 'luks': 'QCryptoBlockInfoLUKS' } }
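# Since this is a flat union keyed by @format, its JSON representation
# carries the base member alongside the branch members. A rough sketch for
# a LUKS-encrypted image; the members other than "format" belong to
# QCryptoBlockInfoLUKS from crypto.json, and the subset and values shown
# here are illustrative only:
#
#   { "format": "luks", "cipher-alg": "aes-256", "cipher-mode": "xts",
#     "ivgen-alg": "plain64", "hash-alg": "sha256", ... }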
##
# @ImageInfoSpecificQCow2:
#
# @compat: compatibility level
#
# @data-file: the filename of the external data file that is stored in the
# image and used as a default for opening the image (since: 4.0)
#
# @data-file-raw: True if the external data file must stay valid as a
# standalone (read-only) raw image without looking at qcow2
# metadata (since: 4.0)
#
# @extended-l2: true if the image has extended L2 entries; only valid for
# compat >= 1.1 (since 5.2)
#
# @lazy-refcounts: on or off; only valid for compat >= 1.1
#
# @corrupt: true if the image has been marked corrupt; only valid for
# compat >= 1.1 (since 2.2)
#
# @refcount-bits: width of a refcount entry in bits (since 2.3)
#
# @encrypt: details about encryption parameters; only set if image
# is encrypted (since 2.10)
#
# @bitmaps: A list of qcow2 bitmap details (since 4.0)
#
# @compression-type: the image cluster compression method (since 5.1)
#
# Since: 1.7
##
{ 'struct': 'ImageInfoSpecificQCow2',
'data': {
'compat': 'str',
'*data-file': 'str',
'*data-file-raw': 'bool',
'*extended-l2': 'bool',
'*lazy-refcounts': 'bool',
'*corrupt': 'bool',
'refcount-bits': 'int',
'*encrypt': 'ImageInfoSpecificQCow2Encryption',
'*bitmaps': ['Qcow2BitmapInfo'],
'compression-type': 'Qcow2CompressionType'
} }
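# A hedged example of how the qcow2-specific information might look for a
# plain v1.1 image with default settings (values are hypothetical):
#
#   { "compat": "1.1", "lazy-refcounts": false, "corrupt": false,
#     "refcount-bits": 16, "compression-type": "zlib" }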
##
# @ImageInfoSpecificVmdk:
#
# @create-type: The create type of VMDK image
#
# @cid: Content id of image
#
# @parent-cid: Parent VMDK image's cid
#
# @extents: List of extent files
#
# Since: 1.7
##
{ 'struct': 'ImageInfoSpecificVmdk',
'data': {
'create-type': 'str',
'cid': 'int',
'parent-cid': 'int',
'extents': ['ImageInfo']
} }
##
# @ImageInfoSpecific:
#
# A discriminated record of image format specific information structures.
#
# Since: 1.7
##
{ 'union': 'ImageInfoSpecific',
'data': {
'qcow2': 'ImageInfoSpecificQCow2',
'vmdk': 'ImageInfoSpecificVmdk',
# If we need to add block driver specific parameters for
# LUKS in future, then we'll subclass QCryptoBlockInfoLUKS
# to define a ImageInfoSpecificLUKS
'luks': 'QCryptoBlockInfoLUKS'
} }
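# Being a simple (non-flat) union, @ImageInfoSpecific appears on the wire
# wrapped in a "type"/"data" pair. An illustrative fragment as it might
# show up under ImageInfo's @format-specific member:
#
#   "format-specific": { "type": "qcow2",
#                        "data": { "compat": "1.1", "refcount-bits": 16 } }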
##
# @ImageInfo:
#
# Information about a QEMU image file
#
# @filename: name of the image file
#
# @format: format of the image file
#
# @virtual-size: maximum capacity in bytes of the image
#
# @actual-size: actual size on disk in bytes of the image
#
# @dirty-flag: true if image is not cleanly closed
#
# @cluster-size: size of a cluster in bytes
#
# @encrypted: true if the image is encrypted
#
# @compressed: true if the image is compressed (Since 1.7)
#
# @backing-filename: name of the backing file
#
# @full-backing-filename: full path of the backing file
#
# @backing-filename-format: the format of the backing file
#
# @snapshots: list of VM snapshots
#
# @backing-image: info of the backing image (since 1.6)
#
# @format-specific: structure supplying additional format-specific
# information (since 1.7)
#
# Since: 1.3
#
##
{ 'struct': 'ImageInfo',
'data': {'filename': 'str', 'format': 'str', '*dirty-flag': 'bool',
'*actual-size': 'int', 'virtual-size': 'int',
'*cluster-size': 'int', '*encrypted': 'bool', '*compressed': 'bool',
'*backing-filename': 'str', '*full-backing-filename': 'str',
'*backing-filename-format': 'str', '*snapshots': ['SnapshotInfo'],
'*backing-image': 'ImageInfo',
'*format-specific': 'ImageInfoSpecific' } }
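# An ImageInfo object roughly as 'qemu-img info --output=json' or
# query-block might report it; the file names and numbers are illustrative:
#
#   { "filename": "disks/test.qcow2", "format": "qcow2",
#     "virtual-size": 2048000, "actual-size": 1310720,
#     "dirty-flag": false, "cluster-size": 65536,
#     "backing-filename": "base.qcow2",
#     "full-backing-filename": "disks/base.qcow2" }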
##
# @ImageCheck:
#
# Information about a QEMU image file check
#
# @filename: name of the image file checked
#
# @format: format of the image file checked
#
# @check-errors: number of unexpected errors that occurred during the check
#
# @image-end-offset: offset (in bytes) where the image ends, this
# field is present if the driver for the image format
# supports it
#
# @corruptions: number of corruptions found during the check if any
#
# @leaks: number of leaks found during the check if any
#
# @corruptions-fixed: number of corruptions fixed during the check
# if any
#
# @leaks-fixed: number of leaks fixed during the check if any
#
# @total-clusters: total number of clusters, this field is present
# if the driver for the image format supports it
#
# @allocated-clusters: total number of allocated clusters, this
# field is present if the driver for the image format
# supports it
#
# @fragmented-clusters: total number of fragmented clusters, this
# field is present if the driver for the image format
# supports it
#
# @compressed-clusters: total number of compressed clusters, this
# field is present if the driver for the image format
# supports it
#
# Since: 1.4
#
##
{ 'struct': 'ImageCheck',
'data': {'filename': 'str', 'format': 'str', 'check-errors': 'int',
'*image-end-offset': 'int', '*corruptions': 'int', '*leaks': 'int',
'*corruptions-fixed': 'int', '*leaks-fixed': 'int',
'*total-clusters': 'int', '*allocated-clusters': 'int',
'*fragmented-clusters': 'int', '*compressed-clusters': 'int' } }
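# Sketch of an ImageCheck result, e.g. from 'qemu-img check --output=json';
# which optional fields appear depends on the format driver, and the values
# here are invented for illustration:
#
#   { "filename": "disks/test.qcow2", "format": "qcow2",
#     "check-errors": 0, "image-end-offset": 262144,
#     "corruptions": 0, "leaks": 0, "total-clusters": 32768,
#     "allocated-clusters": 12, "fragmented-clusters": 0,
#     "compressed-clusters": 0 }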
##
# @MapEntry:
#
# Mapping information from a virtual block range to a host file range
#
# @start: virtual (guest) offset of the first byte described by this
# entry
#
# @length: the number of bytes of the mapped virtual range
#
# @data: reading the image will actually read data from a file (in
# particular, if @offset is present this means that the sectors
# are not simply preallocated, but contain actual data in raw
# format)
#
# @zero: whether the virtual blocks read as zeroes
#
# @depth: number of layers (0 = top image, 1 = top image's backing
# file, ..., n - 1 = bottom image (where n is the number of
# images in the chain)) before reaching one for which the
# range is allocated
#
# @offset: if present, the image file stores the data for this range
# in raw format at the given (host) offset
#
# @filename: filename that is referred to by @offset
#
# Since: 2.6
#
##
{ 'struct': 'MapEntry',
'data': {'start': 'int', 'length': 'int', 'data': 'bool',
'zero': 'bool', 'depth': 'int', '*offset': 'int',
'*filename': 'str' } }
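# One MapEntry as it might appear in 'qemu-img map --output=json' output for
# an allocated range backed by raw data (illustrative values):
#
#   { "start": 0, "length": 65536, "depth": 0, "zero": false,
#     "data": true, "offset": 327680, "filename": "disks/test.qcow2" }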
##
# @BlockdevCacheInfo:
#
# Cache mode information for a block device
#
# @writeback: true if writeback mode is enabled
# @direct: true if the host page cache is bypassed (O_DIRECT)
# @no-flush: true if flush requests are ignored for the device
#
# Since: 2.3
##
{ 'struct': 'BlockdevCacheInfo',
'data': { 'writeback': 'bool',
'direct': 'bool',
'no-flush': 'bool' } }
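# For example, the cache information for a drive opened with cache=writeback
# would be expected to look like:
#
#   { "writeback": true, "direct": false, "no-flush": false }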
##
# @BlockDeviceInfo:
#
# Information about the backing device for a block device.
#
# @file: the filename of the backing device
#
# @node-name: the name of the block driver node (Since 2.0)
#
# @ro: true if the backing device was open read-only
#
# @drv: the name of the block format used to open the backing device. As of
# 0.14 this can be: 'blkdebug', 'bochs', 'cloop', 'cow', 'dmg',
# 'file', 'ftp', 'ftps', 'host_cdrom', 'host_device',
# 'http', 'https', 'luks', 'nbd', 'parallels', 'qcow',
# 'qcow2', 'raw', 'vdi', 'vmdk', 'vpc', 'vvfat'
# 2.2: 'archipelago' added, 'cow' dropped
# 2.3: 'host_floppy' deprecated
# 2.5: 'host_floppy' dropped
# 2.6: 'luks' added
# 2.8: 'replication' added, 'tftp' dropped
# 2.9: 'archipelago' dropped
#
# @backing_file: the name of the backing file (for copy-on-write)
#
# @backing_file_depth: number of files in the backing file chain (since: 1.2)
#
# @encrypted: true if the backing device is encrypted
#
# @detect_zeroes: detect and optimize zero writes (Since 2.1)
#
# @bps: total throughput limit in bytes per second
#
# @bps_rd: read throughput limit in bytes per second
#
# @bps_wr: write throughput limit in bytes per second
#
# @iops: total I/O operations per second limit
#
# @iops_rd: read I/O operations per second limit
#
# @iops_wr: write I/O operations per second limit
#
# @image: the info of image used (since: 1.6)
#
# @bps_max: total throughput limit during bursts,
# in bytes (Since 1.7)
#
# @bps_rd_max: read throughput limit during bursts,
# in bytes (Since 1.7)
#
# @bps_wr_max: write throughput limit during bursts,
# in bytes (Since 1.7)
#
# @iops_max: total I/O operations per second during bursts (Since 1.7)
#
# @iops_rd_max: read I/O operations per second during bursts (Since 1.7)
#
# @iops_wr_max: write I/O operations per second during bursts (Since 1.7)
#
# @bps_max_length: maximum length of the @bps_max burst
# period, in seconds. (Since 2.6)
#
# @bps_rd_max_length: maximum length of the @bps_rd_max
# burst period, in seconds. (Since 2.6)
#
# @bps_wr_max_length: maximum length of the @bps_wr_max
# burst period, in seconds. (Since 2.6)
#
# @iops_max_length: maximum length of the @iops_max burst
# period, in seconds. (Since 2.6)
#
# @iops_rd_max_length: maximum length of the @iops_rd_max
# burst period, in seconds. (Since 2.6)
#
# @iops_wr_max_length: maximum length of the @iops_wr_max
# burst period, in seconds. (Since 2.6)
#
# @iops_size: an I/O size in bytes (Since 1.7)
#
# @group: throttle group name (Since 2.4)
#
# @cache: the cache mode used for the block device (since: 2.3)
#
# @write_threshold: configured write threshold for the device.
# 0 if disabled. (Since 2.3)
#
# @dirty-bitmaps: dirty bitmaps information (only present if node
# has one or more dirty bitmaps) (Since 4.2)
#
# Since: 0.14
#
##
{ 'struct': 'BlockDeviceInfo',
'data': { 'file': 'str', '*node-name': 'str', 'ro': 'bool', 'drv': 'str',
'*backing_file': 'str', 'backing_file_depth': 'int',
'encrypted': 'bool',
'detect_zeroes': 'BlockdevDetectZeroesOptions',
'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int',
'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int',
'image': 'ImageInfo',
'*bps_max': 'int', '*bps_rd_max': 'int',
'*bps_wr_max': 'int', '*iops_max': 'int',
'*iops_rd_max': 'int', '*iops_wr_max': 'int',
'*bps_max_length': 'int', '*bps_rd_max_length': 'int',
'*bps_wr_max_length': 'int', '*iops_max_length': 'int',
'*iops_rd_max_length': 'int', '*iops_wr_max_length': 'int',
'*iops_size': 'int', '*group': 'str', 'cache': 'BlockdevCacheInfo',
'write_threshold': 'int', '*dirty-bitmaps': ['BlockDirtyInfo'] } }
##
# @BlockDeviceIoStatus:
#
# An enumeration of block device I/O status.
#
# @ok: The last I/O operation has succeeded
#
# @failed: The last I/O operation has failed
#
# @nospace: The last I/O operation has failed due to a no-space condition
#
# Since: 1.0
##
{ 'enum': 'BlockDeviceIoStatus', 'data': [ 'ok', 'failed', 'nospace' ] }
##
# @BlockDirtyInfo:
#
# Block dirty bitmap information.
#
# @name: the name of the dirty bitmap (Since 2.4)
#
# @count: number of dirty bytes according to the dirty bitmap
#
# @granularity: granularity of the dirty bitmap in bytes (since 1.4)
#
# @recording: true if the bitmap is recording new writes from the guest.
# Replaces `active` and `disabled` statuses. (since 4.0)
#
# @busy: true if the bitmap is in-use by some operation (NBD or jobs)
# and cannot be modified via QMP or used by another operation.
# Replaces `locked` and `frozen` statuses. (since 4.0)
#
# @persistent: true if the bitmap was stored on disk, is scheduled to be stored
# on disk, or both. (since 4.0)
#
# @inconsistent: true if this is a persistent bitmap that was improperly
# stored. Implies @persistent to be true; @recording and
# @busy to be false. This bitmap cannot be used. To remove
# it, use @block-dirty-bitmap-remove. (Since 4.0)
#
# Since: 1.3
##
{ 'struct': 'BlockDirtyInfo',
'data': {'*name': 'str', 'count': 'int', 'granularity': 'uint32',
'recording': 'bool', 'busy': 'bool',
'persistent': 'bool', '*inconsistent': 'bool' } }
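# A BlockDirtyInfo entry for a persistent bitmap that is currently recording
# guest writes might look like this (name and figures are illustrative):
#
#   { "name": "bitmap0", "count": 1048576, "granularity": 65536,
#     "recording": true, "busy": false, "persistent": true }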
##
# @Qcow2BitmapInfoFlags:
#
# An enumeration of flags that a bitmap can report to the user.
#
# @in-use: This flag is set by any process actively modifying the qcow2 file,
# and cleared when the updated bitmap is flushed to the qcow2 image.
# The presence of this flag in an offline image means that the bitmap
# was not saved correctly after its last usage, and may contain
# inconsistent data.
#
# @auto: The bitmap must reflect all changes of the virtual disk by any
# application that would write to this qcow2 file.
#
# Since: 4.0
##
{ 'enum': 'Qcow2BitmapInfoFlags',
'data': ['in-use', 'auto'] }
##
# @Qcow2BitmapInfo:
#
# Qcow2 bitmap information.
#
# @name: the name of the bitmap
#
# @granularity: granularity of the bitmap in bytes
#
# @flags: flags of the bitmap
#
# Since: 4.0
##
{ 'struct': 'Qcow2BitmapInfo',
'data': {'name': 'str', 'granularity': 'uint32',
'flags': ['Qcow2BitmapInfoFlags'] } }
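# Example of one Qcow2BitmapInfo entry as it could appear in the qcow2
# @bitmaps list (name and granularity are illustrative):
#
#   { "name": "bitmap0", "granularity": 65536, "flags": [ "auto" ] }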
##
# @BlockLatencyHistogramInfo:
#
# Block latency histogram.
#
# @boundaries: list of interval boundary values in nanoseconds, all greater
# than zero and in ascending order.
# For example, the list [10, 50, 100] produces the following
# histogram intervals: [0, 10), [10, 50), [50, 100), [100, +inf).
#
# @bins: list of io request counts corresponding to histogram intervals.
# len(@bins) = len(@boundaries) + 1
# For the example above, @bins may be something like [3, 1, 5, 2],
# and corresponding histogram looks like:
#
# ::
#
# 5| *
# 4| *
# 3| * *
# 2| * * *
# 1| * * * *
# +------------------
# 10 50 100
#
# Since: 4.0
##
{ 'struct': 'BlockLatencyHistogramInfo',
'data': {'boundaries': ['uint64'], 'bins': ['uint64'] } }
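# Using the example from the description above, a histogram with boundaries
# [10, 50, 100] and the sample counts [3, 1, 5, 2] would be encoded as:
#
#   { "boundaries": [ 10, 50, 100 ], "bins": [ 3, 1, 5, 2 ] }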
##
# @BlockInfo:
#
# Block device information. This structure describes a virtual device and
# the backing device associated with it.
#
# @device: The device name associated with the virtual device.
#
# @qdev: The qdev ID, or if no ID is assigned, the QOM path of the block
# device. (since 2.10)
#
# @type: This field is returned only for compatibility reasons; it should
# not be used (always returns 'unknown')
#
# @removable: True if the device supports removable media.
#
# @locked: True if the guest has locked this device from having its media
# removed
#
# @tray_open: True if the device's tray is open
# (only present if it has a tray)
#
# @io-status: @BlockDeviceIoStatus. Only present if the device
# supports it and the VM is configured to stop on errors
# (supported device models: virtio-blk, IDE, SCSI except
# scsi-generic)
#
# @inserted: @BlockDeviceInfo describing the device if media is
# present
#
# Since: 0.14
##
{ 'struct': 'BlockInfo',
'data': {'device': 'str', '*qdev': 'str', 'type': 'str', 'removable': 'bool',
'locked': 'bool', '*inserted': 'BlockDeviceInfo',
'*tray_open': 'bool', '*io-status': 'BlockDeviceIoStatus' } }
##
# @BlockMeasureInfo:
#
# Image file size calculation information. This structure describes the size
# requirements for creating a new image file.
#
# The size requirements depend on the new image file format. File size always
# equals virtual disk size for the 'raw' format, even for sparse POSIX files.
# Compact formats such as 'qcow2' represent unallocated and zero regions
# efficiently so file size may be smaller than virtual disk size.
#
# The values are upper bounds that are guaranteed to fit the new image file.
# Subsequent modification, such as internal snapshot or further bitmap
# creation, may require additional space and is not covered here.
#
# @required: Size required for a new image file, in bytes, when copying just
# allocated guest-visible contents.
#
# @fully-allocated: Image file size, in bytes, once data has been written
# to all sectors, when copying just guest-visible contents.
#
# @bitmaps: Additional size required if all the top-level bitmap metadata
# in the source image were to be copied to the destination,
# present only when source and destination both support
# persistent bitmaps. (since 5.1)
#
# Since: 2.10
##
{ 'struct': 'BlockMeasureInfo',
'data': {'required': 'int', 'fully-allocated': 'int', '*bitmaps': 'int'} }
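# A possible measurement result, e.g. for converting a mostly-empty 1 GiB
# image to qcow2 when both sides support persistent bitmaps (the numbers
# are invented for illustration):
#
#   { "required": 393216, "fully-allocated": 1074135040, "bitmaps": 0 }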
##
# @query-block:
#
# Get a list of BlockInfo for all virtual block devices.
#
# Returns: a list of @BlockInfo describing each virtual block device. Filter
# nodes that were created implicitly are skipped over.
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "query-block" }
# <- {
# "return":[
# {
# "io-status": "ok",
# "device":"ide0-hd0",
# "locked":false,
# "removable":false,
# "inserted":{
# "ro":false,
# "drv":"qcow2",
# "encrypted":false,
# "file":"disks/test.qcow2",
# "backing_file_depth":1,
# "bps":1000000,
# "bps_rd":0,
# "bps_wr":0,
# "iops":1000000,
# "iops_rd":0,
# "iops_wr":0,
# "bps_max": 8000000,
# "bps_rd_max": 0,
# "bps_wr_max": 0,
# "iops_max": 0,
# "iops_rd_max": 0,
# "iops_wr_max": 0,
# "iops_size": 0,
# "detect_zeroes": "on",
# "write_threshold": 0,
# "image":{
# "filename":"disks/test.qcow2",
# "format":"qcow2",
# "virtual-size":2048000,
# "backing_file":"base.qcow2",
# "full-backing-filename":"disks/base.qcow2",
# "backing-filename-format":"qcow2",
# "snapshots":[
# {
# "id": "1",
# "name": "snapshot1",
# "vm-state-size": 0,
# "date-sec": 10000200,
# "date-nsec": 12,
# "vm-clock-sec": 206,
# "vm-clock-nsec": 30
# }
# ],
# "backing-image":{
# "filename":"disks/base.qcow2",
# "format":"qcow2",
# "virtual-size":2048000
# }
# }
# },
# "qdev": "ide_disk",
# "type":"unknown"
# },
# {
# "io-status": "ok",
# "device":"ide1-cd0",
# "locked":false,
# "removable":true,
# "qdev": "/machine/unattached/device[23]",
# "tray_open": false,
# "type":"unknown"
# },
# {
# "device":"floppy0",
# "locked":false,
# "removable":true,
# "qdev": "/machine/unattached/device[20]",
# "type":"unknown"
# },
# {
# "device":"sd0",
# "locked":false,
# "removable":true,
# "type":"unknown"
# }
# ]
# }
#
##
{ 'command': 'query-block', 'returns': ['BlockInfo'] }
##
# @BlockDeviceTimedStats:
#
# Statistics of a block device during a given interval of time.
#
# @interval_length: Interval used for calculating the statistics,
# in seconds.
#
# @min_rd_latency_ns: Minimum latency of read operations in the
# defined interval, in nanoseconds.
#
# @min_wr_latency_ns: Minimum latency of write operations in the
# defined interval, in nanoseconds.
#
# @min_flush_latency_ns: Minimum latency of flush operations in the
# defined interval, in nanoseconds.
#
# @max_rd_latency_ns: Maximum latency of read operations in the
# defined interval, in nanoseconds.
#
# @max_wr_latency_ns: Maximum latency of write operations in the
# defined interval, in nanoseconds.
#
# @max_flush_latency_ns: Maximum latency of flush operations in the
# defined interval, in nanoseconds.
#
# @avg_rd_latency_ns: Average latency of read operations in the
# defined interval, in nanoseconds.
#
# @avg_wr_latency_ns: Average latency of write operations in the
# defined interval, in nanoseconds.
#
# @avg_flush_latency_ns: Average latency of flush operations in the
# defined interval, in nanoseconds.
#
# @avg_rd_queue_depth: Average number of pending read operations
# in the defined interval.
#
# @avg_wr_queue_depth: Average number of pending write operations
# in the defined interval.
#
# Since: 2.5
##
{ 'struct': 'BlockDeviceTimedStats',
'data': { 'interval_length': 'int', 'min_rd_latency_ns': 'int',
'max_rd_latency_ns': 'int', 'avg_rd_latency_ns': 'int',
'min_wr_latency_ns': 'int', 'max_wr_latency_ns': 'int',
'avg_wr_latency_ns': 'int', 'min_flush_latency_ns': 'int',
'max_flush_latency_ns': 'int', 'avg_flush_latency_ns': 'int',
'avg_rd_queue_depth': 'number', 'avg_wr_queue_depth': 'number' } }
##
# @BlockDeviceStats:
#
# Statistics of a virtual block device or a block backing device.
#
# @rd_bytes: The number of bytes read by the device.
#
# @wr_bytes: The number of bytes written by the device.
#
# @unmap_bytes: The number of bytes unmapped by the device (Since 4.2)
#
# @rd_operations: The number of read operations performed by the device.
#
# @wr_operations: The number of write operations performed by the device.
#
# @flush_operations: The number of cache flush operations performed by the
# device (since 0.15)
#
# @unmap_operations: The number of unmap operations performed by the device
# (Since 4.2)
#
# @rd_total_time_ns: Total time spent on reads in nanoseconds (since 0.15).
#
# @wr_total_time_ns: Total time spent on writes in nanoseconds (since 0.15).
#
# @flush_total_time_ns: Total time spent on cache flushes in nanoseconds
# (since 0.15).
#
# @unmap_total_time_ns: Total time spent on unmap operations in nanoseconds
# (Since 4.2)
#
# @wr_highest_offset: The offset after the greatest byte written to the
# device. The intended use of this information is for
# growable sparse files (like qcow2) that are used on top
# of a physical device.
#
# @rd_merged: Number of read requests that have been merged into another
# request (Since 2.3).
#
# @wr_merged: Number of write requests that have been merged into another
# request (Since 2.3).
#
# @unmap_merged: Number of unmap requests that have been merged into another
# request (Since 4.2)
#
# @idle_time_ns: Time since the last I/O operation, in
# nanoseconds. If the field is absent it means that
# there haven't been any operations yet (Since 2.5).
#
# @failed_rd_operations: The number of failed read operations
# performed by the device (Since 2.5)
#
# @failed_wr_operations: The number of failed write operations
# performed by the device (Since 2.5)
#
# @failed_flush_operations: The number of failed flush operations
# performed by the device (Since 2.5)
#
# @failed_unmap_operations: The number of failed unmap operations performed
# by the device (Since 4.2)
#
# @invalid_rd_operations: The number of invalid read operations
# performed by the device (Since 2.5)
#
# @invalid_wr_operations: The number of invalid write operations
# performed by the device (Since 2.5)
#
# @invalid_flush_operations: The number of invalid flush operations
# performed by the device (Since 2.5)
#
# @invalid_unmap_operations: The number of invalid unmap operations performed
# by the device (Since 4.2)
#
# @account_invalid: Whether invalid operations are included in the
# last access statistics (Since 2.5)
#
# @account_failed: Whether failed operations are included in the
# latency and last access statistics (Since 2.5)
#
# @timed_stats: Statistics specific to the set of previously defined
# intervals of time (Since 2.5)
#
# @rd_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0)
#
# @wr_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0)
#
# @flush_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0)
#
# Since: 0.14
##
{ 'struct': 'BlockDeviceStats',
'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'unmap_bytes' : 'int',
'rd_operations': 'int', 'wr_operations': 'int',
'flush_operations': 'int', 'unmap_operations': 'int',
'rd_total_time_ns': 'int', 'wr_total_time_ns': 'int',
'flush_total_time_ns': 'int', 'unmap_total_time_ns': 'int',
'wr_highest_offset': 'int',
'rd_merged': 'int', 'wr_merged': 'int', 'unmap_merged': 'int',
'*idle_time_ns': 'int',
'failed_rd_operations': 'int', 'failed_wr_operations': 'int',
'failed_flush_operations': 'int', 'failed_unmap_operations': 'int',
'invalid_rd_operations': 'int', 'invalid_wr_operations': 'int',
'invalid_flush_operations': 'int', 'invalid_unmap_operations': 'int',
'account_invalid': 'bool', 'account_failed': 'bool',
'timed_stats': ['BlockDeviceTimedStats'],
'*rd_latency_histogram': 'BlockLatencyHistogramInfo',
'*wr_latency_histogram': 'BlockLatencyHistogramInfo',
'*flush_latency_histogram': 'BlockLatencyHistogramInfo' } }
##
# @BlockStatsSpecificFile:
#
# File driver statistics
#
# @discard-nb-ok: The number of successful discard operations performed by
# the driver.
#
# @discard-nb-failed: The number of failed discard operations performed by
# the driver.
#
# @discard-bytes-ok: The number of bytes discarded by the driver.
#
# Since: 4.2
##
{ 'struct': 'BlockStatsSpecificFile',
'data': {
'discard-nb-ok': 'uint64',
'discard-nb-failed': 'uint64',
'discard-bytes-ok': 'uint64' } }
##
# @BlockStatsSpecificNvme:
#
# NVMe driver statistics
#
# @completion-errors: The number of completion errors.
#
# @aligned-accesses: The number of aligned accesses performed by
# the driver.
#
# @unaligned-accesses: The number of unaligned accesses performed by
# the driver.
#
# Since: 5.2
##
{ 'struct': 'BlockStatsSpecificNvme',
'data': {
'completion-errors': 'uint64',
'aligned-accesses': 'uint64',
'unaligned-accesses': 'uint64' } }
##
# @BlockStatsSpecific:
#
# Block driver specific statistics
#
# Since: 4.2
##
{ 'union': 'BlockStatsSpecific',
'base': { 'driver': 'BlockdevDriver' },
'discriminator': 'driver',
'data': {
'file': 'BlockStatsSpecificFile',
'host_device': { 'type': 'BlockStatsSpecificFile',
'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' },
'nvme': 'BlockStatsSpecificNvme' } }
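# As a flat union keyed by @driver, the driver-specific statistics are
# reported inline next to the discriminator. A sketch for the 'file' driver
# (the counters are illustrative):
#
#   { "driver": "file", "discard-nb-ok": 12, "discard-nb-failed": 0,
#     "discard-bytes-ok": 786432 }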
##
# @BlockStats:
#
# Statistics of a virtual block device or a block backing device.
#
# @device: If the stats are for a virtual block device, the name
# corresponding to the virtual block device.
#
# @node-name: The node name of the device. (Since 2.3)
#
# @qdev: The qdev ID, or if no ID is assigned, the QOM path of the block
# device. (since 3.0)
#
# @stats: A @BlockDeviceStats for the device.
#
# @driver-specific: Optional driver-specific stats. (Since 4.2)
#
# @parent: This describes the file block device if it has one.
# Contains recursively the statistics of the underlying
# protocol (e.g. the host file for a qcow2 image). If there is
# no underlying protocol, this field is omitted
#
# @backing: This describes the backing block device if it has one.
# (Since 2.0)
#
# Since: 0.14
##
{ 'struct': 'BlockStats',
'data': {'*device': 'str', '*qdev': 'str', '*node-name': 'str',
'stats': 'BlockDeviceStats',
'*driver-specific': 'BlockStatsSpecific',
'*parent': 'BlockStats',
'*backing': 'BlockStats'} }
##
# @query-blockstats:
#
# Query the @BlockStats for all virtual block devices.
#
# @query-nodes: If true, the command will query all the block nodes
# that have a node name, in a list which will include "parent"
# information, but not "backing".
# If false or omitted, the behavior is as before - query all the
# device backends, recursively including their "parent" and
# "backing". Filter nodes that were created implicitly are
# skipped over in this mode. (Since 2.3)
#
# Returns: A list of @BlockStats for each virtual block device.
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "query-blockstats" }
# <- {
# "return":[
# {
# "device":"ide0-hd0",
# "parent":{
# "stats":{
# "wr_highest_offset":3686448128,
# "wr_bytes":9786368,
# "wr_operations":751,
# "rd_bytes":122567168,
# "rd_operations":36772
# "wr_total_times_ns":313253456
# "rd_total_times_ns":3465673657
# "flush_total_times_ns":49653
# "flush_operations":61,
# "rd_merged":0,
# "wr_merged":0,
# "idle_time_ns":2953431879,
# "account_invalid":true,
# "account_failed":false
# }
# },
# "stats":{
# "wr_highest_offset":2821110784,
# "wr_bytes":9786368,
# "wr_operations":692,
# "rd_bytes":122739200,
# "rd_operations":36604
# "flush_operations":51,
# "wr_total_times_ns":313253456
# "rd_total_times_ns":3465673657
# "flush_total_times_ns":49653,
# "rd_merged":0,
# "wr_merged":0,
# "idle_time_ns":2953431879,
# "account_invalid":true,
# "account_failed":false
# },
# "qdev": "/machine/unattached/device[23]"
# },
# {
# "device":"ide1-cd0",
# "stats":{
# "wr_highest_offset":0,
# "wr_bytes":0,
# "wr_operations":0,
# "rd_bytes":0,
# "rd_operations":0
# "flush_operations":0,
# "wr_total_times_ns":0
# "rd_total_times_ns":0
# "flush_total_times_ns":0,
# "rd_merged":0,
# "wr_merged":0,
# "account_invalid":false,
# "account_failed":false
# },
# "qdev": "/machine/unattached/device[24]"
# },
# {
# "device":"floppy0",
# "stats":{
# "wr_highest_offset":0,
# "wr_bytes":0,
# "wr_operations":0,
# "rd_bytes":0,
# "rd_operations":0
# "flush_operations":0,
# "wr_total_times_ns":0
# "rd_total_times_ns":0
# "flush_total_times_ns":0,
# "rd_merged":0,
# "wr_merged":0,
# "account_invalid":false,
# "account_failed":false
# },
# "qdev": "/machine/unattached/device[16]"
# },
# {
# "device":"sd0",
# "stats":{
# "wr_highest_offset":0,
# "wr_bytes":0,
# "wr_operations":0,
# "rd_bytes":0,
# "rd_operations":0
# "flush_operations":0,
# "wr_total_times_ns":0
# "rd_total_times_ns":0
# "flush_total_times_ns":0,
# "rd_merged":0,
# "wr_merged":0,
# "account_invalid":false,
# "account_failed":false
# }
# }
# ]
# }
#
##
{ 'command': 'query-blockstats',
'data': { '*query-nodes': 'bool' },
'returns': ['BlockStats'] }
##
# @BlockdevOnError:
#
# An enumeration of possible behaviors for errors on I/O operations.
# The exact meaning depends on whether the I/O was initiated by a guest
# or by a block job
#
# @report: for guest operations, report the error to the guest;
# for jobs, cancel the job
#
# @ignore: ignore the error, only report a QMP event (BLOCK_IO_ERROR
# or BLOCK_JOB_ERROR). The backup, mirror and commit block jobs retry
# the failing request later and may still complete successfully. The
# stream block job continues to stream and will complete with an
# error.
#
# @enospc: same as @stop on ENOSPC, same as @report otherwise.
#
# @stop: for guest operations, stop the virtual machine;
# for jobs, pause the job
#
# @auto: inherit the error handling policy of the backend (since: 2.7)
#
# Since: 1.3
##
{ 'enum': 'BlockdevOnError',
'data': ['report', 'ignore', 'enospc', 'stop', 'auto'] }
##
# @MirrorSyncMode:
#
# An enumeration of possible behaviors for the initial synchronization
# phase of storage mirroring.
#
# @top: copies data in the topmost image to the destination
#
# @full: copies data from all images to the destination
#
# @none: only copy data written from now on
#
# @incremental: only copy data described by the dirty bitmap. (since: 2.4)
#
# @bitmap: only copy data described by the dirty bitmap. (since: 4.2)
# Behavior on completion is determined by the BitmapSyncMode.
#
# Since: 1.3
##
{ 'enum': 'MirrorSyncMode',
'data': ['top', 'full', 'none', 'incremental', 'bitmap'] }
##
# @BitmapSyncMode:
#
# An enumeration of possible behaviors for the synchronization of a bitmap
# when used for data copy operations.
#
# @on-success: The bitmap is only synced when the operation is successful.
# This is the behavior always used for 'INCREMENTAL' backups.
#
# @never: The bitmap is never synchronized with the operation, and is
# treated solely as a read-only manifest of blocks to copy.
#
# @always: The bitmap is always synchronized with the operation,
# regardless of whether or not the operation was successful.
#
# Since: 4.2
##
{ 'enum': 'BitmapSyncMode',
'data': ['on-success', 'never', 'always'] }
##
# @MirrorCopyMode:
#
# An enumeration whose values tell the mirror block job when to
# trigger writes to the target.
#
# @background: copy data in background only.
#
# @write-blocking: when data is written to the source, write it
# (synchronously) to the target as well. In
# addition, data is copied in background just like in
# @background mode.
#
# Since: 3.0
##
{ 'enum': 'MirrorCopyMode',
'data': ['background', 'write-blocking'] }
##
# @BlockJobInfo:
#
# Information about a long-running block device operation.
#
# @type: the job type ('stream' for image streaming)
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @len: Estimated @offset value at the completion of the job. This value can
# arbitrarily change while the job is running, in both directions.
#
# @offset: Progress made until now. The unit is arbitrary and the value can
# only meaningfully be used for the ratio of @offset to @len. The
# value is monotonically increasing.
#
# @busy: false if the job is known to be in a quiescent state, with
# no pending I/O. Since 1.3.
#
# @paused: whether the job is paused or, if @busy is true, will
# pause itself as soon as possible. Since 1.3.
#
# @speed: the rate limit, bytes per second
#
# @io-status: the status of the job (since 1.3)
#
# @ready: true if the job may be completed (since 2.2)
#
# @status: Current job state/status (since 2.12)
#
# @auto-finalize: Job will finalize itself when PENDING, moving to
# the CONCLUDED state. (since 2.12)
#
# @auto-dismiss: Job will dismiss itself when CONCLUDED, moving to the NULL
# state and disappearing from the query list. (since 2.12)
#
# @error: Error information if the job did not complete successfully.
# Not set if the job completed successfully. (since 2.12.1)
#
# Since: 1.1
##
{ 'struct': 'BlockJobInfo',
'data': {'type': 'str', 'device': 'str', 'len': 'int',
'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int',
'io-status': 'BlockDeviceIoStatus', 'ready': 'bool',
'status': 'JobStatus',
'auto-finalize': 'bool', 'auto-dismiss': 'bool',
'*error': 'str' } }
##
# @query-block-jobs:
#
# Return information about long-running block device operations.
#
# Returns: a list of @BlockJobInfo for each active block job
#
# Since: 1.1
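#
# Example (illustrative; the values show a hypothetical image streaming
# job that is still in progress):
#
# -> { "execute": "query-block-jobs" }
# <- { "return": [ { "type": "stream",
#                    "device": "job0",
#                    "len": 10737418240,
#                    "offset": 536870912,
#                    "busy": true,
#                    "paused": false,
#                    "speed": 0,
#                    "io-status": "ok",
#                    "ready": false,
#                    "status": "running",
#                    "auto-finalize": true,
#                    "auto-dismiss": true } ] }
#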
##
{ 'command': 'query-block-jobs', 'returns': ['BlockJobInfo'] }
##
# @block_resize:
#
# Resize a block image while a guest is running.
#
# Either @device or @node-name must be set but not both.
#
# @device: the name of the device to get the image resized
#
# @node-name: graph node name to get the image resized (Since 2.0)
#
# @size: new image size in bytes
#
# Returns: - nothing on success
# - If @device is not a valid block device, DeviceNotFound
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "block_resize",
# "arguments": { "device": "scratch", "size": 1073741824 } }
# <- { "return": {} }
#
##
{ 'command': 'block_resize',
'data': { '*device': 'str',
'*node-name': 'str',
'size': 'int' },
'coroutine': true }
##
# @NewImageMode:
#
# An enumeration that tells QEMU how to set the backing file path in
# a new image file.
#
# @existing: QEMU should look for an existing image file.
#
# @absolute-paths: QEMU should create a new image with absolute paths
# for the backing file. If there is no backing file available, the new
# image will not be backed either.
#
# Since: 1.1
##
{ 'enum': 'NewImageMode',
'data': [ 'existing', 'absolute-paths' ] }
##
# @BlockdevSnapshotSync:
#
# Either @device or @node-name must be set but not both.
#
# @device: the name of the device to take a snapshot of.
#
# @node-name: graph node name to generate the snapshot from (Since 2.0)
#
# @snapshot-file: the target of the new overlay image. If the file
# exists, or if it is a device, the overlay will be created in the
# existing file/device. Otherwise, a new file will be created.
#
# @snapshot-node-name: the graph node name of the new image (Since 2.0)
#
# @format: the format of the overlay image, default is 'qcow2'.
#
# @mode: whether and how QEMU should create a new image, default is
# 'absolute-paths'.
##
{ 'struct': 'BlockdevSnapshotSync',
'data': { '*device': 'str', '*node-name': 'str',
'snapshot-file': 'str', '*snapshot-node-name': 'str',
'*format': 'str', '*mode': 'NewImageMode' } }
##
# @BlockdevSnapshot:
#
# @node: device or node name that will have a snapshot taken.
#
# @overlay: reference to the existing block device that will become
# the overlay of @node, as part of taking the snapshot.
# It must not have a current backing file (this can be
# achieved by passing "backing": null to blockdev-add).
#
# Since: 2.5
##
{ 'struct': 'BlockdevSnapshot',
'data': { 'node': 'str', 'overlay': 'str' } }
##
# @BackupPerf:
#
# Optional parameters for backup. These parameters don't affect
# functionality, but may significantly affect performance.
#
# @use-copy-range: Use copy offloading. Default false.
#
# @max-workers: Maximum number of parallel requests for the sustained background
# copying process. Doesn't influence copy-before-write operations.
# Default 64.
#
# @max-chunk: Maximum request length for the sustained background copying
# process. Doesn't influence copy-before-write operations.
# 0 means unlimited. If max-chunk is non-zero then it should not be
# less than the job cluster size, which is calculated as the maximum of
# the target image cluster size and 64k. Default 0.
#
# Since: 6.0
##
{ 'struct': 'BackupPerf',
'data': { '*use-copy-range': 'bool',
'*max-workers': 'int', '*max-chunk': 'int64' } }
##
# @BackupCommon:
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node which should be copied.
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, from a
# dirty bitmap, or only new I/O).
#
# @speed: the maximum speed, in bytes per second. The default is 0,
# for unlimited.
#
# @bitmap: The name of a dirty bitmap to use.
# Must be present if sync is "bitmap" or "incremental".
# Can be present if sync is "full" or "top".
# Must not be present otherwise.
# (Since 2.4 (drive-backup), 3.1 (blockdev-backup))
#
# @bitmap-mode: Specifies the type of data the bitmap should contain after
# the operation concludes.
# Must be present if a bitmap was provided,
# Must NOT be present otherwise. (Since 4.2)
#
# @compress: true to compress data, if the target format supports it.
# (default: false) (since 2.8)
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before
# making any block graph changes.
# When true, this job will automatically
# perform its abort or commit actions.
# Defaults to true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 2.12)
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the backup job inserts into the graph
# above the node specified by @device. If this option is not given,
# a node name is autogenerated. (Since: 4.2)
#
# @x-perf: Performance options. (Since 6.0)
#
# Note: @on-source-error and @on-target-error only affect background
# I/O. If an error occurs during a guest write request, the device's
# rerror/werror actions will be used.
#
# Since: 4.2
##
{ 'struct': 'BackupCommon',
'data': { '*job-id': 'str', 'device': 'str',
'sync': 'MirrorSyncMode', '*speed': 'int',
'*bitmap': 'str', '*bitmap-mode': 'BitmapSyncMode',
'*compress': 'bool',
'*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool',
'*filter-node-name': 'str', '*x-perf': 'BackupPerf' } }
##
# @DriveBackup:
#
# @target: the target of the new image. If the file exists, or if it
# is a device, the existing file/device will be used as the new
# destination. If it does not exist, a new file will be created.
#
# @format: the format of the new destination, default is to
# probe if @mode is 'existing', else the format of the source
#
# @mode: whether and how QEMU should create a new image, default is
# 'absolute-paths'.
#
# Since: 1.6
##
{ 'struct': 'DriveBackup',
'base': 'BackupCommon',
'data': { 'target': 'str',
'*format': 'str',
'*mode': 'NewImageMode' } }
##
# @BlockdevBackup:
#
# @target: the device name or node-name of the backup target node.
#
# Since: 2.3
##
{ 'struct': 'BlockdevBackup',
'base': 'BackupCommon',
'data': { 'target': 'str' } }
##
# @blockdev-snapshot-sync:
#
# Takes a synchronous snapshot of a block device.
#
# For the arguments, see the documentation of BlockdevSnapshotSync.
#
# Returns: - nothing on success
# - If @device is not a valid block device, DeviceNotFound
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "blockdev-snapshot-sync",
# "arguments": { "device": "ide-hd0",
# "snapshot-file":
# "/some/place/my-image",
# "format": "qcow2" } }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-snapshot-sync',
'data': 'BlockdevSnapshotSync' }
##
# @blockdev-snapshot:
#
# Takes a snapshot of a block device.
#
# Take a snapshot, by installing 'node' as the backing image of
# 'overlay'. Additionally, if 'node' is associated with a block
# device, the block device changes to using 'overlay' as its new active
# image.
#
# For the arguments, see the documentation of BlockdevSnapshot.
#
# Features:
# @allow-write-only-overlay: If present, the check whether this operation is safe
# was relaxed so that it can be used to change
# backing file of a destination of a blockdev-mirror.
# (since 5.0)
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "blockdev-add",
# "arguments": { "driver": "qcow2",
# "node-name": "node1534",
# "file": { "driver": "file",
# "filename": "hd1.qcow2" },
# "backing": null } }
#
# <- { "return": {} }
#
# -> { "execute": "blockdev-snapshot",
# "arguments": { "node": "ide-hd0",
# "overlay": "node1534" } }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-snapshot',
'data': 'BlockdevSnapshot',
'features': [ 'allow-write-only-overlay' ] }
##
# @change-backing-file:
#
# Change the backing file in the image file metadata. This does not
# cause QEMU to reopen the image file to reparse the backing filename
# (it may, however, perform a reopen to change permissions from
# r/o -> r/w -> r/o, if needed). The new backing file string is written
# into the image file metadata, and the QEMU internal strings are
# updated.
#
# @image-node-name: The name of the block driver state node of the
# image to modify. The "device" argument is used
# to verify "image-node-name" is in the chain
# described by "device".
#
# @device: The device name or node-name of the root node that owns
# image-node-name.
#
# @backing-file: The string to write as the backing file. This
# string is not validated, so care should be taken
# when specifying the string or the image chain may
# not be able to be reopened again.
#
# Returns: - Nothing on success
# - If "device" does not exist or cannot be determined, DeviceNotFound
#
# Since: 2.1
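#
# Example (illustrative; the node names and the new backing file path
# are hypothetical):
#
# -> { "execute": "change-backing-file",
#      "arguments": { "device": "virtio0",
#                     "image-node-name": "overlay1",
#                     "backing-file": "/some/place/new-base.qcow2" } }
# <- { "return": {} }
#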
##
{ 'command': 'change-backing-file',
'data': { 'device': 'str', 'image-node-name': 'str',
'backing-file': 'str' } }
##
# @block-commit:
#
# Live commit of data from overlay image nodes into backing nodes - i.e.,
# writes data between 'top' and 'base' into 'base'.
#
# If top == base, that is an error.
# If top has no overlays on top of it, or if it is in use by a writer,
# the job will not be completed by itself. The user needs to complete
# the job with the block-job-complete command after getting the ready
# event. (Since 2.0)
#
# If the base image is smaller than top, then the base image will be
# resized to be the same size as top. If top is smaller than the base
# image, the base will not be truncated. If you want the base image
# size to match the size of the smaller top, you can safely truncate
# it yourself once the commit operation successfully completes.
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node
#
# @base-node: The node name of the backing image to write data into.
# If not specified, this is the deepest backing image.
# (since: 3.1)
#
# @base: Same as @base-node, except that it is a file name rather than a node
# name. This must be the exact filename string that was used to open the
# node; other strings, even if addressing the same file, are not
# accepted
#
# @top-node: The node name of the backing image within the image chain
# which contains the topmost data to be committed down. If
# not specified, this is the active layer. (since: 3.1)
#
# @top: Same as @top-node, except that it is a file name rather than a node
# name. This must be the exact filename string that was used to open the
# node; other strings, even if addressing the same file, are not
# accepted
#
# @backing-file: The backing file string to write into the overlay
# image of 'top'. If 'top' does not have an overlay
# image, or if 'top' is in use by a writer, specifying
# a backing file string is an error.
#
# This filename is not validated. If a pathname string
# is such that it cannot be resolved by QEMU, that
# means that subsequent QMP or HMP commands must use
# node-names for the image in question, as filename
# lookup methods will fail.
#
# If not specified, QEMU will automatically determine
# the backing file string to use, or error out if
# there is no obvious choice. Care should be taken
# when specifying the string, to specify a valid
# filename or protocol.
# (Since 2.1)
#
# @speed: the maximum speed, in bytes per second
#
# @on-error: the action to take on an error. 'ignore' means that the request
# should be retried. (default: report; Since: 5.0)
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the commit job inserts into the graph
# above @top. If this option is not given, a node name is
# autogenerated. (Since: 2.9)
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before
# making any block graph changes.
# When true, this job will automatically
# perform its abort or commit actions.
# Defaults to true. (Since 3.1)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 3.1)
#
# Features:
# @deprecated: Members @base and @top are deprecated. Use @base-node
# and @top-node instead.
#
# Returns: - Nothing on success
# - If @device does not exist, DeviceNotFound
# - Any other error returns a GenericError.
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "block-commit",
# "arguments": { "device": "virtio0",
# "top": "/tmp/snap1.qcow2" } }
# <- { "return": {} }
#
##
{ 'command': 'block-commit',
'data': { '*job-id': 'str', 'device': 'str', '*base-node': 'str',
'*base': { 'type': 'str', 'features': [ 'deprecated' ] },
'*top-node': 'str',
'*top': { 'type': 'str', 'features': [ 'deprecated' ] },
'*backing-file': 'str', '*speed': 'int',
'*on-error': 'BlockdevOnError',
'*filter-node-name': 'str',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @drive-backup:
#
# Start a point-in-time copy of a block device to a new destination. The
# status of ongoing drive-backup operations can be checked with
# query-block-jobs where the BlockJobInfo.type field has the value 'backup'.
# The operation can be stopped before it has completed using the
# block-job-cancel command.
#
# Returns: - nothing on success
# - If @device is not a valid block device, GenericError
#
# Since: 1.6
#
# Example:
#
# -> { "execute": "drive-backup",
# "arguments": { "device": "drive0",
# "sync": "full",
# "target": "backup.img" } }
# <- { "return": {} }
#
##
{ 'command': 'drive-backup', 'boxed': true,
'data': 'DriveBackup' }
##
# @blockdev-backup:
#
# Start a point-in-time copy of a block device to a new destination. The
# status of ongoing blockdev-backup operations can be checked with
# query-block-jobs where the BlockJobInfo.type field has the value 'backup'.
# The operation can be stopped before it has completed using the
# block-job-cancel command.
#
# Returns: - nothing on success
# - If @device is not a valid block device, DeviceNotFound
#
# Since: 2.3
#
# Example:
# -> { "execute": "blockdev-backup",
# "arguments": { "device": "src-id",
# "sync": "full",
# "target": "tgt-id" } }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-backup', 'boxed': true,
'data': 'BlockdevBackup' }
##
# @query-named-block-nodes:
#
# Get the named block driver list
#
# @flat: Omit the nested data about backing image ("backing-image" key) if true.
# Default is false (Since 5.0)
#
# Returns: the list of BlockDeviceInfo
#
# Since: 2.0
#
# Example:
#
# -> { "execute": "query-named-block-nodes" }
# <- { "return": [ { "ro":false,
# "drv":"qcow2",
# "encrypted":false,
# "file":"disks/test.qcow2",
# "node-name": "my-node",
# "backing_file_depth":1,
# "bps":1000000,
# "bps_rd":0,
# "bps_wr":0,
# "iops":1000000,
# "iops_rd":0,
# "iops_wr":0,
# "bps_max": 8000000,
# "bps_rd_max": 0,
# "bps_wr_max": 0,
# "iops_max": 0,
# "iops_rd_max": 0,
# "iops_wr_max": 0,
# "iops_size": 0,
# "write_threshold": 0,
# "image":{
# "filename":"disks/test.qcow2",
# "format":"qcow2",
# "virtual-size":2048000,
# "backing_file":"base.qcow2",
# "full-backing-filename":"disks/base.qcow2",
# "backing-filename-format":"qcow2",
# "snapshots":[
# {
# "id": "1",
# "name": "snapshot1",
# "vm-state-size": 0,
# "date-sec": 10000200,
# "date-nsec": 12,
# "vm-clock-sec": 206,
# "vm-clock-nsec": 30
# }
# ],
# "backing-image":{
# "filename":"disks/base.qcow2",
# "format":"qcow2",
# "virtual-size":2048000
# }
# } } ] }
#
##
{ 'command': 'query-named-block-nodes',
'returns': [ 'BlockDeviceInfo' ],
'data': { '*flat': 'bool' } }
##
# @XDbgBlockGraphNodeType:
#
# @block-backend: corresponds to BlockBackend
#
# @block-job: corresponds to BlockJob
#
# @block-driver: corresponds to BlockDriverState
#
# Since: 4.0
##
{ 'enum': 'XDbgBlockGraphNodeType',
'data': [ 'block-backend', 'block-job', 'block-driver' ] }
##
# @XDbgBlockGraphNode:
#
# @id: Block graph node identifier. This @id is generated only for
# x-debug-query-block-graph and does not relate to any other identifiers in
# QEMU.
#
# @type: Type of graph node. Can be one of block-backend, block-job or
# block-driver.
#
# @name: Human readable name of the node. Corresponds to node-name for
# block-driver nodes; is not guaranteed to be unique in the whole
# graph (with block-jobs and block-backends).
#
# Since: 4.0
##
{ 'struct': 'XDbgBlockGraphNode',
'data': { 'id': 'uint64', 'type': 'XDbgBlockGraphNodeType', 'name': 'str' } }
##
# @BlockPermission:
#
# Enum of base block permissions.
#
# @consistent-read: A user that has the "permission" of consistent reads is
# guaranteed that their view of the contents of the block
# device is complete and self-consistent, representing the
# contents of a disk at a specific point.
# For most block devices (including their backing files) this
# is true, but the property cannot be maintained in a few
# situations like for intermediate nodes of a commit block
# job.
#
# @write: This permission is required to change the visible disk contents.
#
# @write-unchanged: This permission (which is weaker than BLK_PERM_WRITE) is
# both enough and required for writes to the block node when
# the caller promises that the visible disk content doesn't
# change.
# As the BLK_PERM_WRITE permission is strictly stronger,
# either is sufficient to perform an unchanging write.
#
# @resize: This permission is required to change the size of a block node.
#
# @graph-mod: This permission is required to change the node that this
# BdrvChild points to.
#
# Since: 4.0
##
{ 'enum': 'BlockPermission',
'data': [ 'consistent-read', 'write', 'write-unchanged', 'resize',
'graph-mod' ] }
##
# @XDbgBlockGraphEdge:
#
# Block Graph edge description for x-debug-query-block-graph.
#
# @parent: parent id
#
# @child: child id
#
# @name: name of the relation (examples are 'file' and 'backing')
#
# @perm: granted permissions for the parent operating on the child
#
# @shared-perm: permissions that can still be granted to other users of the
# child while it is still attached to this parent
#
# Since: 4.0
##
{ 'struct': 'XDbgBlockGraphEdge',
'data': { 'parent': 'uint64', 'child': 'uint64',
'name': 'str', 'perm': [ 'BlockPermission' ],
'shared-perm': [ 'BlockPermission' ] } }
##
# @XDbgBlockGraph:
#
# Block Graph - list of nodes and list of edges.
#
# Since: 4.0
##
{ 'struct': 'XDbgBlockGraph',
'data': { 'nodes': ['XDbgBlockGraphNode'], 'edges': ['XDbgBlockGraphEdge'] } }
##
# @x-debug-query-block-graph:
#
# Get the block graph.
#
# Since: 4.0
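#
# Example (illustrative; a minimal graph with one BlockBackend attached
# to one BlockDriverState node, with hypothetical names and ids):
#
# -> { "execute": "x-debug-query-block-graph" }
# <- { "return": { "nodes": [ { "id": 1, "type": "block-backend",
#                               "name": "ide0-hd0" },
#                             { "id": 2, "type": "block-driver",
#                               "name": "node0" } ],
#                  "edges": [ { "parent": 1, "child": 2, "name": "root",
#                               "perm": [ "consistent-read", "write" ],
#                               "shared-perm": [ "write-unchanged" ] } ] } }
#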
##
{ 'command': 'x-debug-query-block-graph', 'returns': 'XDbgBlockGraph' }
##
# @drive-mirror:
#
# Start mirroring a block device's writes to a new destination. target
# specifies the target of the new image. If the file exists, or if it
# is a device, it will be used as the new destination for writes. If
# it does not exist, a new file will be created. format specifies the
# format of the mirror image, default is to probe if mode='existing',
# else the format of the source.
#
# Returns: - nothing on success
# - If @device is not a valid block device, GenericError
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "drive-mirror",
# "arguments": { "device": "ide-hd0",
# "target": "/some/place/my-image",
# "sync": "full",
# "format": "qcow2" } }
# <- { "return": {} }
#
##
{ 'command': 'drive-mirror', 'boxed': true,
'data': 'DriveMirror' }
##
# @DriveMirror:
#
# A set of parameters describing drive mirror setup.
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node whose writes should be
# mirrored.
#
# @target: the target of the new image. If the file exists, or if it
# is a device, the existing file/device will be used as the new
# destination. If it does not exist, a new file will be created.
#
# @format: the format of the new destination, default is to
# probe if @mode is 'existing', else the format of the source
#
# @node-name: the new block driver state node name in the graph
# (Since 2.1)
#
# @replaces: with sync=full, the graph node name to be replaced by the
# new image when a whole image copy is done. This can be used to repair
# broken Quorum files. By default, @device is replaced, although
# implicitly created filters on it are kept. (Since 2.1)
#
# @mode: whether and how QEMU should create a new image, default is
# 'absolute-paths'.
#
# @speed: the maximum speed, in bytes per second
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, or
# only new I/O).
#
# @granularity: granularity of the dirty bitmap, default is 64K
# if the image format doesn't have clusters, 4K if the clusters
# are smaller than that, else the cluster size. Must be a
# power of 2 between 512 and 64M (since 1.4).
#
# @buf-size: maximum amount of data in flight from source to
# target (since 1.4).
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @unmap: Whether to try to unmap target sectors where the source has
# only zeroes. If true and the target's unallocated sectors read as
# zero, target image sectors will be unmapped; otherwise, zeroes will
# be written. Both result in identical contents.
# Default is true. (Since 2.4)
#
# @copy-mode: when to copy data to the destination; defaults to 'background'
# (Since: 3.0)
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before
# making any block graph changes.
# When true, this job will automatically
# perform its abort or commit actions.
# Defaults to true. (Since 3.1)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 3.1)
#
# Since: 1.3
##
{ 'struct': 'DriveMirror',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
'*format': 'str', '*node-name': 'str', '*replaces': 'str',
'sync': 'MirrorSyncMode', '*mode': 'NewImageMode',
'*speed': 'int', '*granularity': 'uint32',
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*unmap': 'bool', '*copy-mode': 'MirrorCopyMode',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @BlockDirtyBitmap:
#
# @node: name of device/node which the bitmap is tracking
#
# @name: name of the dirty bitmap
#
# Since: 2.4
##
{ 'struct': 'BlockDirtyBitmap',
'data': { 'node': 'str', 'name': 'str' } }
##
# @BlockDirtyBitmapAdd:
#
# @node: name of device/node which the bitmap is tracking
#
# @name: name of the dirty bitmap (must be less than 1024 bytes)
#
# @granularity: the bitmap granularity, default is 64k for
# block-dirty-bitmap-add
#
# @persistent: the bitmap is persistent, i.e. it will be saved to the
# corresponding block device image file on its close. For now only
# Qcow2 disks support persistent bitmaps. Default is false for
# block-dirty-bitmap-add. (Since: 2.10)
#
# @disabled: the bitmap is created in the disabled state, which means that
# it will not track drive changes. The bitmap may be enabled with
# block-dirty-bitmap-enable. Default is false. (Since: 4.0)
#
# Since: 2.4
##
{ 'struct': 'BlockDirtyBitmapAdd',
'data': { 'node': 'str', 'name': 'str', '*granularity': 'uint32',
'*persistent': 'bool', '*disabled': 'bool' } }
##
# @BlockDirtyBitmapMergeSource:
#
# @local: name of the bitmap, attached to the same node as target bitmap.
#
# @external: bitmap with specified node
#
# Since: 4.1
##
{ 'alternate': 'BlockDirtyBitmapMergeSource',
'data': { 'local': 'str',
'external': 'BlockDirtyBitmap' } }
##
# @BlockDirtyBitmapMerge:
#
# @node: name of device/node which the @target bitmap is tracking
#
# @target: name of the destination dirty bitmap
#
# @bitmaps: name(s) of the source dirty bitmap(s) at @node and/or fully
# specified BlockDirtyBitmap elements. The latter are supported
# since 4.1.
#
# Since: 4.0
##
{ 'struct': 'BlockDirtyBitmapMerge',
'data': { 'node': 'str', 'target': 'str',
'bitmaps': ['BlockDirtyBitmapMergeSource'] } }
##
# @block-dirty-bitmap-add:
#
# Create a dirty bitmap with a name on the node, and start tracking the writes.
#
# Returns: - nothing on success
# - If @node is not a valid block device or node, DeviceNotFound
# - If @name is already taken, GenericError with an explanation
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-add",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-add',
'data': 'BlockDirtyBitmapAdd' }
##
# @block-dirty-bitmap-remove:
#
# Stop write tracking and remove the dirty bitmap that was created
# with block-dirty-bitmap-add. If the bitmap is persistent, remove it from its
# storage too.
#
# Returns: - nothing on success
# - If @node is not a valid block device or node, DeviceNotFound
# - If @name is not found, GenericError with an explanation
# - if @name is frozen by an operation, GenericError
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-remove",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-remove',
'data': 'BlockDirtyBitmap' }
##
# @block-dirty-bitmap-clear:
#
# Clear (reset) a dirty bitmap on the device, so that an incremental
# backup from this point in time forward will only backup clusters
# modified after this clear operation.
#
# Returns: - nothing on success
# - If @node is not a valid block device, DeviceNotFound
# - If @name is not found, GenericError with an explanation
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-clear",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-clear',
'data': 'BlockDirtyBitmap' }
##
# @block-dirty-bitmap-enable:
#
# Enables a dirty bitmap so that it will begin tracking disk changes.
#
# Returns: - nothing on success
# - If @node is not a valid block device, DeviceNotFound
# - If @name is not found, GenericError with an explanation
#
# Since: 4.0
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-enable",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-enable',
'data': 'BlockDirtyBitmap' }
##
# @block-dirty-bitmap-disable:
#
# Disables a dirty bitmap so that it will stop tracking disk changes.
#
# Returns: - nothing on success
# - If @node is not a valid block device, DeviceNotFound
# - If @name is not found, GenericError with an explanation
#
# Since: 4.0
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-disable",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-disable',
'data': 'BlockDirtyBitmap' }
##
# @block-dirty-bitmap-merge:
#
# Merge dirty bitmaps listed in @bitmaps to the @target dirty bitmap.
# Dirty bitmaps in @bitmaps will be unchanged, except when one of them
# is also the @target bitmap. Any bits already set in @target will still be
# set after the merge, i.e., this operation does not clear the target.
# On error, @target is unchanged.
#
# The resulting bitmap will count as dirty any clusters that were dirty in any
# of the source bitmaps. This can be used to achieve backup checkpoints, or in
# simpler usages, to copy bitmaps.
#
# Returns: - nothing on success
# - If @node is not a valid block device, DeviceNotFound
# - If any bitmap in @bitmaps or @target is not found, GenericError
# - If any of the bitmaps have different sizes or granularities,
# GenericError
#
# Since: 4.0
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-merge",
# "arguments": { "node": "drive0", "target": "bitmap0",
# "bitmaps": ["bitmap1"] } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-merge',
'data': 'BlockDirtyBitmapMerge' }
##
# @BlockDirtyBitmapSha256:
#
# SHA256 hash of dirty bitmap data
#
# @sha256: ASCII representation of SHA256 bitmap hash
#
# Since: 2.10
##
{ 'struct': 'BlockDirtyBitmapSha256',
'data': {'sha256': 'str'} }
##
# @x-debug-block-dirty-bitmap-sha256:
#
# Get bitmap SHA256.
#
# Returns: - BlockDirtyBitmapSha256 on success
# - If @node is not a valid block device, DeviceNotFound
# - If @name is not found or if hashing has failed, GenericError with an
# explanation
#
# Since: 2.10
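#
# Example (illustrative; the returned hash below is a placeholder, not
# a real digest):
#
# -> { "execute": "x-debug-block-dirty-bitmap-sha256",
#      "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": { "sha256":
#      "0000000000000000000000000000000000000000000000000000000000000000" } }
#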
##
{ 'command': 'x-debug-block-dirty-bitmap-sha256',
'data': 'BlockDirtyBitmap', 'returns': 'BlockDirtyBitmapSha256' }
##
# @blockdev-mirror:
#
# Start mirroring a block device's writes to a new destination.
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: The device name or node-name of a root node whose writes should be
# mirrored.
#
# @target: the id or node-name of the block device to mirror to. This mustn't be
# attached to a guest.
#
# @replaces: with sync=full, the graph node name to be replaced by the
# new image when a whole image copy is done. This can be used to repair
# broken Quorum files. By default, @device is replaced, although
# implicitly created filters on it are kept.
#
# @speed: the maximum speed, in bytes per second
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, or
# only new I/O).
#
# @granularity: granularity of the dirty bitmap, default is 64K
# if the image format doesn't have clusters, 4K if the clusters
# are smaller than that, else the cluster size. Must be a
# power of 2 between 512 and 64M
#
# @buf-size: maximum amount of data in flight from source to
# target
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the mirror job inserts into the graph
# above @device. If this option is not given, a node name is
# autogenerated. (Since: 2.9)
#
# @copy-mode: when to copy data to the destination; defaults to 'background'
# (Since: 3.0)
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before
# making any block graph changes.
# When true, this job will automatically
# perform its abort or commit actions.
# Defaults to true. (Since 3.1)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 3.1)
#
# Returns: nothing on success.
#
# Since: 2.6
#
# Example:
#
# -> { "execute": "blockdev-mirror",
# "arguments": { "device": "ide-hd0",
# "target": "target0",
# "sync": "full" } }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-mirror',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
'*replaces': 'str',
'sync': 'MirrorSyncMode',
'*speed': 'int', '*granularity': 'uint32',
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*filter-node-name': 'str',
'*copy-mode': 'MirrorCopyMode',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @BlockIOThrottle:
#
# A set of parameters describing block throttling.
#
# @device: Block device name
#
# @id: The name or QOM path of the guest device (since: 2.8)
#
# @bps: total throughput limit in bytes per second
#
# @bps_rd: read throughput limit in bytes per second
#
# @bps_wr: write throughput limit in bytes per second
#
# @iops: total I/O operations per second
#
# @iops_rd: read I/O operations per second
#
# @iops_wr: write I/O operations per second
#
# @bps_max: total throughput limit during bursts,
# in bytes (Since 1.7)
#
# @bps_rd_max: read throughput limit during bursts,
# in bytes (Since 1.7)
#
# @bps_wr_max: write throughput limit during bursts,
# in bytes (Since 1.7)
#
# @iops_max: total I/O operations per second during bursts,
# in bytes (Since 1.7)
#
# @iops_rd_max: read I/O operations per second during bursts,
# in bytes (Since 1.7)
#
# @iops_wr_max: write I/O operations per second during bursts,
# in bytes (Since 1.7)
#
# @bps_max_length: maximum length of the @bps_max burst
# period, in seconds. It must only
# be set if @bps_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @bps_rd_max_length: maximum length of the @bps_rd_max
# burst period, in seconds. It must only
# be set if @bps_rd_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @bps_wr_max_length: maximum length of the @bps_wr_max
# burst period, in seconds. It must only
# be set if @bps_wr_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @iops_max_length: maximum length of the @iops burst
# period, in seconds. It must only
# be set if @iops_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @iops_rd_max_length: maximum length of the @iops_rd_max
# burst period, in seconds. It must only
# be set if @iops_rd_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @iops_wr_max_length: maximum length of the @iops_wr_max
# burst period, in seconds. It must only
# be set if @iops_wr_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @iops_size: an I/O size in bytes (Since 1.7)
#
# @group: throttle group name (Since 2.4)
#
# Features:
# @deprecated: Member @device is deprecated. Use @id instead.
#
# Since: 1.1
##
{ 'struct': 'BlockIOThrottle',
'data': { '*device': { 'type': 'str', 'features': [ 'deprecated' ] },
'*id': 'str', 'bps': 'int', 'bps_rd': 'int',
'bps_wr': 'int', 'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int',
'*bps_max': 'int', '*bps_rd_max': 'int',
'*bps_wr_max': 'int', '*iops_max': 'int',
'*iops_rd_max': 'int', '*iops_wr_max': 'int',
'*bps_max_length': 'int', '*bps_rd_max_length': 'int',
'*bps_wr_max_length': 'int', '*iops_max_length': 'int',
'*iops_rd_max_length': 'int', '*iops_wr_max_length': 'int',
'*iops_size': 'int', '*group': 'str' } }
##
# @ThrottleLimits:
#
# Limit parameters for throttling.
# Since some limit combinations are illegal, limits should always be set in one
# transaction. All fields are optional. When setting limits, if a field is
# missing the current value is not changed.
#
# @iops-total: limit total I/O operations per second
# @iops-total-max: I/O operations burst
# @iops-total-max-length: length of the iops-total-max burst period, in seconds
# It must only be set if @iops-total-max is set as well.
# @iops-read: limit read operations per second
# @iops-read-max: I/O operations read burst
# @iops-read-max-length: length of the iops-read-max burst period, in seconds
# It must only be set if @iops-read-max is set as well.
# @iops-write: limit write operations per second
# @iops-write-max: I/O operations write burst
# @iops-write-max-length: length of the iops-write-max burst period, in seconds
# It must only be set if @iops-write-max is set as well.
# @bps-total: limit total bytes per second
# @bps-total-max: total bytes burst
# @bps-total-max-length: length of the bps-total-max burst period, in seconds.
# It must only be set if @bps-total-max is set as well.
# @bps-read: limit read bytes per second
# @bps-read-max: total bytes read burst
# @bps-read-max-length: length of the bps-read-max burst period, in seconds
# It must only be set if @bps-read-max is set as well.
# @bps-write: limit write bytes per second
# @bps-write-max: total bytes write burst
# @bps-write-max-length: length of the bps-write-max burst period, in seconds
# It must only be set if @bps-write-max is set as well.
# @iops-size: when limiting by iops max size of an I/O in bytes
#
# Since: 2.11
##
{ 'struct': 'ThrottleLimits',
'data': { '*iops-total' : 'int', '*iops-total-max' : 'int',
'*iops-total-max-length' : 'int', '*iops-read' : 'int',
'*iops-read-max' : 'int', '*iops-read-max-length' : 'int',
'*iops-write' : 'int', '*iops-write-max' : 'int',
'*iops-write-max-length' : 'int', '*bps-total' : 'int',
'*bps-total-max' : 'int', '*bps-total-max-length' : 'int',
'*bps-read' : 'int', '*bps-read-max' : 'int',
'*bps-read-max-length' : 'int', '*bps-write' : 'int',
'*bps-write-max' : 'int', '*bps-write-max-length' : 'int',
'*iops-size' : 'int' } }
##
# @ThrottleGroupProperties:
#
# Properties for throttle-group objects.
#
# The options starting with x- are aliases for the same key without x- in
# the @limits object. As indicated by the x- prefix, this is not a stable
# interface and may be removed or changed incompatibly in the future. Use
# @limits for a supported stable interface.
#
# @limits: limits to apply for this throttle group
#
# Since: 2.11
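#
# Example (illustrative sketch; "group0" is a hypothetical object id, and
# the exact object-add argument wrapping may vary between QEMU versions):
#
# -> { "execute": "object-add",
#      "arguments": { "qom-type": "throttle-group", "id": "group0",
#                     "limits": { "iops-total": 1000,
#                                 "bps-write": 2097152 } } }
# <- { "return": {} }
#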
##
{ 'struct': 'ThrottleGroupProperties',
'data': { '*limits': 'ThrottleLimits',
'*x-iops-total' : 'int', '*x-iops-total-max' : 'int',
'*x-iops-total-max-length' : 'int', '*x-iops-read' : 'int',
'*x-iops-read-max' : 'int', '*x-iops-read-max-length' : 'int',
'*x-iops-write' : 'int', '*x-iops-write-max' : 'int',
'*x-iops-write-max-length' : 'int', '*x-bps-total' : 'int',
'*x-bps-total-max' : 'int', '*x-bps-total-max-length' : 'int',
'*x-bps-read' : 'int', '*x-bps-read-max' : 'int',
'*x-bps-read-max-length' : 'int', '*x-bps-write' : 'int',
'*x-bps-write-max' : 'int', '*x-bps-write-max-length' : 'int',
'*x-iops-size' : 'int' } }
##
# @block-stream:
#
# Copy data from a backing file into a block device.
#
# The block streaming operation is performed in the background until the entire
# backing file has been copied. This command returns immediately once streaming
# has started. The status of ongoing block streaming operations can be checked
# with query-block-jobs. The operation can be stopped before it has completed
# using the block-job-cancel command.
#
# The node that receives the data is called the top image. It can be located
# anywhere in the chain (but always above the base image; see below) and can be
# specified using its device or node name. Earlier qemu versions only allowed
# 'device' to name the top level node; presence of the 'base-node' parameter
# during introspection can be used as a witness of the enhanced semantics
# of 'device'.
#
# If a base file is specified then sectors are not copied from that base file and
# its backing chain. This can be used to stream a subset of the backing file
# chain instead of flattening the entire image.
# When streaming completes the image file will have the base file as its backing
# file, unless that node was changed while the job was running. In that case,
# base's parent's backing (or filtered, whichever exists) child (i.e., base at
# the beginning of the job) will be the new backing file.
#
# On successful completion the image file is updated to drop the backing file
# and the BLOCK_JOB_COMPLETED event is emitted.
#
# In case @device is a filter node, block-stream modifies the first non-filter
# overlay node below it to point to the new backing node instead of modifying
# @device itself.
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device or node name of the top image
#
# @base: the common backing file name.
# It cannot be set if @base-node or @bottom is also set.
#
# @base-node: the node name of the backing file.
# It cannot be set if @base or @bottom is also set. (Since 2.8)
#
# @bottom: the last node in the chain that should be streamed into
# top. It cannot be set if @base or @base-node is also set.
#          It cannot be a filter node. (Since 6.0)
#
# @backing-file: The backing file string to write into the top
# image. This filename is not validated.
#
# If a pathname string is such that it cannot be
# resolved by QEMU, that means that subsequent QMP or
# HMP commands must use node-names for the image in
# question, as filename lookup methods will fail.
#
# If not specified, QEMU will automatically determine
# the backing file string to use, or error out if there
# is no obvious choice. Care should be taken when
# specifying the string, to specify a valid filename or
# protocol.
# (Since 2.1)
#
# @speed: the maximum speed, in bytes per second
#
# @on-error: the action to take on an error (default report).
# 'stop' and 'enospc' can only be used if the block device
# supports io-status (see BlockInfo). Since 1.3.
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the stream job inserts into the graph
# above @device. If this option is not given, a node name is
# autogenerated. (Since: 6.0)
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before
# making any block graph changes.
# When true, this job will automatically
# perform its abort or commit actions.
# Defaults to true. (Since 3.1)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 3.1)
#
# Returns: - Nothing on success.
# - If @device does not exist, DeviceNotFound.
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "block-stream",
# "arguments": { "device": "virtio0",
# "base": "/tmp/master.qcow2" } }
# <- { "return": {} }
#
##
{ 'command': 'block-stream',
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str',
'*base-node': 'str', '*backing-file': 'str', '*bottom': 'str',
'*speed': 'int', '*on-error': 'BlockdevOnError',
'*filter-node-name': 'str',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @block-job-set-speed:
#
# Set maximum speed for a background block operation.
#
# This command can only be issued when there is an active block job.
#
# Throttling can be disabled by setting the speed to 0.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# @speed: the maximum speed, in bytes per second, or 0 for unlimited.
# Defaults to 0.
#
# Returns: - Nothing on success
# - If no background operation is active on this device, DeviceNotActive
#
# Since: 1.1
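#
# Example (illustrative; "job0" is a hypothetical job identifier and the
# speed shown is 10 MiB/s):
#
# -> { "execute": "block-job-set-speed",
#      "arguments": { "device": "job0", "speed": 10485760 } }
# <- { "return": {} }
#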
##
{ 'command': 'block-job-set-speed',
'data': { 'device': 'str', 'speed': 'int' } }
##
# @block-job-cancel:
#
# Stop an active background block operation.
#
# This command returns immediately after marking the active background block
# operation for cancellation. It is an error to call this command if no
# operation is in progress.
#
# The operation will cancel as soon as possible and then emit the
# BLOCK_JOB_CANCELLED event. Before that happens the job is still visible when
# enumerated using query-block-jobs.
#
# Note that if you issue 'block-job-cancel' after 'drive-mirror' has indicated
# (via the event BLOCK_JOB_READY) that the source and destination are
# synchronized, then the event triggered by this command changes to
# BLOCK_JOB_COMPLETED, to indicate that the mirroring has ended and the
# destination now has a point-in-time copy tied to the time of the cancellation.
#
# For streaming, the image file retains its backing file unless the streaming
# operation happens to complete just as it is being cancelled. A new streaming
# operation can be started at a later time to finish copying all data from the
# backing file.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# @force: If true, and the job has already emitted the event BLOCK_JOB_READY,
# abandon the job immediately (even if it is paused) instead of waiting
# for the destination to complete its final synchronization (since 1.3)
#
# Returns: - Nothing on success
# - If no background operation is active on this device, DeviceNotActive
#
# Since: 1.1
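#
# Example (illustrative; "job0" is a hypothetical job identifier):
#
# -> { "execute": "block-job-cancel",
#      "arguments": { "device": "job0" } }
# <- { "return": {} }
#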
##
{ 'command': 'block-job-cancel', 'data': { 'device': 'str', '*force': 'bool' } }
##
# @block-job-pause:
#
# Pause an active background block operation.
#
# This command returns immediately after marking the active background block
# operation for pausing. It is an error to call this command if no
# operation is in progress or if the job is already paused.
#
# The operation will pause as soon as possible. No event is emitted when
# the operation is actually paused. Cancelling a paused job automatically
# resumes it.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# Returns: - Nothing on success
# - If no background operation is active on this device, DeviceNotActive
#
# Since: 1.3
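#
# Example (illustrative; "job0" is a hypothetical job identifier):
#
# -> { "execute": "block-job-pause",
#      "arguments": { "device": "job0" } }
# <- { "return": {} }
#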
##
{ 'command': 'block-job-pause', 'data': { 'device': 'str' } }
##
# @block-job-resume:
#
# Resume an active background block operation.
#
# This command returns immediately after resuming a paused background block
# operation. It is an error to call this command if no operation is in
# progress or if the job is not paused.
#
# This command also clears the error status of the job.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# Returns: - Nothing on success
# - If no background operation is active on this device, DeviceNotActive
#
# Since: 1.3
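#
# Example (illustrative; "job0" is a hypothetical job identifier):
#
# -> { "execute": "block-job-resume",
#      "arguments": { "device": "job0" } }
# <- { "return": {} }
#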
##
{ 'command': 'block-job-resume', 'data': { 'device': 'str' } }
##
# @block-job-complete:
#
# Manually trigger completion of an active background block operation. This
# is supported for drive mirroring, where it also switches the device to
# write to the target path only. The ability to complete is signaled with
# a BLOCK_JOB_READY event.
#
# This command completes an active background block operation synchronously.
# The ordering of this command's return with the BLOCK_JOB_COMPLETED event
# is not defined. Note that if an I/O error occurs during the processing of
# this command: 1) the command itself will fail; 2) the error will be processed
# according to the rerror/werror arguments that were specified when starting
# the operation.
#
# A cancelled or paused job cannot be completed.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# Returns: - Nothing on success
# - If no background operation is active on this device, DeviceNotActive
#
# Since: 1.3
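#
# Example (illustrative; "job0" is a hypothetical job identifier for a
# mirror job that has already emitted BLOCK_JOB_READY):
#
# -> { "execute": "block-job-complete",
#      "arguments": { "device": "job0" } }
# <- { "return": {} }
#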
##
{ 'command': 'block-job-complete', 'data': { 'device': 'str' } }
##
# @block-job-dismiss:
#
# For jobs that have already concluded, remove them from the query-block-jobs
# list. This command only needs to be run for jobs which were started with
# QEMU 2.12+ job lifetime management semantics.
#
# This command will refuse to operate on any job that has not yet reached
# its terminal state, JOB_STATUS_CONCLUDED. For jobs that make use of the
# BLOCK_JOB_READY event, block-job-cancel or block-job-complete will still need
# to be used as appropriate.
#
# @id: The job identifier.
#
# Returns: Nothing on success
#
# Since: 2.12
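#
# Example (illustrative; "job0" is a hypothetical job identifier):
#
# -> { "execute": "block-job-dismiss",
#      "arguments": { "id": "job0" } }
# <- { "return": {} }
#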
##
{ 'command': 'block-job-dismiss', 'data': { 'id': 'str' } }
##
# @block-job-finalize:
#
# Once a job that has manual=true reaches the pending state, it can be
# instructed to finalize any graph changes and do any necessary cleanup
# via this command.
# For jobs in a transaction, instructing one job to finalize will force
# ALL jobs in the transaction to finalize, so it is only necessary to instruct
# a single member job to finalize.
#
# @id: The job identifier.
#
# Returns: Nothing on success
#
# Since: 2.12
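#
# Example (illustrative; "job0" is a hypothetical job identifier of a job
# in the PENDING state):
#
# -> { "execute": "block-job-finalize",
#      "arguments": { "id": "job0" } }
# <- { "return": {} }
#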
##
{ 'command': 'block-job-finalize', 'data': { 'id': 'str' } }
##
# @BlockdevDiscardOptions:
#
# Determines how to handle discard requests.
#
# @ignore: Ignore the request
# @unmap: Forward as an unmap request
#
# Since: 2.9
##
{ 'enum': 'BlockdevDiscardOptions',
'data': [ 'ignore', 'unmap' ] }
##
# @BlockdevDetectZeroesOptions:
#
# Describes the operation mode for the automatic conversion of plain
# zero writes by the OS to driver specific optimized zero write commands.
#
# @off: Disabled (default)
# @on: Enabled
# @unmap: Enabled, and additionally try to unmap blocks if possible. This also
#         requires that @BlockdevDiscardOptions is set to unmap for this device.
#
# Since: 2.1
##
{ 'enum': 'BlockdevDetectZeroesOptions',
'data': [ 'off', 'on', 'unmap' ] }
##
# @BlockdevAioOptions:
#
# Selects the AIO backend to handle I/O requests
#
# @threads: Use qemu's thread pool
# @native: Use native AIO backend (only Linux and Windows)
# @io_uring: Use linux io_uring (since 5.0)
#
# Since: 2.9
##
{ 'enum': 'BlockdevAioOptions',
'data': [ 'threads', 'native',
{ 'name': 'io_uring', 'if': 'defined(CONFIG_LINUX_IO_URING)' } ] }
##
# @BlockdevCacheOptions:
#
# Includes cache-related options for block devices
#
# @direct: enables use of O_DIRECT (bypass the host page cache;
# default: false)
# @no-flush: ignore any flush requests for the device (default:
# false)
#
# Since: 2.9
##
{ 'struct': 'BlockdevCacheOptions',
'data': { '*direct': 'bool',
'*no-flush': 'bool' } }
##
# @BlockdevDriver:
#
# Drivers that are supported in block device operations.
#
# @throttle: Since 2.11
# @nvme: Since 2.12
# @copy-on-read: Since 3.0
# @blklogwrites: Since 3.0
# @blkreplay: Since 4.2
# @compress: Since 5.0
#
# Since: 2.9
##
{ 'enum': 'BlockdevDriver',
'data': [ 'blkdebug', 'blklogwrites', 'blkreplay', 'blkverify', 'bochs',
'cloop', 'compress', 'copy-on-read', 'dmg', 'file', 'ftp', 'ftps',
'gluster',
{'name': 'host_cdrom', 'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' },
{'name': 'host_device', 'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' },
'http', 'https', 'iscsi',
'luks', 'nbd', 'nfs', 'null-aio', 'null-co', 'nvme', 'parallels',
'preallocate', 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd',
{ 'name': 'replication', 'if': 'defined(CONFIG_REPLICATION)' },
'ssh', 'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat' ] }
##
# @BlockdevOptionsFile:
#
# Driver specific block device options for the file backend.
#
# @filename: path to the image file
# @pr-manager: the id for the object that will handle persistent reservations
# for this device (default: none, forward the commands via SG_IO;
# since 2.11)
# @aio: AIO backend (default: threads) (since: 2.8)
# @locking: whether to enable file locking. If set to 'auto', only enable
# when Open File Descriptor (OFD) locking API is available
# (default: auto, since 2.10)
# @drop-cache: invalidate page cache during live migration. This prevents
# stale data on the migration destination with cache.direct=off.
# Currently only supported on Linux hosts.
# (default: on, since: 4.0)
# @x-check-cache-dropped: whether to check that page cache was dropped on live
# migration. May cause noticeable delays if the image
# file is large, do not use in production.
# (default: off) (since: 3.0)
#
# Features:
# @dynamic-auto-read-only: If present, enabled auto-read-only means that the
# driver will open the image read-only at first,
# dynamically reopen the image file read-write when
# the first writer is attached to the node and reopen
# read-only when the last writer is detached. This
# allows giving QEMU write permissions only on demand
# when an operation actually needs write access.
#
# Since: 2.9
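#
# Example (illustrative sketch; the node name and path are hypothetical,
# used with the blockdev-add command defined elsewhere in this schema):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "file", "node-name": "file0",
#                     "filename": "/var/lib/images/disk.img",
#                     "aio": "native",
#                     "cache": { "direct": true } } }
# <- { "return": {} }
#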
##
{ 'struct': 'BlockdevOptionsFile',
'data': { 'filename': 'str',
'*pr-manager': 'str',
'*locking': 'OnOffAuto',
'*aio': 'BlockdevAioOptions',
'*drop-cache': {'type': 'bool',
'if': 'defined(CONFIG_LINUX)'},
'*x-check-cache-dropped': 'bool' },
'features': [ { 'name': 'dynamic-auto-read-only',
'if': 'defined(CONFIG_POSIX)' } ] }
##
# @BlockdevOptionsNull:
#
# Driver specific block device options for the null backend.
#
# @size: size of the device in bytes.
# @latency-ns: emulated latency (in nanoseconds) in processing
#              requests. Defaults to zero, which completes requests immediately.
# (Since 2.4)
# @read-zeroes: if true, reads from the device produce zeroes; if false, the
# buffer is left unchanged. (default: false; since: 4.1)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsNull',
'data': { '*size': 'int', '*latency-ns': 'uint64', '*read-zeroes': 'bool' } }
##
# @BlockdevOptionsNVMe:
#
# Driver specific block device options for the NVMe backend.
#
# @device: PCI controller address of the NVMe device in
# format hhhh:bb:ss.f (host:bus:slot.function)
# @namespace: namespace number of the device, starting from 1.
#
# Note that the PCI @device must have been unbound from any host
# kernel driver before instructing QEMU to add the blockdev.
#
# Since: 2.12
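#
# Example (illustrative; the PCI address is hypothetical, and the device
# must already be unbound from any host kernel driver):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "nvme", "node-name": "nvme0",
#                     "device": "0000:01:00.0", "namespace": 1 } }
# <- { "return": {} }
#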
##
{ 'struct': 'BlockdevOptionsNVMe',
'data': { 'device': 'str', 'namespace': 'int' } }
##
# @BlockdevOptionsVVFAT:
#
# Driver specific block device options for the vvfat protocol.
#
# @dir: directory to be exported as FAT image
# @fat-type: FAT type: 12, 16 or 32
# @floppy: whether to export a floppy image (true) or
# partitioned hard disk (false; default)
# @label: set the volume label, limited to 11 bytes. FAT16 and
# FAT32 traditionally have some restrictions on labels, which are
# ignored by most operating systems. Defaults to "QEMU VVFAT".
# (since 2.4)
# @rw: whether to allow write operations (default: false)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsVVFAT',
'data': { 'dir': 'str', '*fat-type': 'int', '*floppy': 'bool',
'*label': 'str', '*rw': 'bool' } }
##
# @BlockdevOptionsGenericFormat:
#
# Driver specific block device options for image formats that have no option
# besides their data source.
#
# @file: reference to or definition of the data source block device
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsGenericFormat',
'data': { 'file': 'BlockdevRef' } }
##
# @BlockdevOptionsLUKS:
#
# Driver specific block device options for LUKS.
#
# @key-secret: the ID of a QCryptoSecret object providing
# the decryption key (since 2.6). Mandatory except when
# doing a metadata-only probe of the image.
#
# Since: 2.9
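#
# Example (illustrative sketch; assumes a secret object "sec0" holding the
# passphrase was created beforehand, e.g. with object-add, and the path is
# hypothetical):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "luks", "node-name": "luks0",
#                     "key-secret": "sec0",
#                     "file": { "driver": "file",
#                               "filename": "/var/lib/images/disk.luks" } } }
# <- { "return": {} }
#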
##
{ 'struct': 'BlockdevOptionsLUKS',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*key-secret': 'str' } }
##
# @BlockdevOptionsGenericCOWFormat:
#
# Driver specific block device options for image formats that have no option
# besides their data source and an optional backing file.
#
# @backing: reference to or definition of the backing file block
# device, null disables the backing file entirely.
#           Defaults to the backing file stored in the image file.
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsGenericCOWFormat',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*backing': 'BlockdevRefOrNull' } }
##
# @Qcow2OverlapCheckMode:
#
# General overlap check modes.
#
# @none: Do not perform any checks
#
# @constant: Perform only checks which can be done in constant time and
# without reading anything from disk
#
# @cached: Perform only checks which can be done without reading anything
# from disk
#
# @all: Perform all available overlap checks
#
# Since: 2.9
##
{ 'enum': 'Qcow2OverlapCheckMode',
'data': [ 'none', 'constant', 'cached', 'all' ] }
##
# @Qcow2OverlapCheckFlags:
#
# Structure of flags for each metadata structure. Setting a field to 'true'
# makes qemu guard that structure against unintended overwriting. The default
# value is chosen according to the template given.
#
# @template: Specifies a template mode which can be adjusted using the other
# flags, defaults to 'cached'
#
# @bitmap-directory: since 3.0
#
# Since: 2.9
##
{ 'struct': 'Qcow2OverlapCheckFlags',
'data': { '*template': 'Qcow2OverlapCheckMode',
'*main-header': 'bool',
'*active-l1': 'bool',
'*active-l2': 'bool',
'*refcount-table': 'bool',
'*refcount-block': 'bool',
'*snapshot-table': 'bool',
'*inactive-l1': 'bool',
'*inactive-l2': 'bool',
'*bitmap-directory': 'bool' } }
##
# @Qcow2OverlapChecks:
#
# Specifies which metadata structures should be guarded against unintended
# overwriting.
#
# @flags: set of flags for separate specification of each metadata structure
# type
#
# @mode: named mode which chooses a specific set of flags
#
# Since: 2.9
##
{ 'alternate': 'Qcow2OverlapChecks',
'data': { 'flags': 'Qcow2OverlapCheckFlags',
'mode': 'Qcow2OverlapCheckMode' } }
##
# @BlockdevQcowEncryptionFormat:
#
# @aes: AES-CBC with plain64 initialization vectors
#
# Since: 2.10
##
{ 'enum': 'BlockdevQcowEncryptionFormat',
'data': [ 'aes' ] }
##
# @BlockdevQcowEncryption:
#
# Since: 2.10
##
{ 'union': 'BlockdevQcowEncryption',
'base': { 'format': 'BlockdevQcowEncryptionFormat' },
'discriminator': 'format',
'data': { 'aes': 'QCryptoBlockOptionsQCow' } }
##
# @BlockdevOptionsQcow:
#
# Driver specific block device options for qcow.
#
# @encrypt: Image decryption options. Mandatory for
# encrypted images, except when doing a metadata-only
# probe of the image.
#
# Since: 2.10
##
{ 'struct': 'BlockdevOptionsQcow',
'base': 'BlockdevOptionsGenericCOWFormat',
'data': { '*encrypt': 'BlockdevQcowEncryption' } }
##
# @BlockdevQcow2EncryptionFormat:
#
# @aes: AES-CBC with plain64 initialization vectors
#
# Since: 2.10
##
{ 'enum': 'BlockdevQcow2EncryptionFormat',
'data': [ 'aes', 'luks' ] }
##
# @BlockdevQcow2Encryption:
#
# Since: 2.10
##
{ 'union': 'BlockdevQcow2Encryption',
'base': { 'format': 'BlockdevQcow2EncryptionFormat' },
'discriminator': 'format',
'data': { 'aes': 'QCryptoBlockOptionsQCow',
'luks': 'QCryptoBlockOptionsLUKS'} }
##
# @BlockdevOptionsPreallocate:
#
# Filter driver intended to be inserted between format and protocol nodes
# and to do preallocation in the protocol node on write.
#
# @prealloc-align: on preallocation, align file length to this number,
# default 1048576 (1M)
#
# @prealloc-size: how much to preallocate, default 134217728 (128M)
#
# Since: 6.0
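#
# Example (illustrative; assumes an existing protocol node named "file0";
# the preallocation size shown is 256 MiB):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "preallocate", "node-name": "prealloc0",
#                     "file": "file0",
#                     "prealloc-size": 268435456 } }
# <- { "return": {} }
#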
##
{ 'struct': 'BlockdevOptionsPreallocate',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*prealloc-align': 'int', '*prealloc-size': 'int' } }
##
# @BlockdevOptionsQcow2:
#
# Driver specific block device options for qcow2.
#
# @lazy-refcounts: whether to enable the lazy refcounts
# feature (default is taken from the image file)
#
# @pass-discard-request: whether discard requests to the qcow2
# device should be forwarded to the data source
#
# @pass-discard-snapshot: whether discard requests for the data source
# should be issued when a snapshot operation (e.g.
# deleting a snapshot) frees clusters in the qcow2 file
#
# @pass-discard-other: whether discard requests for the data source
# should be issued on other occasions where a cluster
# gets freed
#
# @overlap-check: which overlap checks to perform for writes
# to the image, defaults to 'cached' (since 2.2)
#
# @cache-size: the maximum total size of the L2 table and
# refcount block caches in bytes (since 2.2)
#
# @l2-cache-size: the maximum size of the L2 table cache in
# bytes (since 2.2)
#
# @l2-cache-entry-size: the size of each entry in the L2 cache in
# bytes. It must be a power of two between 512
# and the cluster size. The default value is
# the cluster size (since 2.12)
#
# @refcount-cache-size: the maximum size of the refcount block cache
# in bytes (since 2.2)
#
# @cache-clean-interval: clean unused entries in the L2 and refcount
# caches. The interval is in seconds. The default value
# is 600 on supporting platforms, and 0 on other
# platforms. 0 disables this feature. (since 2.5)
#
# @encrypt: Image decryption options. Mandatory for
# encrypted images, except when doing a metadata-only
# probe of the image. (since 2.10)
#
# @data-file: reference to or definition of the external data file.
# This may only be specified for images that require an
# external data file. If it is not specified for such
# an image, the data file name is loaded from the image
# file. (since 4.0)
#
# Since: 2.9
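#
# Example (illustrative sketch; node names, path and cache size are
# hypothetical):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "qcow2", "node-name": "disk0",
#                     "l2-cache-size": 16777216,
#                     "file": { "driver": "file",
#                               "filename": "/var/lib/images/disk.qcow2" } } }
# <- { "return": {} }
#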
##
{ 'struct': 'BlockdevOptionsQcow2',
'base': 'BlockdevOptionsGenericCOWFormat',
'data': { '*lazy-refcounts': 'bool',
'*pass-discard-request': 'bool',
'*pass-discard-snapshot': 'bool',
'*pass-discard-other': 'bool',
'*overlap-check': 'Qcow2OverlapChecks',
'*cache-size': 'int',
'*l2-cache-size': 'int',
'*l2-cache-entry-size': 'int',
'*refcount-cache-size': 'int',
'*cache-clean-interval': 'int',
'*encrypt': 'BlockdevQcow2Encryption',
'*data-file': 'BlockdevRef' } }
##
# @SshHostKeyCheckMode:
#
# @none: Don't check the host key at all
# @hash: Compare the host key with a given hash
# @known_hosts: Check the host key against the known_hosts file
#
# Since: 2.12
##
{ 'enum': 'SshHostKeyCheckMode',
'data': [ 'none', 'hash', 'known_hosts' ] }
##
# @SshHostKeyCheckHashType:
#
# @md5: The given hash is an md5 hash
# @sha1: The given hash is an sha1 hash
# @sha256: The given hash is an sha256 hash
#
# Since: 2.12
##
{ 'enum': 'SshHostKeyCheckHashType',
'data': [ 'md5', 'sha1', 'sha256' ] }
##
# @SshHostKeyHash:
#
# @type: The hash algorithm used for the hash
# @hash: The expected hash value
#
# Since: 2.12
##
{ 'struct': 'SshHostKeyHash',
'data': { 'type': 'SshHostKeyCheckHashType',
'hash': 'str' }}
##
# @SshHostKeyCheck:
#
# Since: 2.12
##
{ 'union': 'SshHostKeyCheck',
'base': { 'mode': 'SshHostKeyCheckMode' },
'discriminator': 'mode',
'data': { 'hash': 'SshHostKeyHash' } }
##
# @BlockdevOptionsSsh:
#
# @server: host address
#
# @path: path to the image on the host
#
# @user: user as which to connect, defaults to current
# local user name
#
# @host-key-check: Defines how and what to check the host key against
# (default: known_hosts)
#
# Since: 2.9
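#
# Example (illustrative; host, port and path are hypothetical; @server is
# an InetSocketAddress, so the port is given as a string):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "ssh", "node-name": "ssh0",
#                     "server": { "host": "example.com", "port": "22" },
#                     "path": "/srv/images/disk.img",
#                     "host-key-check": { "mode": "known_hosts" } } }
# <- { "return": {} }
#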
##
{ 'struct': 'BlockdevOptionsSsh',
'data': { 'server': 'InetSocketAddress',
'path': 'str',
'*user': 'str',
'*host-key-check': 'SshHostKeyCheck' } }
##
# @BlkdebugEvent:
#
# Trigger events supported by blkdebug.
#
# @l1_shrink_write_table: write zeros to the l1 table to shrink image.
# (since 2.11)
#
# @l1_shrink_free_l2_clusters: discard the l2 tables. (since 2.11)
#
# @cor_write: a write due to copy-on-read (since 2.11)
#
# @cluster_alloc_space: an allocation of file space for a cluster (since 4.1)
#
# @none: triggers once at creation of the blkdebug node (since 4.1)
#
# Since: 2.9
##
{ 'enum': 'BlkdebugEvent', 'prefix': 'BLKDBG',
'data': [ 'l1_update', 'l1_grow_alloc_table', 'l1_grow_write_table',
'l1_grow_activate_table', 'l2_load', 'l2_update',
'l2_update_compressed', 'l2_alloc_cow_read', 'l2_alloc_write',
'read_aio', 'read_backing_aio', 'read_compressed', 'write_aio',
'write_compressed', 'vmstate_load', 'vmstate_save', 'cow_read',
'cow_write', 'reftable_load', 'reftable_grow', 'reftable_update',
'refblock_load', 'refblock_update', 'refblock_update_part',
'refblock_alloc', 'refblock_alloc_hookup', 'refblock_alloc_write',
'refblock_alloc_write_blocks', 'refblock_alloc_write_table',
'refblock_alloc_switch_table', 'cluster_alloc',
'cluster_alloc_bytes', 'cluster_free', 'flush_to_os',
'flush_to_disk', 'pwritev_rmw_head', 'pwritev_rmw_after_head',
'pwritev_rmw_tail', 'pwritev_rmw_after_tail', 'pwritev',
'pwritev_zero', 'pwritev_done', 'empty_image_prepare',
'l1_shrink_write_table', 'l1_shrink_free_l2_clusters',
'cor_write', 'cluster_alloc_space', 'none'] }
##
# @BlkdebugIOType:
#
# Kinds of I/O that blkdebug can inject errors in.
#
# @read: .bdrv_co_preadv()
#
# @write: .bdrv_co_pwritev()
#
# @write-zeroes: .bdrv_co_pwrite_zeroes()
#
# @discard: .bdrv_co_pdiscard()
#
# @flush: .bdrv_co_flush_to_disk()
#
# @block-status: .bdrv_co_block_status()
#
# Since: 4.1
##
{ 'enum': 'BlkdebugIOType', 'prefix': 'BLKDEBUG_IO_TYPE',
'data': [ 'read', 'write', 'write-zeroes', 'discard', 'flush',
'block-status' ] }
##
# @BlkdebugInjectErrorOptions:
#
# Describes a single error injection for blkdebug.
#
# @event: trigger event
#
# @state: the state identifier blkdebug needs to be in to
# actually trigger the event; defaults to "any"
#
# @iotype: the type of I/O operations on which this error should
# be injected; defaults to "all read, write,
# write-zeroes, discard, and flush operations"
# (since: 4.1)
#
# @errno: error identifier (errno) to be returned; defaults to
# EIO
#
# @sector: specifies the sector index which has to be affected
# in order to actually trigger the event; defaults to "any
# sector"
#
# @once: disables further events after this one has been
# triggered; defaults to false
#
# @immediately: fail immediately; defaults to false
#
# Since: 2.9
##
{ 'struct': 'BlkdebugInjectErrorOptions',
'data': { 'event': 'BlkdebugEvent',
'*state': 'int',
'*iotype': 'BlkdebugIOType',
'*errno': 'int',
'*sector': 'int',
'*once': 'bool',
'*immediately': 'bool' } }
##
# @BlkdebugSetStateOptions:
#
# Describes a single state-change event for blkdebug.
#
# @event: trigger event
#
# @state: the current state identifier blkdebug needs to be in;
# defaults to "any"
#
# @new_state: the state identifier blkdebug is supposed to assume if
# this event is triggered
#
# Since: 2.9
##
{ 'struct': 'BlkdebugSetStateOptions',
'data': { 'event': 'BlkdebugEvent',
'*state': 'int',
'new_state': 'int' } }
##
# @BlockdevOptionsBlkdebug:
#
# Driver specific block device options for blkdebug.
#
# @image: underlying raw block device (or image file)
#
# @config: filename of the configuration file
#
# @align: required alignment for requests in bytes, must be
# positive power of 2, or 0 for default
#
# @max-transfer: maximum size for I/O transfers in bytes, must be
# positive multiple of @align and of the underlying
# file's request alignment (but need not be a power of
# 2), or 0 for default (since 2.10)
#
# @opt-write-zero: preferred alignment for write zero requests in bytes,
# must be positive multiple of @align and of the
# underlying file's request alignment (but need not be a
# power of 2), or 0 for default (since 2.10)
#
# @max-write-zero: maximum size for write zero requests in bytes, must be
# positive multiple of @align, of @opt-write-zero, and of
# the underlying file's request alignment (but need not
# be a power of 2), or 0 for default (since 2.10)
#
# @opt-discard: preferred alignment for discard requests in bytes, must
# be positive multiple of @align and of the underlying
# file's request alignment (but need not be a power of
# 2), or 0 for default (since 2.10)
#
# @max-discard: maximum size for discard requests in bytes, must be
# positive multiple of @align, of @opt-discard, and of
# the underlying file's request alignment (but need not
# be a power of 2), or 0 for default (since 2.10)
#
# @inject-error: array of error injection descriptions
#
# @set-state: array of state-change descriptions
#
# @take-child-perms: Permissions to take on @image in addition to what
# is necessary anyway (which depends on how the
# blkdebug node is used). Defaults to none.
# (since 5.0)
#
# @unshare-child-perms: Permissions not to share on @image in addition
# to what cannot be shared anyway (which depends
# on how the blkdebug node is used). Defaults
# to none. (since 5.0)
#
# Since: 2.9
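#
# Example (illustrative sketch; assumes an existing image node "file0";
# errno 5 corresponds to EIO):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "blkdebug", "node-name": "dbg0",
#                     "image": "file0",
#                     "inject-error": [ { "event": "read_aio",
#                                         "errno": 5, "once": true } ] } }
# <- { "return": {} }
#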
##
{ 'struct': 'BlockdevOptionsBlkdebug',
'data': { 'image': 'BlockdevRef',
'*config': 'str',
'*align': 'int', '*max-transfer': 'int32',
'*opt-write-zero': 'int32', '*max-write-zero': 'int32',
'*opt-discard': 'int32', '*max-discard': 'int32',
'*inject-error': ['BlkdebugInjectErrorOptions'],
'*set-state': ['BlkdebugSetStateOptions'],
'*take-child-perms': ['BlockPermission'],
'*unshare-child-perms': ['BlockPermission'] } }
##
# @BlockdevOptionsBlklogwrites:
#
# Driver specific block device options for blklogwrites.
#
# @file: block device
#
# @log: block device used to log writes to @file
#
# @log-sector-size: sector size used in logging writes to @file, determines
# granularity of offsets and sizes of writes (default: 512)
#
# @log-append: append to an existing log (default: false)
#
# @log-super-update-interval: interval of write requests after which the log
# super block is updated to disk (default: 4096)
#
# Since: 3.0
##
{ 'struct': 'BlockdevOptionsBlklogwrites',
'data': { 'file': 'BlockdevRef',
'log': 'BlockdevRef',
'*log-sector-size': 'uint32',
'*log-append': 'bool',
'*log-super-update-interval': 'uint64' } }
##
# @BlockdevOptionsBlkverify:
#
# Driver specific block device options for blkverify.
#
# @test: block device to be tested
#
# @raw: raw image used for verification
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsBlkverify',
'data': { 'test': 'BlockdevRef',
'raw': 'BlockdevRef' } }
##
# @BlockdevOptionsBlkreplay:
#
# Driver specific block device options for blkreplay.
#
# @image: disk image which should be controlled with blkreplay
#
# Since: 4.2
##
{ 'struct': 'BlockdevOptionsBlkreplay',
'data': { 'image': 'BlockdevRef' } }
##
# @QuorumReadPattern:
#
# An enumeration of quorum read patterns.
#
# @quorum: read all the children and do a quorum vote on reads
#
# @fifo: read only from the first child that has not failed
#
# Since: 2.9
##
{ 'enum': 'QuorumReadPattern', 'data': [ 'quorum', 'fifo' ] }
##
# @BlockdevOptionsQuorum:
#
# Driver specific block device options for Quorum
#
# @blkverify: true if the driver must print content mismatches.
#             Set to false by default.
#
# @children: the children block devices to use
#
# @vote-threshold: the vote limit under which a read will fail
#
# @rewrite-corrupted: rewrite corrupted data when quorum is reached
# (Since 2.1)
#
# @read-pattern: choose read pattern; defaults to quorum
#                (Since 2.2)
#
# Since: 2.9
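#
# Example (illustrative; assumes three existing child nodes with the
# hypothetical names shown):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "quorum", "node-name": "quorum0",
#                     "children": [ "child0", "child1", "child2" ],
#                     "vote-threshold": 2 } }
# <- { "return": {} }
#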
##
{ 'struct': 'BlockdevOptionsQuorum',
'data': { '*blkverify': 'bool',
'children': [ 'BlockdevRef' ],
'vote-threshold': 'int',
'*rewrite-corrupted': 'bool',
'*read-pattern': 'QuorumReadPattern' } }
##
# @BlockdevOptionsGluster:
#
# Driver specific block device options for Gluster
#
# @volume: name of gluster volume where VM image resides
#
# @path: absolute path to image file in gluster volume
#
# @server: gluster servers description
#
# @debug: libgfapi log level (default '4' which is Error)
# (Since 2.8)
#
# @logfile: libgfapi log file (default /dev/stderr) (Since 2.8)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsGluster',
'data': { 'volume': 'str',
'path': 'str',
'server': ['SocketAddress'],
'*debug': 'int',
'*logfile': 'str' } }
##
# @IscsiTransport:
#
# An enumeration of libiscsi transport types
#
# Since: 2.9
##
{ 'enum': 'IscsiTransport',
'data': [ 'tcp', 'iser' ] }
##
# @IscsiHeaderDigest:
#
# An enumeration of header digests supported by libiscsi
#
# Since: 2.9
##
{ 'enum': 'IscsiHeaderDigest',
'prefix': 'QAPI_ISCSI_HEADER_DIGEST',
'data': [ 'crc32c', 'none', 'crc32c-none', 'none-crc32c' ] }
##
# @BlockdevOptionsIscsi:
#
# Driver specific block device options for iscsi
#
# @transport: The iscsi transport type
#
# @portal: The address of the iscsi portal
#
# @target: The target iqn name
#
# @lun: LUN to connect to. Defaults to 0.
#
# @user: User name to log in with. If omitted, no CHAP
# authentication is performed.
#
# @password-secret: The ID of a QCryptoSecret object providing
# the password for the login. This option is required if
# @user is specified.
#
# @initiator-name: The iqn name we want to identify to the target
# as. If this option is not specified, an initiator name is
# generated automatically.
#
# @header-digest: The desired header digest. Defaults to
# none-crc32c.
#
# @timeout: Timeout in seconds after which a request will
# timeout. 0 means no timeout and is the default.
#
# Since: 2.9
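#
# Example (illustrative; portal address, target IQN and node name are
# hypothetical):
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "iscsi", "node-name": "iscsi0",
#                     "transport": "tcp",
#                     "portal": "192.0.2.10:3260",
#                     "target": "iqn.2017-01.com.example:storage/1",
#                     "lun": 0 } }
# <- { "return": {} }
#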
##
{ 'struct': 'BlockdevOptionsIscsi',
'data': { 'transport': 'IscsiTransport',
'portal': 'str',
'target': 'str',
'*lun': 'int',
'*user': 'str',
'*password-secret': 'str',
'*initiator-name': 'str',
'*header-digest': 'IscsiHeaderDigest',
'*timeout': 'int' } }
##
# @RbdAuthMode:
#
# Since: 3.0
##
{ 'enum': 'RbdAuthMode',
'data': [ 'cephx', 'none' ] }
##
# @BlockdevOptionsRbd:
#
# @pool: Ceph pool name.
#
# @namespace: Rados namespace name in the Ceph pool. (Since 5.0)
#
# @image: Image name in the Ceph pool.
#
# @conf: path to Ceph configuration file. Values
# in the configuration file will be overridden by
# options specified via QAPI.
#
# @snapshot: Ceph snapshot name.
#
# @user: Ceph id name.
#
# @auth-client-required: Acceptable authentication modes.
# This maps to Ceph configuration option
# "auth_client_required". (Since 3.0)
#
# @key-secret: ID of a QCryptoSecret object providing a key
# for cephx authentication.
# This maps to Ceph configuration option
# "key". (Since 3.0)
#
# @server: Monitor host address and port. This maps
# to the "mon_host" Ceph option.
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsRbd',
'data': { 'pool': 'str',
'*namespace': 'str',
'image': 'str',
'*conf': 'str',
'*snapshot': 'str',
'*user': 'str',
'*auth-client-required': ['RbdAuthMode'],
'*key-secret': 'str',
'*server': ['InetSocketAddressBase'] } }
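# Illustrative sketch: a minimal blockdev-add for this driver. The pool,
# image, monitor addresses and the secret object "rbdkey0" are
# placeholders; the secret must have been created beforehand.
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "rbd",
#                     "node-name": "rbd0",
#                     "pool": "rbd",
#                     "image": "vm-disk1",
#                     "user": "libvirt",
#                     "auth-client-required": [ "cephx", "none" ],
#                     "key-secret": "rbdkey0",
#                     "server": [ { "host": "198.51.100.10", "port": "6789" },
#                                 { "host": "198.51.100.11", "port": "6789" } ] } }
# <- { "return": {} }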
##
# @ReplicationMode:
#
# An enumeration of replication modes.
#
# @primary: Primary mode, the vm's state will be sent to secondary QEMU.
#
# @secondary: Secondary mode, receive the vm's state from primary QEMU.
#
# Since: 2.9
##
{ 'enum' : 'ReplicationMode', 'data' : [ 'primary', 'secondary' ],
'if': 'defined(CONFIG_REPLICATION)' }
##
# @BlockdevOptionsReplication:
#
# Driver specific block device options for replication
#
# @mode: the replication mode
#
# @top-id: In secondary mode, node name or device ID of the root
#          node that owns the replication node chain. Must not be given in
#          primary mode.
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsReplication',
'base': 'BlockdevOptionsGenericFormat',
'data': { 'mode': 'ReplicationMode',
'*top-id': 'str' },
'if': 'defined(CONFIG_REPLICATION)' }
##
# @NFSTransport:
#
# An enumeration of NFS transport types
#
# @inet: TCP transport
#
# Since: 2.9
##
{ 'enum': 'NFSTransport',
'data': [ 'inet' ] }
##
# @NFSServer:
#
# Captures the address of the socket
#
# @type: transport type used for NFS (only TCP supported)
#
# @host: host address for NFS server
#
# Since: 2.9
##
{ 'struct': 'NFSServer',
'data': { 'type': 'NFSTransport',
'host': 'str' } }
##
# @BlockdevOptionsNfs:
#
# Driver specific block device option for NFS
#
# @server: host address
#
# @path: path of the image on the host
#
# @user: UID value to use when talking to the
# server (defaults to 65534 on Windows and getuid()
# on unix)
#
# @group: GID value to use when talking to the
#         server (defaults to 65534 on Windows and getgid()
#         on unix)
#
# @tcp-syn-count: number of SYNs during the session
# establishment (defaults to libnfs default)
#
# @readahead-size: set the readahead size in bytes (defaults
# to libnfs default)
#
# @page-cache-size: set the pagecache size in bytes (defaults
# to libnfs default)
#
# @debug: set the NFS debug level (max 2) (defaults
# to libnfs default)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsNfs',
'data': { 'server': 'NFSServer',
'path': 'str',
'*user': 'int',
'*group': 'int',
'*tcp-syn-count': 'int',
'*readahead-size': 'int',
'*page-cache-size': 'int',
'*debug': 'int' } }
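# Illustrative sketch: a minimal blockdev-add for this driver; the server
# host and export path are placeholders.
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "nfs",
#                     "node-name": "nfs0",
#                     "server": { "type": "inet",
#                                 "host": "nfs.example.com" },
#                     "path": "/exports/images/disk.raw",
#                     "readahead-size": 131072 } }
# <- { "return": {} }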
##
# @BlockdevOptionsCurlBase:
#
# Driver specific block device options shared by all protocols supported by the
# curl backend.
#
# @url: URL of the image file
#
# @readahead: Size of the read-ahead cache; must be a multiple of
# 512 (defaults to 256 kB)
#
# @timeout: Timeout for connections, in seconds (defaults to 5)
#
# @username: Username for authentication (defaults to none)
#
# @password-secret: ID of a QCryptoSecret object providing a password
# for authentication (defaults to no password)
#
# @proxy-username: Username for proxy authentication (defaults to none)
#
# @proxy-password-secret: ID of a QCryptoSecret object providing a password
# for proxy authentication (defaults to no password)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlBase',
'data': { 'url': 'str',
'*readahead': 'int',
'*timeout': 'int',
'*username': 'str',
'*password-secret': 'str',
'*proxy-username': 'str',
'*proxy-password-secret': 'str' } }
##
# @BlockdevOptionsCurlHttp:
#
# Driver specific block device options for HTTP connections over the curl
# backend. URLs must start with "http://".
#
# @cookie: List of cookies to set; format is
# "name1=content1; name2=content2;" as explained by
# CURLOPT_COOKIE(3). Defaults to no cookies.
#
# @cookie-secret: ID of a QCryptoSecret object providing the cookie data in a
# secure way. See @cookie for the format. (since 2.10)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlHttp',
'base': 'BlockdevOptionsCurlBase',
'data': { '*cookie': 'str',
'*cookie-secret': 'str'} }
##
# @BlockdevOptionsCurlHttps:
#
# Driver specific block device options for HTTPS connections over the curl
# backend. URLs must start with "https://".
#
# @cookie: List of cookies to set; format is
# "name1=content1; name2=content2;" as explained by
# CURLOPT_COOKIE(3). Defaults to no cookies.
#
# @sslverify: Whether to verify the SSL certificate's validity (defaults to
# true)
#
# @cookie-secret: ID of a QCryptoSecret object providing the cookie data in a
# secure way. See @cookie for the format. (since 2.10)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlHttps',
'base': 'BlockdevOptionsCurlBase',
'data': { '*cookie': 'str',
'*sslverify': 'bool',
'*cookie-secret': 'str'} }
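# Illustrative sketch: a minimal blockdev-add for the https driver; the URL
# is a placeholder. @readahead must be a multiple of 512 as documented
# above.
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "https",
#                     "node-name": "web0",
#                     "url": "https://images.example.com/disk.img",
#                     "sslverify": true,
#                     "readahead": 262144,
#                     "timeout": 10 } }
# <- { "return": {} }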
##
# @BlockdevOptionsCurlFtp:
#
# Driver specific block device options for FTP connections over the curl
# backend. URLs must start with "ftp://".
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlFtp',
'base': 'BlockdevOptionsCurlBase',
'data': { } }
##
# @BlockdevOptionsCurlFtps:
#
# Driver specific block device options for FTPS connections over the curl
# backend. URLs must start with "ftps://".
#
# @sslverify: Whether to verify the SSL certificate's validity (defaults to
# true)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlFtps',
'base': 'BlockdevOptionsCurlBase',
'data': { '*sslverify': 'bool' } }
##
# @BlockdevOptionsNbd:
#
# Driver specific block device options for NBD.
#
# @server: NBD server address
#
# @export: export name
#
# @tls-creds: TLS credentials ID
#
# @x-dirty-bitmap: A metadata context name such as "qemu:dirty-bitmap:NAME"
# or "qemu:allocation-depth" to query in place of the
# traditional "base:allocation" block status (see
# NBD_OPT_LIST_META_CONTEXT in the NBD protocol; and
# yes, naming this option x-context would have made
# more sense) (since 3.0)
#
# @reconnect-delay: On an unexpected disconnect, the nbd client tries to
# connect again until succeeding or encountering a serious
# error. During the first @reconnect-delay seconds, all
# requests are paused and will be rerun on a successful
# reconnect. After that time, any delayed requests and all
# future requests before a successful reconnect will
# immediately fail. Default 0 (Since 4.2)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsNbd',
'data': { 'server': 'SocketAddress',
'*export': 'str',
'*tls-creds': 'str',
'*x-dirty-bitmap': 'str',
'*reconnect-delay': 'uint32' } }
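# Illustrative sketch: a minimal blockdev-add for this driver; the server
# address and export name are placeholders.
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "nbd",
#                     "node-name": "nbd0",
#                     "server": { "type": "inet",
#                                 "host": "203.0.113.5",
#                                 "port": "10809" },
#                     "export": "disk0",
#                     "reconnect-delay": 10 } }
# <- { "return": {} }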
##
# @BlockdevOptionsRaw:
#
# Driver specific block device options for the raw driver.
#
# @offset: position where the block device starts
# @size: the assumed size of the device
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsRaw',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*offset': 'int', '*size': 'int' } }
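# Illustrative sketch: exposing a 100 MB area starting 1 MB into an image
# as a raw node; the filename and node name are placeholders.
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "raw",
#                     "node-name": "part0",
#                     "offset": 1048576,
#                     "size": 104857600,
#                     "file": { "driver": "file",
#                               "filename": "/var/lib/images/whole-disk.img" } } }
# <- { "return": {} }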
##
# @BlockdevOptionsThrottle:
#
# Driver specific block device options for the throttle driver
#
# @throttle-group: the name of the throttle-group object to use. It
# must already exist.
# @file: reference to or definition of the data source block device
# Since: 2.11
##
{ 'struct': 'BlockdevOptionsThrottle',
'data': { 'throttle-group': 'str',
'file' : 'BlockdevRef'
} }
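# Illustrative sketch: inserting a throttle filter above an existing node;
# it assumes a throttle-group object "limits0" and a node "disk0" have
# already been created.
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "throttle",
#                     "node-name": "throttle0",
#                     "throttle-group": "limits0",
#                     "file": "disk0" } }
# <- { "return": {} }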
##
# @BlockdevOptionsCor:
#
# Driver specific block device options for the copy-on-read driver.
#
# @bottom: The name of a non-filter node (allocation-bearing layer) that
#          limits the COR operations in the backing chain (inclusive), so
#          that no data below this node will be copied by this filter.
#          If the option is absent, the limit is not applied, so that data
#          from all backing layers may be copied.
#
# Since: 6.0
##
{ 'struct': 'BlockdevOptionsCor',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*bottom': 'str' } }
##
# @BlockdevOptions:
#
# Options for creating a block device. Many options are available for all
# block devices, independent of the block driver:
#
# @driver: block driver name
# @node-name: the node name of the new node (Since 2.0).
# This option is required on the top level of blockdev-add.
# Valid node names start with an alphabetic character and may
# contain only alphanumeric characters, '-', '.' and '_'. Their
# maximum length is 31 characters.
# @discard: discard-related options (default: ignore)
# @cache: cache-related options
# @read-only: whether the block device should be read-only (default: false).
# Note that some block drivers support only read-only access,
# either generally or in certain configurations. In this case,
# the default value does not work and the option must be
# specified explicitly.
# @auto-read-only: if true and @read-only is false, QEMU may automatically
# decide not to open the image read-write as requested, but
# fall back to read-only instead (and switch between the modes
# later), e.g. depending on whether the image file is writable
# or whether a writing user is attached to the node
# (default: false, since 3.1)
# @detect-zeroes: detect and optimize zero writes (Since 2.1)
# (default: off)
# @force-share: force sharing of all permissions on added nodes.
#               Requires read-only=true. (Since 2.10)
#
# Remaining options are determined by the block driver.
#
# Since: 2.9
##
{ 'union': 'BlockdevOptions',
'base': { 'driver': 'BlockdevDriver',
'*node-name': 'str',
'*discard': 'BlockdevDiscardOptions',
'*cache': 'BlockdevCacheOptions',
'*read-only': 'bool',
'*auto-read-only': 'bool',
'*force-share': 'bool',
'*detect-zeroes': 'BlockdevDetectZeroesOptions' },
'discriminator': 'driver',
'data': {
'blkdebug': 'BlockdevOptionsBlkdebug',
'blklogwrites':'BlockdevOptionsBlklogwrites',
'blkverify': 'BlockdevOptionsBlkverify',
'blkreplay': 'BlockdevOptionsBlkreplay',
'bochs': 'BlockdevOptionsGenericFormat',
'cloop': 'BlockdevOptionsGenericFormat',
'compress': 'BlockdevOptionsGenericFormat',
'copy-on-read':'BlockdevOptionsCor',
'dmg': 'BlockdevOptionsGenericFormat',
'file': 'BlockdevOptionsFile',
'ftp': 'BlockdevOptionsCurlFtp',
'ftps': 'BlockdevOptionsCurlFtps',
'gluster': 'BlockdevOptionsGluster',
'host_cdrom': { 'type': 'BlockdevOptionsFile',
'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' },
'host_device': { 'type': 'BlockdevOptionsFile',
'if': 'defined(HAVE_HOST_BLOCK_DEVICE)' },
'http': 'BlockdevOptionsCurlHttp',
'https': 'BlockdevOptionsCurlHttps',
'iscsi': 'BlockdevOptionsIscsi',
'luks': 'BlockdevOptionsLUKS',
'nbd': 'BlockdevOptionsNbd',
'nfs': 'BlockdevOptionsNfs',
'null-aio': 'BlockdevOptionsNull',
'null-co': 'BlockdevOptionsNull',
'nvme': 'BlockdevOptionsNVMe',
'parallels': 'BlockdevOptionsGenericFormat',
'preallocate':'BlockdevOptionsPreallocate',
'qcow2': 'BlockdevOptionsQcow2',
'qcow': 'BlockdevOptionsQcow',
'qed': 'BlockdevOptionsGenericCOWFormat',
'quorum': 'BlockdevOptionsQuorum',
'raw': 'BlockdevOptionsRaw',
'rbd': 'BlockdevOptionsRbd',
'replication': { 'type': 'BlockdevOptionsReplication',
'if': 'defined(CONFIG_REPLICATION)' },
'ssh': 'BlockdevOptionsSsh',
'throttle': 'BlockdevOptionsThrottle',
'vdi': 'BlockdevOptionsGenericFormat',
'vhdx': 'BlockdevOptionsGenericFormat',
'vmdk': 'BlockdevOptionsGenericCOWFormat',
'vpc': 'BlockdevOptionsGenericFormat',
'vvfat': 'BlockdevOptionsVVFAT'
} }
##
# @BlockdevRef:
#
# Reference to a block device.
#
# @definition: defines a new block device inline
# @reference: references the ID of an existing block device
#
# Since: 2.9
##
{ 'alternate': 'BlockdevRef',
'data': { 'definition': 'BlockdevOptions',
'reference': 'str' } }
##
# @BlockdevRefOrNull:
#
# Reference to a block device.
#
# @definition: defines a new block device inline
# @reference: references the ID of an existing block device.
# An empty string means that no block device should
# be referenced. Deprecated; use null instead.
# @null: No block device should be referenced (since 2.10)
#
# Since: 2.9
##
{ 'alternate': 'BlockdevRefOrNull',
'data': { 'definition': 'BlockdevOptions',
'reference': 'str',
'null': 'null' } }
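# Illustrative sketch: using the JSON null value of BlockdevRefOrNull to
# open a qcow2 image without attaching its backing file. It relies on the
# format drivers' optional @backing member (defined elsewhere in this
# schema); the filename and node name are placeholders.
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "qcow2",
#                     "node-name": "overlay0",
#                     "file": { "driver": "file",
#                               "filename": "overlay.qcow2" },
#                     "backing": null } }
# <- { "return": {} }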
##
# @blockdev-add:
#
# Creates a new block device.
#
# Since: 2.9
#
# Example:
#
# 1.
# -> { "execute": "blockdev-add",
# "arguments": {
# "driver": "qcow2",
# "node-name": "test1",
# "file": {
# "driver": "file",
# "filename": "test.qcow2"
# }
# }
# }
# <- { "return": {} }
#
# 2.
# -> { "execute": "blockdev-add",
# "arguments": {
# "driver": "qcow2",
# "node-name": "node0",
# "discard": "unmap",
# "cache": {
# "direct": true
# },
# "file": {
# "driver": "file",
# "filename": "/tmp/test.qcow2"
# },
# "backing": {
# "driver": "raw",
# "file": {
# "driver": "file",
# "filename": "/dev/fdset/4"
# }
# }
# }
# }
#
# <- { "return": {} }
#
##
{ 'command': 'blockdev-add', 'data': 'BlockdevOptions', 'boxed': true }
##
# @x-blockdev-reopen:
#
# Reopens a block device using the given set of options. Any option
# not specified will be reset to its default value regardless of its
# previous status. If an option cannot be changed or a particular
# driver does not support reopening then the command will return an
# error.
#
# The top-level @node-name option (from BlockdevOptions) must be
# specified and is used to select the block device to be reopened.
# Other @node-name options must be either omitted or set to the
# current name of the appropriate node. This command won't change any
# node names; any attempt to do so will result in an error.
#
# In the case of options that refer to child nodes, the behavior of
# this command depends on the value:
#
# 1) A set of options (BlockdevOptions): the child is reopened with
# the specified set of options.
#
# 2) A reference to the current child: the child is reopened using
# its existing set of options.
#
# 3) A reference to a different node: the current child is replaced
# with the specified one.
#
# 4) NULL: the current child (if any) is detached.
#
# Options (1) and (2) are supported in all cases, but at the moment
# only @backing allows replacing or detaching an existing child.
#
# Unlike with blockdev-add, the @backing option must always be present
# unless the node being reopened does not have a backing file and its
# image does not have a default backing file name as part of its
# metadata.
#
# Since: 4.0
##
{ 'command': 'x-blockdev-reopen',
'data': 'BlockdevOptions', 'boxed': true }
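# Illustrative sketch: switching an existing raw node to read-only. It
# assumes "part0" was added with a file child node named "part0-file";
# since raw has no backing file, no @backing option is needed.
#
# -> { "execute": "x-blockdev-reopen",
#      "arguments": { "driver": "raw",
#                     "node-name": "part0",
#                     "read-only": true,
#                     "file": "part0-file" } }
# <- { "return": {} }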
##
# @blockdev-del:
#
# Deletes a block device that has been added using blockdev-add.
# The command will fail if the node is attached to a device or is
# otherwise being used.
#
# @node-name: Name of the graph node to delete.
#
# Since: 2.9
#
# Example:
#
# -> { "execute": "blockdev-add",
# "arguments": {
# "driver": "qcow2",
# "node-name": "node0",
# "file": {
# "driver": "file",
# "filename": "test.qcow2"
# }
# }
# }
# <- { "return": {} }
#
# -> { "execute": "blockdev-del",
# "arguments": { "node-name": "node0" }
# }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-del', 'data': { 'node-name': 'str' } }
##
# @BlockdevCreateOptionsFile:
#
# Driver specific image creation options for file.
#
# @filename: Filename for the new image file
# @size: Size of the virtual disk in bytes
# @preallocation: Preallocation mode for the new image (default: off;
# allowed values: off,
# falloc (if defined CONFIG_POSIX_FALLOCATE),
# full (if defined CONFIG_POSIX))
# @nocow: Turn off copy-on-write (valid only on btrfs; default: off)
# @extent-size-hint: Extent size hint to add to the image file; 0 for not
# adding an extent size hint (default: 1 MB, since 5.1)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsFile',
'data': { 'filename': 'str',
'size': 'size',
'*preallocation': 'PreallocMode',
'*nocow': 'bool',
'*extent-size-hint': 'size'} }
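# Illustrative sketch: creating a 10 GiB raw file with the blockdev-create
# job command (defined later in this schema); the job ID and filename are
# placeholders.
#
# -> { "execute": "blockdev-create",
#      "arguments": { "job-id": "create-file0",
#                     "options": { "driver": "file",
#                                  "filename": "/var/lib/images/new-disk.img",
#                                  "size": 10737418240 } } }
# <- { "return": {} }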
##
# @BlockdevCreateOptionsGluster:
#
# Driver specific image creation options for gluster.
#
# @location: Where to store the new image file
# @size: Size of the virtual disk in bytes
# @preallocation: Preallocation mode for the new image (default: off;
# allowed values: off,
# falloc (if defined CONFIG_GLUSTERFS_FALLOCATE),
# full (if defined CONFIG_GLUSTERFS_ZEROFILL))
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsGluster',
'data': { 'location': 'BlockdevOptionsGluster',
'size': 'size',
'*preallocation': 'PreallocMode' } }
##
# @BlockdevCreateOptionsLUKS:
#
# Driver specific image creation options for LUKS.
#
# @file: Node to create the image format on
# @size: Size of the virtual disk in bytes
# @preallocation: Preallocation mode for the new image
# (since: 4.2)
# (default: off; allowed values: off, metadata, falloc, full)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsLUKS',
'base': 'QCryptoBlockCreateOptionsLUKS',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*preallocation': 'PreallocMode' } }
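# Illustrative sketch: formatting an existing protocol node "proto0" as
# LUKS via the blockdev-create job command (defined later in this schema).
# @key-secret comes from the QCryptoBlockCreateOptionsLUKS base in
# crypto.json; the secret object "luks-key0" must already exist.
#
# -> { "execute": "blockdev-create",
#      "arguments": { "job-id": "create-luks0",
#                     "options": { "driver": "luks",
#                                  "file": "proto0",
#                                  "size": 10737418240,
#                                  "key-secret": "luks-key0" } } }
# <- { "return": {} }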
##
# @BlockdevCreateOptionsNfs:
#
# Driver specific image creation options for NFS.
#
# @location: Where to store the new image file
# @size: Size of the virtual disk in bytes
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsNfs',
'data': { 'location': 'BlockdevOptionsNfs',
'size': 'size' } }
##
# @BlockdevCreateOptionsParallels:
#
# Driver specific image creation options for parallels.
#
# @file: Node to create the image format on
# @size: Size of the virtual disk in bytes
# @cluster-size: Cluster size in bytes (default: 1 MB)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsParallels',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*cluster-size': 'size' } }
##
# @BlockdevCreateOptionsQcow:
#
# Driver specific image creation options for qcow.
#
# @file: Node to create the image format on
# @size: Size of the virtual disk in bytes
# @backing-file: File name of the backing file if a backing file
# should be used
# @encrypt: Encryption options if the image should be encrypted
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsQcow',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*backing-file': 'str',
'*encrypt': 'QCryptoBlockCreateOptions' } }
##
# @BlockdevQcow2Version:
#
# @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2)
# @v3: The extended QCOW2 format as introduced in qemu 1.1 (version 3)
#
# Since: 2.12
##
{ 'enum': 'BlockdevQcow2Version',
'data': [ 'v2', 'v3' ] }
##
# @Qcow2CompressionType:
#
# Compression type used in qcow2 image file
#
# @zlib: zlib compression, see <http://zlib.net/>
# @zstd: zstd compression, see <http://github.com/facebook/zstd>
#
# Since: 5.1
##
{ 'enum': 'Qcow2CompressionType',
'data': [ 'zlib', { 'name': 'zstd', 'if': 'defined(CONFIG_ZSTD)' } ] }
##
# @BlockdevCreateOptionsQcow2:
#
# Driver specific image creation options for qcow2.
#
# @file: Node to create the image format on
# @data-file: Node to use as an external data file in which all guest
# data is stored so that only metadata remains in the qcow2
# file (since: 4.0)
# @data-file-raw: True if the external data file must stay valid as a
# standalone (read-only) raw image without looking at qcow2
# metadata (default: false; since: 4.0)
# @extended-l2: True to make the image have extended L2 entries
# (default: false; since 5.2)
# @size: Size of the virtual disk in bytes
# @version: Compatibility level (default: v3)
# @backing-file: File name of the backing file if a backing file
# should be used
# @backing-fmt: Name of the block driver to use for the backing file
# @encrypt: Encryption options if the image should be encrypted
# @cluster-size: qcow2 cluster size in bytes (default: 65536)
# @preallocation: Preallocation mode for the new image (default: off;
# allowed values: off, falloc, full, metadata)
# @lazy-refcounts: True if refcounts may be updated lazily (default: off)
# @refcount-bits: Width of reference counts in bits (default: 16)
# @compression-type: The image cluster compression method
# (default: zlib, since 5.1)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsQcow2',
'data': { 'file': 'BlockdevRef',
'*data-file': 'BlockdevRef',
'*data-file-raw': 'bool',
'*extended-l2': 'bool',
'size': 'size',
'*version': 'BlockdevQcow2Version',
'*backing-file': 'str',
'*backing-fmt': 'BlockdevDriver',
'*encrypt': 'QCryptoBlockCreateOptions',
'*cluster-size': 'size',
'*preallocation': 'PreallocMode',
'*lazy-refcounts': 'bool',
'*refcount-bits': 'int',
'*compression-type':'Qcow2CompressionType' } }
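# Illustrative sketch: formatting an existing protocol node "proto0" as
# qcow2 via the blockdev-create job command (defined later in this
# schema); the job ID and node name are placeholders.
#
# -> { "execute": "blockdev-create",
#      "arguments": { "job-id": "create-qcow2-0",
#                     "options": { "driver": "qcow2",
#                                  "file": "proto0",
#                                  "size": 10737418240,
#                                  "cluster-size": 65536,
#                                  "lazy-refcounts": true } } }
# <- { "return": {} }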
##
# @BlockdevCreateOptionsQed:
#
# Driver specific image creation options for qed.
#
# @file: Node to create the image format on
# @size: Size of the virtual disk in bytes
# @backing-file: File name of the backing file if a backing file
# should be used
# @backing-fmt: Name of the block driver to use for the backing file
# @cluster-size: Cluster size in bytes (default: 65536)
# @table-size: L1/L2 table size (in clusters)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsQed',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*backing-file': 'str',
'*backing-fmt': 'BlockdevDriver',
'*cluster-size': 'size',
'*table-size': 'int' } }
##
# @BlockdevCreateOptionsRbd:
#
# Driver specific image creation options for rbd/Ceph.
#
# @location: Where to store the new image file. This location cannot
# point to a snapshot.
# @size: Size of the virtual disk in bytes
# @cluster-size: RBD object size
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsRbd',
'data': { 'location': 'BlockdevOptionsRbd',
'size': 'size',
'*cluster-size' : 'size' } }
##
# @BlockdevVmdkSubformat:
#
# Subformat options for VMDK images
#
# @monolithicSparse: Single file image with sparse cluster allocation
#
# @monolithicFlat: Single flat data image and a descriptor file
#
# @twoGbMaxExtentSparse: Data is split into 2GB (per virtual LBA) sparse extent
# files, in addition to a descriptor file
#
# @twoGbMaxExtentFlat: Data is split into 2GB (per virtual LBA) flat extent
# files, in addition to a descriptor file
#
# @streamOptimized: Single file image sparse cluster allocation, optimized
# for streaming over network.
#
# Since: 4.0
##
{ 'enum': 'BlockdevVmdkSubformat',
'data': [ 'monolithicSparse', 'monolithicFlat', 'twoGbMaxExtentSparse',
'twoGbMaxExtentFlat', 'streamOptimized'] }
##
# @BlockdevVmdkAdapterType:
#
# Adapter type info for VMDK images
#
# Since: 4.0
##
{ 'enum': 'BlockdevVmdkAdapterType',
'data': [ 'ide', 'buslogic', 'lsilogic', 'legacyESX'] }
##
# @BlockdevCreateOptionsVmdk:
#
# Driver specific image creation options for VMDK.
#
# @file: Where to store the new image file. This refers to the image
#        file for monolithicSparse and streamOptimized formats, or the
#        descriptor file for other formats.
# @size: Size of the virtual disk in bytes
# @extents: Where to store the data extents. Required for monolithicFlat,
# twoGbMaxExtentSparse and twoGbMaxExtentFlat formats. For
# monolithicFlat, only one entry is required; for
# twoGbMaxExtent* formats, the number of entries required is
# calculated as extent_number = virtual_size / 2GB. Providing
# more extents than will be used is an error.
# @subformat: The subformat of the VMDK image. Default: "monolithicSparse".
# @backing-file: The path of backing file. Default: no backing file is used.
# @adapter-type: The adapter type used to fill in the descriptor. Default: ide.
# @hwversion: Hardware version. The meaningful options are "4" or "6".
# Default: "4".
# @zeroed-grain: Whether to enable zeroed-grain feature for sparse subformats.
# Default: false.
#
# Since: 4.0
##
{ 'struct': 'BlockdevCreateOptionsVmdk',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*extents': ['BlockdevRef'],
'*subformat': 'BlockdevVmdkSubformat',
'*backing-file': 'str',
'*adapter-type': 'BlockdevVmdkAdapterType',
'*hwversion': 'str',
'*zeroed-grain': 'bool' } }
##
# @BlockdevCreateOptionsSsh:
#
# Driver specific image creation options for SSH.
#
# @location: Where to store the new image file
# @size: Size of the virtual disk in bytes
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsSsh',
'data': { 'location': 'BlockdevOptionsSsh',
'size': 'size' } }
##
# @BlockdevCreateOptionsVdi:
#
# Driver specific image creation options for VDI.
#
# @file: Node to create the image format on
# @size: Size of the virtual disk in bytes
# @preallocation: Preallocation mode for the new image (default: off;
# allowed values: off, metadata)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsVdi',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*preallocation': 'PreallocMode' } }
##
# @BlockdevVhdxSubformat:
#
# @dynamic: Growing image file
# @fixed: Preallocated fixed-size image file
#
# Since: 2.12
##
{ 'enum': 'BlockdevVhdxSubformat',
'data': [ 'dynamic', 'fixed' ] }
##
# @BlockdevCreateOptionsVhdx:
#
# Driver specific image creation options for vhdx.
#
# @file: Node to create the image format on
# @size: Size of the virtual disk in bytes
# @log-size: Log size in bytes, must be a multiple of 1 MB
# (default: 1 MB)
# @block-size: Block size in bytes, must be a multiple of 1 MB and not
# larger than 256 MB (default: automatically choose a block
# size depending on the image size)
# @subformat: vhdx subformat (default: dynamic)
# @block-state-zero: Force use of payload blocks of type 'ZERO'. Non-standard,
# but default. Do not set to 'off' when using 'qemu-img
# convert' with subformat=dynamic.
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsVhdx',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*log-size': 'size',
'*block-size': 'size',
'*subformat': 'BlockdevVhdxSubformat',
'*block-state-zero': 'bool' } }
##
# @BlockdevVpcSubformat:
#
# @dynamic: Growing image file
# @fixed: Preallocated fixed-size image file
#
# Since: 2.12
##
{ 'enum': 'BlockdevVpcSubformat',
'data': [ 'dynamic', 'fixed' ] }
##
# @BlockdevCreateOptionsVpc:
#
# Driver specific image creation options for vpc (VHD).
#
# @file: Node to create the image format on
# @size: Size of the virtual disk in bytes
# @subformat: vpc subformat (default: dynamic)
# @force-size: Force use of the exact byte size instead of rounding to the
# next size that can be represented in CHS geometry
# (default: false)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsVpc',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*subformat': 'BlockdevVpcSubformat',
'*force-size': 'bool' } }
##
# @BlockdevCreateOptions:
#
# Options for creating an image format on a given node.
#
# @driver: block driver to create the image format
#
# Since: 2.12
##
{ 'union': 'BlockdevCreateOptions',
'base': {
'driver': 'BlockdevDriver' },
'discriminator': 'driver',
'data': {
'file': 'BlockdevCreateOptionsFile',
'gluster': 'BlockdevCreateOptionsGluster',
'luks': 'BlockdevCreateOptionsLUKS',
'nfs': 'BlockdevCreateOptionsNfs',
'parallels': 'BlockdevCreateOptionsParallels',
'qcow': 'BlockdevCreateOptionsQcow',
'qcow2': 'BlockdevCreateOptionsQcow2',
'qed': 'BlockdevCreateOptionsQed',
'rbd': 'BlockdevCreateOptionsRbd',
'ssh': 'BlockdevCreateOptionsSsh',
'vdi': 'BlockdevCreateOptionsVdi',
'vhdx': 'BlockdevCreateOptionsVhdx',
'vmdk': 'BlockdevCreateOptionsVmdk',
'vpc': 'BlockdevCreateOptionsVpc'
} }
##
# @blockdev-create:
#
# Starts a job to create an image format on a given node. The job is
# automatically finalized, but a manual job-dismiss is required.
#
# @job-id: Identifier for the newly created job.
#
# @options: Options for the image creation.
#
# Since: 3.0
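#
# Example (illustrative; the node name "proto-node" and the job id are
# hypothetical and assume a protocol node was added with blockdev-add
# beforehand):
#
# -> { "execute": "blockdev-create",
#      "arguments": { "job-id": "create-job0",
#                     "options": { "driver": "qcow2",
#                                  "file": "proto-node",
#                                  "size": 1073741824 } } }
# <- { "return": {} }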
##
{ 'command': 'blockdev-create',
'data': { 'job-id': 'str',
'options': 'BlockdevCreateOptions' } }
##
# @BlockdevAmendOptionsLUKS:
#
# Driver specific image amend options for LUKS.
#
# Since: 5.1
##
{ 'struct': 'BlockdevAmendOptionsLUKS',
'base': 'QCryptoBlockAmendOptionsLUKS',
'data': { }
}
##
# @BlockdevAmendOptionsQcow2:
#
# Driver specific image amend options for qcow2.
# For now, only encryption options can be amended.
#
# @encrypt: Encryption options to be amended
#
# Since: 5.1
##
{ 'struct': 'BlockdevAmendOptionsQcow2',
'data': { '*encrypt': 'QCryptoBlockAmendOptions' } }
##
# @BlockdevAmendOptions:
#
# Options for amending an image format
#
# @driver: Block driver of the node to amend.
#
# Since: 5.1
##
{ 'union': 'BlockdevAmendOptions',
'base': {
'driver': 'BlockdevDriver' },
'discriminator': 'driver',
'data': {
'luks': 'BlockdevAmendOptionsLUKS',
'qcow2': 'BlockdevAmendOptionsQcow2' } }
##
# @x-blockdev-amend:
#
# Starts a job to amend format specific options of an existing open block
# device. The job is automatically finalized, but a manual job-dismiss is
# required.
#
# @job-id: Identifier for the newly created job.
#
# @node-name: Name of the block node to work on
#
# @options: Options (driver specific)
#
# @force: Allow unsafe operations; the exact meaning is format specific.
#         For LUKS, this allows erasing the last active keyslot
#         (permanent loss of data) and replacing an active keyslot
#         (possible loss of data if an I/O error happens)
#
# Since: 5.1
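#
# Example (illustrative; node name, job id and secret id are hypothetical,
# and the keyslot fields are those defined by QCryptoBlockAmendOptionsLUKS):
#
# -> { "execute": "x-blockdev-amend",
#      "arguments": { "job-id": "amend-job0",
#                     "node-name": "luks0",
#                     "options": { "driver": "luks",
#                                  "state": "active",
#                                  "new-secret": "newkey-sec0" } } }
# <- { "return": {} }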
##
{ 'command': 'x-blockdev-amend',
'data': { 'job-id': 'str',
'node-name': 'str',
'options': 'BlockdevAmendOptions',
'*force': 'bool' } }
##
# @BlockErrorAction:
#
# An enumeration of the actions taken when a disk I/O error occurs
#
# @ignore: error has been ignored
#
# @report: error has been reported to the device
#
# @stop: error caused VM to be stopped
#
# Since: 2.1
##
{ 'enum': 'BlockErrorAction',
'data': [ 'ignore', 'report', 'stop' ] }
##
# @BLOCK_IMAGE_CORRUPTED:
#
# Emitted when a disk image is being marked corrupt. The image can be
# identified by its device or node name. The 'device' field is always
# present for compatibility reasons, but it can be empty ("") if the
# image does not have a device name associated.
#
# @device: device name. This is always present for compatibility
# reasons, but it can be empty ("") if the image does not
# have a device name associated.
#
# @node-name: node name (Since: 2.4)
#
# @msg: informative message for human consumption, such as the kind of
#       corruption being detected. It should not be parsed by machines as it is
# not guaranteed to be stable
#
# @offset: if the corruption resulted from an image access, this is
# the host's access offset into the image
#
# @size: if the corruption resulted from an image access, this is
# the access size
#
# @fatal: if set, the image is marked corrupt and therefore unusable after this
# event and must be repaired (Since 2.2; before, every
# BLOCK_IMAGE_CORRUPTED event was fatal)
#
# Note: If action is "stop", a STOP event will eventually follow the
# BLOCK_IO_ERROR event.
#
# Example:
#
# <- { "event": "BLOCK_IMAGE_CORRUPTED",
# "data": { "device": "ide0-hd0", "node-name": "node0",
# "msg": "Prevented active L1 table overwrite", "offset": 196608,
# "size": 65536 },
# "timestamp": { "seconds": 1378126126, "microseconds": 966463 } }
#
# Since: 1.7
##
{ 'event': 'BLOCK_IMAGE_CORRUPTED',
'data': { 'device' : 'str',
'*node-name' : 'str',
'msg' : 'str',
'*offset' : 'int',
'*size' : 'int',
'fatal' : 'bool' } }
##
# @BLOCK_IO_ERROR:
#
# Emitted when a disk I/O error occurs
#
# @device: device name. This is always present for compatibility
# reasons, but it can be empty ("") if the image does not
# have a device name associated.
#
# @node-name: node name. Note that errors may be reported for the root node
# that is directly attached to a guest device rather than for the
# node where the error occurred. The node name is not present if
# the drive is empty. (Since: 2.8)
#
# @operation: I/O operation
#
# @action: action that has been taken
#
# @nospace: true if the I/O error was caused by a no-space
#           condition. This key is only present if query-block's
#           io-status is present; please see the query-block
#           documentation for more information (since: 2.2)
#
# @reason: human readable string describing the error cause.
# (This field is a debugging aid for humans, it should not
# be parsed by applications) (since: 2.2)
#
# Note: If action is "stop", a STOP event will eventually follow the
# BLOCK_IO_ERROR event
#
# Since: 0.13
#
# Example:
#
# <- { "event": "BLOCK_IO_ERROR",
# "data": { "device": "ide0-hd1",
# "node-name": "#block212",
# "operation": "write",
# "action": "stop" },
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'BLOCK_IO_ERROR',
'data': { 'device': 'str', '*node-name': 'str',
'operation': 'IoOperationType',
'action': 'BlockErrorAction', '*nospace': 'bool',
'reason': 'str' } }
##
# @BLOCK_JOB_COMPLETED:
#
# Emitted when a block job has completed
#
# @type: job type
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @len: maximum progress value
#
# @offset: current progress value. On success this is equal to len.
# On failure this is less than len
#
# @speed: rate limit, bytes per second
#
# @error: error message. Only present on failure. This field
# contains a human-readable error message. There are no semantics
# other than that streaming has failed and clients should not try to
# interpret the error string
#
# Since: 1.1
#
# Example:
#
# <- { "event": "BLOCK_JOB_COMPLETED",
# "data": { "type": "stream", "device": "virtio-disk0",
# "len": 10737418240, "offset": 10737418240,
# "speed": 0 },
# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
#
##
{ 'event': 'BLOCK_JOB_COMPLETED',
'data': { 'type' : 'JobType',
'device': 'str',
'len' : 'int',
'offset': 'int',
'speed' : 'int',
'*error': 'str' } }
##
# @BLOCK_JOB_CANCELLED:
#
# Emitted when a block job has been cancelled
#
# @type: job type
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @len: maximum progress value
#
# @offset: current progress value. On success this is equal to len.
# On failure this is less than len
#
# @speed: rate limit, bytes per second
#
# Since: 1.1
#
# Example:
#
# <- { "event": "BLOCK_JOB_CANCELLED",
# "data": { "type": "stream", "device": "virtio-disk0",
# "len": 10737418240, "offset": 134217728,
# "speed": 0 },
# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
#
##
{ 'event': 'BLOCK_JOB_CANCELLED',
'data': { 'type' : 'JobType',
'device': 'str',
'len' : 'int',
'offset': 'int',
'speed' : 'int' } }
##
# @BLOCK_JOB_ERROR:
#
# Emitted when a block job encounters an error
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @operation: I/O operation
#
# @action: action that has been taken
#
# Since: 1.3
#
# Example:
#
# <- { "event": "BLOCK_JOB_ERROR",
# "data": { "device": "ide0-hd1",
# "operation": "write",
# "action": "stop" },
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'BLOCK_JOB_ERROR',
'data': { 'device' : 'str',
'operation': 'IoOperationType',
'action' : 'BlockErrorAction' } }
##
# @BLOCK_JOB_READY:
#
# Emitted when a block job is ready to complete
#
# @type: job type
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @len: maximum progress value
#
# @offset: current progress value. On success this is equal to len.
# On failure this is less than len
#
# @speed: rate limit, bytes per second
#
# Note: The "ready to complete" status is always reset by a @BLOCK_JOB_ERROR
#       event.
#
# Since: 1.3
#
# Example:
#
# <- { "event": "BLOCK_JOB_READY",
# "data": { "device": "drive0", "type": "mirror", "speed": 0,
# "len": 2097152, "offset": 2097152 }
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'BLOCK_JOB_READY',
'data': { 'type' : 'JobType',
'device': 'str',
'len' : 'int',
'offset': 'int',
'speed' : 'int' } }
##
# @BLOCK_JOB_PENDING:
#
# Emitted when a block job is awaiting explicit authorization to finalize graph
# changes via @block-job-finalize. If this job is part of a transaction, it will
# not emit this event until the transaction has converged first.
#
# @type: job type
#
# @id: The job identifier.
#
# Since: 2.12
#
# Example:
#
# <- { "event": "BLOCK_JOB_WAITING",
# "data": { "device": "drive0", "type": "mirror" },
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'BLOCK_JOB_PENDING',
'data': { 'type' : 'JobType',
'id' : 'str' } }
##
# @PreallocMode:
#
# Preallocation mode of QEMU image file
#
# @off: no preallocation
# @metadata: preallocate only for metadata
# @falloc: like @full preallocation but allocate disk space by
# posix_fallocate() rather than writing data.
# @full: preallocate all data by writing it to the device to ensure
# disk space is really available. This data may or may not be
# zero, depending on the image format and storage.
# @full preallocation also sets up metadata correctly.
#
# Since: 2.2
##
{ 'enum': 'PreallocMode',
'data': [ 'off', 'metadata', 'falloc', 'full' ] }
##
# @BLOCK_WRITE_THRESHOLD:
#
# Emitted when writes on a block device reach or exceed the
# configured write threshold. For thin-provisioned devices, this
# means the device should be extended to avoid pausing for
# disk exhaustion.
# The event is one shot. Once triggered, it needs to be
# re-registered with another block-set-write-threshold command.
#
# @node-name: graph node name on which the threshold was exceeded.
#
# @amount-exceeded: amount of data which exceeded the threshold, in bytes.
#
# @write-threshold: last configured threshold, in bytes.
#
# Since: 2.3
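#
# Example (illustrative; the node name and byte values are hypothetical):
#
# <- { "event": "BLOCK_WRITE_THRESHOLD",
#      "data": { "node-name": "drive0",
#                "amount-exceeded": 65536,
#                "write-threshold": 17179869184 },
#      "timestamp": { "seconds": 1447756344, "microseconds": 345435 } }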
##
{ 'event': 'BLOCK_WRITE_THRESHOLD',
'data': { 'node-name': 'str',
'amount-exceeded': 'uint64',
'write-threshold': 'uint64' } }
##
# @block-set-write-threshold:
#
# Change the write threshold for a block drive. An event will be
# delivered if a write to this block drive crosses the configured
# threshold. The threshold is an offset, thus must be
# non-negative. Default is no write threshold. Setting the threshold
# to zero disables it.
#
# This is useful to transparently resize thin-provisioned drives without
# the guest OS noticing.
#
# @node-name: graph node name on which the threshold must be set.
#
# @write-threshold: configured threshold for the block device, bytes.
# Use 0 to disable the threshold.
#
# Since: 2.3
#
# Example:
#
# -> { "execute": "block-set-write-threshold",
# "arguments": { "node-name": "mydev",
# "write-threshold": 17179869184 } }
# <- { "return": {} }
#
##
{ 'command': 'block-set-write-threshold',
'data': { 'node-name': 'str', 'write-threshold': 'uint64' } }
##
# @x-blockdev-change:
#
# Dynamically reconfigure the block driver state graph. It can be used
# to add, remove, insert or replace a graph node. Currently only the
# Quorum driver implements this feature to add or remove its child. This
# is useful to fix a broken quorum child.
#
# If @node is specified, it will be inserted under @parent. @child
# may not be specified in this case. If both @parent and @child are
# specified but @node is not, @child will be detached from @parent.
#
# @parent: the id or name of the parent node.
#
# @child: the name of a child under the given parent node.
#
# @node: the name of the node that will be added.
#
# Note: this command is experimental, and its API is not stable. It
# does not support all kinds of operations, all kinds of children, nor
# all block drivers.
#
# FIXME Removing children from a quorum node means introducing gaps in the
# child indices. This cannot be represented in the 'children' list of
# BlockdevOptionsQuorum, as returned by .bdrv_refresh_filename().
#
# Warning: The data in a new quorum child MUST be consistent with that of
# the rest of the array.
#
# Since: 2.7
#
# Example:
#
# 1. Add a new node to a quorum
# -> { "execute": "blockdev-add",
# "arguments": {
# "driver": "raw",
# "node-name": "new_node",
# "file": { "driver": "file",
# "filename": "test.raw" } } }
# <- { "return": {} }
# -> { "execute": "x-blockdev-change",
# "arguments": { "parent": "disk1",
# "node": "new_node" } }
# <- { "return": {} }
#
# 2. Delete a quorum's node
# -> { "execute": "x-blockdev-change",
# "arguments": { "parent": "disk1",
# "child": "children.1" } }
# <- { "return": {} }
#
##
{ 'command': 'x-blockdev-change',
'data' : { 'parent': 'str',
'*child': 'str',
'*node': 'str' } }
##
# @x-blockdev-set-iothread:
#
# Move @node and its children into the @iothread. If @iothread is null then
# move @node and its children into the main loop.
#
# The node must not be attached to a BlockBackend.
#
# @node-name: the name of the block driver node
#
# @iothread: the name of the IOThread object or null for the main loop
#
# @force: true if the node and its children should be moved when a BlockBackend
# is already attached
#
# Note: this command is experimental and intended for test cases that need
# control over IOThreads only.
#
# Since: 2.12
#
# Example:
#
# 1. Move a node into an IOThread
# -> { "execute": "x-blockdev-set-iothread",
# "arguments": { "node-name": "disk1",
# "iothread": "iothread0" } }
# <- { "return": {} }
#
# 2. Move a node into the main loop
# -> { "execute": "x-blockdev-set-iothread",
# "arguments": { "node-name": "disk1",
# "iothread": null } }
# <- { "return": {} }
#
##
{ 'command': 'x-blockdev-set-iothread',
'data' : { 'node-name': 'str',
'iothread': 'StrOrNull',
'*force': 'bool' } }
##
# @QuorumOpType:
#
# An enumeration of the quorum operation types
#
# @read: read operation
#
# @write: write operation
#
# @flush: flush operation
#
# Since: 2.6
##
{ 'enum': 'QuorumOpType',
'data': [ 'read', 'write', 'flush' ] }
##
# @QUORUM_FAILURE:
#
# Emitted by the Quorum block driver if it fails to establish a quorum
#
# @reference: device name if defined else node name
#
# @sector-num: number of the first sector of the failed read operation
#
# @sectors-count: failed read operation sector count
#
# Note: This event is rate-limited.
#
# Since: 2.0
#
# Example:
#
# <- { "event": "QUORUM_FAILURE",
# "data": { "reference": "usr1", "sector-num": 345435, "sectors-count": 5 },
# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } }
#
##
{ 'event': 'QUORUM_FAILURE',
'data': { 'reference': 'str', 'sector-num': 'int', 'sectors-count': 'int' } }
##
# @QUORUM_REPORT_BAD:
#
# Emitted to report a corruption of a Quorum file
#
# @type: quorum operation type (Since 2.6)
#
# @error: error message. Only present on failure. This field
# contains a human-readable error message. There are no semantics other
# than that the block layer reported an error and clients should not
# try to interpret the error string.
#
# @node-name: the graph node name of the block driver state
#
# @sector-num: number of the first sector of the failed read operation
#
# @sectors-count: failed read operation sector count
#
# Note: This event is rate-limited.
#
# Since: 2.0
#
# Example:
#
# 1. Read operation
#
# { "event": "QUORUM_REPORT_BAD",
# "data": { "node-name": "node0", "sector-num": 345435, "sectors-count": 5,
# "type": "read" },
# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } }
#
# 2. Flush operation
#
# { "event": "QUORUM_REPORT_BAD",
# "data": { "node-name": "node0", "sector-num": 0, "sectors-count": 2097120,
# "type": "flush", "error": "Broken pipe" },
# "timestamp": { "seconds": 1456406829, "microseconds": 291763 } }
#
##
{ 'event': 'QUORUM_REPORT_BAD',
'data': { 'type': 'QuorumOpType', '*error': 'str', 'node-name': 'str',
'sector-num': 'int', 'sectors-count': 'int' } }
##
# @BlockdevSnapshotInternal:
#
# @device: the device name or node-name of a root node to generate the snapshot
# from
#
# @name: the name of the internal snapshot to be created
#
# Notes: In a transaction, if @name is empty, or if any snapshot matching
#        @name already exists, the operation will fail. Only some image
#        formats support it, for example, qcow2 and rbd.
#
# Since: 1.7
##
{ 'struct': 'BlockdevSnapshotInternal',
'data': { 'device': 'str', 'name': 'str' } }
##
# @blockdev-snapshot-internal-sync:
#
# Synchronously take an internal snapshot of a block device, when the
# format of the image used supports it. If the name is an empty
# string, or a snapshot with name already exists, the operation will
# fail.
#
# For the arguments, see the documentation of BlockdevSnapshotInternal.
#
# Returns: - nothing on success
# - If @device is not a valid block device, GenericError
# - If any snapshot matching @name exists, or @name is empty,
# GenericError
# - If the format of the image used does not support it,
# BlockFormatFeatureNotSupported
#
# Since: 1.7
#
# Example:
#
# -> { "execute": "blockdev-snapshot-internal-sync",
# "arguments": { "device": "ide-hd0",
# "name": "snapshot0" }
# }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-snapshot-internal-sync',
'data': 'BlockdevSnapshotInternal' }
##
# @blockdev-snapshot-delete-internal-sync:
#
# Synchronously delete an internal snapshot of a block device, when the format
# of the image used supports it. The snapshot is identified by name or id or
# both. At least one of name or id is required. Returns SnapshotInfo for the
# successfully deleted snapshot.
#
# @device: the device name or node-name of a root node to delete the snapshot
# from
#
# @id: ID of the snapshot to be deleted (optional)
#
# @name: name of the snapshot to be deleted (optional)
#
# Returns: - SnapshotInfo on success
# - If @device is not a valid block device, GenericError
# - If snapshot not found, GenericError
# - If the format of the image used does not support it,
# BlockFormatFeatureNotSupported
# - If @id and @name are both not specified, GenericError
#
# Since: 1.7
#
# Example:
#
# -> { "execute": "blockdev-snapshot-delete-internal-sync",
# "arguments": { "device": "ide-hd0",
# "name": "snapshot0" }
# }
# <- { "return": {
# "id": "1",
# "name": "snapshot0",
# "vm-state-size": 0,
# "date-sec": 1000012,
# "date-nsec": 10,
# "vm-clock-sec": 100,
# "vm-clock-nsec": 20,
# "icount": 220414
# }
# }
#
##
{ 'command': 'blockdev-snapshot-delete-internal-sync',
'data': { 'device': 'str', '*id': 'str', '*name': 'str'},
'returns': 'SnapshotInfo' }