Merge remote-tracking branch 'remotes/kwolf/tags/for-upstream' into staging

Block layer patches

- Fail gracefully when blockdev-snapshot creates loops
- ide: Fix IDENTIFY DEVICE for disks > 128 GiB
- file-posix: Fix return value translation for AIO discards
- file-posix: add 'aio-max-batch' option
- rbd: implement bdrv_co_block_status
- Code cleanups and build fixes

# gpg: Signature made Tue 02 Nov 2021 12:04:02 PM EDT
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]

* remotes/kwolf/tags/for-upstream:
  block/nvme: Extract nvme_free_queue() from nvme_free_queue_pair()
  block/nvme: Display CQ/SQ pointer in nvme_free_queue_pair()
  block/nvme: Automatically free qemu_memalign() with QEMU_AUTO_VFREE
  block-backend: Silence clang -m32 compiler warning
  linux-aio: add `dev_max_batch` parameter to laio_io_unplug()
  linux-aio: add `dev_max_batch` parameter to laio_co_submit()
  file-posix: add `aio-max-batch` option
  block/export/fuse.c: fix musl build
  ide: Cap LBA28 capacity announcement to 2^28-1
  block/rbd: implement bdrv_co_block_status
  block: Fail gracefully when blockdev-snapshot creates loops
  block/file-posix: Fix return value translation for AIO discards

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 741bdeb1d5
Richard Henderson, 2021-11-03 00:32:56 -04:00
13 changed files with 258 additions and 35 deletions

block.c

@@ -84,6 +84,9 @@ static BlockDriverState *bdrv_open_inherit(const char *filename,
                                            BdrvChildRole child_role,
                                            Error **errp);
 
+static bool bdrv_recurse_has_child(BlockDriverState *bs,
+                                   BlockDriverState *child);
+
 static void bdrv_replace_child_noperm(BdrvChild *child,
                                       BlockDriverState *new_bs);
 static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
@@ -2673,6 +2676,7 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
     int drain_saldo;
 
     assert(!child->frozen);
+    assert(old_bs != new_bs);
 
     if (old_bs && new_bs) {
         assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs));
@@ -2892,6 +2896,12 @@ static int bdrv_attach_child_noperm(BlockDriverState *parent_bs,
     assert(parent_bs->drv);
 
+    if (bdrv_recurse_has_child(child_bs, parent_bs)) {
+        error_setg(errp, "Making '%s' a %s child of '%s' would create a cycle",
+                   child_bs->node_name, child_name, parent_bs->node_name);
+        return -EINVAL;
+    }
+
     bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm);
     bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL,
                     perm, shared_perm, &perm, &shared_perm);
 

block/block-backend.c

@@ -1540,7 +1540,7 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                             QEMUIOVector *qiov, BdrvRequestFlags flags,
                             BlockCompletionFunc *cb, void *opaque)
 {
-    assert(qiov->size <= INT64_MAX);
+    assert((uint64_t)qiov->size <= INT64_MAX);
     return blk_aio_prwv(blk, offset, qiov->size, qiov,
                         blk_aio_write_entry, flags, cb, opaque);
 }
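The warning being silenced is clang's "comparison is always true" diagnostic: on 32-bit hosts size_t is 32 bits wide, so the unadorned comparison against INT64_MAX is tautological. A minimal standalone sketch of the pattern (not QEMU code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    void check(size_t size)
    {
        /* With 32-bit size_t this comparison is always true and clang -m32
         * warns about it; the explicit widening keeps the assertion
         * meaningful on 64-bit hosts while silencing the 32-bit warning. */
        assert((uint64_t)size <= INT64_MAX);
    }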

block/export/fuse.c

@@ -31,6 +31,10 @@
 #include <fuse.h>
 #include <fuse_lowlevel.h>
 
+#if defined(CONFIG_FALLOCATE_ZERO_RANGE)
+#include <linux/falloc.h>
+#endif
+
 #ifdef __linux__
 #include <linux/fs.h>
 #endif
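The musl build failure this addresses comes down to where the FALLOC_FL_* macros live: on musl they are provided by <linux/falloc.h> rather than re-exported through the libc headers, so code that only includes <fcntl.h> can fail to compile. A minimal sketch of code needing the extra include (an assumption based on the musl headers, not on this commit's text; assumes Linux):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>

    /* Zero a range in place; FALLOC_FL_ZERO_RANGE comes from
     * <linux/falloc.h> on musl, where <fcntl.h> alone does not define it. */
    int zero_range(int fd, off_t offset, off_t len)
    {
        return fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len);
    }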

block/file-posix.c

@@ -150,6 +150,8 @@ typedef struct BDRVRawState {
     uint64_t locked_perm;
     uint64_t locked_shared_perm;
 
+    uint64_t aio_max_batch;
+
     int perm_change_fd;
     int perm_change_flags;
     BDRVReopenState *reopen_state;
@@ -530,6 +532,11 @@ static QemuOptsList raw_runtime_opts = {
             .type = QEMU_OPT_STRING,
             .help = "host AIO implementation (threads, native, io_uring)",
         },
+        {
+            .name = "aio-max-batch",
+            .type = QEMU_OPT_NUMBER,
+            .help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)",
+        },
         {
             .name = "locking",
             .type = QEMU_OPT_STRING,
@@ -609,6 +616,8 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
     s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING);
 #endif
 
+    s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0);
+
     locking = qapi_enum_parse(&OnOffAuto_lookup,
                               qemu_opt_get(opts, "locking"),
                               ON_OFF_AUTO_AUTO, &local_err);
@@ -1807,7 +1816,7 @@ static int handle_aiocb_copy_range(void *opaque)
 static int handle_aiocb_discard(void *opaque)
 {
     RawPosixAIOData *aiocb = opaque;
-    int ret = -EOPNOTSUPP;
+    int ret = -ENOTSUP;
     BDRVRawState *s = aiocb->bs->opaque;
 
     if (!s->has_discard) {
@@ -1829,7 +1838,7 @@ static int handle_aiocb_discard(void *opaque)
 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
         ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            aiocb->aio_offset, aiocb->aio_nbytes);
-        ret = translate_err(-errno);
+        ret = translate_err(ret);
 #elif defined(__APPLE__) && (__MACH__)
         fpunchhole_t fpunchhole;
         fpunchhole.fp_flags = 0;
@@ -2057,7 +2066,8 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
     } else if (s->use_linux_aio) {
         LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
        assert(qiov->size == bytes);
-        return laio_co_submit(bs, aio, s->fd, offset, qiov, type);
+        return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
+                              s->aio_max_batch);
 #endif
     }
 
@@ -2115,7 +2125,7 @@ static void raw_aio_unplug(BlockDriverState *bs)
 #ifdef CONFIG_LINUX_AIO
     if (s->use_linux_aio) {
         LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
-        laio_io_unplug(bs, aio);
+        laio_io_unplug(bs, aio, s->aio_max_batch);
     }
 #endif
 #ifdef CONFIG_LINUX_IO_URING

block/linux-aio.c

@@ -334,30 +334,45 @@ static void ioq_submit(LinuxAioState *s)
     }
 }
 
+static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
+{
+    uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
+
+    /*
+     * AIO context can be shared between multiple block devices, so
+     * `dev_max_batch` allows reducing the batch size for latency-sensitive
+     * devices.
+     */
+    max_batch = MIN_NON_ZERO(dev_max_batch, max_batch);
+
+    /* limit the batch with the number of available events */
+    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);
+
+    return max_batch;
+}
+
 void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
 {
     s->io_q.plugged++;
 }
 
-void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
+void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s,
+                    uint64_t dev_max_batch)
 {
     assert(s->io_q.plugged);
-    if (--s->io_q.plugged == 0 &&
-        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
+    if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch) ||
+        (--s->io_q.plugged == 0 &&
+         !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending))) {
         ioq_submit(s);
     }
 }
 
 static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
-                          int type)
+                          int type, uint64_t dev_max_batch)
 {
     LinuxAioState *s = laiocb->ctx;
     struct iocb *iocbs = &laiocb->iocb;
     QEMUIOVector *qiov = laiocb->qiov;
-    int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
-
-    /* limit the batch with the number of available events */
-    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);
 
     switch (type) {
     case QEMU_AIO_WRITE:
@@ -378,7 +393,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
     s->io_q.in_queue++;
     if (!s->io_q.blocked &&
         (!s->io_q.plugged ||
-         s->io_q.in_queue >= max_batch)) {
+         s->io_q.in_queue >= laio_max_batch(s, dev_max_batch))) {
         ioq_submit(s);
     }
 
@@ -386,7 +401,8 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
 }
 
 int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
-                                uint64_t offset, QEMUIOVector *qiov, int type)
+                                uint64_t offset, QEMUIOVector *qiov, int type,
+                                uint64_t dev_max_batch)
 {
     int ret;
     struct qemu_laiocb laiocb = {
@@ -398,7 +414,7 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
         .qiov = qiov,
     };
 
-    ret = laio_do_submit(fd, &laiocb, offset, type);
+    ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch);
     if (ret < 0) {
         return ret;
     }
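For reference, a self-contained sketch of the batch-limit computation above: MIN_NON_ZERO picks the smaller non-zero operand, so dev_max_batch == 0 means "no per-device limit". The MAX_EVENTS and DEFAULT_MAX_BATCH values here are assumptions for illustration, not necessarily QEMU's.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN_NON_ZERO(a, b) \
        ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))
    #define MAX_EVENTS 128        /* assumed event-queue depth */
    #define DEFAULT_MAX_BATCH 32  /* assumed default batch size */

    static uint64_t effective_batch(uint64_t ctx_max_batch,
                                    uint64_t dev_max_batch,
                                    unsigned in_flight)
    {
        uint64_t max_batch = ctx_max_batch ? ctx_max_batch : DEFAULT_MAX_BATCH;

        max_batch = MIN_NON_ZERO(dev_max_batch, max_batch);
        /* never batch more requests than there are free events */
        return MIN_NON_ZERO(MAX_EVENTS - in_flight, max_batch);
    }

    int main(void)
    {
        /* a per-device limit of 8 wins over the context default of 32 */
        printf("%" PRIu64 "\n", effective_batch(0, 8, 0));   /* prints 8 */
        return 0;
    }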

block/nvme.c

@@ -183,15 +183,20 @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
     return r == 0;
 }
 
+static void nvme_free_queue(NVMeQueue *q)
+{
+    qemu_vfree(q->queue);
+}
+
 static void nvme_free_queue_pair(NVMeQueuePair *q)
 {
-    trace_nvme_free_queue_pair(q->index, q);
+    trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
     if (q->completion_bh) {
         qemu_bh_delete(q->completion_bh);
     }
+    nvme_free_queue(&q->sq);
+    nvme_free_queue(&q->cq);
     qemu_vfree(q->prp_list_pages);
-    qemu_vfree(q->sq.queue);
-    qemu_vfree(q->cq.queue);
     qemu_mutex_destroy(&q->lock);
     g_free(q);
 }
@@ -514,10 +519,10 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
 {
     BDRVNVMeState *s = bs->opaque;
     bool ret = false;
-    union {
+    QEMU_AUTO_VFREE union {
         NvmeIdCtrl ctrl;
         NvmeIdNs ns;
-    } *id;
+    } *id = NULL;
     NvmeLBAF *lbaf;
     uint16_t oncs;
     int r;
@@ -595,7 +600,6 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
     s->blkshift = lbaf->ds;
 out:
     qemu_vfio_dma_unmap(s->vfio, id);
-    qemu_vfree(id);
     return ret;
 }
 
@@ -1219,7 +1223,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
 {
     BDRVNVMeState *s = bs->opaque;
     int r;
-    uint8_t *buf = NULL;
+    QEMU_AUTO_VFREE uint8_t *buf = NULL;
     QEMUIOVector local_qiov;
     size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
     assert(QEMU_IS_ALIGNED(offset, s->page_size));
@@ -1246,7 +1250,6 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     if (!r && !is_write) {
         qemu_iovec_from_buf(qiov, 0, buf, bytes);
     }
-    qemu_vfree(buf);
     return r;
 }
 
@@ -1365,7 +1368,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     BDRVNVMeState *s = bs->opaque;
     NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
     NVMeRequest *req;
-    NvmeDsmRange *buf;
+    QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL;
     QEMUIOVector local_qiov;
     int ret;
 
@@ -1440,7 +1443,6 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     trace_nvme_dsm_done(s, offset, bytes, ret);
 out:
     qemu_iovec_destroy(&local_qiov);
-    qemu_vfree(buf);
     return ret;
 }
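QEMU_AUTO_VFREE builds on the GCC/clang cleanup attribute: the compiler runs a helper when the annotated variable leaves scope, so every return path frees the buffer without explicit qemu_vfree() calls. A minimal sketch of the pattern, with malloc/free standing in for qemu_memalign()/qemu_vfree() and hypothetical names throughout:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    static void auto_free(void *p)
    {
        free(*(void **)p);              /* free(NULL) is a no-op */
    }
    #define AUTO_FREE __attribute__((cleanup(auto_free)))

    int check_magic(const uint8_t *src, size_t len)
    {
        AUTO_FREE uint8_t *buf = calloc(1, 4096);

        if (!buf || len > 4096) {
            return -1;                  /* early return: buf is still freed */
        }
        memcpy(buf, src, len);
        return buf[0] == 0x42 ? 0 : -1; /* no explicit free() on any path */
    }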

block/rbd.c

@@ -97,6 +97,12 @@ typedef struct RBDTask {
     int64_t ret;
 } RBDTask;
 
+typedef struct RBDDiffIterateReq {
+    uint64_t offs;
+    uint64_t bytes;
+    bool exists;
+} RBDDiffIterateReq;
+
 static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx,
                             BlockdevOptionsRbd *opts, bool cache,
                             const char *keypairs, const char *secretid,
@@ -1259,6 +1265,111 @@ static ImageInfoSpecific *qemu_rbd_get_specific_info(BlockDriverState *bs,
     return spec_info;
 }
 
+/*
+ * rbd_diff_iterate2 allows interrupting the execution by returning a negative
+ * value in the callback routine. Choose a value that does not conflict with
+ * an existing exit code and return it if we want to prematurely stop the
+ * execution because we detected a change in the allocation status.
+ */
+#define QEMU_RBD_EXIT_DIFF_ITERATE2 -9000
+
+static int qemu_rbd_diff_iterate_cb(uint64_t offs, size_t len,
+                                    int exists, void *opaque)
+{
+    RBDDiffIterateReq *req = opaque;
+
+    assert(req->offs + req->bytes <= offs);
+
+    /*
+     * we do not diff against a snapshot so we should never receive a callback
+     * for a hole.
+     */
+    assert(exists);
+
+    if (!req->exists && offs > req->offs) {
+        /*
+         * we started in an unallocated area and hit the first allocated
+         * block. req->bytes must be set to the length of the unallocated area
+         * before the allocated area. stop further processing.
+         */
+        req->bytes = offs - req->offs;
+        return QEMU_RBD_EXIT_DIFF_ITERATE2;
+    }
+
+    if (req->exists && offs > req->offs + req->bytes) {
+        /*
+         * we started in an allocated area and jumped over an unallocated area,
+         * req->bytes contains the length of the allocated area before the
+         * unallocated area. stop further processing.
+         */
+        return QEMU_RBD_EXIT_DIFF_ITERATE2;
+    }
+
+    req->bytes += len;
+    req->exists = true;
+
+    return 0;
+}
+
+static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs,
+                                                 bool want_zero, int64_t offset,
+                                                 int64_t bytes, int64_t *pnum,
+                                                 int64_t *map,
+                                                 BlockDriverState **file)
+{
+    BDRVRBDState *s = bs->opaque;
+    int status, r;
+    RBDDiffIterateReq req = { .offs = offset };
+    uint64_t features, flags;
+
+    assert(offset + bytes <= s->image_size);
+
+    /* default to all sectors allocated */
+    status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
+    *map = offset;
+    *file = bs;
+    *pnum = bytes;
+
+    /* check if RBD image supports fast-diff */
+    r = rbd_get_features(s->image, &features);
+    if (r < 0) {
+        return status;
+    }
+    if (!(features & RBD_FEATURE_FAST_DIFF)) {
+        return status;
+    }
+
+    /* check if RBD fast-diff result is valid */
+    r = rbd_get_flags(s->image, &flags);
+    if (r < 0) {
+        return status;
+    }
+    if (flags & RBD_FLAG_FAST_DIFF_INVALID) {
+        return status;
+    }
+
+    r = rbd_diff_iterate2(s->image, NULL, offset, bytes, true, true,
+                          qemu_rbd_diff_iterate_cb, &req);
+    if (r < 0 && r != QEMU_RBD_EXIT_DIFF_ITERATE2) {
+        return status;
+    }
+    assert(req.bytes <= bytes);
+    if (!req.exists) {
+        if (r == 0) {
+            /*
+             * rbd_diff_iterate2 does not invoke callbacks for unallocated
+             * areas. This here catches the case where no callback was
+             * invoked at all (req.bytes == 0).
+             */
+            assert(req.bytes == 0);
+            req.bytes = bytes;
+        }
+        status = BDRV_BLOCK_ZERO | BDRV_BLOCK_OFFSET_VALID;
+    }
+
+    *pnum = req.bytes;
+    return status;
+}
+
 static int64_t qemu_rbd_getlength(BlockDriverState *bs)
 {
     BDRVRBDState *s = bs->opaque;
@@ -1494,6 +1605,7 @@ static BlockDriver bdrv_rbd = {
 #ifdef LIBRBD_SUPPORTS_WRITE_ZEROES
     .bdrv_co_pwrite_zeroes = qemu_rbd_co_pwrite_zeroes,
 #endif
+    .bdrv_co_block_status = qemu_rbd_co_block_status,
 
     .bdrv_snapshot_create = qemu_rbd_snap_create,
     .bdrv_snapshot_delete = qemu_rbd_snap_remove,
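With .bdrv_co_block_status wired up, allocation-aware tools can report holes in rbd images instead of treating everything as data. An illustrative (not captured) qemu-img invocation, with hypothetical pool/image names and abridged output:

    $ qemu-img map --output=json rbd:pool/image
    [{ "start": 0, "length": 4194304, "depth": 0, "zero": false, "data": true, "offset": 0},
    { "start": 4194304, "length": 130023424, "depth": 0, "zero": true, "data": false, "offset": 4194304}]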

block/trace-events

@@ -157,7 +157,7 @@ nvme_dsm_done(void *s, int64_t offset, int64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d"
 nvme_dma_map_flush(void *s) "s %p"
 nvme_free_req_queue_wait(void *s, unsigned q_index) "s %p q #%u"
 nvme_create_queue_pair(unsigned q_index, void *q, size_t size, void *aio_context, int fd) "index %u q %p size %zu aioctx %p fd %d"
-nvme_free_queue_pair(unsigned q_index, void *q) "index %u q %p"
+nvme_free_queue_pair(unsigned q_index, void *q, void *cq, void *sq) "index %u q %p cq %p sq %p"
 nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d"
 nvme_cmd_map_qiov_pages(void *s, int i, uint64_t page) "s %p page[%d] 0x%"PRIx64
 nvme_cmd_map_qiov_iov(void *s, int i, void *page, int pages) "s %p iov[%d] %p pages %d"

hw/ide/core.c

@@ -98,8 +98,12 @@ static void put_le16(uint16_t *p, unsigned int v)
 static void ide_identify_size(IDEState *s)
 {
     uint16_t *p = (uint16_t *)s->identify_data;
-    put_le16(p + 60, s->nb_sectors);
-    put_le16(p + 61, s->nb_sectors >> 16);
+    int64_t nb_sectors_lba28 = s->nb_sectors;
+    if (nb_sectors_lba28 >= 1 << 28) {
+        nb_sectors_lba28 = (1 << 28) - 1;
+    }
+    put_le16(p + 60, nb_sectors_lba28);
+    put_le16(p + 61, nb_sectors_lba28 >> 16);
     put_le16(p + 100, s->nb_sectors);
     put_le16(p + 101, s->nb_sectors >> 16);
     put_le16(p + 102, s->nb_sectors >> 32);
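A worked example of the cap, assuming 512-byte sectors: a 2 TiB disk has s->nb_sectors == 0x1_0000_0000. The old code stored the low 32 bits in IDENTIFY words 60/61, so LBA28-only guests saw a zero-capacity drive; with the cap they read 0x0FFF_FFFF (268435455 sectors, one sector short of 128 GiB), while the LBA48 words 100-103 still advertise the full size.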

include/block/raw-aio.h

@@ -51,11 +51,13 @@ typedef struct LinuxAioState LinuxAioState;
 LinuxAioState *laio_init(Error **errp);
 void laio_cleanup(LinuxAioState *s);
 int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
-                                uint64_t offset, QEMUIOVector *qiov, int type);
+                                uint64_t offset, QEMUIOVector *qiov, int type,
+                                uint64_t dev_max_batch);
 void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context);
 void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context);
 void laio_io_plug(BlockDriverState *bs, LinuxAioState *s);
-void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s);
+void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s,
+                    uint64_t dev_max_batch);
 #endif
 /* io_uring.c - Linux io_uring implementation */
 #ifdef CONFIG_LINUX_IO_URING

qapi/block-core.json

@@ -2939,6 +2939,12 @@
 #             for this device (default: none, forward the commands via SG_IO;
 #             since 2.11)
 # @aio: AIO backend (default: threads) (since: 2.8)
+# @aio-max-batch: maximum number of requests to batch together into a single
+#                 submission in the AIO backend. The smallest value between
+#                 this and the aio-max-batch value of the IOThread object is
+#                 chosen.
+#                 0 means that the AIO backend will handle it automatically.
+#                 (default: 0, since 6.2)
 # @locking: whether to enable file locking. If set to 'auto', only enable
 #           when Open File Descriptor (OFD) locking API is available
 #           (default: auto, since 2.10)
@@ -2968,6 +2974,7 @@
             '*pr-manager': 'str',
             '*locking': 'OnOffAuto',
             '*aio': 'BlockdevAioOptions',
+            '*aio-max-batch': 'int',
             '*drop-cache': {'type': 'bool',
                             'if': 'CONFIG_LINUX'},
             '*x-check-cache-dropped': { 'type': 'bool',
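A usage sketch for the new option (illustrative paths and IDs): the effective limit is the smaller non-zero value of the device's and the IOThread's aio-max-batch, so this disk batches at most 8 requests even though its IOThread would allow 16.

    $ qemu-system-x86_64 ... \
        -object iothread,id=iothread0,aio-max-batch=16 \
        -blockdev driver=file,node-name=disk0,filename=/tmp/disk.img,aio=native,cache.direct=on,aio-max-batch=8 \
        -device virtio-blk-pci,drive=disk0,iothread=iothread0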

tests/qemu-iotests/085

@@ -103,11 +103,18 @@ do_blockdev_add()
 }
 
 # ${1}: unique identifier for the snapshot filename
-add_snapshot_image()
+create_snapshot_image()
 {
     base_image="${TEST_DIR}/$((${1}-1))-${snapshot_virt0}"
     snapshot_file="${TEST_DIR}/${1}-${snapshot_virt0}"
     TEST_IMG=$snapshot_file _make_test_img -u -b "${base_image}" -F $IMGFMT "$size"
+}
+
+# ${1}: unique identifier for the snapshot filename
+add_snapshot_image()
+{
+    snapshot_file="${TEST_DIR}/${1}-${snapshot_virt0}"
+    create_snapshot_image "$1"
     do_blockdev_add "$1" "'backing': null, " "${snapshot_file}"
 }
 
@@ -230,6 +237,28 @@ _make_test_img -b "${TEST_IMG}.base" -F $IMGFMT "$size"
 do_blockdev_add ${SNAPSHOTS} "" "${TEST_IMG}"
 blockdev_snapshot ${SNAPSHOTS} error
 
+echo
+echo === Invalid command - creating loops ===
+echo
+
+SNAPSHOTS=$((${SNAPSHOTS}+1))
+add_snapshot_image ${SNAPSHOTS}
+
+_send_qemu_cmd $h "{ 'execute': 'blockdev-snapshot',
+                     'arguments': { 'node':'snap_${SNAPSHOTS}',
+                                    'overlay':'snap_${SNAPSHOTS}' }
+                   }" "error"
+
+SNAPSHOTS=$((${SNAPSHOTS}+1))
+create_snapshot_image ${SNAPSHOTS}
+do_blockdev_add ${SNAPSHOTS} "'backing': 'snap_$((${SNAPSHOTS}-1))', " \
+    "${TEST_DIR}/${SNAPSHOTS}-${snapshot_virt0}"
+
+_send_qemu_cmd $h "{ 'execute': 'blockdev-snapshot',
+                     'arguments': { 'node':'snap_${SNAPSHOTS}',
+                                    'overlay':'snap_$((${SNAPSHOTS}-1))' }
+                   }" "error"
+
 echo
 echo === Invalid command - The node does not exist ===
 echo

tests/qemu-iotests/085.out

@@ -217,15 +217,42 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT
                      'overlay':'snap_13' } }
 {"error": {"class": "GenericError", "desc": "The overlay already has a backing image"}}
 
+
+=== Invalid command - creating loops ===
+
+Formatting 'TEST_DIR/14-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/13-snapshot-v0.IMGFMT backing_fmt=IMGFMT
+{ 'execute': 'blockdev-add', 'arguments':
+  { 'driver': 'IMGFMT', 'node-name': 'snap_14', 'backing': null,
+    'file':
+    { 'driver': 'file', 'filename': 'TEST_DIR/14-snapshot-v0.IMGFMT',
+      'node-name': 'file_14' } } }
+{"return": {}}
+{ 'execute': 'blockdev-snapshot',
+  'arguments': { 'node':'snap_14',
+                 'overlay':'snap_14' }
+}
+{"error": {"class": "GenericError", "desc": "Making 'snap_14' a backing child of 'snap_14' would create a cycle"}}
+Formatting 'TEST_DIR/15-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/14-snapshot-v0.IMGFMT backing_fmt=IMGFMT
+{ 'execute': 'blockdev-add', 'arguments':
+  { 'driver': 'IMGFMT', 'node-name': 'snap_15', 'backing': 'snap_14',
+    'file':
+    { 'driver': 'file', 'filename': 'TEST_DIR/15-snapshot-v0.IMGFMT',
+      'node-name': 'file_15' } } }
+{"return": {}}
+{ 'execute': 'blockdev-snapshot',
+  'arguments': { 'node':'snap_15',
+                 'overlay':'snap_14' }
+}
+{"error": {"class": "GenericError", "desc": "Making 'snap_15' a backing child of 'snap_14' would create a cycle"}}
 
 === Invalid command - The node does not exist ===
 
 { 'execute': 'blockdev-snapshot',
   'arguments': { 'node': 'virtio0',
-                 'overlay':'snap_14' } }
-{"error": {"class": "GenericError", "desc": "Cannot find device='snap_14' nor node-name='snap_14'"}}
+                 'overlay':'snap_16' } }
+{"error": {"class": "GenericError", "desc": "Cannot find device='snap_16' nor node-name='snap_16'"}}
 { 'execute': 'blockdev-snapshot',
   'arguments': { 'node':'nodevice',
-                 'overlay':'snap_13' }
+                 'overlay':'snap_15' }
 }
 {"error": {"class": "GenericError", "desc": "Cannot find device='nodevice' nor node-name='nodevice'"}}
 *** done