2020-01-30 19:32:23 +03:00
|
|
|
#!/usr/bin/env python3
|
iotests: 30: drop from auto group (and effectively from make check)
I reproduced the following crash fast enough:
0 raise () at /lib64/libc.so.6
1 abort () at /lib64/libc.so.6
2 _nl_load_domain.cold () at /lib64/libc.so.6
3 annobin_assert.c_end () at /lib64/libc.so.6
4 bdrv_reopen_multiple (bs_queue=0x55de75fa9b70, errp=0x0)
at ../block.c:3820
5 bdrv_reopen_set_read_only (bs=0x55de760fc020, read_only=true,
errp=0x0) at ../block.c:3870
6 stream_clean (job=0x55de75fa9410) at ../block/stream.c:99
7 job_clean (job=0x55de75fa9410) at ../job.c:680
8 job_finalize_single (job=0x55de75fa9410) at ../job.c:696
9 job_txn_apply (job=0x55de75fa9410,
fn=0x55de741eee27 <job_finalize_single>) at ../job.c:158
10 job_do_finalize (job=0x55de75fa9410) at ../job.c:805
11 job_completed_txn_success (job=0x55de75fa9410) at ../job.c:855
12 job_completed (job=0x55de75fa9410) at ../job.c:868
13 job_exit (opaque=0x55de75fa9410) at ../job.c:888
14 aio_bh_call (bh=0x55de76b9b4e0) at ../util/async.c:136
15 aio_bh_poll (ctx=0x55de75bc5300) at ../util/async.c:164
16 aio_dispatch (ctx=0x55de75bc5300) at ../util/aio-posix.c:381
17 aio_ctx_dispatch (source=0x55de75bc5300, callback=0x0,
user_data=0x0) at ../util/async.c:306
18 g_main_context_dispatch () at /lib64/libglib-2.0.so.0
19 glib_pollfds_poll () at ../util/main-loop.c:232
20 os_host_main_loop_wait (timeout=0) at ../util/main-loop.c:255
21 main_loop_wait (nonblocking=0) at ../util/main-loop.c:531
22 qemu_main_loop () at ../softmmu/runstate.c:722
23 main (argc=20, argv=0x7ffe218f0268, envp=0x7ffe218f0310) at
../softmmu/main.c:50
(gdb) fr 4
4 bdrv_reopen_multiple (bs_queue=0x55de75fa9b70, errp=0x0) at
../block.c:3820
3820 assert(perm == state->perm);
(gdb) list
3815
3816 if (ret == 0) {
3817 uint64_t perm, shared;
3818
3819 bdrv_get_cumulative_perm(state->bs, &perm,
&shared);
3820 assert(perm == state->perm);
3821 assert(shared == state->shared_perm);
3822
3823 bdrv_set_perm(state->bs);
3824 } else {
(gdb) p perm
$1 = 1
(gdb) p state->perm
$2 = 0
Then I had 38 successful iterations and another crash:
0 bdrv_check_update_perm (bs=0x5631ac97bc50, q=0x0, new_used_perm=1,
new_shared_perm=31, ignore_children=0x0, errp=0x7ffd9d477cf8) at
../block.c:2197
1 bdrv_root_attach_child
(child_bs=0x5631ac97bc50, child_name=0x5631aaf6b1f9 "backing",
child_class=0x5631ab280ca0 <child_of_bds>, child_role=8,
ctx=0x5631ab757300, perm=1, shared_perm=31, opaque=0x5631abb8c020,
errp=0x7ffd9d477cf8)
at ../block.c:2642
2 bdrv_attach_child (parent_bs=0x5631abb8c020,
child_bs=0x5631ac97bc50, child_name=0x5631aaf6b1f9 "backing",
child_class=0x5631ab280ca0 <child_of_bds>, child_role=8,
errp=0x7ffd9d477cf8)
at ../block.c:2719
3 bdrv_set_backing_hd (bs=0x5631abb8c020, backing_hd=0x5631ac97bc50,
errp=0x7ffd9d477cf8) at ../block.c:2854
4 stream_prepare (job=0x5631ac751eb0) at ../block/stream.c:74
5 job_prepare (job=0x5631ac751eb0) at ../job.c:784
6 job_txn_apply (job=0x5631ac751eb0, fn=0x5631aacb1156 <job_prepare>)
at ../job.c:158
7 job_do_finalize (job=0x5631ac751eb0) at ../job.c:801
8 job_completed_txn_success (job=0x5631ac751eb0) at ../job.c:855
9 job_completed (job=0x5631ac751eb0) at ../job.c:868
10 job_exit (opaque=0x5631ac751eb0) at ../job.c:888
11 aio_bh_call (bh=0x7f3d9c007680) at ../util/async.c:136
12 aio_bh_poll (ctx=0x5631ab757300) at ../util/async.c:164
13 aio_dispatch (ctx=0x5631ab757300) at ../util/aio-posix.c:381
14 aio_ctx_dispatch (source=0x5631ab757300, callback=0x0,
user_data=0x0) at ../util/async.c:306
15 g_main_context_dispatch () at /lib64/libglib-2.0.so.0
16 glib_pollfds_poll () at ../util/main-loop.c:232
17 os_host_main_loop_wait (timeout=0) at ../util/main-loop.c:255
18 main_loop_wait (nonblocking=0) at ../util/main-loop.c:531
19 qemu_main_loop () at ../softmmu/runstate.c:722
20 main (argc=20, argv=0x7ffd9d478198, envp=0x7ffd9d478240) at
../softmmu/main.c:50
(gdb) list
2192 QLIST_FOREACH(c, &bs->parents, next_parent) {
2193 if (g_slist_find(ignore_children, c)) {
2194 continue;
2195 }
2196
2197 if ((new_used_perm & c->shared_perm) != new_used_perm)
{
2198 char *user = bdrv_child_user_desc(c);
2199 char *perm_names = bdrv_perm_names(new_used_perm &
~c->shared_perm);
2200
2201 error_setg(errp, "Conflicts with use by %s as '%s',
which does not "
(gdb) p c
$1 = (BdrvChild *) 0x8585858585858585
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-id: 20210205111021.715240-1-vsementsov@virtuozzo.com
Reviewed-by: Eric Blake <eblake@redhat.com>
[PMM: trimmed the part of the commit message referring to
as-yet-unapplied patchseries]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2021-02-05 14:10:21 +03:00
|
|
|
# group: rw backing
|
2012-02-29 17:25:22 +04:00
|
|
|
#
|
|
|
|
# Tests for image streaming.
|
|
|
|
#
|
|
|
|
# Copyright (C) 2012 IBM Corp.
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 2 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
#
|
|
|
|
|
2012-09-28 19:22:52 +04:00
|
|
|
import time
|
2012-02-29 17:25:22 +04:00
|
|
|
import os
|
|
|
|
import iotests
|
2020-09-07 14:38:24 +03:00
|
|
|
import unittest
|
2012-02-29 17:25:22 +04:00
|
|
|
from iotests import qemu_img, qemu_io
|
|
|
|
|
|
|
|
backing_img = os.path.join(iotests.test_dir, 'backing.img')
|
2012-05-09 17:05:03 +04:00
|
|
|
mid_img = os.path.join(iotests.test_dir, 'mid.img')
|
2012-02-29 17:25:22 +04:00
|
|
|
test_img = os.path.join(iotests.test_dir, 'test.img')
|
|
|
|
|
2013-05-28 19:11:37 +04:00
|
|
|
class TestSingleDrive(iotests.QMPTestCase):
|
2012-02-29 17:25:22 +04:00
|
|
|
image_len = 1 * 1024 * 1024 # MB
|
|
|
|
|
|
|
|
def setUp(self):
|
2013-05-28 19:11:37 +04:00
|
|
|
iotests.create_image(backing_img, TestSingleDrive.image_len)
|
iotests: Specify explicit backing format where sensible
There are many existing qcow2 images that specify a backing file but
no format. This has been the source of CVEs in the past, but has
become more prominent of a problem now that libvirt has switched to
-blockdev. With older -drive, at least the probing was always done by
qemu (so the only risk of a changed format between successive boots of
a guest was if qemu was upgraded and probed differently). But with
newer -blockdev, libvirt must specify a format; if libvirt guesses raw
where the image was formatted, this results in data corruption visible
to the guest; conversely, if libvirt guesses qcow2 where qemu was
using raw, this can result in potential security holes, so modern
libvirt instead refuses to use images without explicit backing format.
The change in libvirt to reject images without explicit backing format
has pointed out that a number of tools have been far too reliant on
probing in the past. It's time to set a better example in our own
iotests of properly setting this parameter.
iotest calls to create, rebase, and convert are all impacted to some
degree. It's a bit annoying that we are inconsistent on command line
- while all of those accept -o backing_file=...,backing_fmt=..., the
shortcuts are different: create and rebase have -b and -F, while
convert has -B but no -F. (amend has no shortcuts, but the previous
patch just deprecated the use of amend to change backing chains).
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200706203954.341758-9-eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2020-07-06 23:39:52 +03:00
|
|
|
qemu_img('create', '-f', iotests.imgfmt,
|
|
|
|
'-o', 'backing_file=%s' % backing_img,
|
|
|
|
'-F', 'raw', mid_img)
|
|
|
|
qemu_img('create', '-f', iotests.imgfmt,
|
|
|
|
'-o', 'backing_file=%s' % mid_img,
|
|
|
|
'-F', iotests.imgfmt, test_img)
|
2014-11-20 18:27:08 +03:00
|
|
|
qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 512', backing_img)
|
2016-03-21 16:47:26 +03:00
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 524288 512', mid_img)
|
2019-07-03 20:28:12 +03:00
|
|
|
self.vm = iotests.VM().add_drive("blkdebug::" + test_img,
|
|
|
|
"backing.node-name=mid," +
|
|
|
|
"backing.backing.node-name=base")
|
2012-02-29 17:25:22 +04:00
|
|
|
self.vm.launch()
|
|
|
|
|
|
|
|
def tearDown(self):
|
|
|
|
self.vm.shutdown()
|
|
|
|
os.remove(test_img)
|
2012-05-09 17:05:03 +04:00
|
|
|
os.remove(mid_img)
|
2012-02-29 17:25:22 +04:00
|
|
|
os.remove(backing_img)
|
|
|
|
|
|
|
|
def test_stream(self):
|
2013-05-28 19:11:34 +04:00
|
|
|
self.assert_no_active_block_jobs()
|
2012-02-29 17:25:22 +04:00
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='drive0')
|
2012-02-29 17:25:22 +04:00
|
|
|
|
2014-04-02 09:54:07 +04:00
|
|
|
self.wait_until_completed()
|
2012-02-29 17:25:22 +04:00
|
|
|
|
2013-05-28 19:11:34 +04:00
|
|
|
self.assert_no_active_block_jobs()
|
2012-05-08 18:51:53 +04:00
|
|
|
self.vm.shutdown()
|
2012-02-29 17:25:22 +04:00
|
|
|
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertEqual(
|
|
|
|
qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
|
|
|
|
'image file map does not match backing file after streaming')
|
2012-02-29 17:25:22 +04:00
|
|
|
|
2016-10-28 10:08:13 +03:00
|
|
|
def test_stream_intermediate(self):
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertNotEqual(
|
|
|
|
qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img).stdout,
|
|
|
|
'image file map matches backing file before streaming')
|
2016-10-28 10:08:13 +03:00
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='mid', job_id='stream-mid')
|
2016-10-28 10:08:13 +03:00
|
|
|
|
|
|
|
self.wait_until_completed(drive='stream-mid')
|
|
|
|
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
self.vm.shutdown()
|
|
|
|
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertEqual(
|
|
|
|
qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout,
|
|
|
|
'image file map does not match backing file after streaming')
|
2016-10-28 10:08:13 +03:00
|
|
|
|
2012-09-28 19:22:52 +04:00
|
|
|
def test_stream_pause(self):
|
2013-05-28 19:11:34 +04:00
|
|
|
self.assert_no_active_block_jobs()
|
2012-09-28 19:22:52 +04:00
|
|
|
|
2013-11-20 06:01:56 +04:00
|
|
|
self.vm.pause_drive('drive0')
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='drive0')
|
2012-09-28 19:22:52 +04:00
|
|
|
|
2018-03-10 11:27:31 +03:00
|
|
|
self.pause_job('drive0', wait=False)
|
2017-07-21 17:41:21 +03:00
|
|
|
self.vm.resume_drive('drive0')
|
2018-03-10 11:27:31 +03:00
|
|
|
self.pause_wait('drive0')
|
2017-07-21 17:41:21 +03:00
|
|
|
|
2012-09-28 19:22:52 +04:00
|
|
|
result = self.vm.qmp('query-block-jobs')
|
|
|
|
offset = self.dictpath(result, 'return[0]/offset')
|
|
|
|
|
2017-07-21 17:41:21 +03:00
|
|
|
time.sleep(0.5)
|
2012-09-28 19:22:52 +04:00
|
|
|
result = self.vm.qmp('query-block-jobs')
|
|
|
|
self.assert_qmp(result, 'return[0]/offset', offset)
|
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-resume', device='drive0')
|
2012-09-28 19:22:52 +04:00
|
|
|
|
2014-04-02 09:54:07 +04:00
|
|
|
self.wait_until_completed()
|
2012-09-28 19:22:52 +04:00
|
|
|
|
2013-05-28 19:11:34 +04:00
|
|
|
self.assert_no_active_block_jobs()
|
2012-09-28 19:22:52 +04:00
|
|
|
self.vm.shutdown()
|
|
|
|
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertEqual(
|
|
|
|
qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
|
|
|
|
'image file map does not match backing file after streaming')
|
2012-09-28 19:22:52 +04:00
|
|
|
|
2016-03-21 16:47:27 +03:00
|
|
|
def test_stream_no_op(self):
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# The image map is empty before the operation
|
2022-04-19 00:14:59 +03:00
|
|
|
empty_map = qemu_io(
|
|
|
|
'-f', iotests.imgfmt, '-rU', '-c', 'map', test_img).stdout
|
2016-03-21 16:47:27 +03:00
|
|
|
|
|
|
|
# This is a no-op: no data should ever be copied from the base image
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='drive0', base=mid_img)
|
2016-03-21 16:47:27 +03:00
|
|
|
|
|
|
|
self.wait_until_completed()
|
|
|
|
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
self.vm.shutdown()
|
|
|
|
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertEqual(
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
|
|
|
|
empty_map, 'image file map changed after a no-op')
|
2016-03-21 16:47:27 +03:00
|
|
|
|
2012-05-09 17:05:03 +04:00
|
|
|
def test_stream_partial(self):
|
2013-05-28 19:11:34 +04:00
|
|
|
self.assert_no_active_block_jobs()
|
2012-05-09 17:05:03 +04:00
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='drive0', base=backing_img)
|
2012-05-09 17:05:03 +04:00
|
|
|
|
2014-04-02 09:54:07 +04:00
|
|
|
self.wait_until_completed()
|
2012-05-09 17:05:03 +04:00
|
|
|
|
2013-05-28 19:11:34 +04:00
|
|
|
self.assert_no_active_block_jobs()
|
2012-05-09 17:05:03 +04:00
|
|
|
self.vm.shutdown()
|
|
|
|
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertEqual(
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
|
|
|
|
'image file map does not match backing file after streaming')
|
2012-05-09 17:05:03 +04:00
|
|
|
|
2012-02-29 17:25:22 +04:00
|
|
|
def test_device_not_found(self):
|
2012-04-11 19:27:10 +04:00
|
|
|
result = self.vm.qmp('block-stream', device='nonexistent')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
2021-03-05 18:19:28 +03:00
|
|
|
'Cannot find device=\'nonexistent\' nor node-name=\'nonexistent\'')
|
2012-02-29 17:25:22 +04:00
|
|
|
|
2017-05-15 15:36:23 +03:00
|
|
|
def test_job_id_missing(self):
|
|
|
|
result = self.vm.qmp('block-stream', device='mid')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc', "Invalid job ID ''")
|
2017-05-15 15:36:23 +03:00
|
|
|
|
2019-07-03 20:28:12 +03:00
|
|
|
def test_read_only(self):
|
|
|
|
# Create a new file that we can attach (we need a read-only top)
|
|
|
|
with iotests.FilePath('ro-top.img') as ro_top_path:
|
|
|
|
qemu_img('create', '-f', iotests.imgfmt, ro_top_path,
|
|
|
|
str(self.image_len))
|
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('blockdev-add',
|
|
|
|
node_name='ro-top',
|
|
|
|
driver=iotests.imgfmt,
|
|
|
|
read_only=True,
|
|
|
|
file={
|
|
|
|
'driver': 'file',
|
|
|
|
'filename': ro_top_path,
|
|
|
|
'read-only': True
|
|
|
|
},
|
|
|
|
backing='mid')
|
2019-07-03 20:28:12 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', job_id='stream',
|
|
|
|
device='ro-top', base_node='base')
|
|
|
|
self.assert_qmp(result, 'error/desc', 'Block node is read-only')
|
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('blockdev-del', node_name='ro-top')
|
2019-07-03 20:28:12 +03:00
|
|
|
|
2012-08-28 18:26:49 +04:00
|
|
|
|
2016-10-28 10:08:14 +03:00
|
|
|
class TestParallelOps(iotests.QMPTestCase):
|
|
|
|
num_ops = 4 # Number of parallel block-stream operations
|
|
|
|
num_imgs = num_ops * 2 + 1
|
2019-07-03 20:28:08 +03:00
|
|
|
image_len = num_ops * 4 * 1024 * 1024
|
2016-10-28 10:08:14 +03:00
|
|
|
imgs = []
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
opts = []
|
|
|
|
self.imgs = []
|
|
|
|
|
|
|
|
# Initialize file names and command-line options
|
|
|
|
for i in range(self.num_imgs):
|
|
|
|
img_depth = self.num_imgs - i - 1
|
|
|
|
opts.append("backing." * img_depth + "node-name=node%d" % i)
|
|
|
|
self.imgs.append(os.path.join(iotests.test_dir, 'img-%d.img' % i))
|
|
|
|
|
|
|
|
# Create all images
|
|
|
|
iotests.create_image(self.imgs[0], self.image_len)
|
|
|
|
for i in range(1, self.num_imgs):
|
|
|
|
qemu_img('create', '-f', iotests.imgfmt,
|
iotests: Specify explicit backing format where sensible
There are many existing qcow2 images that specify a backing file but
no format. This has been the source of CVEs in the past, but has
become more prominent of a problem now that libvirt has switched to
-blockdev. With older -drive, at least the probing was always done by
qemu (so the only risk of a changed format between successive boots of
a guest was if qemu was upgraded and probed differently). But with
newer -blockdev, libvirt must specify a format; if libvirt guesses raw
where the image was formatted, this results in data corruption visible
to the guest; conversely, if libvirt guesses qcow2 where qemu was
using raw, this can result in potential security holes, so modern
libvirt instead refuses to use images without explicit backing format.
The change in libvirt to reject images without explicit backing format
has pointed out that a number of tools have been far too reliant on
probing in the past. It's time to set a better example in our own
iotests of properly setting this parameter.
iotest calls to create, rebase, and convert are all impacted to some
degree. It's a bit annoying that we are inconsistent on command line
- while all of those accept -o backing_file=...,backing_fmt=..., the
shortcuts are different: create and rebase have -b and -F, while
convert has -B but no -F. (amend has no shortcuts, but the previous
patch just deprecated the use of amend to change backing chains).
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200706203954.341758-9-eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2020-07-06 23:39:52 +03:00
|
|
|
'-o', 'backing_file=%s' % self.imgs[i-1],
|
|
|
|
'-F', 'raw' if i == 1 else iotests.imgfmt, self.imgs[i])
|
2016-10-28 10:08:14 +03:00
|
|
|
|
|
|
|
# Put data into the images we are copying data from
|
2018-03-06 16:01:21 +03:00
|
|
|
odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
|
|
|
|
for i in range(len(odd_img_indexes)):
|
2019-07-03 20:28:08 +03:00
|
|
|
# Alternate between 2MB and 4MB.
|
2016-10-28 10:08:14 +03:00
|
|
|
# This way jobs will not finish in the same order they were created
|
2019-07-03 20:28:08 +03:00
|
|
|
num_mb = 2 + 2 * (i % 2)
|
2016-10-28 10:08:14 +03:00
|
|
|
qemu_io('-f', iotests.imgfmt,
|
2019-07-03 20:28:08 +03:00
|
|
|
'-c', 'write -P 0xFF %dM %dM' % (i * 4, num_mb),
|
2018-03-06 16:01:21 +03:00
|
|
|
self.imgs[odd_img_indexes[i]])
|
2016-10-28 10:08:14 +03:00
|
|
|
|
|
|
|
# Attach the drive to the VM
|
|
|
|
self.vm = iotests.VM()
|
|
|
|
self.vm.add_drive(self.imgs[-1], ','.join(opts))
|
|
|
|
self.vm.launch()
|
|
|
|
|
|
|
|
def tearDown(self):
|
|
|
|
self.vm.shutdown()
|
|
|
|
for img in self.imgs:
|
|
|
|
os.remove(img)
|
|
|
|
|
|
|
|
# Test that it's possible to run several block-stream operations
|
|
|
|
# in parallel in the same snapshot chain
|
2020-09-07 14:38:24 +03:00
|
|
|
@unittest.skipIf(os.environ.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
|
2016-10-28 10:08:14 +03:00
|
|
|
def test_stream_parallel(self):
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Check that the maps don't match before the streaming operations
|
|
|
|
for i in range(2, self.num_imgs, 2):
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertNotEqual(
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]).stdout,
|
|
|
|
'image file map matches backing file before streaming')
|
2016-10-28 10:08:14 +03:00
|
|
|
|
|
|
|
# Create all streaming jobs
|
|
|
|
pending_jobs = []
|
|
|
|
for i in range(2, self.num_imgs, 2):
|
|
|
|
node_name = 'node%d' % i
|
|
|
|
job_id = 'stream-%s' % node_name
|
|
|
|
pending_jobs.append(job_id)
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device=node_name,
|
|
|
|
job_id=job_id, bottom=f'node{i-1}',
|
|
|
|
speed=1024)
|
2016-10-28 10:08:14 +03:00
|
|
|
|
2021-11-15 17:54:06 +03:00
|
|
|
# Do this in reverse: After unthrottling them, some jobs may finish
|
|
|
|
# before we have unthrottled all of them. This will drain their
|
|
|
|
# subgraph, and this will make jobs above them advance (despite those
|
|
|
|
# jobs on top being throttled). In the worst case, all jobs below the
|
|
|
|
# top one are finished before we can unthrottle it, and this makes it
|
|
|
|
# advance so far that it completes before we can unthrottle it - which
|
|
|
|
# results in an error.
|
|
|
|
# Starting from the top (i.e. in reverse) does not have this problem:
|
|
|
|
# When a job finishes, the ones below it are not advanced.
|
|
|
|
for job in reversed(pending_jobs):
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-set-speed', device=job, speed=0)
|
2019-07-03 20:28:08 +03:00
|
|
|
|
2016-10-28 10:08:14 +03:00
|
|
|
# Wait for all jobs to be finished.
|
|
|
|
while len(pending_jobs) > 0:
|
|
|
|
for event in self.vm.get_qmp_events(wait=True):
|
|
|
|
if event['event'] == 'BLOCK_JOB_COMPLETED':
|
|
|
|
job_id = self.dictpath(event, 'data/device')
|
|
|
|
self.assertTrue(job_id in pending_jobs)
|
|
|
|
self.assert_qmp_absent(event, 'data/error')
|
|
|
|
pending_jobs.remove(job_id)
|
|
|
|
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
self.vm.shutdown()
|
|
|
|
|
|
|
|
# Check that all maps match now
|
|
|
|
for i in range(2, self.num_imgs, 2):
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertEqual(
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]).stdout,
|
|
|
|
'image file map does not match backing file after streaming')
|
2016-10-28 10:08:14 +03:00
|
|
|
|
2016-10-28 10:08:15 +03:00
|
|
|
# Test that it's not possible to perform two block-stream
|
|
|
|
# operations if there are nodes involved in both.
|
|
|
|
def test_overlapping_1(self):
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Set a speed limit to make sure that this job blocks the rest
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='node4',
|
|
|
|
job_id='stream-node4', base=self.imgs[1],
|
|
|
|
filter_node_name='stream-filter', speed=1024*1024)
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2])
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
2020-12-16 09:17:03 +03:00
|
|
|
"Node 'stream-filter' is busy: block device is in use by block job: stream")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2])
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node3' is busy: block device is in use by block job: stream")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4-v2')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node4' is busy: block device is in use by block job: stream")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
# block-commit should also fail if it touches nodes used by the stream job
|
|
|
|
result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
2020-12-16 09:17:03 +03:00
|
|
|
"Node 'stream-filter' is busy: block device is in use by block job: stream")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node3' is busy: block device is in use by block job: stream")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
# This fails because it needs to modify the backing string in node2, which is blocked
|
|
|
|
result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[0], top=self.imgs[1], job_id='commit-node0')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node2' is busy: block device is in use by block job: stream")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-set-speed', device='stream-node4', speed=0)
|
2019-07-03 20:28:08 +03:00
|
|
|
|
2016-10-28 10:08:15 +03:00
|
|
|
self.wait_until_completed(drive='stream-node4')
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Similar to test_overlapping_1, but with block-commit
|
|
|
|
# blocking the other jobs
|
|
|
|
def test_overlapping_2(self):
|
|
|
|
self.assertLessEqual(9, self.num_imgs)
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Set a speed limit to make sure that this job blocks the rest
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-commit', device='drive0', top=self.imgs[5], base=self.imgs[3], job_id='commit-node3', speed=1024*1024)
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node3' is busy: block device is in use by block job: commit")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', device='node6', base=self.imgs[2], job_id='stream-node6')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node5' is busy: block device is in use by block job: commit")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], job_id='stream-node4')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node4' is busy: block device is in use by block job: commit")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', device='node6', base=self.imgs[4], job_id='stream-node6-v2')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node5' is busy: block device is in use by block job: commit")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
# This fails because block-commit currently blocks the active layer even if it's not used
|
|
|
|
result = self.vm.qmp('block-stream', device='drive0', base=self.imgs[5], job_id='stream-drive0')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'drive0' is busy: block device is in use by block job: commit")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-set-speed', device='commit-node3', speed=0)
|
2019-07-03 20:28:08 +03:00
|
|
|
|
2016-10-28 10:08:15 +03:00
|
|
|
self.wait_until_completed(drive='commit-node3')
|
|
|
|
|
|
|
|
# Similar to test_overlapping_2, but here block-commit doesn't use the 'top' parameter.
|
|
|
|
# Internally this uses a mirror block job, hence the separate test case.
|
|
|
|
def test_overlapping_3(self):
|
|
|
|
self.assertLessEqual(8, self.num_imgs)
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Set a speed limit to make sure that this job blocks the rest
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-commit', device='drive0', base=self.imgs[3], job_id='commit-drive0', speed=1024*1024)
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node5' is busy: block device is in use by block job: commit")
|
2016-10-28 10:08:15 +03:00
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-set-speed', device='commit-drive0', speed=0)
|
2020-05-13 13:00:25 +03:00
|
|
|
|
2018-04-30 20:09:46 +03:00
|
|
|
event = self.vm.event_wait(name='BLOCK_JOB_READY')
|
2016-10-28 10:08:15 +03:00
|
|
|
self.assert_qmp(event, 'data/device', 'commit-drive0')
|
|
|
|
self.assert_qmp(event, 'data/type', 'commit')
|
|
|
|
self.assert_qmp_absent(event, 'data/error')
|
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-complete', device='commit-drive0')
|
2016-10-28 10:08:15 +03:00
|
|
|
|
|
|
|
self.wait_until_completed(drive='commit-drive0')
|
2016-10-28 10:08:16 +03:00
|
|
|
|
2019-03-28 19:25:11 +03:00
|
|
|
# In this case the base node of the stream job is the same as the
|
2019-07-03 20:28:09 +03:00
|
|
|
# top node of commit job. Since this results in the commit filter
|
|
|
|
# node being part of the stream chain, this is not allowed.
|
2019-03-28 19:25:11 +03:00
|
|
|
def test_overlapping_4(self):
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Commit from node2 into node0
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-commit', device='drive0',
|
|
|
|
top=self.imgs[2], base=self.imgs[0],
|
|
|
|
filter_node_name='commit-filter', speed=1024*1024)
|
2019-03-28 19:25:11 +03:00
|
|
|
|
|
|
|
# Stream from node2 into node4
|
|
|
|
result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='node4')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Cannot freeze 'backing' link to 'commit-filter'")
|
2019-03-28 19:25:11 +03:00
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-set-speed', device='drive0', speed=0)
|
2019-07-03 20:28:08 +03:00
|
|
|
|
2019-03-28 19:25:11 +03:00
|
|
|
self.wait_until_completed()
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
2019-07-03 20:28:11 +03:00
|
|
|
# In this case the base node of the stream job is the commit job's
|
|
|
|
# filter node. stream does not have a real dependency on its base
|
|
|
|
# node, so even though commit removes it when it is done, there is
|
|
|
|
# no conflict.
|
|
|
|
def test_overlapping_5(self):
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Commit from node2 into node0
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-commit', device='drive0',
|
|
|
|
top_node='node2', base_node='node0',
|
|
|
|
filter_node_name='commit-filter', speed=1024*1024)
|
2019-07-03 20:28:11 +03:00
|
|
|
|
|
|
|
# Stream from node2 into node4
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='node4',
|
|
|
|
base_node='commit-filter', job_id='node4')
|
2019-07-03 20:28:11 +03:00
|
|
|
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-set-speed', device='drive0', speed=0)
|
2019-07-03 20:28:11 +03:00
|
|
|
|
2020-03-31 03:00:14 +03:00
|
|
|
self.vm.run_job(job='drive0', auto_dismiss=True)
|
|
|
|
self.vm.run_job(job='node4', auto_dismiss=True)
|
2019-07-03 20:28:11 +03:00
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
block/stream: Drain subtree around graph change
When the stream block job cuts out the nodes between top and base in
stream_prepare(), it does not drain the subtree manually; it fetches the
base node, and tries to insert it as the top node's backing node with
bdrv_set_backing_hd(). bdrv_set_backing_hd() however will drain, and so
the actual base node might change (because the base node is actually not
part of the stream job) before the old base node passed to
bdrv_set_backing_hd() is installed.
This has two implications:
First, the stream job does not keep a strong reference to the base node.
Therefore, if it is deleted in bdrv_set_backing_hd()'s drain (e.g.
because some other block job is drained to finish), we will get a
use-after-free. We should keep a strong reference to that node.
Second, even with such a strong reference, the problem remains that the
base node might change before bdrv_set_backing_hd() actually runs and as
a result the wrong base node is installed.
Both effects can be seen in 030's TestParallelOps.test_overlapping_5()
case, which has five nodes, and simultaneously streams from the middle
node to the top node, and commits the middle node down to the base node.
As it is, this will sometimes crash, namely when we encounter the
above-described use-after-free.
Taking a strong reference to the base node, we no longer get a crash,
but the resuling block graph is less than ideal: The expected result is
obviously that all middle nodes are cut out and the base node is the
immediate backing child of the top node. However, if stream_prepare()
takes a strong reference to its base node (the middle node), and then
the commit job finishes in bdrv_set_backing_hd(), supposedly dropping
that middle node, the stream job will just reinstall it again.
Therefore, we need to keep the whole subtree drained in
stream_prepare(), so that the graph modification it performs is
effectively atomic, i.e. that the base node it fetches is still the base
node when bdrv_set_backing_hd() sets it as the top node's backing node.
Verify this by asserting in said 030's test case that the base node is
always the top node's immediate backing child when both jobs are done.
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
Message-Id: <20220324140907.17192-1-hreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Acked-by: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
2022-03-24 17:09:07 +03:00
|
|
|
# Assert that node0 is now the backing node of node4
|
|
|
|
result = self.vm.qmp('query-named-block-nodes')
|
|
|
|
node4 = next(node for node in result['return'] if node['node-name'] == 'node4')
|
|
|
|
self.assertEqual(node4['image']['backing-image']['filename'], self.imgs[0])
|
|
|
|
|
2016-10-28 10:08:16 +03:00
|
|
|
# Test a block-stream and a block-commit job in parallel
|
2018-03-06 16:01:21 +03:00
|
|
|
# Here the stream job is supposed to finish quickly in order to reproduce
|
|
|
|
# the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
|
|
|
|
def test_stream_commit_1(self):
|
2016-10-28 10:08:16 +03:00
|
|
|
self.assertLessEqual(8, self.num_imgs)
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Stream from node0 into node2
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='node2', base_node='node0', job_id='node2')
|
2016-10-28 10:08:16 +03:00
|
|
|
|
|
|
|
# Commit from the active layer into node3
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-commit', device='drive0', base=self.imgs[3])
|
2016-10-28 10:08:16 +03:00
|
|
|
|
|
|
|
# Wait for all jobs to be finished.
|
|
|
|
pending_jobs = ['node2', 'drive0']
|
|
|
|
while len(pending_jobs) > 0:
|
|
|
|
for event in self.vm.get_qmp_events(wait=True):
|
|
|
|
if event['event'] == 'BLOCK_JOB_COMPLETED':
|
|
|
|
node_name = self.dictpath(event, 'data/device')
|
|
|
|
self.assertTrue(node_name in pending_jobs)
|
|
|
|
self.assert_qmp_absent(event, 'data/error')
|
|
|
|
pending_jobs.remove(node_name)
|
|
|
|
if event['event'] == 'BLOCK_JOB_READY':
|
|
|
|
self.assert_qmp(event, 'data/device', 'drive0')
|
|
|
|
self.assert_qmp(event, 'data/type', 'commit')
|
|
|
|
self.assert_qmp_absent(event, 'data/error')
|
|
|
|
self.assertTrue('drive0' in pending_jobs)
|
|
|
|
self.vm.qmp('block-job-complete', device='drive0')
|
|
|
|
|
2016-10-28 10:08:15 +03:00
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
2018-03-06 16:01:21 +03:00
|
|
|
# This is similar to test_stream_commit_1 but both jobs are slowed
|
|
|
|
# down so they can run in parallel for a little while.
|
|
|
|
def test_stream_commit_2(self):
|
|
|
|
self.assertLessEqual(8, self.num_imgs)
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
|
|
|
# Stream from node0 into node4
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024)
|
2018-03-06 16:01:21 +03:00
|
|
|
|
|
|
|
# Commit from the active layer into node5
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)
|
2018-03-06 16:01:21 +03:00
|
|
|
|
2019-07-03 20:28:08 +03:00
|
|
|
for job in ['drive0', 'node4']:
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-job-set-speed', device=job, speed=0)
|
2019-07-03 20:28:08 +03:00
|
|
|
|
2018-03-06 16:01:21 +03:00
|
|
|
# Wait for all jobs to be finished.
|
|
|
|
pending_jobs = ['node4', 'drive0']
|
|
|
|
while len(pending_jobs) > 0:
|
|
|
|
for event in self.vm.get_qmp_events(wait=True):
|
|
|
|
if event['event'] == 'BLOCK_JOB_COMPLETED':
|
|
|
|
node_name = self.dictpath(event, 'data/device')
|
|
|
|
self.assertTrue(node_name in pending_jobs)
|
|
|
|
self.assert_qmp_absent(event, 'data/error')
|
|
|
|
pending_jobs.remove(node_name)
|
|
|
|
if event['event'] == 'BLOCK_JOB_READY':
|
|
|
|
self.assert_qmp(event, 'data/device', 'drive0')
|
|
|
|
self.assert_qmp(event, 'data/type', 'commit')
|
|
|
|
self.assert_qmp_absent(event, 'data/error')
|
|
|
|
self.assertTrue('drive0' in pending_jobs)
|
|
|
|
self.vm.qmp('block-job-complete', device='drive0')
|
|
|
|
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
2016-10-28 10:08:20 +03:00
|
|
|
# Test the base_node parameter
|
|
|
|
def test_stream_base_node_name(self):
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertNotEqual(
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]).stdout,
|
|
|
|
'image file map matches backing file before streaming')
|
2016-10-28 10:08:20 +03:00
|
|
|
|
|
|
|
# Error: the base node does not exist
|
|
|
|
result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
2021-03-05 18:19:28 +03:00
|
|
|
'Cannot find device=\'\' nor node-name=\'none\'')
|
2016-10-28 10:08:20 +03:00
|
|
|
|
|
|
|
# Error: the base node is not a backing file of the top node
|
|
|
|
result = self.vm.qmp('block-stream', device='node4', base_node='node6', job_id='stream')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node6' is not a backing image of 'node4'")
|
2016-10-28 10:08:20 +03:00
|
|
|
|
|
|
|
# Error: the base node is the same as the top node
|
|
|
|
result = self.vm.qmp('block-stream', device='node4', base_node='node4', job_id='stream')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"Node 'node4' is not a backing image of 'node4'")
|
2016-10-28 10:08:20 +03:00
|
|
|
|
|
|
|
# Error: cannot specify 'base' and 'base-node' at the same time
|
|
|
|
result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], base_node='node2', job_id='stream')
|
2019-07-03 20:28:09 +03:00
|
|
|
self.assert_qmp(result, 'error/desc',
|
|
|
|
"'base' and 'base-node' cannot be specified at the same time")
|
2016-10-28 10:08:20 +03:00
|
|
|
|
|
|
|
# Success: the base node is a backing file of the top node
|
2023-10-06 18:41:25 +03:00
|
|
|
self.vm.cmd('block-stream', device='node4', base_node='node2', job_id='stream')
|
2016-10-28 10:08:20 +03:00
|
|
|
|
|
|
|
self.wait_until_completed(drive='stream')
|
|
|
|
|
|
|
|
self.assert_no_active_block_jobs()
|
|
|
|
self.vm.shutdown()
|
|
|
|
|
2022-04-19 00:14:59 +03:00
|
|
|
self.assertEqual(
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]).stdout,
|
|
|
|
qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]).stdout,
|
|
|
|
'image file map matches backing file after streaming')
|
2016-10-28 10:08:20 +03:00
|
|
|
|
2016-10-28 10:08:18 +03:00
|
|
|
class TestQuorum(iotests.QMPTestCase):
    """Test streaming into one child of a quorum node."""
    num_children = 3
    # Kept for backward compatibility; setUp() rebinds these per instance
    # so repeated runs no longer append to shared class-level lists.
    children = []
    backing = []

    @iotests.skip_if_unsupported(['quorum'])
    def setUp(self):
        # Fix: use per-instance lists.  Appending to the mutable class-level
        # lists made them accumulate paths across test instances in the same
        # process, so tearDown() could try to remove stale files.
        self.children = []
        self.backing = []
        opts = ['driver=quorum', 'vote-threshold=2']

        # Initialize file names and command-line options
        for i in range(self.num_children):
            child_img = os.path.join(iotests.test_dir, 'img-%d.img' % i)
            backing_img = os.path.join(iotests.test_dir, 'backing-%d.img' % i)
            self.children.append(child_img)
            self.backing.append(backing_img)
            qemu_img('create', '-f', iotests.imgfmt, backing_img, '1M')
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0x55 0 1024', backing_img)
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % backing_img,
                     '-F', iotests.imgfmt, child_img)
            opts.append("children.%d.file.filename=%s" % (i, child_img))
            opts.append("children.%d.node-name=node%d" % (i, i))

        # Attach the quorum drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(path=None, opts=','.join(opts))
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for img in self.children:
            os.remove(img)
        for img in self.backing:
            os.remove(img)

    def test_stream_quorum(self):
        """Stream node0 and verify its data now matches its backing file."""
        # The first child's map must differ from its backing file before
        # streaming ...
        self.assertNotEqual(
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]).stdout,
            'image file map matches backing file before streaming')

        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='node0', job_id='stream-node0')

        self.wait_until_completed(drive='stream-node0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        # ... and must be equal afterwards.
        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]).stdout,
            'image file map does not match backing file after streaming')
2016-10-28 10:08:18 +03:00
|
|
|
|
2013-05-28 19:11:37 +04:00
|
|
|
class TestSmallerBackingFile(iotests.QMPTestCase):
    """Stream an image whose backing file is smaller than the image itself."""
    backing_len = 1 * 1024 * 1024 # MB
    image_len = 2 * backing_len

    def setUp(self):
        # The backing file covers only the first half of the test image.
        iotests.create_image(backing_img, self.backing_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img, str(self.image_len))
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    # If this hangs, then you are missing a fix to complete streaming when the
    # end of the backing file is reached.
    def test_stream(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0')

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()
|
|
|
|
|
2013-05-28 19:11:37 +04:00
|
|
|
class TestErrors(iotests.QMPTestCase):
    """Common infrastructure for error-injection stream tests."""
    image_len = 2 * 1024 * 1024 # MB

    # this should match STREAM_BUFFER_SIZE/512 in block/stream.c
    STREAM_BUFFER_SIZE = 512 * 1024

    def create_blkdebug_file(self, name, event, errno):
        """Write a blkdebug config to *name* that injects *errno* once on
        *event* at sector STREAM_BUFFER_SIZE/512, toggling between two
        states so the error fires on alternating occurrences."""
        # Fix: use a context manager instead of a bare open()/close() pair,
        # so the file is closed even if write() raises (also avoids naming
        # a local after the old Python 2 'file' builtin).
        with open(name, 'w') as f:
            f.write('''
[inject-error]
state = "1"
event = "%s"
errno = "%d"
immediately = "off"
once = "on"
sector = "%d"

[set-state]
state = "1"
event = "%s"
new_state = "2"

[set-state]
state = "2"
event = "%s"
new_state = "1"
''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event))
|
|
|
|
|
|
|
|
class TestEIO(TestErrors):
    """Inject EIO (errno 5) into reads from the backing chain and check
    the 'report', 'ignore' and 'stop' on-error policies of block-stream."""

    def setUp(self):
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        # errno 5 == EIO
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 5)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_report(self):
        """Default policy: the error is reported and the job fails."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0')

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    # The job stops right where the error was injected.
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_ignore(self):
        """'ignore' policy: errors are reported but the job keeps going."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='ignore')

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Job finished too quickly
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    # Despite the error the job processed the whole image.
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_stop(self):
        """'stop' policy: the job pauses on error and resumes cleanly."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='stop')

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')

                    # Wait for the job to be paused by the error if it has
                    # not reached that state yet.
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'failed')

                    self.vm.cmd('block-job-resume', device='drive0')

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_enospc(self):
        """'enospc' policy with an EIO error: treated like 'report'."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='enospc')

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()
|
|
|
|
|
|
|
|
class TestENOSPC(TestErrors):
    """Inject ENOSPC (errno 28) into reads and check that the 'enospc'
    on-error policy pauses the stream job and lets it resume."""

    def setUp(self):
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        # errno 28 == ENOSPC
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 28)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_enospc(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='enospc')

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True

                    # Wait for the job to be paused by the error if it has
                    # not reached that state yet.
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'nospace')

                    self.vm.cmd('block-job-resume', device='drive0')

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()
|
2012-08-28 18:26:49 +04:00
|
|
|
|
2013-05-28 19:11:37 +04:00
|
|
|
class TestStreamStop(iotests.QMPTestCase):
    """Check that a stream job can be cancelled while the drive is paused."""
    image_len = 8 * 1024 * 1024 * 1024 # GB

    def setUp(self):
        qemu_img('create', backing_img, str(TestStreamStop.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        # blkdebug lets pause_drive() below hold back the job's I/O.
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    def test_stream_stop(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        self.vm.cmd('block-stream', device='drive0')

        # Give the job a moment; only status-change events may arrive
        # while the drive is paused.
        time.sleep(0.1)
        events = self.vm.get_qmp_events(wait=False)
        for e in events:
            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
            self.assert_qmp(e, 'data/id', 'drive0')

        self.cancel_and_wait(resume=True)
|
2012-02-29 17:25:22 +04:00
|
|
|
|
2013-05-28 19:11:37 +04:00
|
|
|
class TestSetSpeed(iotests.QMPTestCase):
    """Exercise the speed limit of the stream job."""
    image_len = 80 * 1024 * 1024 # MB

    def setUp(self):
        qemu_img('create', backing_img, str(TestSetSpeed.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        # blkdebug lets pause_drive() hold the job while we query it.
        self.vm = iotests.VM().add_drive('blkdebug::' + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    # This is a short performance test which is not run by default.
    # Invoke "IMGFMT=qed ./030 TestSetSpeed.perf_test_throughput"
    def perf_test_throughput(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0')

        self.vm.cmd('block-job-set-speed', device='drive0',
                    speed=8 * 1024 * 1024)

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

    def test_set_speed(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        self.vm.cmd('block-stream', device='drive0')

        # Default speed is 0
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 0)

        self.vm.cmd('block-job-set-speed', device='drive0',
                    speed=8 * 1024 * 1024)

        # Ensure the speed we set was accepted
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 8 * 1024 * 1024)

        self.cancel_and_wait(resume=True)
        self.vm.pause_drive('drive0')

        # Check setting speed in block-stream works
        self.vm.cmd('block-stream', device='drive0', speed=4 * 1024 * 1024)

        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 4 * 1024 * 1024)

        self.cancel_and_wait(resume=True)

    def test_set_speed_invalid(self):
        self.assert_no_active_block_jobs()

        # Negative speed must be rejected both at job creation ...
        result = self.vm.qmp('block-stream', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")

        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        self.vm.cmd('block-stream', device='drive0')

        # ... and on a running job.
        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")

        self.cancel_and_wait(resume=True)
|
2012-04-25 19:51:04 +04:00
|
|
|
|
2012-02-29 17:25:22 +04:00
|
|
|
if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'qed'],
                 supported_protocols=['file'])
|