e2c5093c99
I reproduced the following crash quickly enough:

 0  raise () at /lib64/libc.so.6
 1  abort () at /lib64/libc.so.6
 2  _nl_load_domain.cold () at /lib64/libc.so.6
 3  annobin_assert.c_end () at /lib64/libc.so.6
 4  bdrv_reopen_multiple (bs_queue=0x55de75fa9b70, errp=0x0) at ../block.c:3820
 5  bdrv_reopen_set_read_only (bs=0x55de760fc020, read_only=true, errp=0x0) at ../block.c:3870
 6  stream_clean (job=0x55de75fa9410) at ../block/stream.c:99
 7  job_clean (job=0x55de75fa9410) at ../job.c:680
 8  job_finalize_single (job=0x55de75fa9410) at ../job.c:696
 9  job_txn_apply (job=0x55de75fa9410, fn=0x55de741eee27 <job_finalize_single>) at ../job.c:158
10  job_do_finalize (job=0x55de75fa9410) at ../job.c:805
11  job_completed_txn_success (job=0x55de75fa9410) at ../job.c:855
12  job_completed (job=0x55de75fa9410) at ../job.c:868
13  job_exit (opaque=0x55de75fa9410) at ../job.c:888
14  aio_bh_call (bh=0x55de76b9b4e0) at ../util/async.c:136
15  aio_bh_poll (ctx=0x55de75bc5300) at ../util/async.c:164
16  aio_dispatch (ctx=0x55de75bc5300) at ../util/aio-posix.c:381
17  aio_ctx_dispatch (source=0x55de75bc5300, callback=0x0, user_data=0x0) at ../util/async.c:306
18  g_main_context_dispatch () at /lib64/libglib-2.0.so.0
19  glib_pollfds_poll () at ../util/main-loop.c:232
20  os_host_main_loop_wait (timeout=0) at ../util/main-loop.c:255
21  main_loop_wait (nonblocking=0) at ../util/main-loop.c:531
22  qemu_main_loop () at ../softmmu/runstate.c:722
23  main (argc=20, argv=0x7ffe218f0268, envp=0x7ffe218f0310) at ../softmmu/main.c:50

(gdb) fr 4
 4  bdrv_reopen_multiple (bs_queue=0x55de75fa9b70, errp=0x0) at ../block.c:3820
3820            assert(perm == state->perm);
(gdb) list
3815
3816        if (ret == 0) {
3817            uint64_t perm, shared;
3818
3819            bdrv_get_cumulative_perm(state->bs, &perm, &shared);
3820            assert(perm == state->perm);
3821            assert(shared == state->shared_perm);
3822
3823            bdrv_set_perm(state->bs);
3824        } else {
(gdb) p perm
$1 = 1
(gdb) p state->perm
$2 = 0

Then I had 38 successful iterations and another crash:

 0  bdrv_check_update_perm (bs=0x5631ac97bc50, q=0x0, new_used_perm=1, new_shared_perm=31, ignore_children=0x0, errp=0x7ffd9d477cf8) at ../block.c:2197
 1  bdrv_root_attach_child (child_bs=0x5631ac97bc50, child_name=0x5631aaf6b1f9 "backing", child_class=0x5631ab280ca0 <child_of_bds>, child_role=8, ctx=0x5631ab757300, perm=1, shared_perm=31, opaque=0x5631abb8c020, errp=0x7ffd9d477cf8) at ../block.c:2642
 2  bdrv_attach_child (parent_bs=0x5631abb8c020, child_bs=0x5631ac97bc50, child_name=0x5631aaf6b1f9 "backing", child_class=0x5631ab280ca0 <child_of_bds>, child_role=8, errp=0x7ffd9d477cf8) at ../block.c:2719
 3  bdrv_set_backing_hd (bs=0x5631abb8c020, backing_hd=0x5631ac97bc50, errp=0x7ffd9d477cf8) at ../block.c:2854
 4  stream_prepare (job=0x5631ac751eb0) at ../block/stream.c:74
 5  job_prepare (job=0x5631ac751eb0) at ../job.c:784
 6  job_txn_apply (job=0x5631ac751eb0, fn=0x5631aacb1156 <job_prepare>) at ../job.c:158
 7  job_do_finalize (job=0x5631ac751eb0) at ../job.c:801
 8  job_completed_txn_success (job=0x5631ac751eb0) at ../job.c:855
 9  job_completed (job=0x5631ac751eb0) at ../job.c:868
10  job_exit (opaque=0x5631ac751eb0) at ../job.c:888
11  aio_bh_call (bh=0x7f3d9c007680) at ../util/async.c:136
12  aio_bh_poll (ctx=0x5631ab757300) at ../util/async.c:164
13  aio_dispatch (ctx=0x5631ab757300) at ../util/aio-posix.c:381
14  aio_ctx_dispatch (source=0x5631ab757300, callback=0x0, user_data=0x0) at ../util/async.c:306
15  g_main_context_dispatch () at /lib64/libglib-2.0.so.0
16  glib_pollfds_poll () at ../util/main-loop.c:232
17  os_host_main_loop_wait (timeout=0) at ../util/main-loop.c:255
18  main_loop_wait (nonblocking=0) at ../util/main-loop.c:531
19  qemu_main_loop () at ../softmmu/runstate.c:722
20  main (argc=20, argv=0x7ffd9d478198, envp=0x7ffd9d478240) at ../softmmu/main.c:50

(gdb) list
2192        QLIST_FOREACH(c, &bs->parents, next_parent) {
2193            if (g_slist_find(ignore_children, c)) {
2194                continue;
2195            }
2196
2197            if ((new_used_perm & c->shared_perm) != new_used_perm) {
2198                char *user = bdrv_child_user_desc(c);
2199                char *perm_names = bdrv_perm_names(new_used_perm & ~c->shared_perm);
2200
2201                error_setg(errp, "Conflicts with use by %s as '%s', which does not "
(gdb) p c
$1 = (BdrvChild *) 0x8585858585858585

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-id: 20210205111021.715240-1-vsementsov@virtuozzo.com
Reviewed-by: Eric Blake <eblake@redhat.com>
[PMM: trimmed the part of the commit message referring to
 as-yet-unapplied patchseries]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
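Reproducing this means rerunning the test until it fails; a minimal rerun loop, assuming the standard qemu-iotests './check' runner and that the file below is test 030, might look like:

    # hypothetical rerun loop, run from the qemu-iotests build directory
    import subprocess

    n = 0
    while True:
        n += 1
        # '-qcow2' selects the image format, '030' is the streaming test
        if subprocess.run(['./check', '-qcow2', '030']).returncode != 0:
            print('failed after %d iterations' % n)
            break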
#!/usr/bin/env python3
# group: rw backing
#
# Tests for image streaming.
#
# Copyright (C) 2012 IBM Corp.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import time
import os
import iotests
import unittest
from iotests import qemu_img, qemu_io

backing_img = os.path.join(iotests.test_dir, 'backing.img')
mid_img = os.path.join(iotests.test_dir, 'mid.img')
test_img = os.path.join(iotests.test_dir, 'test.img')

class TestSingleDrive(iotests.QMPTestCase):
    image_len = 1 * 1024 * 1024 # MB

    def setUp(self):
        iotests.create_image(backing_img, TestSingleDrive.image_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', mid_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % mid_img,
                 '-F', iotests.imgfmt, test_img)
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 512', backing_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 524288 512', mid_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img,
                                         "backing.node-name=mid," +
                                         "backing.backing.node-name=base")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(mid_img)
        os.remove(backing_img)

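    # The chain created by setUp() above is (base at the left):
    #
    #   backing.img (raw, node-name=base)
    #     <- mid.img (IMGFMT, node-name=mid)
    #       <- test.img (IMGFMT, drive0, opened through blkdebug)
    #
    # so a plain 'block-stream' on drive0 pulls the data written to both
    # base and mid up into test.img.
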
    def test_stream(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_stream_intermediate(self):
        self.assert_no_active_block_jobs()

        self.assertNotEqual(qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img),
                            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img),
                            'image file map matches backing file before streaming')

        result = self.vm.qmp('block-stream', device='mid', job_id='stream-mid')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-mid')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img),
                         'image file map does not match backing file after streaming')

    def test_stream_pause(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.pause_job('drive0', wait=False)
        self.vm.resume_drive('drive0')
        self.pause_wait('drive0')

        result = self.vm.qmp('query-block-jobs')
        offset = self.dictpath(result, 'return[0]/offset')

        time.sleep(0.5)
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/offset', offset)

        result = self.vm.qmp('block-job-resume', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_stream_no_op(self):
        self.assert_no_active_block_jobs()

        # The image map is empty before the operation
        empty_map = qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', test_img)

        # This is a no-op: no data should ever be copied from the base image
        result = self.vm.qmp('block-stream', device='drive0', base=mid_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         empty_map, 'image file map changed after a no-op')

    def test_stream_partial(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', base=backing_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_device_not_found(self):
        result = self.vm.qmp('block-stream', device='nonexistent')
        self.assert_qmp(result, 'error/desc',
                        'Cannot find device=nonexistent nor node_name=nonexistent')

    def test_job_id_missing(self):
        result = self.vm.qmp('block-stream', device='mid')
        self.assert_qmp(result, 'error/desc', "Invalid job ID ''")

    def test_read_only(self):
        # Create a new file that we can attach (we need a read-only top)
        with iotests.FilePath('ro-top.img') as ro_top_path:
            qemu_img('create', '-f', iotests.imgfmt, ro_top_path,
                     str(self.image_len))

            result = self.vm.qmp('blockdev-add',
                                 node_name='ro-top',
                                 driver=iotests.imgfmt,
                                 read_only=True,
                                 file={
                                     'driver': 'file',
                                     'filename': ro_top_path,
                                     'read-only': True
                                 },
                                 backing='mid')
            self.assert_qmp(result, 'return', {})

            result = self.vm.qmp('block-stream', job_id='stream',
                                 device='ro-top', base_node='base')
            self.assert_qmp(result, 'error/desc', 'Block node is read-only')

            result = self.vm.qmp('blockdev-del', node_name='ro-top')
            self.assert_qmp(result, 'return', {})


class TestParallelOps(iotests.QMPTestCase):
    num_ops = 4 # Number of parallel block-stream operations
    num_imgs = num_ops * 2 + 1
    image_len = num_ops * 4 * 1024 * 1024
    imgs = []

    def setUp(self):
        opts = []
        self.imgs = []

        # Initialize file names and command-line options
        for i in range(self.num_imgs):
            img_depth = self.num_imgs - i - 1
            opts.append("backing." * img_depth + "node-name=node%d" % i)
            self.imgs.append(os.path.join(iotests.test_dir, 'img-%d.img' % i))

        # Create all images
        iotests.create_image(self.imgs[0], self.image_len)
        for i in range(1, self.num_imgs):
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % self.imgs[i-1],
                     '-F', 'raw' if i == 1 else iotests.imgfmt, self.imgs[i])

        # Put data into the images we are copying data from
        odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
        for i in range(len(odd_img_indexes)):
            # Alternate between 2MB and 4MB.
            # This way jobs will not finish in the same order they were created
            num_mb = 2 + 2 * (i % 2)
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0xFF %dM %dM' % (i * 4, num_mb),
                    self.imgs[odd_img_indexes[i]])

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(self.imgs[-1], ','.join(opts))
        self.vm.launch()

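    # With the defaults above (num_ops = 4, num_imgs = 9), setUp() builds a
    # single backing chain img-0 <- img-1 <- ... <- img-8, exposed as node0
    # through node8, with node8 being the drive0 active layer. Data is
    # written only to the odd-numbered images, so each even node can be
    # streamed from the odd node directly below it, independently of the
    # other pairs.
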
    def tearDown(self):
        self.vm.shutdown()
        for img in self.imgs:
            os.remove(img)

    # Test that it's possible to run several block-stream operations
    # in parallel in the same snapshot chain
    @unittest.skipIf(os.environ.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
    def test_stream_parallel(self):
        self.assert_no_active_block_jobs()

        # Check that the maps don't match before the streaming operations
        for i in range(2, self.num_imgs, 2):
            self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]),
                                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]),
                                'image file map matches backing file before streaming')

        # Create all streaming jobs
        pending_jobs = []
        for i in range(2, self.num_imgs, 2):
            node_name = 'node%d' % i
            job_id = 'stream-%s' % node_name
            pending_jobs.append(job_id)
            result = self.vm.qmp('block-stream', device=node_name,
                                 job_id=job_id, bottom=f'node{i-1}',
                                 speed=1024)
            self.assert_qmp(result, 'return', {})

        for job in pending_jobs:
            result = self.vm.qmp('block-job-set-speed', device=job, speed=0)
            self.assert_qmp(result, 'return', {})

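        # Note on the ordering above: each job is created with a 1 KiB/s
        # throttle so that none of them can finish (and modify the graph)
        # before all of them have been created; only once every job exists
        # is the limit lifted again with block-job-set-speed.
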
        # Wait for all jobs to be finished.
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    job_id = self.dictpath(event, 'data/device')
                    self.assertTrue(job_id in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(job_id)

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        # Check that all maps match now
        for i in range(2, self.num_imgs, 2):
            self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]),
                             qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]),
                             'image file map does not match backing file after streaming')

    # Test that it's not possible to perform two block-stream
    # operations if there are nodes involved in both.
    def test_overlapping_1(self):
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-stream', device='node4',
                             job_id='stream-node4', base=self.imgs[1],
                             filter_node_name='stream-filter', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2])
        self.assert_qmp(result, 'error/desc',
            "Node 'stream-filter' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2])
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4-v2')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is busy: block device is in use by block job: stream")

        # block-commit should also fail if it touches nodes used by the stream job
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4')
        self.assert_qmp(result, 'error/desc',
            "Node 'stream-filter' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1')
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: stream")

        # This fails because it needs to modify the backing string in node2, which is blocked
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[0], top=self.imgs[1], job_id='commit-node0')
        self.assert_qmp(result, 'error/desc',
            "Node 'node2' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-job-set-speed', device='stream-node4', speed=0)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-node4')
        self.assert_no_active_block_jobs()

    # Similar to test_overlapping_1, but with block-commit
    # blocking the other jobs
    def test_overlapping_2(self):
        self.assertLessEqual(9, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-commit', device='drive0', top=self.imgs[5], base=self.imgs[3], job_id='commit-node3', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3')
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node6', base=self.imgs[2], job_id='stream-node6')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], job_id='stream-node4')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node6', base=self.imgs[4], job_id='stream-node6-v2')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        # This fails because block-commit currently blocks the active layer even if it's not used
        result = self.vm.qmp('block-stream', device='drive0', base=self.imgs[5], job_id='stream-drive0')
        self.assert_qmp(result, 'error/desc',
            "Node 'drive0' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-job-set-speed', device='commit-node3', speed=0)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='commit-node3')

    # Similar to test_overlapping_2, but here block-commit doesn't use the 'top' parameter.
    # Internally this uses a mirror block job, hence the separate test case.
    def test_overlapping_3(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[3], job_id='commit-drive0', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-job-set-speed', device='commit-drive0', speed=0)
        self.assert_qmp(result, 'return', {})

        event = self.vm.event_wait(name='BLOCK_JOB_READY')
        self.assert_qmp(event, 'data/device', 'commit-drive0')
        self.assert_qmp(event, 'data/type', 'commit')
        self.assert_qmp_absent(event, 'data/error')

        result = self.vm.qmp('block-job-complete', device='commit-drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='commit-drive0')

    # In this case the base node of the stream job is the same as the
    # top node of commit job. Since this results in the commit filter
    # node being part of the stream chain, this is not allowed.
    def test_overlapping_4(self):
        self.assert_no_active_block_jobs()

        # Commit from node2 into node0
        result = self.vm.qmp('block-commit', device='drive0',
                             top=self.imgs[2], base=self.imgs[0],
                             filter_node_name='commit-filter', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        # Stream from node2 into node4
        result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='node4')
        self.assert_qmp(result, 'error/desc',
            "Cannot freeze 'backing' link to 'commit-filter'")

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()
        self.assert_no_active_block_jobs()

    # In this case the base node of the stream job is the commit job's
    # filter node. stream does not have a real dependency on its base
    # node, so even though commit removes it when it is done, there is
    # no conflict.
    def test_overlapping_5(self):
        self.assert_no_active_block_jobs()

        # Commit from node2 into node0
        result = self.vm.qmp('block-commit', device='drive0',
                             top_node='node2', base_node='node0',
                             filter_node_name='commit-filter', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        # Stream from node2 into node4
        result = self.vm.qmp('block-stream', device='node4',
                             base_node='commit-filter', job_id='node4')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
        self.assert_qmp(result, 'return', {})

        self.vm.run_job(job='drive0', auto_dismiss=True)
        self.vm.run_job(job='node4', auto_dismiss=True)
        self.assert_no_active_block_jobs()

    # Test a block-stream and a block-commit job in parallel
    # Here the stream job is supposed to finish quickly in order to reproduce
    # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
    def test_stream_commit_1(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Stream from node0 into node2
        result = self.vm.qmp('block-stream', device='node2', base_node='node0', job_id='node2')
        self.assert_qmp(result, 'return', {})

        # Commit from the active layer into node3
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[3])
        self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        pending_jobs = ['node2', 'drive0']
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    node_name = self.dictpath(event, 'data/device')
                    self.assertTrue(node_name in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(node_name)
                if event['event'] == 'BLOCK_JOB_READY':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/type', 'commit')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assertTrue('drive0' in pending_jobs)
                    self.vm.qmp('block-job-complete', device='drive0')

        self.assert_no_active_block_jobs()

    # This is similar to test_stream_commit_1 but both jobs are slowed
    # down so they can run in parallel for a little while.
    def test_stream_commit_2(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Stream from node0 into node4
        result = self.vm.qmp('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        # Commit from the active layer into node5
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        for job in ['drive0', 'node4']:
            result = self.vm.qmp('block-job-set-speed', device=job, speed=0)
            self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        pending_jobs = ['node4', 'drive0']
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    node_name = self.dictpath(event, 'data/device')
                    self.assertTrue(node_name in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(node_name)
                if event['event'] == 'BLOCK_JOB_READY':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/type', 'commit')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assertTrue('drive0' in pending_jobs)
                    self.vm.qmp('block-job-complete', device='drive0')

        self.assert_no_active_block_jobs()

    # Test the base_node parameter
    def test_stream_base_node_name(self):
        self.assert_no_active_block_jobs()

        self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]),
                            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]),
                            'image file map matches backing file before streaming')

        # Error: the base node does not exist
        result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            'Cannot find device= nor node_name=none')

        # Error: the base node is not a backing file of the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node6', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "Node 'node6' is not a backing image of 'node4'")

        # Error: the base node is the same as the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node4', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is not a backing image of 'node4'")

        # Error: cannot specify 'base' and 'base-node' at the same time
        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], base_node='node2', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "'base' and 'base-node' cannot be specified at the same time")

        # Success: the base node is a backing file of the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='stream')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]),
                         'image file map does not match backing file after streaming')

class TestQuorum(iotests.QMPTestCase):
    num_children = 3
    children = []
    backing = []

    @iotests.skip_if_unsupported(['quorum'])
    def setUp(self):
        opts = ['driver=quorum', 'vote-threshold=2']

        # Initialize file names and command-line options
        for i in range(self.num_children):
            child_img = os.path.join(iotests.test_dir, 'img-%d.img' % i)
            backing_img = os.path.join(iotests.test_dir, 'backing-%d.img' % i)
            self.children.append(child_img)
            self.backing.append(backing_img)
            qemu_img('create', '-f', iotests.imgfmt, backing_img, '1M')
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0x55 0 1024', backing_img)
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % backing_img,
                     '-F', iotests.imgfmt, child_img)
            opts.append("children.%d.file.filename=%s" % (i, child_img))
            opts.append("children.%d.node-name=node%d" % (i, i))

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(path = None, opts = ','.join(opts))
        self.vm.launch()

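    # setUp() gives each quorum child its own backing file, and only child 0
    # (node0) is streamed in test_stream_quorum below; that is why only
    # self.children[0] and self.backing[0] are compared there.
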
    def tearDown(self):
        self.vm.shutdown()
        for img in self.children:
            os.remove(img)
        for img in self.backing:
            os.remove(img)

    def test_stream_quorum(self):
        self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]),
                            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]),
                            'image file map matches backing file before streaming')

        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='node0', job_id='stream-node0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-node0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]),
                         'image file map does not match backing file after streaming')

class TestSmallerBackingFile(iotests.QMPTestCase):
    backing_len = 1 * 1024 * 1024 # MB
    image_len = 2 * backing_len

    def setUp(self):
        iotests.create_image(backing_img, self.backing_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img, str(self.image_len))
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    # If this hangs, then you are missing a fix to complete streaming when the
    # end of the backing file is reached.
    def test_stream(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestErrors(iotests.QMPTestCase):
    image_len = 2 * 1024 * 1024 # MB

    # this should match STREAM_BUFFER_SIZE/512 in block/stream.c
    STREAM_BUFFER_SIZE = 512 * 1024

    def create_blkdebug_file(self, name, event, errno):
        file = open(name, 'w')
        file.write('''
[inject-error]
state = "1"
event = "%s"
errno = "%d"
immediately = "off"
once = "on"
sector = "%d"

[set-state]
state = "1"
event = "%s"
new_state = "2"

[set-state]
state = "2"
event = "%s"
new_state = "1"
''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event))
        file.close()
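
    # A rough sketch of how the rules above interact (blkdebug semantics):
    # [inject-error] only fires while the device is in state 1, and the two
    # [set-state] rules toggle between states 1 and 2 on every matching
    # event, so the error is injected at the given sector and a subsequent
    # retry of the same request can succeed.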
class TestEIO(TestErrors):
    def setUp(self):
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 5)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                 % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_report(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_ignore(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='ignore')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Job finished too quickly
                        continue
                    self.assert_qmp(result, 'return[0]/paused', False)
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_stop(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='stop')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/paused', True)
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'failed')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assert_qmp(result, 'return[0]/paused', False)
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_enospc(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='enospc')
        self.assert_qmp(result, 'return', {})

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestENOSPC(TestErrors):
    def setUp(self):
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 28)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                 % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_enospc(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='enospc')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/paused', True)
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'nospace')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assert_qmp(result, 'return[0]/paused', False)
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestStreamStop(iotests.QMPTestCase):
    image_len = 8 * 1024 * 1024 * 1024 # GB

    def setUp(self):
        qemu_img('create', backing_img, str(TestStreamStop.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    def test_stream_stop(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        time.sleep(0.1)
        events = self.vm.get_qmp_events(wait=False)
        for e in events:
            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
            self.assert_qmp(e, 'data/id', 'drive0')

        self.cancel_and_wait(resume=True)

class TestSetSpeed(iotests.QMPTestCase):
    image_len = 80 * 1024 * 1024 # MB

    def setUp(self):
        qemu_img('create', backing_img, str(TestSetSpeed.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive('blkdebug::' + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    # This is a short performance test which is not run by default.
    # Invoke "IMGFMT=qed ./030 TestSetSpeed.perf_test_throughput"
    def perf_test_throughput(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

    def test_set_speed(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        # Default speed is 0
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 0)

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        # Ensure the speed we set was accepted
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 8 * 1024 * 1024)

        self.cancel_and_wait(resume=True)
        self.vm.pause_drive('drive0')

        # Check setting speed in block-stream works
        result = self.vm.qmp('block-stream', device='drive0', speed=4 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 4 * 1024 * 1024)

        self.cancel_and_wait(resume=True)

    def test_set_speed_invalid(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")

        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")

        self.cancel_and_wait(resume=True)

if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'qed'],
                 supported_protocols=['file'])