b6aed193e5
In many cases we just want the effect of a QMP command and want to raise on
failure. Use the vm.cmd() method, which does exactly this.

The commit is generated by the command

    git grep -l '\.qmp(' | xargs ./scripts/python_qmp_updater.py

and then self.assertRaises is fixed to expect the ExecuteError exception in
tests/qemu-iotests/124.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20231006154125.1068348-16-vsementsov@yandex-team.ru
Signed-off-by: John Snow <jsnow@redhat.com>
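For reference, a minimal sketch of the pattern this change applies inside an
iotests.QMPTestCase method. The 'device_del' call is a made-up example rather
than a hunk from this commit; ExecuteError is the exception vm.cmd() raises on
a QMP error (provided by the qemu.qmp package):

    # Before: issue the command and check the result by hand
    result = self.vm.qmp('device_del', id='scsi0')
    self.assert_qmp(result, 'return', {})

    # After: vm.cmd() raises ExecuteError if QMP returns an error, so no
    # explicit check is needed
    self.vm.cmd('device_del', id='scsi0')

    # Where a test expects the command to fail (as in tests/qemu-iotests/124),
    # assert the exception instead
    with self.assertRaises(ExecuteError):
        self.vm.cmd('device_del', id='scsi0')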
#!/usr/bin/env python3
# group: throttle
#
# Tests for IO throttling
#
# Copyright (C) 2015 Red Hat, Inc.
# Copyright (C) 2015-2016 Igalia, S.L.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import iotests

nsec_per_sec = 1000000000

class ThrottleTestCase(iotests.QMPTestCase):
    test_driver = "null-aio"
    max_drives = 3

    def blockstats(self, device):
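        # Return (rd_bytes, rd_operations, wr_bytes, wr_operations) for the
        # given device, as reported by query-blockstats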
        result = self.vm.qmp("query-blockstats")
        for r in result['return']:
            if r['device'] == device:
                stat = r['stats']
                return stat['rd_bytes'], stat['rd_operations'], stat['wr_bytes'], stat['wr_operations']
        raise Exception("Device not found for blockstats: %s" % device)

    def required_drivers(self):
        return [self.test_driver]

    @iotests.skip_if_unsupported(required_drivers)
    def setUp(self):
        self.vm = iotests.VM()
        for i in range(0, self.max_drives):
            self.vm.add_drive(self.test_driver + "://", "file.read-zeroes=on")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def configure_throttle(self, ndrives, params):
        params['group'] = 'test'

        # Set the I/O throttling parameters to all drives
        for i in range(0, ndrives):
            params['device'] = 'drive%d' % i
            self.vm.cmd("block_set_io_throttle", conv_keys=False, **params)

    def do_test_throttle(self, ndrives, seconds, params, first_drive = 0):
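        # Issue aio_read/aio_write requests on 'ndrives' drives (starting at
        # 'first_drive'), run them for 'seconds' seconds of virtual time and
        # check that the completed I/O stays within the limits in 'params'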
        def check_limit(limit, num):
            # IO throttling algorithm is discrete, allow 10% error so the test
            # is more robust
            return limit == 0 or \
                   (num < seconds * limit * 1.1 / ndrives
                    and num > seconds * limit * 0.9 / ndrives)

        # Set vm clock to a known value
        ns = seconds * nsec_per_sec
        self.vm.qtest("clock_step %d" % ns)

        # Submit enough requests so the throttling mechanism kicks
        # in. The throttled requests won't be executed until we
        # advance the virtual clock.
        rq_size = 512
        rd_nr = max(params['bps'] // rq_size // 2,
                    params['bps_rd'] // rq_size,
                    params['iops'] // 2,
                    params['iops_rd'])
        rd_nr *= seconds * 2
        rd_nr //= ndrives
        wr_nr = max(params['bps'] // rq_size // 2,
                    params['bps_wr'] // rq_size,
                    params['iops'] // 2,
                    params['iops_wr'])
        wr_nr *= seconds * 2
        wr_nr //= ndrives

        # Send I/O requests to all drives
        for i in range(rd_nr):
            for drive in range(0, ndrives):
                idx = first_drive + drive
                self.vm.hmp_qemu_io("drive%d" % idx, "aio_read %d %d" %
                                    (i * rq_size, rq_size))

        for i in range(wr_nr):
            for drive in range(0, ndrives):
                idx = first_drive + drive
                self.vm.hmp_qemu_io("drive%d" % idx, "aio_write %d %d" %
                                    (i * rq_size, rq_size))

        # We'll store the I/O stats for each drive in these arrays
        start_rd_bytes = [0] * ndrives
        start_rd_iops = [0] * ndrives
        start_wr_bytes = [0] * ndrives
        start_wr_iops = [0] * ndrives
        end_rd_bytes = [0] * ndrives
        end_rd_iops = [0] * ndrives
        end_wr_bytes = [0] * ndrives
        end_wr_iops = [0] * ndrives

        # Read the stats before advancing the clock
        for i in range(0, ndrives):
            idx = first_drive + i
            start_rd_bytes[i], start_rd_iops[i], start_wr_bytes[i], \
                start_wr_iops[i] = self.blockstats('drive%d' % idx)

        self.vm.qtest("clock_step %d" % ns)

        # Read the stats after advancing the clock
        for i in range(0, ndrives):
            idx = first_drive + i
            end_rd_bytes[i], end_rd_iops[i], end_wr_bytes[i], \
                end_wr_iops[i] = self.blockstats('drive%d' % idx)

        # Check that the I/O is within the limits and evenly distributed
        for i in range(0, ndrives):
            rd_bytes = end_rd_bytes[i] - start_rd_bytes[i]
            rd_iops = end_rd_iops[i] - start_rd_iops[i]
            wr_bytes = end_wr_bytes[i] - start_wr_bytes[i]
            wr_iops = end_wr_iops[i] - start_wr_iops[i]

            self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
            self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
            self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
            self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
            self.assertTrue(check_limit(params['iops_rd'], rd_iops))
            self.assertTrue(check_limit(params['iops_wr'], wr_iops))

        # Allow remaining requests to finish. We submitted twice as many to
        # ensure the throttle limit is reached.
        self.vm.qtest("clock_step %d" % ns)

    # Connect N drives to a VM and test I/O in all of them
    def test_all(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        # Repeat the test with different numbers of drives
        for ndrives in range(1, self.max_drives + 1):
            # Pick each out of all possible params and test
            for tk in params:
                limits = dict([(k, 0) for k in params])
                limits[tk] = params[tk] * ndrives
                self.configure_throttle(ndrives, limits)
                self.do_test_throttle(ndrives, 5, limits)

    # Connect N drives to a VM and test I/O in just one of them at a time
    def test_one(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        # Repeat the test for each one of the drives
        for drive in range(0, self.max_drives):
            # Pick each out of all possible params and test
            for tk in params:
                limits = dict([(k, 0) for k in params])
                limits[tk] = params[tk] * self.max_drives
                self.configure_throttle(self.max_drives, limits)
                self.do_test_throttle(1, 5, limits, drive)

    def test_burst(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        ndrives = 1
        # Pick each out of all possible params and test
        for tk in params:
            rate = params[tk] * ndrives
            burst_rate = rate * 7
            burst_length = 4

            # Configure the throttling settings
            settings = dict([(k, 0) for k in params])
            settings[tk] = rate
            settings['%s_max' % tk] = burst_rate
            settings['%s_max_length' % tk] = burst_length
            self.configure_throttle(ndrives, settings)

            # Wait for the bucket to empty so we can do bursts
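            # (a full burst bucket of burst_rate * burst_length units drains
            # at 'rate' units per second, i.e. in burst_length * burst_rate /
            # rate seconds)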
            wait_ns = nsec_per_sec * burst_length * burst_rate // rate
            self.vm.qtest("clock_step %d" % wait_ns)

            # Test I/O at the max burst rate
            limits = dict([(k, 0) for k in params])
            limits[tk] = burst_rate
            self.do_test_throttle(ndrives, burst_length, limits)

            # Now test I/O at the normal rate
            limits[tk] = rate
            self.do_test_throttle(ndrives, 5, limits)

    # Test that removing a drive from a throttle group should not
    # affect the remaining members of the group.
    # https://bugzilla.redhat.com/show_bug.cgi?id=1535914
    def test_remove_group_member(self):
        # Create a throttle group with two drives
        # and set a 4 KB/s read limit.
        params = {"bps": 0,
                  "bps_rd": 4096,
                  "bps_wr": 0,
                  "iops": 0,
                  "iops_rd": 0,
                  "iops_wr": 0 }
        self.configure_throttle(2, params)

        # Read 4KB from drive0. This is performed immediately.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 4096")

        # Read 2KB. The I/O limit has been exceeded so this
        # request is throttled and a timer is set to wake it up.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")

        # Read 2KB again. We're still over the I/O limit so this
        # request is also throttled, but no new timer is set since
        # there's already one.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")

        # Read from drive1. This request is also throttled, and no
        # timer is set in drive1 because there's already one in
        # drive0.
        self.vm.hmp_qemu_io("drive1", "aio_read 0 4096")

        # At this point only the first 4KB have been read from drive0.
        # The other requests are throttled.
        self.assertEqual(self.blockstats('drive0')[0], 4096)
        self.assertEqual(self.blockstats('drive1')[0], 0)

        # Remove drive0 from the throttle group and disable its I/O limits.
        # drive1 remains in the group with a throttled request.
        params['bps_rd'] = 0
        params['device'] = 'drive0'
        self.vm.cmd("block_set_io_throttle", conv_keys=False, **params)

        # Removing the I/O limits from drive0 drains its two pending requests.
        # The read request in drive1 is still throttled.
        self.assertEqual(self.blockstats('drive0')[0], 8192)
        self.assertEqual(self.blockstats('drive1')[0], 0)

        # Advance the clock 5 seconds. This completes the request in drive1
        self.vm.qtest("clock_step %d" % (5 * nsec_per_sec))

        # Now all requests have been processed.
        self.assertEqual(self.blockstats('drive0')[0], 8192)
        self.assertEqual(self.blockstats('drive1')[0], 4096)

class ThrottleTestCoroutine(ThrottleTestCase):
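    # Run the same test cases using the coroutine-based null-co driver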
    test_driver = "null-co"

class ThrottleTestGroupNames(iotests.QMPTestCase):
    max_drives = 3

    def setUp(self):
        self.vm = iotests.VM()
        for i in range(0, self.max_drives):
            self.vm.add_drive("null-co://",
                              "throttling.iops-total=100,file.read-zeroes=on")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def set_io_throttle(self, device, params):
        params["device"] = device
        self.vm.cmd("block_set_io_throttle", conv_keys=False, **params)

    def verify_name(self, device, name):
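        # Check in query-block that 'device' belongs to throttle group 'name';
        # name=None means the device must not have a group set at all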
        result = self.vm.qmp("query-block")
        for r in result["return"]:
            if r["device"] == device:
                info = r["inserted"]
                if name:
                    self.assertEqual(info["group"], name)
                else:
                    self.assertFalse('group' in info)
                return

        raise Exception("No group information found for '%s'" % device)

    def test_group_naming(self):
        params = {"bps": 0,
                  "bps_rd": 0,
                  "bps_wr": 0,
                  "iops": 0,
                  "iops_rd": 0,
                  "iops_wr": 0}

        # Check the drives added using the command line.
        # The default throttling group name is the device name.
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.verify_name(devname, devname)

        # Clear throttling settings => the group name is gone.
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.set_io_throttle(devname, params)
            self.verify_name(devname, None)

        # Set throttling settings using block_set_io_throttle and
        # check the default group names.
        params["iops"] = 10
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.set_io_throttle(devname, params)
            self.verify_name(devname, devname)

        # Set a custom group name for each device
        for i in range(3):
            devname = "drive%d" % i
            groupname = "group%d" % i
            params['group'] = groupname
            self.set_io_throttle(devname, params)
            self.verify_name(devname, groupname)

        # Put drive0 in group1 and check that all other devices remain
        # unchanged
        params['group'] = 'group1'
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', 'group1')
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

        # Put drive0 in group2 and check that all other devices remain
        # unchanged
        params['group'] = 'group2'
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', 'group2')
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

        # Clear throttling settings from drive0 and check that all other
        # devices remain unchanged
        params["iops"] = 0
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', None)
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

class ThrottleTestRemovableMedia(iotests.QMPTestCase):
    def setUp(self):
        self.vm = iotests.VM()
        self.vm.add_device("{},id=virtio-scsi".format('virtio-scsi'))
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def test_removable_media(self):
        # Add a couple of dummy nodes named cd0 and cd1
        self.vm.cmd("blockdev-add", driver="null-co",
                    read_zeroes=True, node_name="cd0")
        self.vm.cmd("blockdev-add", driver="null-co",
                    read_zeroes=True, node_name="cd1")

        # Attach a CD drive with cd0 inserted
        self.vm.cmd("device_add", driver="scsi-cd",
                    id="dev0", drive="cd0")

        # Set I/O limits
        args = { "id": "dev0", "iops": 100, "iops_rd": 0, "iops_wr": 0,
                 "bps": 50, "bps_rd": 0, "bps_wr": 0 }
        self.vm.cmd("block_set_io_throttle", conv_keys=False, **args)

        # Check that the I/O limits have been set
        result = self.vm.qmp("query-block")
        self.assert_qmp(result, 'return[0]/inserted/iops', 100)
        self.assert_qmp(result, 'return[0]/inserted/bps', 50)

        # Now eject cd0 and insert cd1
        self.vm.cmd("blockdev-open-tray", id='dev0')
        self.vm.cmd("blockdev-remove-medium", id='dev0')
        self.vm.cmd("blockdev-insert-medium", id='dev0', node_name='cd1')

        # Check that the I/O limits are still the same
        result = self.vm.qmp("query-block")
        self.assert_qmp(result, 'return[0]/inserted/iops', 100)
        self.assert_qmp(result, 'return[0]/inserted/bps', 50)

        # Eject cd1
        self.vm.cmd("blockdev-remove-medium", id='dev0')

        # Check that we can't set limits if the device has no medium
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
        self.assert_qmp(result, 'error/class', 'GenericError')

        # Remove the CD drive
        self.vm.cmd("device_del", id='dev0')


if __name__ == '__main__':
    if 'null-co' not in iotests.supported_formats():
        iotests.notrun('null-co driver support missing')
    iotests.main(supported_fmts=["raw"])