qemu-iotests: Test I/O in a single drive from a throttling group
iotest 093 contains a test that creates a throttling group with
several drives and performs I/O in all of them. This patch adds a new
test that creates a similar setup but performs I/O in only one of the
drives at a time.

This is useful to check that the round-robin algorithm behaves
properly in these scenarios, and it is specifically written using the
regression introduced in 27ccdd5259 as an example.
Signed-off-by: Alberto Garcia <berto@igalia.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
commit a26ddb4396
parent 6bf77e1c2d
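For context: all the drives in a throttling group share a single set of I/O
limits, and the group serves its members' throttled requests in round-robin
order. A minimal sketch of how several drives can be placed in one group via
QMP, in the same style the test uses (the device names and the group name
'tg0' are illustrative; block_set_io_throttle and its 'group' argument are
available since QEMU 2.4):

# Illustrative sketch, not part of the patch: put two drives in the same
# throttle group and give the whole group a shared limit of 10 IOPS.
# 'vm' is assumed to be a running iotests.VM instance.
def make_throttle_group(vm, devices=('drive0', 'drive1'), group='tg0'):
    for device in devices:
        result = vm.qmp('block_set_io_throttle', conv_keys=False,
                        device=device, group=group,
                        bps=0, bps_rd=0, bps_wr=0,
                        iops=10, iops_rd=0, iops_wr=0)
        assert result == {'return': {}}   # each call must succeed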
tests/qemu-iotests/093
@@ -53,7 +53,7 @@ class ThrottleTestCase(iotests.QMPTestCase):
             result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
             self.assert_qmp(result, 'return', {})
 
-    def do_test_throttle(self, ndrives, seconds, params):
+    def do_test_throttle(self, ndrives, seconds, params, first_drive = 0):
         def check_limit(limit, num):
             # IO throttling algorithm is discrete, allow 10% error so the test
             # is more robust
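The 10% tolerance mentioned in the comment is a simple range check. A rough
reconstruction of what check_limit() computes (its body is outside this hunk,
so treat this as an approximation; in the test, seconds and ndrives are
closure variables of do_test_throttle, here they are parameters to keep the
sketch self-contained):

# Approximate reconstruction of check_limit(): a limit of 0 means the
# parameter is not throttled; otherwise the observed count must fall
# within 10% of the expected per-drive share of the limit.
def check_limit(limit, num, seconds, ndrives):
    if limit == 0:
        return True
    expected = seconds * limit / float(ndrives)
    return 0.9 * expected < num < 1.1 * expected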
@@ -85,12 +85,14 @@ class ThrottleTestCase(iotests.QMPTestCase):
         # Send I/O requests to all drives
         for i in range(rd_nr):
             for drive in range(0, ndrives):
-                self.vm.hmp_qemu_io("drive%d" % drive, "aio_read %d %d" %
+                idx = first_drive + drive
+                self.vm.hmp_qemu_io("drive%d" % idx, "aio_read %d %d" %
                                     (i * rq_size, rq_size))
 
         for i in range(wr_nr):
             for drive in range(0, ndrives):
-                self.vm.hmp_qemu_io("drive%d" % drive, "aio_write %d %d" %
+                idx = first_drive + drive
+                self.vm.hmp_qemu_io("drive%d" % idx, "aio_write %d %d" %
                                     (i * rq_size, rq_size))
 
         # We'll store the I/O stats for each drive in these arrays
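The new first_drive parameter only shifts which drives receive the requests;
the number of active drives is still ndrives. A toy illustration of the
resulting device names:

# Toy illustration of the index mapping introduced above: with ndrives=2
# and first_drive=3, the I/O goes to drive3 and drive4.
ndrives, first_drive = 2, 3
targets = ['drive%d' % (first_drive + drive) for drive in range(ndrives)]
assert targets == ['drive3', 'drive4']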
@@ -105,15 +107,17 @@ class ThrottleTestCase(iotests.QMPTestCase):
 
         # Read the stats before advancing the clock
         for i in range(0, ndrives):
+            idx = first_drive + i
             start_rd_bytes[i], start_rd_iops[i], start_wr_bytes[i], \
-                start_wr_iops[i] = self.blockstats('drive%d' % i)
+                start_wr_iops[i] = self.blockstats('drive%d' % idx)
 
         self.vm.qtest("clock_step %d" % ns)
 
         # Read the stats after advancing the clock
         for i in range(0, ndrives):
+            idx = first_drive + i
             end_rd_bytes[i], end_rd_iops[i], end_wr_bytes[i], \
-                end_wr_iops[i] = self.blockstats('drive%d' % i)
+                end_wr_iops[i] = self.blockstats('drive%d' % idx)
 
         # Check that the I/O is within the limits and evenly distributed
         for i in range(0, ndrives):
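blockstats() itself is defined elsewhere in 093. A sketch of what such a
helper looks like on top of the QMP query-blockstats command (the stats field
names are real QMP fields; the body is an approximation of the helper, not a
copy of it):

# Sketch of a blockstats()-style helper: fetch the read/write byte and
# operation counters for one device from QMP 'query-blockstats'.
def blockstats(vm, device):
    result = vm.qmp('query-blockstats')
    for dev in result['return']:
        if dev['device'] == device:
            stats = dev['stats']
            return (stats['rd_bytes'], stats['rd_operations'],
                    stats['wr_bytes'], stats['wr_operations'])
    raise Exception("Device not found: %s" % device)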
@@ -129,6 +133,7 @@ class ThrottleTestCase(iotests.QMPTestCase):
             self.assertTrue(check_limit(params['iops_rd'], rd_iops))
             self.assertTrue(check_limit(params['iops_wr'], wr_iops))
 
+    # Connect N drives to a VM and test I/O in all of them
     def test_all(self):
         params = {"bps": 4096,
                   "bps_rd": 4096,
@@ -146,6 +151,24 @@ class ThrottleTestCase(iotests.QMPTestCase):
             self.configure_throttle(ndrives, limits)
             self.do_test_throttle(ndrives, 5, limits)
 
+    # Connect N drives to a VM and test I/O in just one of them at a time
+    def test_one(self):
+        params = {"bps": 4096,
+                  "bps_rd": 4096,
+                  "bps_wr": 4096,
+                  "iops": 10,
+                  "iops_rd": 10,
+                  "iops_wr": 10,
+                 }
+        # Repeat the test for each one of the drives
+        for drive in range(0, self.max_drives):
+            # Pick each out of all possible params and test
+            for tk in params:
+                limits = dict([(k, 0) for k in params])
+                limits[tk] = params[tk] * self.max_drives
+                self.configure_throttle(self.max_drives, limits)
+                self.do_test_throttle(1, 5, limits, drive)
+
     def test_burst(self):
         params = {"bps": 4096,
                   "bps_rd": 4096,
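test_one scales each limit by max_drives before configuring the group and
then runs do_test_throttle() with a single active drive. Because the limits
are shared by the whole group, the lone active drive should be able to
consume the entire budget on its own; a toy version of the arithmetic (the
max_drives value is illustrative, the real one comes from the test class):

# Toy arithmetic behind test_one: the group-wide limit is the per-drive
# base rate times the group size, and a single active member should be
# able to use all of it.
max_drives = 4                            # illustrative value
base_iops = 10                            # 'iops' entry from params
group_limit = base_iops * max_drives      # limit configured on the group
active_drives = 1                         # only one drive does I/O
assert group_limit / active_drives == 40  # the single drive gets it all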
tests/qemu-iotests/093.out
@@ -1,5 +1,5 @@
-.....
+.......
 ----------------------------------------------------------------------
-Ran 5 tests
+Ran 7 tests
 
 OK