#!/usr/bin/env python3
# group: rw
#
# Tests for block device statistics
#
# Copyright (C) 2015 Igalia, S.L.
# Author: Alberto Garcia <berto@igalia.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import iotests
import os

interval_length = 10
nsec_per_sec = 1000000000
op_latency = nsec_per_sec // 1000 # See qtest_latency_ns in accounting.c
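# blkdebug (see create_blkdebug_file below) is set up to fail any request
# that touches bad_sector with errno 5 (EIO), so I/O at bad_offset is what
# the failed_*_ops counters in this test refer to.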
bad_sector = 8192
bad_offset = bad_sector * 512
blkdebug_file = os.path.join(iotests.test_dir, 'blkdebug.conf')

class BlockDeviceStatsTestCase(iotests.QMPTestCase):
    test_driver = "null-aio"
    total_rd_bytes = 0
    total_rd_ops = 0
    total_wr_bytes = 0
    total_wr_ops = 0
    total_wr_merged = 0
    total_flush_ops = 0
    failed_rd_ops = 0
    failed_wr_ops = 0
    invalid_rd_ops = 0
    invalid_wr_ops = 0
    wr_highest_offset = 0
    account_invalid = False
    account_failed = False

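    # Return the 'stats' dict that query-blockstats reports for @device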
    def blockstats(self, device):
        result = self.vm.qmp("query-blockstats")
        for r in result['return']:
            if r['device'] == device:
                return r['stats']
        raise Exception("Device not found for blockstats: %s" % device)

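    # Write a blkdebug config that injects errno 5 (EIO) on reads and
    # writes touching bad_sector; these become the "failed" operations.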
    def create_blkdebug_file(self):
        file = open(blkdebug_file, 'w')
        file.write('''
[inject-error]
event = "read_aio"
errno = "5"
sector = "%d"

[inject-error]
event = "write_aio"
errno = "5"
sector = "%d"
''' % (bad_sector, bad_sector))
        file.close()

    def required_drivers(self):
        return [self.test_driver]

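    # The drive spec built below expands to something like
    # "blkdebug:<test_dir>/blkdebug.conf:null-aio://": a null block device
    # wrapped by blkdebug so that errors can be injected.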
    @iotests.skip_if_unsupported(required_drivers)
    def setUp(self):
        drive_args = []
        drive_args.append("stats-intervals.0=%d" % interval_length)
        drive_args.append("stats-account-invalid=%s" %
                          (self.account_invalid and "on" or "off"))
        drive_args.append("stats-account-failed=%s" %
                          (self.account_failed and "on" or "off"))
        drive_args.append("file.image.read-zeroes=on")
        self.create_blkdebug_file()
        self.vm = iotests.VM().add_drive('blkdebug:%s:%s://' %
                                         (blkdebug_file, self.test_driver),
                                         ','.join(drive_args))
        self.vm.launch()
        # Set an initial value for the clock
        self.vm.qtest("clock_step %d" % nsec_per_sec)

    def tearDown(self):
        self.vm.shutdown()
        os.remove(blkdebug_file)

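    # Number of operations that the accounting code should have recorded,
    # depending on the selected categories and on whether invalid/failed
    # operations are being accounted.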
    def accounted_ops(self, read = False, write = False, flush = False):
        ops = 0
        if write:
            ops += self.total_wr_ops
            if self.account_failed:
                ops += self.failed_wr_ops
            if self.account_invalid:
                ops += self.invalid_wr_ops
        if read:
            ops += self.total_rd_ops
            if self.account_failed:
                ops += self.failed_rd_ops
            if self.account_invalid:
                ops += self.invalid_rd_ops
        if flush:
            ops += self.total_flush_ops
        return ops

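    # Expected total latency: each accounted request completes with a fixed
    # simulated latency of op_latency ns, and invalid requests are rejected
    # before reaching the backend, so they add no latency.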
    def accounted_latency(self, read = False, write = False, flush = False):
        latency = 0
        if write:
            latency += self.total_wr_ops * op_latency
            if self.account_failed:
                latency += self.failed_wr_ops * op_latency
        if read:
            latency += self.total_rd_ops * op_latency
            if self.account_failed:
                latency += self.failed_rd_ops * op_latency
        if flush:
            latency += self.total_flush_ops * op_latency
        return latency

    def check_values(self):
        stats = self.blockstats('drive0')

        # Check that the totals match with what we have calculated
        self.assertEqual(self.total_rd_bytes, stats['rd_bytes'])
        self.assertEqual(self.total_wr_bytes, stats['wr_bytes'])
        self.assertEqual(self.total_rd_ops, stats['rd_operations'])
        self.assertEqual(self.total_wr_ops, stats['wr_operations'])
        self.assertEqual(self.total_flush_ops, stats['flush_operations'])
        self.assertEqual(self.wr_highest_offset, stats['wr_highest_offset'])
        self.assertEqual(self.failed_rd_ops, stats['failed_rd_operations'])
        self.assertEqual(self.failed_wr_ops, stats['failed_wr_operations'])
        self.assertEqual(self.invalid_rd_ops, stats['invalid_rd_operations'])
        self.assertEqual(self.invalid_wr_ops, stats['invalid_wr_operations'])
        self.assertEqual(self.account_invalid, stats['account_invalid'])
        self.assertEqual(self.account_failed, stats['account_failed'])
        self.assertEqual(self.total_wr_merged, stats['wr_merged'])

        # Check that there's exactly one interval with the length we defined
        self.assertEqual(1, len(stats['timed_stats']))
        timed_stats = stats['timed_stats'][0]
        self.assertEqual(interval_length, timed_stats['interval_length'])

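        # Each accounted request takes exactly op_latency ns of virtual
        # time, so whenever anything was accounted the min, max and avg
        # latencies must all equal op_latency.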
        total_rd_latency = self.accounted_latency(read = True)
        if (total_rd_latency != 0):
            self.assertEqual(total_rd_latency, stats['rd_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_rd_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_rd_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_rd_latency_ns'])
            self.assertLess(0, timed_stats['avg_rd_queue_depth'])
        else:
            self.assertEqual(0, stats['rd_total_time_ns'])
            self.assertEqual(0, timed_stats['min_rd_latency_ns'])
            self.assertEqual(0, timed_stats['max_rd_latency_ns'])
            self.assertEqual(0, timed_stats['avg_rd_latency_ns'])
            self.assertEqual(0, timed_stats['avg_rd_queue_depth'])

        # min read latency <= avg read latency <= max read latency
        self.assertLessEqual(timed_stats['min_rd_latency_ns'],
                             timed_stats['avg_rd_latency_ns'])
        self.assertLessEqual(timed_stats['avg_rd_latency_ns'],
                             timed_stats['max_rd_latency_ns'])

        total_wr_latency = self.accounted_latency(write = True)
        if (total_wr_latency != 0):
            self.assertEqual(total_wr_latency, stats['wr_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_wr_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_wr_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_wr_latency_ns'])
            self.assertLess(0, timed_stats['avg_wr_queue_depth'])
        else:
            self.assertEqual(0, stats['wr_total_time_ns'])
            self.assertEqual(0, timed_stats['min_wr_latency_ns'])
            self.assertEqual(0, timed_stats['max_wr_latency_ns'])
            self.assertEqual(0, timed_stats['avg_wr_latency_ns'])
            self.assertEqual(0, timed_stats['avg_wr_queue_depth'])

        # min write latency <= avg write latency <= max write latency
        self.assertLessEqual(timed_stats['min_wr_latency_ns'],
                             timed_stats['avg_wr_latency_ns'])
        self.assertLessEqual(timed_stats['avg_wr_latency_ns'],
                             timed_stats['max_wr_latency_ns'])

        total_flush_latency = self.accounted_latency(flush = True)
        if (total_flush_latency != 0):
            self.assertEqual(total_flush_latency, stats['flush_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_flush_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_flush_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_flush_latency_ns'])
        else:
            self.assertEqual(0, stats['flush_total_time_ns'])
            self.assertEqual(0, timed_stats['min_flush_latency_ns'])
            self.assertEqual(0, timed_stats['max_flush_latency_ns'])
            self.assertEqual(0, timed_stats['avg_flush_latency_ns'])

        # min flush latency <= avg flush latency <= max flush latency
        self.assertLessEqual(timed_stats['min_flush_latency_ns'],
                             timed_stats['avg_flush_latency_ns'])
        self.assertLessEqual(timed_stats['avg_flush_latency_ns'],
                             timed_stats['max_flush_latency_ns'])

        # idle_time_ns must be > 0 if we have performed any operation
        if (self.accounted_ops(read = True, write = True, flush = True) != 0):
            self.assertLess(0, stats['idle_time_ns'])
        else:
            self.assertFalse('idle_time_ns' in stats)

        # This test does not alter these, so they must be all 0
        self.assertEqual(0, stats['rd_merged'])
        self.assertEqual(0, stats['failed_flush_operations'])
        self.assertEqual(0, stats['invalid_flush_operations'])

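    # Build the list of qemu-io commands for the requested operations, run
    # them, update the expected counters and verify them. Reads and writes
    # are laid out back to back from offset 0, so wr_ops writes of wr_size
    # bytes end at offset wr_ops * wr_size.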
    def do_test_stats(self, rd_size = 0, rd_ops = 0, wr_size = 0, wr_ops = 0,
                      flush_ops = 0, invalid_rd_ops = 0, invalid_wr_ops = 0,
                      failed_rd_ops = 0, failed_wr_ops = 0, wr_merged = 0):
        # The 'ops' list will contain all the requested I/O operations
        ops = []
        for i in range(rd_ops):
            ops.append("aio_read %d %d" % (i * rd_size, rd_size))

        for i in range(wr_ops):
            ops.append("aio_write %d %d" % (i * wr_size, wr_size))

        for i in range(flush_ops):
            ops.append("aio_flush")

        highest_offset = wr_ops * wr_size

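        # qemu-io's -i flag submits the request as invalid so that only the
        # invalid-operation accounting sees it (the request is rejected
        # before doing any I/O).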
        for i in range(invalid_rd_ops):
            ops.append("aio_read -i 0 512")

        for i in range(invalid_wr_ops):
            ops.append("aio_write -i 0 512")

        for i in range(failed_rd_ops):
            ops.append("aio_read %d 512" % bad_offset)

        for i in range(failed_wr_ops):
            ops.append("aio_write %d 512" % bad_offset)

        # We need an extra aio_flush to settle all outstanding AIO
        # operations before we can advance the virtual clock, so that
        # the last access happens before clock_step and idle_time_ns
        # will be greater than 0
        extra_flush = 0
        if rd_ops + wr_ops + invalid_rd_ops + invalid_wr_ops + \
           failed_rd_ops + failed_wr_ops > 0:
            extra_flush = 1

        if extra_flush > 0:
            ops.append("aio_flush")

        if failed_wr_ops > 0:
            highest_offset = max(highest_offset, bad_offset + 512)

        # Now perform all operations
        for op in ops:
            self.vm.hmp_qemu_io("drive0", op)

        # Update the expected totals
        self.total_rd_bytes += rd_ops * rd_size
        self.total_rd_ops += rd_ops
        self.total_wr_bytes += wr_ops * wr_size
        self.total_wr_ops += wr_ops
        self.total_wr_merged += wr_merged
        self.total_flush_ops += flush_ops + extra_flush
        self.invalid_rd_ops += invalid_rd_ops
        self.invalid_wr_ops += invalid_wr_ops
        self.failed_rd_ops += failed_rd_ops
        self.failed_wr_ops += failed_wr_ops

        self.wr_highest_offset = max(self.wr_highest_offset, highest_offset)

        # Advance the clock so idle_time_ns has a meaningful value
        self.vm.qtest("clock_step %d" % nsec_per_sec)

        # And check that the actual statistics match the expected ones
        self.check_values()

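    # Each test_* method below gets a fresh instance, but repeated
    # do_test_stats() calls within one test accumulate into the expected
    # totals that check_values() compares against.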
    def test_read_only(self):
        test_values = [[512, 1],
                       [65536, 1],
                       [512, 12],
                       [65536, 12]]
        for i in test_values:
            self.do_test_stats(rd_size = i[0], rd_ops = i[1])

    def test_write_only(self):
        test_values = [[512, 1],
                       [65536, 1],
                       [512, 12],
                       [65536, 12]]
        for i in test_values:
            self.do_test_stats(wr_size = i[0], wr_ops = i[1])

    def test_invalid(self):
        self.do_test_stats(invalid_rd_ops = 7)
        self.do_test_stats(invalid_wr_ops = 3)
        self.do_test_stats(invalid_rd_ops = 4, invalid_wr_ops = 5)

    def test_failed(self):
        self.do_test_stats(failed_rd_ops = 8)
        self.do_test_stats(failed_wr_ops = 6)
        self.do_test_stats(failed_rd_ops = 5, failed_wr_ops = 12)

    def test_flush(self):
        self.do_test_stats(flush_ops = 8)

    def test_all(self):
        # rd_size, rd_ops, wr_size, wr_ops, flush_ops
        # invalid_rd_ops, invalid_wr_ops,
        # failed_rd_ops, failed_wr_ops
        # wr_merged
        test_values = [[512, 1, 512, 1, 1, 4, 7, 5, 2, 0],
                       [65536, 1, 2048, 12, 7, 7, 5, 2, 5, 0],
                       [32768, 9, 8192, 1, 4, 3, 2, 4, 6, 0],
                       [16384, 11, 3584, 16, 9, 8, 6, 7, 3, 0]]
        for i in test_values:
            self.do_test_stats(*i)

    def test_no_op(self):
        # All values must be sane before doing any I/O
        self.check_values()

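# The subclasses below repeat the whole test suite with the remaining
# combinations of stats-account-invalid / stats-account-failed, and once
# more using the null-co driver instead of null-aio.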
class BlockDeviceStatsTestAccountInvalid(BlockDeviceStatsTestCase):
    account_invalid = True
    account_failed = False

class BlockDeviceStatsTestAccountFailed(BlockDeviceStatsTestCase):
    account_invalid = False
    account_failed = True

class BlockDeviceStatsTestAccountBoth(BlockDeviceStatsTestCase):
    account_invalid = True
    account_failed = True

class BlockDeviceStatsTestCoroutine(BlockDeviceStatsTestCase):
    test_driver = "null-co"

if __name__ == '__main__':
    if 'null-co' not in iotests.supported_formats():
        iotests.notrun('null-co driver support missing')
    iotests.main(supported_fmts=["raw"])