2012-02-29 17:25:21 +04:00
|
|
|
# Common utilities and Python wrappers for qemu-iotests
|
|
|
|
#
|
|
|
|
# Copyright (C) 2012 IBM Corp.
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 2 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
#
|
|
|
|
|
2020-03-31 03:00:06 +03:00
|
|
|
import atexit
|
2021-02-24 13:47:06 +03:00
|
|
|
import bz2
|
2020-03-31 03:00:06 +03:00
|
|
|
from collections import OrderedDict
|
|
|
|
import faulthandler
|
|
|
|
import json
|
|
|
|
import logging
|
2012-02-29 17:25:21 +04:00
|
|
|
import os
|
|
|
|
import re
|
2021-02-24 13:47:06 +03:00
|
|
|
import shutil
|
2020-03-31 03:00:06 +03:00
|
|
|
import signal
|
|
|
|
import struct
|
2012-02-29 17:25:21 +04:00
|
|
|
import subprocess
|
2015-01-30 05:49:44 +03:00
|
|
|
import sys
|
2020-07-28 00:58:44 +03:00
|
|
|
import time
|
2021-10-26 20:56:09 +03:00
|
|
|
from typing import (Any, Callable, Dict, Iterable, Iterator,
|
2021-05-03 14:01:06 +03:00
|
|
|
List, Optional, Sequence, TextIO, Tuple, Type, TypeVar)
|
2020-03-31 03:00:06 +03:00
|
|
|
import unittest
|
2012-02-29 17:25:21 +04:00
|
|
|
|
2020-07-28 00:58:44 +03:00
|
|
|
from contextlib import contextmanager
|
|
|
|
|
2021-05-28 00:16:53 +03:00
|
|
|
from qemu.machine import qtest
|
2020-07-10 08:22:06 +03:00
|
|
|
from qemu.qmp import QMPMessage
|
2018-03-12 18:21:24 +03:00
|
|
|
|
2020-03-31 03:00:14 +03:00
|
|
|
# Use this logger for logging messages directly from the iotests module
logger = logging.getLogger('qemu.iotests')
# NullHandler: stay silent unless the embedding application configures logging.
logger.addHandler(logging.NullHandler())

# Use this logger for messages that ought to be used for diff output.
test_logger = logging.getLogger('qemu.iotests.diff_io')


# Dump Python tracebacks on fatal signals (e.g. SIGSEGV) to ease debugging.
faulthandler.enable()
|
|
|
|
|
2015-09-02 21:52:27 +03:00
|
|
|
# This will not work if arguments contain spaces but is necessary if we
# want to support the override options that ./check supports.
qemu_img_args = [os.environ.get('QEMU_IMG_PROG', 'qemu-img')]
if os.environ.get('QEMU_IMG_OPTIONS'):
    qemu_img_args += os.environ['QEMU_IMG_OPTIONS'].strip().split(' ')

qemu_io_args = [os.environ.get('QEMU_IO_PROG', 'qemu-io')]
if os.environ.get('QEMU_IO_OPTIONS'):
    qemu_io_args += os.environ['QEMU_IO_OPTIONS'].strip().split(' ')

# Variant of the qemu-io arguments that omits the default format option,
# for callers that pass their own -f or --image-opts.
qemu_io_args_no_fmt = [os.environ.get('QEMU_IO_PROG', 'qemu-io')]
if os.environ.get('QEMU_IO_OPTIONS_NO_FMT'):
    qemu_io_args_no_fmt += \
        os.environ['QEMU_IO_OPTIONS_NO_FMT'].strip().split(' ')

qemu_nbd_prog = os.environ.get('QEMU_NBD_PROG', 'qemu-nbd')
qemu_nbd_args = [qemu_nbd_prog]
if os.environ.get('QEMU_NBD_OPTIONS'):
    qemu_nbd_args += os.environ['QEMU_NBD_OPTIONS'].strip().split(' ')

qemu_prog = os.environ.get('QEMU_PROG', 'qemu')
qemu_opts = os.environ.get('QEMU_OPTIONS', '').strip().split(' ')

# Optional gdbserver wrapper for the QEMU binary; GDB_OPTIONS carries the
# gdbserver arguments (e.g. a listen address).
gdb_qemu_env = os.environ.get('GDB_OPTIONS')
qemu_gdb = []
if gdb_qemu_env:
    qemu_gdb = ['gdbserver'] + gdb_qemu_env.strip().split(' ')

# When set, QEMU's own output is forwarded to stdout instead of a log file.
qemu_print = os.environ.get('PRINT_QEMU', False)

imgfmt = os.environ.get('IMGFMT', 'raw')
imgproto = os.environ.get('IMGPROTO', 'file')
output_dir = os.environ.get('OUTPUT_DIR', '.')

try:
    test_dir = os.environ['TEST_DIR']
    sock_dir = os.environ['SOCK_DIR']
    cachemode = os.environ['CACHEMODE']
    aiomode = os.environ['AIOMODE']
    qemu_default_machine = os.environ['QEMU_DEFAULT_MACHINE']
except KeyError:
    # We are using these variables as proxies to indicate that we're
    # not being run via "check". There may be other things set up by
    # "check" that individual test cases rely on.
    sys.stderr.write('Please run this test via the "check" script\n')
    sys.exit(os.EX_USAGE)

# Optional valgrind wrapper for the QEMU binary; exit code 99 marks a
# valgrind-detected error (see VM._post_shutdown).
qemu_valgrind = []
if os.environ.get('VALGRIND_QEMU') == "y" and \
    os.environ.get('NO_VALGRIND') != "y":
    valgrind_logfile = "--log-file=" + test_dir
    # %p allows to put the valgrind process PID, since
    # we don't know it a priori (subprocess.Popen is
    # not yet invoked)
    valgrind_logfile += "/%p.valgrind"

    qemu_valgrind = ['valgrind', valgrind_logfile, '--error-exitcode=99']

# Default --object/key-secret used for luks images (see qemu_img_create).
luks_default_secret_object = 'secret,id=keysec0,data=' + \
    os.environ.get('IMGKEYSECRET', '')
luks_default_key_secret_opt = 'key-secret=keysec0'

sample_img_dir = os.environ['SAMPLE_IMG_DIR']
|
|
|
|
|
|
|
|
|
2021-10-26 20:56:09 +03:00
|
|
|
@contextmanager
def change_log_level(
        logger_name: str, level: int = logging.CRITICAL) -> Iterator[None]:
    """
    Temporarily change the level of the named logger.

    This can be used to silence errors that are expected or uninteresting.
    The previous level is restored on exit, even if the body raises.
    """
    target = logging.getLogger(logger_name)
    saved_level = target.level
    target.setLevel(level)
    try:
        yield
    finally:
        target.setLevel(saved_level)
|
|
|
|
|
|
|
|
|
2021-02-24 13:47:06 +03:00
|
|
|
def unarchive_sample_image(sample, fname):
    """Decompress the bzip2-compressed sample image *sample* into *fname*."""
    archive = os.path.join(sample_img_dir, sample + '.bz2')
    with bz2.open(archive) as compressed, open(fname, 'wb') as out:
        shutil.copyfileobj(compressed, out)
|
|
|
|
|
2018-02-06 21:25:07 +03:00
|
|
|
|
2020-09-24 18:27:14 +03:00
|
|
|
def qemu_tool_pipe_and_status(tool: str, args: Sequence[str],
                              connect_stderr: bool = True) -> Tuple[str, int]:
    """
    Run a tool and return both its output and its exit code.

    :param tool: human-readable tool name, used only in error messages
    :param args: full argument vector, including the executable itself
    :param connect_stderr: if True, merge stderr into the captured output
    :return: (combined output, exit code)
    """
    stderr = subprocess.STDOUT if connect_stderr else None
    with subprocess.Popen(args, stdout=subprocess.PIPE,
                          stderr=stderr, universal_newlines=True) as subp:
        output = subp.communicate()[0]
        if subp.returncode < 0:
            # A negative return code means the child died from a signal.
            # Use implicit string concatenation rather than a backslash
            # continuation inside the literal, which would embed the
            # source indentation into the message.
            cmd = ' '.join(args)
            sys.stderr.write(f'{tool} received signal '
                             f'{-subp.returncode}: {cmd}\n')
        return (output, subp.returncode)
|
2020-06-25 15:55:33 +03:00
|
|
|
|
2020-09-24 18:27:14 +03:00
|
|
|
def qemu_img_pipe_and_status(*args: str) -> Tuple[str, int]:
    """
    Run qemu-img and return both its output and its exit code
    """
    return qemu_tool_pipe_and_status('qemu-img', qemu_img_args + list(args))
|
|
|
|
|
2020-06-25 15:55:33 +03:00
|
|
|
def qemu_img(*args: str) -> int:
    '''Run qemu-img and return the exit code'''
    _, exitcode = qemu_img_pipe_and_status(*args)
    return exitcode
|
2012-02-29 17:25:21 +04:00
|
|
|
|
2019-02-10 17:57:36 +03:00
|
|
|
def ordered_qmp(qmsg, conv_keys=True):
    """
    Recursively convert a QMP message to an OrderedDict with sorted keys.

    Dictionaries are not ordered prior to Python 3.6, so sorting keeps
    iotests output deterministic.  When conv_keys is True, underscores in
    keys are converted to dashes (QMP style); the conversion is applied
    only at the first dict level reached.
    """
    if isinstance(qmsg, list):
        return [ordered_qmp(elem) for elem in qmsg]
    if not isinstance(qmsg, dict):
        return qmsg

    result = OrderedDict()
    for key in sorted(qmsg):
        value = qmsg[key]
        if conv_keys:
            key = key.replace('_', '-')
        result[key] = ordered_qmp(value, conv_keys=False)
    return result
|
iotests: add qmp recursive sorting function
Python before 3.6 does not sort dictionaries (including kwargs).
Therefore, printing QMP objects involves sorting the keys to have
a predictable ordering in the iotests output. This means that
iotests output will sometimes show arguments in an order not
specified by the test author.
Presently, we accomplish this by using json.dumps' sort_keys argument,
where we only serialize the arguments dictionary, but not the command.
However, if we want to pretty-print QMP objects being sent to the
QEMU process, we need to build the entire command before logging it.
Ordinarily, this would then involve "arguments" being sorted above
"execute", which would necessitate a rather ugly and harder-to-read
change to many iotests outputs.
To facilitate pretty-printing AND maintaining predictable output AND
having "arguments" sort after "execute", add a custom sort function
that takes a dictionary and recursively builds an OrderedDict that
maintains the specific key order we wish to see in iotests output.
The qmp_log function uses this to build a QMP object that keeps
"execute" above "arguments", but sorts all keys and keys in any
subdicts in "arguments" lexicographically to maintain consistent
iotests output, with no incompatible changes to any current test.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20181221093529.23855-8-jsnow@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2018-12-21 12:35:25 +03:00
|
|
|
|
2018-02-06 21:25:07 +03:00
|
|
|
def qemu_img_create(*args):
    """
    Run "qemu-img create" with the given arguments and return the exit code.

    For luks images, the default secret object and key-secret option are
    injected automatically unless the caller already provided a key-secret
    in the -o option string.
    """
    args = list(args)

    # default luks support
    if '-f' in args and args[args.index('-f') + 1] == 'luks':
        if '-o' in args:
            i = args.index('-o')
            if 'key-secret' not in args[i + 1]:
                # The -o value is a comma-separated option *string*; the
                # previous code called list.append() on it, which raised
                # AttributeError.  Extend the option string instead.
                args[i + 1] += ',' + luks_default_key_secret_opt
                args.insert(i + 2, '--object')
                args.insert(i + 3, luks_default_secret_object)
        else:
            args = ['-o', luks_default_key_secret_opt,
                    '--object', luks_default_secret_object] + args

    args.insert(0, 'create')

    return qemu_img(*args)
|
|
|
|
|
2020-07-28 00:58:45 +03:00
|
|
|
def qemu_img_measure(*args):
    """Run "qemu-img measure" and return its parsed JSON output."""
    output = qemu_img_pipe("measure", "--output", "json", *args)
    return json.loads(output)
|
|
|
|
|
|
|
|
def qemu_img_check(*args):
    """Run "qemu-img check" and return its parsed JSON output."""
    output = qemu_img_pipe("check", "--output", "json", *args)
    return json.loads(output)
|
|
|
|
|
2012-10-26 22:31:15 +04:00
|
|
|
def qemu_img_verbose(*args):
    '''Run qemu-img without suppressing its output and return the exit code'''
    full_args = qemu_img_args + list(args)
    exitcode = subprocess.call(full_args)
    if exitcode < 0:
        # A negative exit code means death by signal.
        sys.stderr.write('qemu-img received signal %i: %s\n'
                         % (-exitcode, ' '.join(full_args)))
    return exitcode
|
2012-10-26 22:31:15 +04:00
|
|
|
|
2020-06-25 15:55:33 +03:00
|
|
|
def qemu_img_pipe(*args: str) -> str:
    '''Run qemu-img and return its output'''
    output, _ = qemu_img_pipe_and_status(*args)
    return output
|
2013-10-09 12:46:20 +04:00
|
|
|
|
2019-05-21 21:35:52 +03:00
|
|
|
def qemu_img_log(*args):
    """Run qemu-img, log its filtered output, and return the raw output."""
    output = qemu_img_pipe(*args)
    log(output, filters=[filter_testfiles])
    return output
|
|
|
|
|
2020-03-31 03:00:04 +03:00
|
|
|
def img_info_log(filename, filter_path=None, imgopts=False, extra_args=()):
    """
    Run "qemu-img info" on filename and log the filtered output.

    :param filter_path: path to mask in the output (defaults to filename)
    :param imgopts: pass --image-opts instead of "-f IMGFMT"
    :param extra_args: extra qemu-img arguments inserted before filename
    """
    args = ['info']
    if imgopts:
        args.append('--image-opts')
    else:
        args.extend(['-f', imgfmt])
    args.extend(extra_args)
    args.append(filename)

    output = qemu_img_pipe(*args)
    log(filter_img_info(output, filter_path or filename))
|
|
|
|
|
2012-02-29 17:25:21 +04:00
|
|
|
def qemu_io(*args):
    '''Run qemu-io and return the stdout data'''
    full_args = qemu_io_args + list(args)
    output, _ = qemu_tool_pipe_and_status('qemu-io', full_args)
    return output
|
2012-02-29 17:25:21 +04:00
|
|
|
|
2019-11-19 20:36:09 +03:00
|
|
|
def qemu_io_log(*args):
    """Run qemu-io, log its filtered output, and return the raw output."""
    output = qemu_io(*args)
    log(output, filters=[filter_testfiles, filter_qemu_io])
    return output
|
|
|
|
|
2018-05-09 22:43:01 +03:00
|
|
|
def qemu_io_silent(*args):
    '''Run qemu-io and return the exit code, suppressing stdout'''
    # If the caller already specifies a format (or image opts), do not
    # add the default format arguments.
    if '-f' in args or '--image-opts' in args:
        base_args = qemu_io_args_no_fmt
    else:
        base_args = qemu_io_args

    full_args = base_args + list(args)
    result = subprocess.run(full_args, stdout=subprocess.DEVNULL, check=False)
    if result.returncode < 0:
        sys.stderr.write('qemu-io received signal %i: %s\n' %
                         (-result.returncode, ' '.join(full_args)))
    return result.returncode
|
2018-05-09 22:43:01 +03:00
|
|
|
|
2019-10-09 11:41:58 +03:00
|
|
|
def qemu_io_silent_check(*args):
    '''Run qemu-io and return the true if subprocess returned 0'''
    result = subprocess.run(qemu_io_args + list(args),
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.STDOUT, check=False)
    return result.returncode == 0
|
2019-10-09 11:41:58 +03:00
|
|
|
|
2018-01-19 16:57:18 +03:00
|
|
|
class QemuIoInteractive:
    """Interactive qemu-io session.

    Spawns a single qemu-io process and feeds it commands one at a time
    with cmd(), returning the output printed before the next prompt.
    """

    def __init__(self, *args):
        # No default format option: callers pass their own -f/--image-opts.
        self.args = qemu_io_args_no_fmt + list(args)
        # We need to keep the Popen object around, and not
        # close it immediately. Therefore, disable the pylint check:
        # pylint: disable=consider-using-with
        self._p = subprocess.Popen(self.args, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   universal_newlines=True)
        # 9 == len('qemu-io> '): consume exactly the initial prompt.
        out = self._p.stdout.read(9)
        if out != 'qemu-io> ':
            # Most probably qemu-io just failed to start.
            # Let's collect the whole output and exit.
            out += self._p.stdout.read()
            self._p.wait(timeout=1)
            raise ValueError(out)

    def close(self):
        """Send the quit command and wait for the process to exit."""
        self._p.communicate('q\n')

    def _read_output(self):
        """Read characters until the next 'qemu-io> ' prompt.

        Returns everything read before the prompt (the prompt itself is
        stripped from the result).
        """
        pattern = 'qemu-io> '
        n = len(pattern)
        pos = 0
        s = []
        while pos != n:
            c = self._p.stdout.read(1)
            # check unexpected EOF
            assert c != ''
            s.append(c)
            if c == pattern[pos]:
                pos += 1
            else:
                # NOTE(review): on a mismatch, pos resets to 0 without
                # re-checking c against pattern[0]; a partial match
                # immediately followed by a real prompt could be missed.
                # Presumably the prompt is always preceded by a newline,
                # which avoids this in practice — TODO confirm.
                pos = 0

        return ''.join(s[:-n])

    def cmd(self, cmd):
        """Send one qemu-io command and return its output."""
        # quit command is in close(), '\n' is added automatically
        assert '\n' not in cmd
        cmd = cmd.strip()
        assert cmd not in ('q', 'quit')
        self._p.stdin.write(cmd + '\n')
        self._p.stdin.flush()
        return self._read_output()
|
|
|
|
|
|
|
|
|
2016-10-25 16:11:37 +03:00
|
|
|
def qemu_nbd(*args):
    '''Run qemu-nbd in daemon mode and return the parent's exit code'''
    cmd = qemu_nbd_args + ['--fork'] + list(args)
    return subprocess.call(cmd)
|
|
|
|
|
2020-09-24 18:27:14 +03:00
|
|
|
def qemu_nbd_early_pipe(*args: str) -> Tuple[int, str]:
    '''Run qemu-nbd in daemon mode and return both the parent's exit code
       and its output in case of an error'''
    cmd = qemu_nbd_args + ['--fork'] + list(args)
    output, returncode = qemu_tool_pipe_and_status('qemu-nbd', cmd,
                                                   connect_stderr=False)
    if returncode == 0:
        return returncode, ''
    return returncode, output
|
2018-12-22 02:47:48 +03:00
|
|
|
|
2020-09-24 18:27:15 +03:00
|
|
|
def qemu_nbd_list_log(*args: str) -> str:
    '''Run qemu-nbd to list remote exports'''
    cmd = [qemu_nbd_prog, '-L'] + list(args)
    output = qemu_tool_pipe_and_status('qemu-nbd', cmd)[0]
    log(output, filters=[filter_testfiles, filter_nbd_exports])
    return output
|
|
|
|
|
2020-07-28 00:58:44 +03:00
|
|
|
@contextmanager
def qemu_nbd_popen(*args):
    '''Context manager running qemu-nbd within the context'''
    pid_file = file_path("qemu_nbd_popen-nbd-pid-file")

    # A stale pid file would make the readiness poll below succeed early.
    assert not os.path.exists(pid_file)

    cmd = list(qemu_nbd_args)
    cmd.extend(('--persistent', '--pid-file', pid_file))
    cmd.extend(args)

    log('Start NBD server')
    with subprocess.Popen(cmd) as p:
        try:
            # Wait until the server writes its pid file (i.e. it is ready
            # to accept connections), bailing out if it dies first.
            while not os.path.exists(pid_file):
                if p.poll() is not None:
                    raise RuntimeError(
                        "qemu-nbd terminated with exit code {}: {}"
                        .format(p.returncode, ' '.join(cmd)))

                time.sleep(0.01)
            yield
        finally:
            # Remove the pid file first so a subsequent invocation in the
            # same test cannot see a stale file.
            if os.path.exists(pid_file):
                os.remove(pid_file)
            log('Kill NBD server')
            p.kill()
            p.wait()
|
2019-10-09 11:41:58 +03:00
|
|
|
|
2016-07-22 11:17:54 +03:00
|
|
|
def compare_images(img1, img2, fmt1=imgfmt, fmt2=imgfmt):
    '''Return True if two image files are identical'''
    exitcode = qemu_img('compare', '-f', fmt1, '-F', fmt2, img1, img2)
    return exitcode == 0
|
2013-05-28 19:11:36 +04:00
|
|
|
|
2013-05-28 19:11:37 +04:00
|
|
|
def create_image(name, size):
    '''Create a fully-allocated raw image with sector markers'''
    # Each 512-byte sector starts and ends with its own sector number as
    # a big-endian 32-bit integer, zero-padded in between.
    with open(name, 'wb') as img:
        for offset in range(0, size, 512):
            sector_num = offset // 512
            img.write(struct.pack('>l504xl', sector_num, sector_num))
|
2013-05-28 19:11:37 +04:00
|
|
|
|
2016-04-20 05:48:35 +03:00
|
|
|
def image_size(img):
    '''Return image's virtual size'''
    info = qemu_img_pipe('info', '--output=json', '-f', imgfmt, img)
    return json.loads(info)['virtual-size']
|
|
|
|
|
2019-02-10 17:57:32 +03:00
|
|
|
def is_str(val):
    """Return True if val is a string."""
    return isinstance(val, str)
|
2019-02-10 17:57:32 +03:00
|
|
|
|
2016-03-21 17:11:46 +03:00
|
|
|
# Escape the directory so regex metacharacters in the path (e.g. '+' or
# '.') cannot corrupt the pattern; previously the raw path was
# interpolated directly into the regex.
test_dir_re = re.compile(re.escape(test_dir))

def filter_test_dir(msg):
    """Replace occurrences of the test directory with TEST_DIR."""
    return test_dir_re.sub("TEST_DIR", msg)
|
|
|
|
|
|
|
|
win32_re = re.compile(r"\r")

def filter_win32(msg):
    """Strip carriage returns, normalizing Windows-style line endings."""
    return win32_re.sub("", msg)
|
|
|
|
|
2020-03-31 03:00:09 +03:00
|
|
|
qemu_io_re = re.compile(r"[0-9]* ops; [0-9\/:. sec]* "
                        r"\([0-9\/.inf]* [EPTGMKiBbytes]*\/sec "
                        r"and [0-9\/.inf]* ops\/sec\)")

def filter_qemu_io(msg):
    """Replace volatile qemu-io throughput statistics with placeholders."""
    normalized = filter_win32(msg)
    return qemu_io_re.sub("X ops; XX:XX:XX.X "
                          "(XXX YYY/sec and XXX ops/sec)", normalized)
|
2016-03-21 17:11:46 +03:00
|
|
|
|
|
|
|
chown_re = re.compile(r"chown [0-9]+:[0-9]+")

def filter_chown(msg):
    """Mask the numeric UID:GID in chown messages."""
    return chown_re.sub("chown UID:GID", msg)
|
|
|
|
|
2017-08-23 17:05:06 +03:00
|
|
|
def filter_qmp_event(event):
    '''Filter a QMP event dict'''
    # Shallow-copy so the caller's top-level dict keeps its entries,
    # then mask the variable timestamp fields.
    filtered = dict(event)
    if 'timestamp' in filtered:
        filtered['timestamp']['seconds'] = 'SECS'
        filtered['timestamp']['microseconds'] = 'USECS'
    return filtered
|
|
|
|
|
2018-12-21 12:35:27 +03:00
|
|
|
def filter_qmp(qmsg, filter_fn):
    '''Given a string filter, filter a QMP object's values.
       filter_fn takes a (key, value) pair.'''
    # Lists are treated like dicts keyed by index.
    items = enumerate(qmsg) if isinstance(qmsg, list) else qmsg.items()

    for key, value in items:
        if isinstance(value, (dict, list)):
            qmsg[key] = filter_qmp(value, filter_fn)
        else:
            qmsg[key] = filter_fn(key, value)
    return qmsg
|
|
|
|
|
2018-05-23 19:17:45 +03:00
|
|
|
def filter_testfiles(msg):
    """Replace the per-PID test/socket directory prefixes in msg with
    stable TEST_DIR/PID- and SOCK_DIR/PID- placeholders."""
    pid = os.getpid()
    substitutions = ((os.path.join(test_dir, "%s-" % pid), 'TEST_DIR/PID-'),
                     (os.path.join(sock_dir, "%s-" % pid), 'SOCK_DIR/PID-'))
    for prefix, replacement in substitutions:
        msg = msg.replace(prefix, replacement)
    return msg
|
2018-05-23 19:17:45 +03:00
|
|
|
|
2018-12-21 12:35:27 +03:00
|
|
|
def filter_qmp_testfiles(qmsg):
    """Apply filter_testfiles to every string value in a QMP object."""
    def _filter(_key, value):
        return filter_testfiles(value) if is_str(value) else value
    return filter_qmp(qmsg, _filter)
|
|
|
|
|
2020-11-04 21:50:24 +03:00
|
|
|
def filter_virtio_scsi(output: str) -> str:
    """Strip the -ccw/-pci transport suffix from virtio-scsi device names."""
    pattern = r'(virtio-scsi)-(ccw|pci)'
    return re.sub(pattern, r'\1', output)
|
|
|
|
|
|
|
|
def filter_qmp_virtio_scsi(qmsg):
    """Apply filter_virtio_scsi to every string value in a QMP object."""
    def _filter(_key, value):
        return filter_virtio_scsi(value) if is_str(value) else value
    return filter_qmp(qmsg, _filter)
|
|
|
|
|
2018-12-21 12:35:24 +03:00
|
|
|
def filter_generated_node_ids(msg):
    """Mask auto-generated block node names (#blockNNN) in msg."""
    return re.sub("#block[0-9]+", "NODE_NAME", msg)
|
|
|
|
|
2018-05-24 14:12:56 +03:00
|
|
|
def filter_img_info(output, filename):
    """Filter "qemu-img info" output for stable diff comparison.

    Drops allocation-size lines and masks the filename, image format,
    and volatile fields (iters, uuid, cid).
    """
    volatile = (('iters: [0-9]+', 'iters: XXX'),
                ('uuid: [-a-f0-9]+',
                 'uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'),
                ('cid: [0-9]+', 'cid: XXXXXXXXXX'))
    kept = []
    for line in output.split('\n'):
        if 'disk size' in line or 'actual-size' in line:
            continue
        line = filter_testfiles(line.replace(filename, 'TEST_IMG'))
        line = line.replace(imgfmt, 'IMGFMT')
        for pattern, replacement in volatile:
            line = re.sub(pattern, replacement, line)
        kept.append(line)
    return '\n'.join(kept)
|
|
|
|
|
2019-02-01 22:29:10 +03:00
|
|
|
def filter_imgfmt(msg):
    """Replace the current image format string with IMGFMT."""
    return msg.replace(imgfmt, 'IMGFMT')
|
|
|
|
|
|
|
|
def filter_qmp_imgfmt(qmsg):
    """Apply filter_imgfmt to every string value in a QMP object."""
    def _filter(_key, value):
        return filter_imgfmt(value) if is_str(value) else value
    return filter_qmp(qmsg, _filter)
|
|
|
|
|
2020-09-24 18:27:15 +03:00
|
|
|
def filter_nbd_exports(output: str) -> str:
    """Mask the block size values in qemu-nbd export listings."""
    return re.sub(r'((min|opt|max) block): [0-9]+', r'\1: XXX', output)
|
|
|
|
|
2020-03-31 03:00:08 +03:00
|
|
|
|
|
|
|
Msg = TypeVar('Msg', Dict[str, Any], List[Any], str)

def log(msg: Msg,
        filters: Iterable[Callable[[Msg], Msg]] = (),
        indent: Optional[int] = None) -> None:
    """
    Logs either a string message or a JSON serializable message (like QMP).

    If indent is provided, JSON serializable messages are pretty-printed.
    """
    for apply_filter in filters:
        msg = apply_filter(msg)

    if not isinstance(msg, (dict, list)):
        test_logger.info(msg)
        return

    # An OrderedDict is assumed to be pre-sorted; don't re-sort it.
    already_sorted = isinstance(msg, OrderedDict)
    test_logger.info(json.dumps(msg, sort_keys=not already_sorted,
                                indent=indent))
|
2016-03-21 17:11:46 +03:00
|
|
|
|
2017-07-21 17:41:21 +03:00
|
|
|
class Timeout:
    """Context manager raising an exception after a wall-clock timeout.

    The timer is not armed when running under gdb or valgrind, since
    those slow everything down and timeouts would misfire.
    """
    def __init__(self, seconds, errmsg="Timeout"):
        self.seconds = seconds
        self.errmsg = errmsg

    def __enter__(self):
        if not (qemu_gdb or qemu_valgrind):
            signal.signal(signal.SIGALRM, self.timeout)
            signal.setitimer(signal.ITIMER_REAL, self.seconds)
        return self

    def __exit__(self, exc_type, value, traceback):
        if not (qemu_gdb or qemu_valgrind):
            # Disarm the timer; never suppress exceptions.
            signal.setitimer(signal.ITIMER_REAL, 0)
        return False

    def timeout(self, signum, frame):
        raise Exception(self.errmsg)
|
|
|
|
|
2019-07-29 23:35:54 +03:00
|
|
|
def file_pattern(name):
    """Return a per-process unique file name: "<pid>-<name>"."""
    return "{0}-{1}".format(os.getpid(), name)
|
2017-08-24 10:22:01 +03:00
|
|
|
|
2020-08-29 02:21:51 +03:00
|
|
|
class FilePath:
    """
    Context manager generating multiple file names. The generated files are
    removed when exiting the context.

    Example usage:

        with FilePath('a.img', 'b.img') as (img_a, img_b):
            # Use img_a and img_b here...

        # a.img and b.img are automatically removed here.

    By default images are created in iotests.test_dir. To create sockets use
    iotests.sock_dir:

        with FilePath('a.sock', base_dir=iotests.sock_dir) as sock:

    For convenience, calling with one argument yields a single file instead of
    a tuple with one item.
    """
    def __init__(self, *names, base_dir=test_dir):
        self.paths = [os.path.join(base_dir, file_pattern(name))
                      for name in names]

    def __enter__(self):
        # One name yields the bare path; several yield the list.
        if len(self.paths) == 1:
            return self.paths[0]
        return self.paths

    def __exit__(self, exc_type, exc_val, exc_tb):
        for path in self.paths:
            try:
                os.remove(path)
            except OSError:
                # Best effort: the file may never have been created.
                pass
        return False
|
|
|
|
|
|
|
|
|
2021-01-18 13:57:13 +03:00
|
|
|
def try_remove(img):
    """Remove a file, ignoring OS errors (e.g. a missing file)."""
    try:
        os.remove(img)
    except OSError:
        pass
|
|
|
|
|
2018-03-12 18:21:25 +03:00
|
|
|
def file_path_remover():
    """atexit hook: remove files registered by file_path(), newest first."""
    for path in reversed(file_path_remover.paths):
        try_remove(path)
|
2018-03-12 18:21:25 +03:00
|
|
|
|
|
|
|
|
2019-10-17 16:31:35 +03:00
|
|
|
def file_path(*names, base_dir=test_dir):
    ''' Another way to get auto-generated filename that cleans itself up.

    Use is as simple as:

    img_a, img_b = file_path('a.img', 'b.img')
    sock = file_path('socket')
    '''
    # Lazily register the atexit cleanup hook on first use.
    if not hasattr(file_path_remover, 'paths'):
        file_path_remover.paths = []
        atexit.register(file_path_remover)

    paths = []
    for name in names:
        path = os.path.join(base_dir, file_pattern(name))
        file_path_remover.paths.append(path)
        paths.append(path)

    return paths[0] if len(paths) == 1 else paths
|
|
|
|
|
2018-05-29 22:44:47 +03:00
|
|
|
def remote_filename(path):
    """Map a local path to a URL for the configured image protocol."""
    if imgproto == 'file':
        return path
    if imgproto == 'ssh':
        return "ssh://%s@127.0.0.1:22%s" % (os.environ.get('USER'), path)
    raise Exception("Protocol %s not supported" % (imgproto))
|
2018-03-12 18:21:25 +03:00
|
|
|
|
2016-07-26 19:16:07 +03:00
|
|
|
class VM(qtest.QEMUQtestMachine):

    '''A QEMU VM'''

    def __init__(self, path_suffix=''):
        # Include the PID so parallel test runs don't collide on names.
        name = "qemu%s-%d" % (path_suffix, os.getpid())
        # Debuggers make everything slow; disable the QMP timeout then.
        timer = 15.0 if not (qemu_gdb or qemu_valgrind) else None
        if qemu_gdb and qemu_valgrind:
            sys.stderr.write('gdb and valgrind are mutually exclusive\n')
            sys.exit(1)
        wrapper = qemu_gdb if qemu_gdb else qemu_valgrind
        super().__init__(qemu_prog, qemu_opts, wrapper=wrapper,
                         name=name,
                         base_temp_dir=test_dir,
                         sock_dir=sock_dir, qmp_timer=timer)
        # Counter used by add_drive() to generate unique drive IDs.
        self._num_drives = 0
|
2013-09-06 07:24:33 +04:00
|
|
|
|
2021-08-09 12:01:10 +03:00
|
|
|
def _post_shutdown(self) -> None:
|
|
|
|
super()._post_shutdown()
|
|
|
|
if not qemu_valgrind or not self._popen:
|
|
|
|
return
|
|
|
|
valgrind_filename = f"{test_dir}/{self._popen.pid}.valgrind"
|
|
|
|
if self.exitcode() == 99:
|
2021-08-24 18:35:39 +03:00
|
|
|
with open(valgrind_filename, encoding='utf-8') as f:
|
2021-08-09 12:01:10 +03:00
|
|
|
print(f.read())
|
|
|
|
else:
|
|
|
|
os.remove(valgrind_filename)
|
|
|
|
|
2021-08-09 12:01:13 +03:00
|
|
|
def _pre_launch(self) -> None:
|
|
|
|
super()._pre_launch()
|
|
|
|
if qemu_print:
|
|
|
|
# set QEMU binary output to stdout
|
|
|
|
self._close_qemu_log_file()
|
|
|
|
|
2017-12-07 23:13:18 +03:00
|
|
|
def add_object(self, opts):
|
|
|
|
self._args.append('-object')
|
|
|
|
self._args.append(opts)
|
|
|
|
return self
|
|
|
|
|
2016-09-20 14:38:49 +03:00
|
|
|
def add_device(self, opts):
|
|
|
|
self._args.append('-device')
|
|
|
|
self._args.append(opts)
|
|
|
|
return self
|
|
|
|
|
2015-12-01 12:36:29 +03:00
|
|
|
def add_drive_raw(self, opts):
|
|
|
|
self._args.append('-drive')
|
|
|
|
self._args.append(opts)
|
|
|
|
return self
|
|
|
|
|
2020-03-31 03:00:02 +03:00
|
|
|
def add_drive(self, path, opts='', interface='virtio', img_format=imgfmt):
|
2012-02-29 17:25:21 +04:00
|
|
|
'''Add a virtio-blk drive to the VM'''
|
2015-09-02 21:52:25 +03:00
|
|
|
options = ['if=%s' % interface,
|
2012-02-29 17:25:21 +04:00
|
|
|
'id=drive%d' % self._num_drives]
|
2015-09-02 21:52:25 +03:00
|
|
|
|
|
|
|
if path is not None:
|
|
|
|
options.append('file=%s' % path)
|
2020-03-31 03:00:02 +03:00
|
|
|
options.append('format=%s' % img_format)
|
2015-05-07 15:41:30 +03:00
|
|
|
options.append('cache=%s' % cachemode)
|
2020-01-20 17:18:57 +03:00
|
|
|
options.append('aio=%s' % aiomode)
|
2015-09-02 21:52:25 +03:00
|
|
|
|
2012-02-29 17:25:21 +04:00
|
|
|
if opts:
|
|
|
|
options.append(opts)
|
|
|
|
|
2020-03-31 03:00:02 +03:00
|
|
|
if img_format == 'luks' and 'key-secret' not in opts:
|
2018-02-06 21:25:07 +03:00
|
|
|
# default luks support
|
|
|
|
if luks_default_secret_object not in self._args:
|
|
|
|
self.add_object(luks_default_secret_object)
|
|
|
|
|
|
|
|
options.append(luks_default_key_secret_opt)
|
|
|
|
|
2012-02-29 17:25:21 +04:00
|
|
|
self._args.append('-drive')
|
|
|
|
self._args.append(','.join(options))
|
|
|
|
self._num_drives += 1
|
|
|
|
return self
|
|
|
|
|
2017-04-03 20:51:50 +03:00
|
|
|
def add_blockdev(self, opts):
|
|
|
|
self._args.append('-blockdev')
|
|
|
|
if isinstance(opts, str):
|
|
|
|
self._args.append(opts)
|
|
|
|
else:
|
|
|
|
self._args.append(','.join(opts))
|
|
|
|
return self
|
|
|
|
|
2017-08-23 17:05:06 +03:00
|
|
|
def add_incoming(self, addr):
|
|
|
|
self._args.append('-incoming')
|
|
|
|
self._args.append(addr)
|
|
|
|
return self
|
|
|
|
|
2020-07-10 08:22:06 +03:00
|
|
|
def hmp(self, command_line: str, use_log: bool = False) -> QMPMessage:
|
2020-03-31 03:00:10 +03:00
|
|
|
cmd = 'human-monitor-command'
|
2020-10-07 02:58:07 +03:00
|
|
|
kwargs: Dict[str, Any] = {'command-line': command_line}
|
2020-03-31 03:00:10 +03:00
|
|
|
if use_log:
|
|
|
|
return self.qmp_log(cmd, **kwargs)
|
|
|
|
else:
|
|
|
|
return self.qmp(cmd, **kwargs)
|
|
|
|
|
|
|
|
def pause_drive(self, drive: str, event: Optional[str] = None) -> None:
|
|
|
|
"""Pause drive r/w operations"""
|
2013-11-20 06:01:55 +04:00
|
|
|
if not event:
|
|
|
|
self.pause_drive(drive, "read_aio")
|
|
|
|
self.pause_drive(drive, "write_aio")
|
|
|
|
return
|
2020-03-31 03:00:10 +03:00
|
|
|
self.hmp(f'qemu-io {drive} "break {event} bp_{drive}"')
|
|
|
|
|
|
|
|
def resume_drive(self, drive: str) -> None:
|
|
|
|
"""Resume drive r/w operations"""
|
|
|
|
self.hmp(f'qemu-io {drive} "remove_break bp_{drive}"')
|
|
|
|
|
|
|
|
def hmp_qemu_io(self, drive: str, cmd: str,
|
2021-08-24 11:38:51 +03:00
|
|
|
use_log: bool = False, qdev: bool = False) -> QMPMessage:
|
2020-03-31 03:00:10 +03:00
|
|
|
"""Write to a given drive using an HMP command"""
|
2021-08-24 11:38:51 +03:00
|
|
|
d = '-d ' if qdev else ''
|
|
|
|
return self.hmp(f'qemu-io {d}{drive} "{cmd}"', use_log=use_log)
|
2013-07-26 22:39:05 +04:00
|
|
|
|
2018-05-08 19:10:16 +03:00
|
|
|
def flatten_qmp_object(self, obj, output=None, basestr=''):
|
|
|
|
if output is None:
|
2021-08-24 18:35:40 +03:00
|
|
|
output = {}
|
2018-05-08 19:10:16 +03:00
|
|
|
if isinstance(obj, list):
|
2020-03-31 03:00:01 +03:00
|
|
|
for i, item in enumerate(obj):
|
|
|
|
self.flatten_qmp_object(item, output, basestr + str(i) + '.')
|
2018-05-08 19:10:16 +03:00
|
|
|
elif isinstance(obj, dict):
|
|
|
|
for key in obj:
|
|
|
|
self.flatten_qmp_object(obj[key], output, basestr + key + '.')
|
|
|
|
else:
|
|
|
|
output[basestr[:-1]] = obj # Strip trailing '.'
|
|
|
|
return output
|
|
|
|
|
|
|
|
def qmp_to_opts(self, obj):
|
|
|
|
obj = self.flatten_qmp_object(obj)
|
2021-08-24 18:35:40 +03:00
|
|
|
output_list = []
|
2018-05-08 19:10:16 +03:00
|
|
|
for key in obj:
|
|
|
|
output_list += [key + '=' + obj[key]]
|
|
|
|
return ','.join(output_list)
|
|
|
|
|
2019-05-23 20:06:40 +03:00
|
|
|
def get_qmp_events_filtered(self, wait=60.0):
|
2018-05-23 18:59:46 +03:00
|
|
|
result = []
|
|
|
|
for ev in self.get_qmp_events(wait=wait):
|
|
|
|
result.append(filter_qmp_event(ev))
|
|
|
|
return result
|
2018-05-08 19:10:16 +03:00
|
|
|
|
2020-03-31 03:00:04 +03:00
|
|
|
    def qmp_log(self, cmd, filters=(), indent=None, **kwargs):
        """Send a QMP command, logging both the request and the reply.

        The request is rendered with "execute" before "arguments" and the
        argument keys recursively sorted (ordered_qmp) so iotests output is
        deterministic across runs.
        """
        full_cmd = OrderedDict((
            ("execute", cmd),
            ("arguments", ordered_qmp(kwargs))
        ))
        log(full_cmd, filters, indent=indent)
        result = self.qmp(cmd, **kwargs)
        log(result, filters, indent=indent)
        return result
|
|
|
|
|
2019-02-18 21:06:46 +03:00
|
|
|
    # Returns None on success, and an error string on failure
    def run_job(self, job, auto_finalize=True, auto_dismiss=False,
                pre_finalize=None, cancel=False, wait=60.0):
        """
        run_job moves a job from creation through to dismissal.

        :param job: String. ID of recently-launched job
        :param auto_finalize: Bool. True if the job was launched with
                              auto_finalize. Defaults to True.
        :param auto_dismiss: Bool. True if the job was launched with
                             auto_dismiss=True. Defaults to False.
        :param pre_finalize: Callback. A callable that takes no arguments to be
                             invoked prior to issuing job-finalize, if any.
        :param cancel: Bool. When true, cancels the job after the pre_finalize
                       callback.
        :param wait: Float. Timeout value specifying how long to wait for any
                     event, in seconds. Defaults to 60.0.
        """
        # Legacy block-job events carry the ID as 'device'; job events as 'id'.
        match_device = {'data': {'device': job}}
        match_id = {'data': {'id': job}}
        events = [
            ('BLOCK_JOB_COMPLETED', match_device),
            ('BLOCK_JOB_CANCELLED', match_device),
            ('BLOCK_JOB_ERROR', match_device),
            ('BLOCK_JOB_READY', match_device),
            ('BLOCK_JOB_PENDING', match_id),
            ('JOB_STATUS_CHANGE', match_id)
        ]
        error = None
        while True:
            ev = filter_qmp_event(self.events_wait(events, timeout=wait))
            if ev['event'] != 'JOB_STATUS_CHANGE':
                # Non-state-change events are logged but drive no action.
                log(ev)
                continue
            status = ev['data']['status']
            if status == 'aborting':
                # Capture the job's error message for the return value.
                result = self.qmp('query-jobs')
                for j in result['return']:
                    if j['id'] == job:
                        error = j['error']
                        log('Job failed: %s' % (j['error']))
            elif status == 'ready':
                self.qmp_log('job-complete', id=job)
            elif status == 'pending' and not auto_finalize:
                if pre_finalize:
                    pre_finalize()
                if cancel:
                    self.qmp_log('job-cancel', id=job)
                else:
                    self.qmp_log('job-finalize', id=job)
            elif status == 'concluded' and not auto_dismiss:
                self.qmp_log('job-dismiss', id=job)
            elif status == 'null':
                # Job fully gone: report the error captured above (if any).
                return error
|
2018-05-29 21:52:57 +03:00
|
|
|
|
2019-12-16 19:53:41 +03:00
|
|
|
# Returns None on success, and an error string on failure
|
|
|
|
def blockdev_create(self, options, job_id='job0', filters=None):
|
|
|
|
if filters is None:
|
|
|
|
filters = [filter_qmp_testfiles]
|
|
|
|
result = self.qmp_log('blockdev-create', filters=filters,
|
|
|
|
job_id=job_id, options=options)
|
|
|
|
|
|
|
|
if 'return' in result:
|
|
|
|
assert result['return'] == {}
|
|
|
|
job_result = self.run_job(job_id)
|
|
|
|
else:
|
|
|
|
job_result = result['error']
|
|
|
|
|
|
|
|
log("")
|
|
|
|
return job_result
|
|
|
|
|
2019-08-01 14:14:09 +03:00
|
|
|
def enable_migration_events(self, name):
|
|
|
|
log('Enabling migration QMP events on %s...' % name)
|
|
|
|
log(self.qmp('migrate-set-capabilities', capabilities=[
|
|
|
|
{
|
|
|
|
'capability': 'events',
|
|
|
|
'state': True
|
|
|
|
}
|
|
|
|
]))
|
|
|
|
|
2020-08-20 18:07:24 +03:00
|
|
|
    def wait_migration(self, expect_runstate: Optional[str]) -> bool:
        """Wait for migration to finish; return True iff it completed.

        On completion, additionally poll until the VM reaches
        @expect_runstate, since the MIGRATION event may arrive while the
        runstate is still finish-migrate.
        """
        while True:
            event = self.event_wait('MIGRATION')
            # We use the default timeout, and with a timeout, event_wait()
            # never returns None
            assert event

            log(event, filters=[filter_qmp_event])
            if event['data']['status'] in ('completed', 'failed'):
                break

        if event['data']['status'] == 'completed':
            # The event may occur in finish-migrate, so wait for the expected
            # post-migration runstate
            runstate = None
            while runstate != expect_runstate:
                runstate = self.qmp('query-status')['return']['status']
            return True
        else:
            return False
|
2019-08-01 14:14:09 +03:00
|
|
|
|
2019-02-01 22:29:11 +03:00
|
|
|
def node_info(self, node_name):
|
|
|
|
nodes = self.qmp('query-named-block-nodes')
|
|
|
|
for x in nodes['return']:
|
|
|
|
if x['node-name'] == node_name:
|
|
|
|
return x
|
|
|
|
return None
|
|
|
|
|
2019-09-20 17:20:50 +03:00
|
|
|
def query_bitmaps(self):
|
|
|
|
res = self.qmp("query-named-block-nodes")
|
|
|
|
return {device['node-name']: device['dirty-bitmaps']
|
|
|
|
for device in res['return'] if 'dirty-bitmaps' in device}
|
|
|
|
|
|
|
|
def get_bitmap(self, node_name, bitmap_name, recording=None, bitmaps=None):
|
|
|
|
"""
|
|
|
|
get a specific bitmap from the object returned by query_bitmaps.
|
|
|
|
:param recording: If specified, filter results by the specified value.
|
|
|
|
:param bitmaps: If specified, use it instead of call query_bitmaps()
|
|
|
|
"""
|
|
|
|
if bitmaps is None:
|
|
|
|
bitmaps = self.query_bitmaps()
|
|
|
|
|
|
|
|
for bitmap in bitmaps[node_name]:
|
|
|
|
if bitmap.get('name', '') == bitmap_name:
|
2020-03-31 03:00:01 +03:00
|
|
|
if recording is None or bitmap.get('recording') == recording:
|
2019-09-20 17:20:50 +03:00
|
|
|
return bitmap
|
|
|
|
return None
|
|
|
|
|
|
|
|
def check_bitmap_status(self, node_name, bitmap_name, fields):
|
|
|
|
ret = self.get_bitmap(node_name, bitmap_name)
|
|
|
|
|
|
|
|
return fields.items() <= ret.items()
|
|
|
|
|
2020-02-18 13:34:49 +03:00
|
|
|
    def assert_block_path(self, root, path, expected_node, graph=None):
        """
        Check whether the node under the given path in the block graph
        is @expected_node.

        @root is the node name of the node where the @path is rooted.

        @path is a string that consists of child names separated by
        slashes. It must begin with a slash.

        Examples for @root + @path:
          - root="qcow2-node", path="/backing/file"
          - root="quorum-node", path="/children.2/file"

        Hypothetically, @path could be empty, in which case it would
        point to @root. However, in practice this case is not useful
        and hence not allowed.

        @expected_node may be None. (All elements of the path but the
        leaf must still exist.)

        @graph may be None or the result of an x-debug-query-block-graph
        call that has already been performed.
        """
        if graph is None:
            graph = self.qmp('x-debug-query-block-graph')['return']

        iter_path = iter(path.split('/'))

        # Must start with a /
        assert next(iter_path) == ''

        node = next((node for node in graph['nodes'] if node['name'] == root),
                    None)

        # An empty @path is not allowed, so the root node must be present
        assert node is not None, 'Root node %s not found' % root

        for child_name in iter_path:
            # node is None means a previous step already fell off the graph.
            assert node is not None, 'Cannot follow path %s%s' % (root, path)

            try:
                # Follow the edge named child_name out of the current node...
                node_id = next(edge['child'] for edge in graph['edges']
                               if (edge['parent'] == node['id'] and
                                   edge['name'] == child_name))

                # ...and resolve its target node.
                node = next(node for node in graph['nodes']
                            if node['id'] == node_id)

            except StopIteration:
                node = None

        if node is None:
            assert expected_node is None, \
                'No node found under %s (but expected %s)' % \
                (path, expected_node)
        else:
            assert node['name'] == expected_node, \
                'Found node %s under %s (but expected %s)' % \
                (node['name'], path, expected_node)
|
2015-04-18 02:50:06 +03:00
|
|
|
|
2012-02-29 17:25:21 +04:00
|
|
|
# Matches a "name[index]" path component, e.g. "children[0]" (see dictpath()).
index_re = re.compile(r'([^\[]+)\[([^\]]+)\]')
|
|
|
|
|
|
|
|
class QMPTestCase(unittest.TestCase):
    '''Abstract base class for QMP test cases'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Many users of this class set a VM property we rely on heavily
        # in the methods below.
        self.vm = None
|
|
|
|
|
2012-02-29 17:25:21 +04:00
|
|
|
    def dictpath(self, d, path):
        '''Traverse a path in a nested dict'''
        for component in path.split('/'):
            # "name[idx]" components first descend into the dict by name,
            # then index into the resulting list.
            m = index_re.match(component)
            if m:
                component, idx = m.groups()
                idx = int(idx)

            if not isinstance(d, dict) or component not in d:
                self.fail(f'failed path traversal for "{path}" in "{d}"')
            d = d[component]

            if m:
                if not isinstance(d, list):
                    self.fail(f'path component "{component}" in "{path}" '
                              f'is not a list in "{d}"')
                try:
                    d = d[idx]
                except IndexError:
                    self.fail(f'invalid index "{idx}" in path "{path}" '
                              f'in "{d}"')
        return d
|
|
|
|
|
2012-09-28 19:23:02 +04:00
|
|
|
def assert_qmp_absent(self, d, path):
|
|
|
|
try:
|
|
|
|
result = self.dictpath(d, path)
|
|
|
|
except AssertionError:
|
|
|
|
return
|
|
|
|
self.fail('path "%s" has value "%s"' % (path, str(result)))
|
|
|
|
|
2012-02-29 17:25:21 +04:00
|
|
|
def assert_qmp(self, d, path, value):
|
2019-05-15 23:15:01 +03:00
|
|
|
'''Assert that the value for a specific path in a QMP dict
|
|
|
|
matches. When given a list of values, assert that any of
|
|
|
|
them matches.'''
|
|
|
|
|
2012-02-29 17:25:21 +04:00
|
|
|
result = self.dictpath(d, path)
|
2019-05-15 23:15:01 +03:00
|
|
|
|
|
|
|
# [] makes no sense as a list of valid values, so treat it as
|
|
|
|
# an actual single value.
|
|
|
|
if isinstance(value, list) and value != []:
|
|
|
|
for v in value:
|
|
|
|
if result == v:
|
|
|
|
return
|
|
|
|
self.fail('no match for "%s" in %s' % (str(result), str(value)))
|
|
|
|
else:
|
|
|
|
self.assertEqual(result, value,
|
2019-10-26 13:12:21 +03:00
|
|
|
'"%s" is "%s", expected "%s"'
|
2020-03-31 03:00:01 +03:00
|
|
|
% (path, str(result), str(value)))
|
2012-02-29 17:25:21 +04:00
|
|
|
|
2013-05-28 19:11:34 +04:00
|
|
|
def assert_no_active_block_jobs(self):
|
|
|
|
result = self.vm.qmp('query-block-jobs')
|
|
|
|
self.assert_qmp(result, 'return', [])
|
|
|
|
|
2016-04-13 06:43:15 +03:00
|
|
|
def assert_has_block_node(self, node_name=None, file_name=None):
|
|
|
|
"""Issue a query-named-block-nodes and assert node_name and/or
|
|
|
|
file_name is present in the result"""
|
|
|
|
def check_equal_or_none(a, b):
|
2020-03-31 03:00:01 +03:00
|
|
|
return a is None or b is None or a == b
|
2016-04-13 06:43:15 +03:00
|
|
|
assert node_name or file_name
|
|
|
|
result = self.vm.qmp('query-named-block-nodes')
|
|
|
|
for x in result["return"]:
|
|
|
|
if check_equal_or_none(x.get("node-name"), node_name) and \
|
|
|
|
check_equal_or_none(x.get("file"), file_name):
|
|
|
|
return
|
2020-03-31 03:00:01 +03:00
|
|
|
self.fail("Cannot find %s %s in result:\n%s" %
|
|
|
|
(node_name, file_name, result))
|
2016-04-13 06:43:15 +03:00
|
|
|
|
2016-10-25 16:11:40 +03:00
|
|
|
def assert_json_filename_equal(self, json_filename, reference):
|
|
|
|
'''Asserts that the given filename is a json: filename and that its
|
|
|
|
content is equal to the given reference object'''
|
|
|
|
self.assertEqual(json_filename[:5], 'json:')
|
2020-03-31 03:00:09 +03:00
|
|
|
self.assertEqual(
|
|
|
|
self.vm.flatten_qmp_object(json.loads(json_filename[5:])),
|
|
|
|
self.vm.flatten_qmp_object(reference)
|
|
|
|
)
|
2016-10-25 16:11:40 +03:00
|
|
|
|
2020-03-31 03:00:09 +03:00
|
|
|
    def cancel_and_wait(self, drive='drive0', force=False,
                        resume=False, wait=60.0):
        '''Cancel a block job and wait for it to finish, returning the event'''
        result = self.vm.qmp('block-job-cancel', device=drive, force=force)
        self.assert_qmp(result, 'return', {})

        # The job may be paused on a breakpoint; let it run so it can cancel.
        if resume:
            self.vm.resume_drive(drive)

        cancelled = False
        result = None
        while not cancelled:
            for event in self.vm.get_qmp_events(wait=wait):
                if event['event'] == 'BLOCK_JOB_COMPLETED' or \
                   event['event'] == 'BLOCK_JOB_CANCELLED':
                    self.assert_qmp(event, 'data/device', drive)
                    result = event
                    cancelled = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    # State transitions are expected; just check the job ID.
                    self.assert_qmp(event, 'data/id', drive)

        self.assert_no_active_block_jobs()
        return result
|
|
|
|
|
2020-03-31 03:00:09 +03:00
|
|
|
    def wait_until_completed(self, drive='drive0', check_offset=True,
                             wait=60.0, error=None):
        '''Wait for a block job to finish, returning the event'''
        while True:
            for event in self.vm.get_qmp_events(wait=wait):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assert_qmp(event, 'data/device', drive)
                    if error is None:
                        # Success path: no error, and (optionally) the job
                        # must have processed its full length.
                        self.assert_qmp_absent(event, 'data/error')
                        if check_offset:
                            self.assert_qmp(event, 'data/offset',
                                            event['data']['len'])
                    else:
                        self.assert_qmp(event, 'data/error', error)
                    self.assert_no_active_block_jobs()
                    return event
                if event['event'] == 'JOB_STATUS_CHANGE':
                    # State transitions are expected; just check the job ID.
                    self.assert_qmp(event, 'data/id', drive)
|
2013-06-24 19:13:19 +04:00
|
|
|
|
2015-06-08 08:56:12 +03:00
|
|
|
def wait_ready(self, drive='drive0'):
|
2020-03-31 03:00:01 +03:00
|
|
|
"""Wait until a BLOCK_JOB_READY event, and return the event."""
|
2019-06-11 22:04:49 +03:00
|
|
|
return self.vm.events_wait([
|
|
|
|
('BLOCK_JOB_READY',
|
|
|
|
{'data': {'type': 'mirror', 'device': drive}}),
|
|
|
|
('BLOCK_JOB_READY',
|
|
|
|
{'data': {'type': 'commit', 'device': drive}})
|
|
|
|
])
|
2015-06-08 08:56:12 +03:00
|
|
|
|
|
|
|
def wait_ready_and_cancel(self, drive='drive0'):
|
|
|
|
self.wait_ready(drive=drive)
|
|
|
|
event = self.cancel_and_wait(drive=drive)
|
2018-11-20 20:12:21 +03:00
|
|
|
self.assertEqual(event['event'], 'BLOCK_JOB_COMPLETED')
|
2015-06-08 08:56:12 +03:00
|
|
|
self.assert_qmp(event, 'data/type', 'mirror')
|
|
|
|
self.assert_qmp(event, 'data/offset', event['data']['len'])
|
|
|
|
|
2019-11-08 15:34:54 +03:00
|
|
|
def complete_and_wait(self, drive='drive0', wait_ready=True,
|
|
|
|
completion_error=None):
|
2015-06-08 08:56:12 +03:00
|
|
|
'''Complete a block job and wait for it to finish'''
|
|
|
|
if wait_ready:
|
|
|
|
self.wait_ready(drive=drive)
|
|
|
|
|
|
|
|
result = self.vm.qmp('block-job-complete', device=drive)
|
|
|
|
self.assert_qmp(result, 'return', {})
|
|
|
|
|
2019-11-08 15:34:54 +03:00
|
|
|
event = self.wait_until_completed(drive=drive, error=completion_error)
|
2019-06-11 22:04:49 +03:00
|
|
|
self.assertTrue(event['data']['type'] in ['mirror', 'commit'])
|
2015-06-08 08:56:12 +03:00
|
|
|
|
2018-03-10 11:27:31 +03:00
|
|
|
    def pause_wait(self, job_id='job0'):
        """Poll until the job is actually paused; return its status dict."""
        with Timeout(3, "Timeout waiting for job to pause"):
            while True:
                result = self.vm.qmp('query-block-jobs')
                found = False
                for job in result['return']:
                    if job['device'] == job_id:
                        found = True
                        # Paused but still busy means the pause hasn't taken
                        # effect yet; keep polling.
                        if job['paused'] and not job['busy']:
                            return job
                        break
                # The job must at least exist while we wait for it to pause.
                assert found
|
2017-07-21 17:41:21 +03:00
|
|
|
|
2018-03-10 11:27:31 +03:00
|
|
|
def pause_job(self, job_id='job0', wait=True):
|
|
|
|
result = self.vm.qmp('block-job-pause', device=job_id)
|
|
|
|
self.assert_qmp(result, 'return', {})
|
|
|
|
if wait:
|
|
|
|
return self.pause_wait(job_id)
|
|
|
|
return result
|
|
|
|
|
2019-09-17 12:19:59 +03:00
|
|
|
    def case_skip(self, reason):
        '''Skip this test case'''
        # Record the skip in the .casenotrun file before telling unittest,
        # since skipTest() raises and would bypass the bookkeeping.
        case_notrun(reason)
        self.skipTest(reason)
|
|
|
|
|
2017-07-21 17:41:21 +03:00
|
|
|
|
2012-02-29 17:25:21 +04:00
|
|
|
def notrun(reason):
    '''Skip this test suite'''
    # Each test in qemu-iotests has a number ("seq")
    seq = os.path.basename(sys.argv[0])

    notrun_file = '%s/%s.notrun' % (output_dir, seq)
    with open(notrun_file, 'w', encoding='utf-8') as outfile:
        outfile.write(reason + '\n')
    logger.warning("%s not run: %s", seq, reason)
    sys.exit(0)
|
|
|
|
|
2019-03-07 16:33:59 +03:00
|
|
|
def case_notrun(reason):
    '''Mark this test case as not having been run (without actually
    skipping it, that is left to the caller). See
    QMPTestCase.case_skip() for a variant that actually skips the
    current test case.'''

    # Each test in qemu-iotests has a number ("seq")
    seq = os.path.basename(sys.argv[0])

    # Append: several cases of the same test may be marked not-run.
    casenotrun_file = '%s/%s.casenotrun' % (output_dir, seq)
    with open(casenotrun_file, 'a', encoding='utf-8') as outfile:
        outfile.write(' [case not run] ' + reason + '\n')
|
2019-03-07 16:33:59 +03:00
|
|
|
|
2020-03-31 03:00:13 +03:00
|
|
|
def _verify_image_format(supported_fmts: Sequence[str] = (),
                         unsupported_fmts: Sequence[str] = ()) -> None:
    """Skip the test unless imgfmt satisfies the given allow/deny lists."""
    generic_enabled = os.environ.get('IMGFMT_GENERIC', 'true') == 'true'
    if 'generic' in supported_fmts and generic_enabled:
        # similar to
        #   _supported_fmt generic
        # for bash tests
        supported_fmts = ()

    allowed = not supported_fmts or imgfmt in supported_fmts
    if not allowed or imgfmt in unsupported_fmts:
        notrun('not suitable for this image format: %s' % imgfmt)

    if imgfmt == 'luks':
        verify_working_luks()
|
|
|
|
|
2020-03-31 03:00:13 +03:00
|
|
|
def _verify_protocol(supported: Sequence[str] = (),
                     unsupported: Sequence[str] = ()) -> None:
    """Skip the test unless imgproto satisfies the given allow/deny lists."""
    assert not (supported and unsupported)

    if 'generic' in supported:
        return

    allowed = not supported or imgproto in supported
    if not allowed or imgproto in unsupported:
        notrun('not suitable for this protocol: %s' % imgproto)
|
|
|
|
|
2020-03-31 03:00:13 +03:00
|
|
|
def _verify_platform(supported: Sequence[str] = (),
                     unsupported: Sequence[str] = ()) -> None:
    """Skip the test if the host OS does not match the given prefix lists."""
    def matches(prefixes):
        return any(sys.platform.startswith(p) for p in prefixes)

    if matches(unsupported):
        notrun('not suitable for this OS: %s' % sys.platform)

    if supported and not matches(supported):
        notrun('not suitable for this OS: %s' % sys.platform)
|
2015-01-04 04:53:52 +03:00
|
|
|
|
2020-03-31 03:00:13 +03:00
|
|
|
def _verify_cache_mode(supported_cache_modes: Sequence[str] = ()) -> None:
    """Skip the test if the configured cache mode is not in the allow list."""
    if supported_cache_modes and cachemode not in supported_cache_modes:
        notrun('not suitable for this cache mode: %s' % cachemode)
|
|
|
|
|
2020-05-11 19:35:28 +03:00
|
|
|
def _verify_aio_mode(supported_aio_modes: Sequence[str] = ()) -> None:
    """Skip the test if the configured AIO mode is not in the allow list."""
    if supported_aio_modes and aiomode not in supported_aio_modes:
        notrun('not suitable for this aio mode: %s' % aiomode)
|
|
|
|
|
2020-10-21 17:58:49 +03:00
|
|
|
def _verify_formats(required_formats: Sequence[str] = ()) -> None:
    """Skip the test if any required format is missing from the whitelist."""
    missing = list(set(required_formats) - set(supported_formats()))
    if missing:
        notrun(f'formats {missing} are not whitelisted')
|
|
|
|
|
2021-01-25 21:50:55 +03:00
|
|
|
|
|
|
|
def _verify_virtio_blk() -> None:
    """Skip the test if this QEMU binary lacks the virtio-blk device."""
    device_help = qemu_pipe('-M', 'none', '-device', 'help')
    if 'virtio-blk' not in device_help:
        notrun('Missing virtio-blk in QEMU binary')
|
|
|
|
|
2021-03-23 19:53:07 +03:00
|
|
|
def _verify_virtio_scsi_pci_or_ccw() -> None:
    """Skip the test if neither virtio-scsi transport is available."""
    device_help = qemu_pipe('-M', 'none', '-device', 'help')
    if 'virtio-scsi-pci' not in device_help and \
            'virtio-scsi-ccw' not in device_help:
        notrun('Missing virtio-scsi-pci or virtio-scsi-ccw in QEMU binary')
|
|
|
|
|
2021-01-25 21:50:55 +03:00
|
|
|
|
2016-10-28 10:08:17 +03:00
|
|
|
def supports_quorum():
    # True iff qemu-img was built with the quorum block driver enabled.
    return 'quorum' in qemu_img_pipe('--help')
|
|
|
|
|
2016-04-05 12:21:46 +03:00
|
|
|
def verify_quorum():
    '''Skip test suite if quorum support is not available'''
    if not supports_quorum():
        notrun('quorum support missing')
|
|
|
|
|
2020-06-25 15:55:34 +03:00
|
|
|
def has_working_luks() -> Tuple[bool, str]:
    """
    Check whether our LUKS driver can actually create images
    (this extends to LUKS encryption for qcow2).

    If not, return the reason why.
    """
    img_file = f'{test_dir}/luks-test.luks'
    output, status = qemu_img_pipe_and_status(
        'create', '-f', 'luks',
        '--object', luks_default_secret_object,
        '-o', luks_default_key_secret_opt,
        '-o', 'iter-time=10',
        img_file, '1G')

    # The probe image is only a means to an end; remove it regardless of
    # whether creation succeeded (it may never have been created).
    try:
        os.remove(img_file)
    except OSError:
        pass

    if status == 0:
        return (True, '')

    # Prefer the message qemu-img printed specifically for this image;
    # fall back to the complete output if no such line exists.
    marker = img_file + ':'
    reason = output
    for line in output.splitlines():
        if marker in line:
            reason = line.split(marker, 1)[1].strip()
            break

    return (False, reason)
|
|
|
|
|
|
|
|
def verify_working_luks():
    """
    Skip test suite if LUKS does not work
    """
    working, reason = has_working_luks()
    if not working:
        notrun(reason)
|
|
|
|
|
2020-09-24 18:27:14 +03:00
|
|
|
def qemu_pipe(*args: str) -> str:
    """
    Run qemu with an option to print something and exit (e.g. a help option).

    :return: QEMU's stdout output.
    """
    cmd = [qemu_prog, *qemu_opts, *args]
    stdout, _ = qemu_tool_pipe_and_status('qemu', cmd)
    return stdout
|
2019-03-07 16:33:59 +03:00
|
|
|
|
|
|
|
def supported_formats(read_only=False):
    '''Set 'read_only' to True to check ro-whitelist
    Otherwise, rw-whitelist is checked'''

    # Memoize the parsed whitelists on the function object itself so the
    # qemu binary is queried at most once per (ro/rw) variant.
    cache = getattr(supported_formats, "formats", None)
    if cache is None:
        cache = {}
        supported_formats.formats = cache

    if read_only not in cache:
        help_text = qemu_pipe("-drive", "format=help")
        # Line 0 of the help output lists the rw-whitelist, line 1 the
        # ro-whitelist; each is "<label>: fmt fmt fmt ...".
        row = help_text.splitlines()[1 if read_only else 0]
        cache[read_only] = row.split(":")[1].split()

    return cache[read_only]
|
2019-03-07 16:33:59 +03:00
|
|
|
|
2020-03-31 03:00:04 +03:00
|
|
|
def skip_if_unsupported(required_formats=(), read_only=False):
    '''Skip Test Decorator
       Runs the test if all the required formats are whitelisted'''
    def skip_test_decorator(func):
        def func_wrapper(test_case: QMPTestCase, *args: List[Any],
                         **kwargs: Dict[str, Any]) -> None:
            # required_formats may itself be a callable that derives the
            # format list from the test case at run time.
            fmts = (required_formats(test_case) if callable(required_formats)
                    else required_formats)

            missing = list(set(fmts) - set(supported_formats(read_only)))
            if not missing:
                func(test_case, *args, **kwargs)
            else:
                msg = f'{test_case}: formats {missing} are not whitelisted'
                test_case.case_skip(msg)
        return func_wrapper
    return skip_test_decorator
|
|
|
|
|
2020-06-17 13:48:18 +03:00
|
|
|
def skip_for_formats(formats: Sequence[str] = ()) \
    -> Callable[[Callable[[QMPTestCase, List[Any], Dict[str, Any]], None]],
                Callable[[QMPTestCase, List[Any], Dict[str, Any]], None]]:
    '''Skip Test Decorator
       Skips the test for the given formats'''
    def skip_test_decorator(func):
        def func_wrapper(test_case: QMPTestCase, *args: List[Any],
                         **kwargs: Dict[str, Any]) -> None:
            # Run the test only when the current image format is not on
            # the exclusion list.
            if imgfmt not in formats:
                func(test_case, *args, **kwargs)
            else:
                test_case.case_skip(
                    f'{test_case}: Skipped for format {imgfmt}')
        return func_wrapper
    return skip_test_decorator
|
|
|
|
|
2019-10-18 14:46:42 +03:00
|
|
|
def skip_if_user_is_root(func):
    '''Skip Test Decorator
       Runs the test only without root permissions'''
    def func_wrapper(*args, **kwargs):
        # Root bypasses permission checks, which would defeat such tests.
        if os.getuid() != 0:
            return func(*args, **kwargs)
        case_notrun('{}: cannot be run as root'.format(args[0]))
        return None
    return func_wrapper
|
|
|
|
|
2021-05-03 14:01:06 +03:00
|
|
|
# We need to filter out the time taken from the output so that
|
|
|
|
# qemu-iotest can reliably diff the results against master output,
|
|
|
|
# and hide skipped tests from the reference output.
|
|
|
|
|
|
|
|
class ReproducibleTestResult(unittest.TextTestResult):
    """TextTestResult whose dot-mode output hides skipped tests.

    Printing "." instead of "s" keeps the progress line identical no
    matter which tests were skipped on this particular host.
    """

    def addSkip(self, test, reason):
        # Same as TextTestResult, but print dot instead of "s":
        # record the skip via the plain TestResult bookkeeping, ...
        unittest.TestResult.addSkip(self, test, reason)
        # ... then emit our own progress marker.
        if self.showAll:
            self.stream.writeln("skipped {0!r}".format(reason))
        elif self.dots:
            self.stream.write(".")
            self.stream.flush()
|
|
|
|
|
|
|
|
class ReproducibleStreamWrapper:
    """Stream wrapper that normalizes unittest runner output for diffing.

    The text runner prints the wall-clock duration ("Ran N tests in
    0.123s") and a skipped-test counter ("(skipped=N)"), both of which
    vary between runs and hosts.  This wrapper rewrites those fragments
    on the fly so the output can be compared against a fixed reference.
    """

    def __init__(self, stream: TextIO):
        self.stream = stream

    def __getattr__(self, attr):
        # Delegate everything else (flush, writeln, ...) to the wrapped
        # stream.  'stream' must raise here, or a missing instance dict
        # (e.g. during copy/pickle protocols) would recurse forever.
        if attr in ('stream', '__getstate__'):
            raise AttributeError(attr)
        return getattr(self.stream, attr)

    def write(self, arg=None):
        # Bug fix: the default arg=None previously crashed in re.sub()
        # with a TypeError; treat a no-payload call as an empty write.
        if arg is None:
            arg = ''
        # Drop the timing from the summary ("Ran 3 tests in 0.5s" ->
        # "Ran 3 tests") and hide the skipped-test counter.
        arg = re.sub(r'Ran (\d+) tests? in [\d.]+s', r'Ran \1 tests', arg)
        arg = re.sub(r' \(skipped=\d+\)', r'', arg)
        self.stream.write(arg)
|
|
|
|
|
|
|
|
class ReproducibleTestRunner(unittest.TextTestRunner):
    """TextTestRunner preconfigured for diffable, reproducible output.

    The output stream is filtered through ReproducibleStreamWrapper and
    skips are rendered by ReproducibleTestResult, so the report carries
    no timing or host-specific variation.
    """

    def __init__(self, stream: Optional[TextIO] = None,
                 resultclass: Type[unittest.TestResult] = ReproducibleTestResult,
                 **kwargs: Any) -> None:
        # Default to stdout, then normalize everything the base runner
        # writes before it reaches the real stream.
        wrapped = ReproducibleStreamWrapper(stream or sys.stdout)
        super().__init__(descriptions=True,
                         resultclass=resultclass,
                         stream=wrapped,             # type: ignore
                         **kwargs)
|
|
|
|
|
2021-05-03 14:01:07 +03:00
|
|
|
def execute_unittest(argv: List[str], debug: bool = False) -> None:
    """Executes unittests within the calling module."""

    # Some tests have warnings, especially ResourceWarnings for unclosed
    # files and sockets. Ignore them for now to ensure reproducibility of
    # the test output.
    warning_mode = None if sys.warnoptions else 'ignore'
    verbosity = 2 if debug else 1
    unittest.main(argv=argv,
                  testRunner=ReproducibleTestRunner,
                  verbosity=verbosity,
                  warnings=warning_mode)
|
2016-03-21 17:11:45 +03:00
|
|
|
|
2020-03-31 03:00:11 +03:00
|
|
|
def execute_setup_common(supported_fmts: Sequence[str] = (),
                         supported_platforms: Sequence[str] = (),
                         supported_cache_modes: Sequence[str] = (),
                         supported_aio_modes: Sequence[str] = (),
                         unsupported_fmts: Sequence[str] = (),
                         supported_protocols: Sequence[str] = (),
                         unsupported_protocols: Sequence[str] = (),
                         required_fmts: Sequence[str] = ()) -> bool:
    """
    Perform necessary setup for either script-style or unittest-style tests.

    :return: Bool; Whether or not debug mode has been requested via the CLI.
    """
    # Note: Python 3.6 and pylint do not like 'Collection' so use 'Sequence'.

    # Consume a single '-d' flag (if present) before unittest parses argv.
    try:
        sys.argv.remove('-d')
        debug = True
    except ValueError:
        debug = False
    logging.basicConfig(level=(logging.DEBUG if debug else logging.WARN))

    # Each check either passes or aborts the test via notrun().
    _verify_image_format(supported_fmts, unsupported_fmts)
    _verify_protocol(supported_protocols, unsupported_protocols)
    _verify_platform(supported=supported_platforms)
    _verify_cache_mode(supported_cache_modes)
    _verify_aio_mode(supported_aio_modes)
    _verify_formats(required_fmts)
    _verify_virtio_blk()

    return debug
|
|
|
|
|
|
|
|
def execute_test(*args, test_function=None, **kwargs):
    """Run either unittest or script-style tests."""

    debug = execute_setup_common(*args, **kwargs)
    if test_function:
        test_function()
    else:
        execute_unittest(sys.argv, debug)
|
|
|
|
|
2020-03-31 03:00:14 +03:00
|
|
|
def activate_logging():
    """Activate iotests.log() output to stdout for script-style tests."""
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    # Emit the bare message so output matches the reference files.
    stdout_handler.setFormatter(logging.Formatter('%(message)s'))
    test_logger.addHandler(stdout_handler)
    test_logger.setLevel(logging.INFO)
    # Keep test output away from any root-logger handlers.
    test_logger.propagate = False
|
|
|
|
|
2020-03-31 03:00:11 +03:00
|
|
|
# This is called from script-style iotests without a single point of entry
|
|
|
|
def script_initialize(*args, **kwargs):
    """Initialize script-style tests without running any tests.

    Enables iotests.log() output and performs the common environment
    checks; args/kwargs are forwarded to execute_setup_common() (see its
    signature for the supported_* / unsupported_* keyword arguments).
    """
    activate_logging()
    execute_setup_common(*args, **kwargs)
|
|
|
|
|
|
|
|
# This is called from script-style iotests with a single point of entry
|
2019-07-29 23:35:53 +03:00
|
|
|
def script_main(test_function, *args, **kwargs):
    """Run script-style tests outside of the unittest framework.

    :param test_function: zero-argument callable implementing the test;
        remaining args/kwargs are forwarded through execute_test() to
        execute_setup_common().
    """
    activate_logging()
    execute_test(*args, test_function=test_function, **kwargs)
|
2012-02-29 17:25:21 +04:00
|
|
|
|
2020-03-31 03:00:11 +03:00
|
|
|
# This is called from unittest style iotests
|
2019-07-29 23:35:53 +03:00
|
|
|
def main(*args, **kwargs):
    """Run tests using the unittest framework.

    args/kwargs are forwarded through execute_test() to
    execute_setup_common(); the test cases themselves are collected by
    unittest from the calling module.
    """
    execute_test(*args, **kwargs)
|