qemu/tests/Makefile.include

# -*- Mode: makefile -*-
.PHONY: check-help
check-help:
@echo "Regression testing targets:"
@echo
@echo " $(MAKE) check Run block, qapi-schema, unit, softfloat, qtest and decodetree tests"
@echo
@echo " $(MAKE) check-qtest-TARGET Run qtest tests for given target"
@echo " $(MAKE) check-qtest Run qtest tests"
@echo " $(MAKE) check-unit Run qobject tests"
@echo " $(MAKE) check-speed Run qobject speed tests"
@echo " $(MAKE) check-qapi-schema Run QAPI schema tests"
@echo " $(MAKE) check-block Run block tests"
ifeq ($(CONFIG_TCG),y)
@echo " $(MAKE) check-tcg Run TCG tests"
@echo " $(MAKE) check-softfloat Run FPU emulation tests"
endif
@echo " $(MAKE) check-acceptance Run all acceptance (functional) tests"
@echo
@echo " $(MAKE) check-report.tap Generates an aggregated TAP test report"
@echo " $(MAKE) check-venv Creates a Python venv for tests"
@echo " $(MAKE) check-clean Clean the tests and related data"
@echo
@echo "The following are useful for CI builds"
@echo " $(MAKE) check-build Build most test binaris"
@echo " $(MAKE) get-vm-images Downloads all images used by acceptance tests, according to configured targets (~350 MB each, 1.5 GB max)"
@echo
@echo
@echo "The variable SPEED can be set to control the gtester speed setting."
@echo "Default options are -k and (for $(MAKE) V=1) --verbose; they can be"
@echo "changed with variable GTESTER_OPTIONS."
ifneq ($(wildcard config-host.mak),)
export SRC_PATH
# Get the list of all supported sysemu targets
SYSEMU_TARGET_LIST := $(subst -softmmu.mak,,$(notdir \
$(wildcard $(SRC_PATH)/default-configs/*-softmmu.mak)))
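# Each default-configs/*-softmmu.mak file contributes its target name, e.g.
# default-configs/x86_64-softmmu.mak adds "x86_64" to SYSEMU_TARGET_LIST.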
check-speed-$(CONFIG_BLOCK) += tests/benchmark-crypto-hash$(EXESUF)
check-speed-$(CONFIG_BLOCK) += tests/benchmark-crypto-hmac$(EXESUF)
check-speed-$(CONFIG_BLOCK) += tests/benchmark-crypto-cipher$(EXESUF)
QEMU_CFLAGS += -I$(SRC_PATH)/tests -I$(SRC_PATH)/tests/qtest
tests/benchmark-crypto-hash$(EXESUF): tests/benchmark-crypto-hash.o $(test-crypto-obj-y)
tests/benchmark-crypto-hmac$(EXESUF): tests/benchmark-crypto-hmac.o $(test-crypto-obj-y)
tests/benchmark-crypto-cipher$(EXESUF): tests/benchmark-crypto-cipher.o $(test-crypto-obj-y)
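# The crypto benchmarks are linked like ordinary test binaries; they are run
# by "make check-speed" (see the check-speed rule below) and, once built, can
# also be invoked directly, e.g. tests/benchmark-crypto-hash$(EXESUF).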
tests/migration/stress$(EXESUF): tests/migration/stress.o
	$(call quiet-command, $(LINKPROG) -static -O3 $(PTHREAD_LIB) -o $@ $< ,"LINK","$(TARGET_DIR)$@")
INITRD_WORK_DIR=tests/migration/initrd
tests/migration/initrd-stress.img: tests/migration/stress$(EXESUF)
	mkdir -p $(INITRD_WORK_DIR)
	cp $< $(INITRD_WORK_DIR)/init
	(cd $(INITRD_WORK_DIR) && (find | cpio --quiet -o -H newc | gzip -9)) > $@
	rm $(INITRD_WORK_DIR)/init
	rmdir $(INITRD_WORK_DIR)
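# The resulting initrd is a gzip-compressed cpio archive (newc format) whose
# only content is the statically linked "stress" binary installed as /init,
# so the guest kernel starts the workload with no other userspace setup.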
SPEED = quick
# gtester tests, possibly with verbose output
# do_test_tap runs all tests, even if some of them fail, while do_test_human
# stops at the first failure unless -k is given on the command line
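# Each test is run with MALLOC_PERTURB_ set to a random value in 1..255
# (unless the caller already exported one); glibc then fills allocated and
# freed memory with that byte, which helps catch use of uninitialised or
# freed heap memory.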
define do_test_human_k
	$(quiet-@)rc=0; $(foreach COMMAND, $1, \
	  $(call quiet-command-run, \
	  export MALLOC_PERTURB_=$${MALLOC_PERTURB_:-$$(( $${RANDOM:-0} % 255 + 1))} $2; \
	  $(COMMAND) -m=$(SPEED) -k --tap < /dev/null \
	  | ./scripts/tap-driver.pl --test-name="$(notdir $(COMMAND))" $(if $(V),, --show-failures-only) \
	  || rc=$$?;, "TEST", "$@: $(COMMAND)")) exit $$rc
endef
define do_test_human_no_k
	$(foreach COMMAND, $1, \
	  $(call quiet-command, \
	  MALLOC_PERTURB_=$${MALLOC_PERTURB_:-$$(( $${RANDOM:-0} % 255 + 1))} $2 \
	  $(COMMAND) -m=$(SPEED) -k --tap < /dev/null \
	  | ./scripts/tap-driver.pl --test-name="$(notdir $(COMMAND))" $(if $(V),, --show-failures-only), \
	  "TEST", "$@: $(COMMAND)")
	)
endef
do_test_human = \
$(if $(findstring k, $(MAKEFLAGS)), $(do_test_human_k), $(do_test_human_no_k))
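# $(findstring k, $(MAKEFLAGS)) is how "make -k" is detected: with -k every
# test binary runs and failures are collected via rc, without it the first
# failing test binary aborts the run.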
define do_test_tap
	$(call quiet-command, \
	  { export MALLOC_PERTURB_=$${MALLOC_PERTURB_:-$$(( $${RANDOM:-0} % 255 + 1))} $2; \
	  $(foreach COMMAND, $1, \
	    $(COMMAND) -m=$(SPEED) -k --tap < /dev/null \
	    | sed "s/^\(not \)\?ok [0-9]* /&$(notdir $(COMMAND)) /" || true; ) } \
	  | ./scripts/tap-merge.pl | tee "$@" \
	  | ./scripts/tap-driver.pl $(if $(V),, --show-failures-only), \
	  "TAP","$@")
endef
check-speed: $(check-speed-y)
	$(call do_test_human, $^)
# Per guest TCG tests
BUILD_TCG_TARGET_RULES=$(patsubst %,build-tcg-tests-%, $(TARGET_DIRS))
CLEAN_TCG_TARGET_RULES=$(patsubst %,clean-tcg-tests-%, $(TARGET_DIRS))
RUN_TCG_TARGET_RULES=$(patsubst %,run-tcg-tests-%, $(TARGET_DIRS))
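# For each configured target T in $(TARGET_DIRS) these expand to
# build-tcg-tests-T, run-tcg-tests-T and clean-tcg-tests-T, e.g.
# run-tcg-tests-x86_64-softmmu (target name illustrative).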
# Probe for the Docker Builds needed for each build
$(foreach PROBE_TARGET,$(TARGET_DIRS), \
$(eval -include $(SRC_PATH)/tests/tcg/Makefile.prereqs))
build-tcg-tests-%: $(if $(CONFIG_PLUGIN),plugins)
	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \
		-f $(SRC_PATH)/tests/tcg/Makefile.qemu \
		SRC_PATH=$(SRC_PATH) \
		V="$(V)" TARGET="$*" guest-tests, \
		"BUILD", "TCG tests for $*")
run-tcg-tests-%: build-tcg-tests-% all
	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \
		-f $(SRC_PATH)/tests/tcg/Makefile.qemu \
		SRC_PATH=$(SRC_PATH) SPEED="$(SPEED)" \
		V="$(V)" TARGET="$*" run-guest-tests, \
		"RUN", "TCG tests for $*")
clean-tcg-tests-%:
	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \
		-f $(SRC_PATH)/tests/tcg/Makefile.qemu \
		SRC_PATH=$(SRC_PATH) TARGET="$*" clean-guest-tests, \
		"CLEAN", "TCG tests for $*")
.PHONY: build-tcg
build-tcg: $(BUILD_TCG_TARGET_RULES)
.PHONY: check-tcg
check-tcg: $(RUN_TCG_TARGET_RULES)
.PHONY: clean-tcg
clean-tcg: $(CLEAN_TCG_TARGET_RULES)
# Python venv for running tests
.PHONY: check-venv check-acceptance
TESTS_VENV_DIR=$(BUILD_DIR)/tests/venv
TESTS_VENV_REQ=$(SRC_PATH)/tests/requirements.txt
TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results
# Controls the output generated by Avocado when running tests.
# Any number of comma-separated loggers are accepted. For more
# information please refer to "avocado --help".
AVOCADO_SHOW=app
AVOCADO_TAGS=$(patsubst %-softmmu,-t arch:%, $(filter %-softmmu,$(TARGET_DIRS)))
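# AVOCADO_TAGS turns each configured softmmu target into an avocado tag
# filter, e.g. (illustrative) x86_64-softmmu becomes "-t arch:x86_64", so
# only tests tagged for the built architectures are selected.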
$(TESTS_VENV_DIR): $(TESTS_VENV_REQ)
	$(call quiet-command, \
            $(PYTHON) -m venv --system-site-packages $@, \
            VENV, $@)
	$(call quiet-command, \
            $(TESTS_VENV_DIR)/bin/python -m pip -q install -r $(TESTS_VENV_REQ), \
            PIP, $(TESTS_VENV_REQ))
	$(call quiet-command, touch $@)
$(TESTS_RESULTS_DIR):
	$(call quiet-command, mkdir -p $@, \
            MKDIR, $@)
check-venv: $(TESTS_VENV_DIR)
FEDORA_31_ARCHES_CANDIDATES=$(patsubst ppc64,ppc64le,$(TARGETS))
FEDORA_31_ARCHES := x86_64 aarch64 ppc64le s390x
FEDORA_31_DOWNLOAD=$(filter $(FEDORA_31_ARCHES),$(FEDORA_31_ARCHES_CANDIDATES))
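# Only download images for architectures that are both configured as QEMU
# targets and actually published as Fedora 31 cloud images (the filter above
# intersects the two lists).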
# download one specific Fedora 31 image
get-vm-image-fedora-31-%: check-venv
	$(call quiet-command, \
             $(TESTS_VENV_DIR)/bin/python -m avocado vmimage get \
             --distro=fedora --distro-version=31 --arch=$*, \
             "AVOCADO", "Downloading acceptance tests VM image for $*")
# download all vm images, according to defined targets
get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, $(FEDORA_31_DOWNLOAD))
check-acceptance: check-venv $(TESTS_RESULTS_DIR) get-vm-images
	$(call quiet-command, \
            $(TESTS_VENV_DIR)/bin/python -m avocado \
            --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \
            --filter-by-tags-include-empty --filter-by-tags-include-empty-key \
            $(AVOCADO_TAGS) \
            $(if $(GITLAB_CI),,--failfast=on) tests/acceptance, \
            "AVOCADO", "tests/acceptance")
# Consolidated targets
.PHONY: check-block check check-clean get-vm-images
check:
ifeq ($(CONFIG_TOOLS)$(CONFIG_POSIX),yy)
QEMU_IOTESTS_HELPERS-$(CONFIG_LINUX) = tests/qemu-iotests/socket_scm_helper$(EXESUF)
check: check-block
check-block: $(SRC_PATH)/tests/check-block.sh qemu-img$(EXESUF) \
qemu-io$(EXESUF) qemu-nbd$(EXESUF) $(QEMU_IOTESTS_HELPERS-y) \
$(patsubst %-softmmu,qemu-system-%,$(filter %-softmmu,$(TARGET_DIRS)))
	@$<
endif
check-build: $(QEMU_IOTESTS_HELPERS-y)
check-clean:
	rm -rf tests/*.o tests/*/*.o $(QEMU_IOTESTS_HELPERS-y)
	rm -rf $(TESTS_VENV_DIR) $(TESTS_RESULTS_DIR)
clean: check-clean
# Include automatically generated dependency files
-include $(wildcard tests/*.d)
endif