e287072b40
If there is a failing iotest, the output is currently not logged to the
console anymore. To get this working again, we need to run the meson test
runner with "--print-errorlogs" (and without "--verbose" due to a current
meson bug that will be fixed here:
https://github.com/mesonbuild/meson/commit/c3f145ca2b9f5.patch ).
We could update the "meson test" call in tests/Makefile.include, but it is
actually nicer and easier if we simply stop treating the iotests as a
separate test target and instead integrate them along with the other test
suites. This has the disadvantage of losing the detailed progress indication
there, but since that only worked right in single-threaded "make -j1" mode
anyway, it is not a huge loss right now.

Signed-off-by: Thomas Huth <thuth@redhat.com>
Message-Id: <20220310075048.2303495-1-thuth@redhat.com>
Tested-by: Hanna Reitz <hreitz@redhat.com>
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
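As a rough illustration of the behaviour described above (a sketch, not part
of this patch; the suite name "block" is an assumption), the output of a
failing test can be recovered from the build directory with meson itself:

    # re-print the captured log of every failing test
    meson test --print-errorlogs

    # limit the run to one suite, assuming the iotests end up grouped
    # under a "block" suite
    meson test --suite block --print-errorlogs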
tests/Makefile.include · 161 lines · 5.7 KiB
# -*- Mode: makefile -*-

.PHONY: check-help
check-help:
	@echo "Regression testing targets:"
	@echo " $(MAKE) check                  Run block, qapi-schema, unit, softfloat, qtest and decodetree tests"
	@echo " $(MAKE) bench                  Run speed tests"
	@echo
	@echo "Individual test suites:"
	@echo " $(MAKE) check-qtest-TARGET     Run qtest tests for given target"
	@echo " $(MAKE) check-qtest            Run qtest tests"
	@echo " $(MAKE) check-unit             Run qobject tests"
	@echo " $(MAKE) check-qapi-schema      Run QAPI schema tests"
	@echo " $(MAKE) check-block            Run block tests"
ifneq ($(filter $(all-check-targets), check-softfloat),)
	@echo " $(MAKE) check-tcg              Run TCG tests"
	@echo " $(MAKE) check-softfloat        Run FPU emulation tests"
endif
	@echo " $(MAKE) check-avocado          Run avocado (integration) tests for currently configured targets"
	@echo
	@echo " $(MAKE) check-report.tap       Generates an aggregated TAP test report"
	@echo " $(MAKE) check-venv             Creates a Python venv for tests"
	@echo " $(MAKE) check-clean            Clean the tests and related data"
	@echo
	@echo "The following are useful for CI builds"
	@echo " $(MAKE) check-build            Build most test binaries"
	@echo " $(MAKE) get-vm-images          Downloads all images used by avocado tests, according to configured targets (~350 MB each, 1.5 GB max)"
	@echo
	@echo
	@echo "The variable SPEED can be set to control the gtester speed setting."
	@echo "Default options are -k and (for $(MAKE) V=1) --verbose; they can be"
	@echo "changed with variable GTESTER_OPTIONS."

ifneq ($(wildcard config-host.mak),)
export SRC_PATH

SPEED = quick
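# SPEED is forwarded to the per-target sub-makes below; e.g. a hypothetical
# "make check-tcg SPEED=slow" would request a slower, more thorough run,
# assuming the individual test suites define such a set.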

# Build up our target list from the filtered list of ninja targets
TARGETS=$(patsubst libqemu-%.fa, %, $(filter libqemu-%.fa, $(ninja-targets)))

# Per guest TCG tests
BUILD_TCG_TARGET_RULES=$(patsubst %,build-tcg-tests-%, $(TARGETS))
CLEAN_TCG_TARGET_RULES=$(patsubst %,clean-tcg-tests-%, $(TARGETS))
RUN_TCG_TARGET_RULES=$(patsubst %,run-tcg-tests-%, $(TARGETS))
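# (e.g. with an x86_64-softmmu target configured, "make run-tcg-tests-x86_64-softmmu"
# would build and run the guest tests for just that target; the exact rule
# names depend on the TARGETS list derived above)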

# Probe for the Docker Builds needed for each build
$(foreach PROBE_TARGET,$(TARGET_DIRS), \
	$(eval -include $(SRC_PATH)/tests/tcg/Makefile.prereqs))

$(BUILD_TCG_TARGET_RULES): build-tcg-tests-%: $(if $(CONFIG_PLUGIN),test-plugins)
	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \
		-f $(SRC_PATH)/tests/tcg/Makefile.qemu \
		SRC_PATH=$(SRC_PATH) \
		V="$(V)" TARGET="$*" guest-tests, \
		"BUILD", "TCG tests for $*")

$(RUN_TCG_TARGET_RULES): run-tcg-tests-%: build-tcg-tests-% all
	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \
		-f $(SRC_PATH)/tests/tcg/Makefile.qemu \
		SRC_PATH=$(SRC_PATH) SPEED="$(SPEED)" \
		V="$(V)" TARGET="$*" run-guest-tests, \
		"RUN", "TCG tests for $*")

$(CLEAN_TCG_TARGET_RULES): clean-tcg-tests-%:
	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) \
		-f $(SRC_PATH)/tests/tcg/Makefile.qemu \
		SRC_PATH=$(SRC_PATH) TARGET="$*" clean-guest-tests, \
		"CLEAN", "TCG tests for $*")

.PHONY: build-tcg
build-tcg: $(BUILD_TCG_TARGET_RULES)

.PHONY: check-tcg
check-tcg: $(RUN_TCG_TARGET_RULES)

.PHONY: clean-tcg
clean-tcg: $(CLEAN_TCG_TARGET_RULES)

# Python venv for running tests

.PHONY: check-venv check-avocado check-acceptance check-acceptance-deprecated-warning

TESTS_VENV_DIR=$(BUILD_DIR)/tests/venv
TESTS_VENV_REQ=$(SRC_PATH)/tests/requirements.txt
TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results
ifndef AVOCADO_TESTS
	AVOCADO_TESTS=tests/avocado
endif
# Controls the output generated by Avocado when running tests.
# Any number of comma-separated loggers are accepted. For more
# information please refer to "avocado --help".
AVOCADO_SHOW=app
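# (e.g. overriding this with AVOCADO_SHOW=app,test on the make command line
# would also stream per-test output; check "avocado --help" for the stream
# names actually supported by the installed Avocado version)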
|
|
ifndef AVOCADO_TAGS
|
|
AVOCADO_CMDLINE_TAGS=$(patsubst %-softmmu,-t arch:%, \
|
|
$(filter %-softmmu,$(TARGETS)))
|
|
else
|
|
AVOCADO_CMDLINE_TAGS=$(addprefix -t , $(AVOCADO_TAGS))
|
|
endif
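# (for illustration: invoking "make check-avocado AVOCADO_TAGS=arch:x86_64"
# would pass "-t arch:x86_64" to avocado instead of deriving the tags from
# the configured softmmu targets)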

$(TESTS_VENV_DIR): $(TESTS_VENV_REQ)
	$(call quiet-command, \
            $(PYTHON) -m venv $@, \
            VENV, $@)
	$(call quiet-command, \
            $(TESTS_VENV_DIR)/bin/python -m pip -q install -r $(TESTS_VENV_REQ), \
            PIP, $(TESTS_VENV_REQ))
	$(call quiet-command, touch $@)

$(TESTS_RESULTS_DIR):
	$(call quiet-command, mkdir -p $@, \
            MKDIR, $@)

check-venv: $(TESTS_VENV_DIR)

FEDORA_31_ARCHES_TARGETS=$(patsubst %-softmmu,%, $(filter %-softmmu,$(TARGETS)))
FEDORA_31_ARCHES_CANDIDATES=$(patsubst ppc64,ppc64le,$(FEDORA_31_ARCHES_TARGETS))
FEDORA_31_ARCHES := x86_64 aarch64 ppc64le s390x
FEDORA_31_DOWNLOAD=$(filter $(FEDORA_31_ARCHES),$(FEDORA_31_ARCHES_CANDIDATES))
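# (e.g. a configured ppc64-softmmu target maps to the ppc64le Fedora 31 image
# above, and architectures without a matching image are simply skipped)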

# download one specific Fedora 31 image
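# (e.g. "make get-vm-image-fedora-31-s390x" fetches just the s390x image)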
get-vm-image-fedora-31-%: check-venv
	$(call quiet-command, \
             $(TESTS_VENV_DIR)/bin/python -m avocado vmimage get \
             --distro=fedora --distro-version=31 --arch=$*, \
             "AVOCADO", "Downloading avocado tests VM image for $*")

# download all vm images, according to defined targets
get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, $(FEDORA_31_DOWNLOAD))

check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images
	$(call quiet-command, \
            $(TESTS_VENV_DIR)/bin/python -m avocado \
            --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \
            $(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \
            --filter-by-tags-include-empty-key) \
            $(AVOCADO_CMDLINE_TAGS) \
            $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \
            "AVOCADO", "tests/avocado")

check-acceptance-deprecated-warning:
	@echo
	@echo "Note '$(MAKE) check-acceptance' is deprecated, use '$(MAKE) check-avocado' instead."
	@echo

check-acceptance: check-acceptance-deprecated-warning | check-avocado

# Consolidated targets

.PHONY: check check-clean get-vm-images
check:

check-build: run-ninja

check-clean:
	rm -rf $(TESTS_VENV_DIR) $(TESTS_RESULTS_DIR)

clean: check-clean

endif