354b5c19b3
Add an explicit test to check that expected memory values are read/written. 8, 16 and 32-bit load/store are tested for all architectures; 64 and 128-bit load/store are tested for aarch64/x64, and atomic operations (8, 16, 32, 64) are tested for x64 only.

By default, atomic accesses are non-atomic if a single cpu is running, so we force creation of a second one by creating a new thread first.

The load/store helpers code path can't be triggered easily in user mode (no softmmu), so we can't test it here.

The output of test-plugin-mem-access.c is the list of patterns expected in the plugin output. By reading stdout, we can compare it to the plugin's output and get a multiarch test.

Can be run with:

make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so

Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20240910172033.1427812-7-pierrick.bouvier@linaro.org>
Message-Id: <20240916085400.1046925-10-alex.bennee@linaro.org>
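For illustration only, a minimal sketch of the self-checking idea described above, assuming a hypothetical output format and helper name (this is not the actual test-plugin-mem-access.c): each access first prints the pattern the plugin is expected to emit, so the test's stdout can be compared against the plugin's output.

/* Sketch, not the real test source: announce the expected plugin
 * pattern, then perform the access it describes. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mem32;

static void store_u32(void)
{
    /* expected pattern first, so stdout can be diffed against the
     * plugin trace produced for the store below */
    printf("store_u32: store, %p, 0xf1f2f3f4\n", (void *)&mem32);
    mem32 = 0xf1f2f3f4;
}

int main(void)
{
    store_u32();
    return 0;
}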
Makefile · 186 lines · 5.6 KiB
# -*- Mode: makefile -*-
#
# Multiarch Tests - included from tests/tcg/Makefile.target
#
# These tests are plain C and built without any architecture specific code.
#

MULTIARCH_SRC=$(SRC_PATH)/tests/tcg/multiarch

# Set search path for all sources
VPATH += $(MULTIARCH_SRC)
MULTIARCH_SRCS = $(notdir $(wildcard $(MULTIARCH_SRC)/*.c))
ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
VPATH += $(MULTIARCH_SRC)/linux
MULTIARCH_SRCS += $(notdir $(wildcard $(MULTIARCH_SRC)/linux/*.c))
endif
MULTIARCH_TESTS = $(MULTIARCH_SRCS:.c=)

#
# The following are any additional rules needed to build things
#

float_%: LDFLAGS+=-lm
float_%: float_%.c libs/float_helpers.c
	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< $(MULTIARCH_SRC)/libs/float_helpers.c -o $@ $(LDFLAGS)

run-float_%: float_%
	$(call run-test,$<, $(QEMU) $(QEMU_OPTS) $<)
	$(call conditional-diff-out,$<,$(SRC_PATH)/tests/tcg/$(TARGET_NAME)/$<.ref)

testthread: LDFLAGS+=-lpthread

threadcount: LDFLAGS+=-lpthread

signals: LDFLAGS+=-lrt -lpthread

munmap-pthread: CFLAGS+=-pthread
munmap-pthread: LDFLAGS+=-pthread

vma-pthread: CFLAGS+=-pthread
vma-pthread: LDFLAGS+=-pthread

# The vma-pthread seems very sensitive on gitlab and we currently
# don't know if its exposing a real bug or the test is flaky.
ifneq ($(GITLAB_CI),)
run-vma-pthread: vma-pthread
	$(call skip-test, $<, "flaky on CI?")
run-plugin-vma-pthread-with-%: vma-pthread
	$(call skip-test, $<, "flaky on CI?")
endif

run-test-mmap: test-mmap
	$(call run-test, test-mmap, $(QEMU) $<, $< (default))

ifneq ($(GDB),)
GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py

run-gdbstub-sha1: sha1
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/sha1.py, \
		basic gdbstub support)

run-gdbstub-qxfer-auxv-read: sha1
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/test-qxfer-auxv-read.py, \
		basic gdbstub qXfer:auxv:read support)

run-gdbstub-qxfer-siginfo-read: segfault
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin "$< -s" --test $(MULTIARCH_SRC)/gdbstub/test-qxfer-siginfo-read.py, \
		basic gdbstub qXfer:siginfo:read support)

run-gdbstub-proc-mappings: sha1
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/test-proc-mappings.py, \
		proc mappings support)

run-gdbstub-thread-breakpoint: testthread
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/test-thread-breakpoint.py, \
		hitting a breakpoint on non-main thread)

run-gdbstub-registers: sha512
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/registers.py, \
		checking register enumeration)

run-gdbstub-prot-none: prot-none
	$(call run-test, $@, env PROT_NONE_PY=1 $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/prot-none.py, \
		accessing PROT_NONE memory)

run-gdbstub-catch-syscalls: catch-syscalls
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/catch-syscalls.py, \
		hitting a syscall catchpoint)

run-gdbstub-follow-fork-mode-child: follow-fork-mode
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-child.py, \
		following children on fork)

run-gdbstub-follow-fork-mode-parent: follow-fork-mode
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(GDB) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-parent.py, \
		following parents on fork)

else
run-gdbstub-%:
	$(call skip-test, "gdbstub test $*", "need working gdb with $(patsubst -%,,$(TARGET_NAME)) support")
endif
EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \
	      run-gdbstub-proc-mappings run-gdbstub-thread-breakpoint \
	      run-gdbstub-registers run-gdbstub-prot-none \
	      run-gdbstub-catch-syscalls run-gdbstub-follow-fork-mode-child \
	      run-gdbstub-follow-fork-mode-parent \
	      run-gdbstub-qxfer-siginfo-read

# ARM Compatible Semi Hosting Tests
#
# Despite having ARM in the name we actually have several
# architectures that implement it. We gate the tests on the feature
# appearing in config.
#
ifeq ($(CONFIG_ARM_COMPATIBLE_SEMIHOSTING),y)
VPATH += $(MULTIARCH_SRC)/arm-compat-semi

# Add -I path back to TARGET_NAME for semicall.h
semihosting: CFLAGS+=-I$(SRC_PATH)/tests/tcg/$(TARGET_NAME)

run-semihosting: semihosting
	$(call run-test,$<,$(QEMU) $< 2> $<.err)

run-plugin-semihosting-with-%:
	$(call run-test, $@, $(QEMU) $(QEMU_OPTS) \
		-plugin $(PLUGIN_LIB)/$(call extract-plugin,$@) \
		$(call strip-plugin,$<) 2> $<.err, \
		$< with $*)

semiconsole: CFLAGS+=-I$(SRC_PATH)/tests/tcg/$(TARGET_NAME)

run-semiconsole: semiconsole
	$(call skip-test, $<, "MANUAL ONLY")

run-plugin-semiconsole-with-%:
	$(call skip-test, $<, "MANUAL ONLY")

TESTS += semihosting semiconsole
endif

# Test plugin memory access instrumentation
run-plugin-test-plugin-mem-access-with-libmem.so: \
	PLUGIN_ARGS=$(COMMA)print-accesses=true
run-plugin-test-plugin-mem-access-with-libmem.so: \
	CHECK_PLUGIN_OUTPUT_COMMAND= \
	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-output.sh \
	$(QEMU) $<

test-plugin-mem-access: CFLAGS+=-pthread -O0
test-plugin-mem-access: LDFLAGS+=-pthread -O0

# Update TESTS
TESTS += $(MULTIARCH_TESTS)
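The -pthread flags on test-plugin-mem-access above exist because, as the commit message notes, atomic accesses are treated as non-atomic while only a single cpu is running, so the test spawns a thread before performing them. A rough sketch of that idea, with hypothetical names (not the actual test source):

/* Sketch only: create a second thread so atomics stay atomic under
 * QEMU user mode, then perform the instrumented atomic access. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t mem64;

/* the helper thread only needs to exist, it does no work */
static void *idle_thread(void *arg)
{
    return arg;
}

static void run_atomic_ops(void)
{
    pthread_t t;
    pthread_create(&t, NULL, idle_thread, NULL); /* force a second cpu */
    atomic_fetch_add(&mem64, 0x1122334455667788ull); /* instrumented access */
    pthread_join(t, NULL);
}

int main(void)
{
    run_atomic_ops();
    return 0;
}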