qemu/tests/tcg/aarch64/Makefile.target


# -*- Mode: makefile -*-
#
# AArch64 specific tweaks
ARM_SRC=$(SRC_PATH)/tests/tcg/arm
VPATH += $(ARM_SRC)
AARCH64_SRC=$(SRC_PATH)/tests/tcg/aarch64
VPATH += $(AARCH64_SRC)
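# Sources are resolved through VPATH in both the arm and aarch64 test
# directories, so tests shared with the 32-bit arm target can be reused here
# without copying them.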
# Base architecture tests
AARCH64_TESTS=fcvt pcalign-a64
fcvt: LDFLAGS+=-lm
run-fcvt: fcvt
	$(call run-test,$<,$(QEMU) $<, "$< on $(TARGET_NAME)")
	$(call diff-out,$<,$(AARCH64_SRC)/fcvt.ref)
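# Note: diff-out compares the output captured from the fcvt run against the
# reference file fcvt.ref, so any change to the printed format needs a
# matching update to the reference output.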
config-cc.mak: Makefile
	$(quiet-@)( \
	    $(call cc-option,-march=armv8.1-a+sve,         CROSS_CC_HAS_SVE); \
	    $(call cc-option,-march=armv8.1-a+sve2,        CROSS_CC_HAS_SVE2); \
	    $(call cc-option,-march=armv8.2-a,             CROSS_CC_HAS_ARMV8_2); \
	    $(call cc-option,-march=armv8.3-a,             CROSS_CC_HAS_ARMV8_3); \
	    $(call cc-option,-march=armv8.5-a,             CROSS_CC_HAS_ARMV8_5); \
	    $(call cc-option,-mbranch-protection=standard, CROSS_CC_HAS_ARMV8_BTI); \
	    $(call cc-option,-march=armv8.5-a+memtag,      CROSS_CC_HAS_ARMV8_MTE); \
	    $(call cc-option,-Wa$(COMMA)-march=armv9-a+sme, CROSS_AS_HAS_ARMV9_SME)) 3> config-cc.mak
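# Each cc-option probe above checks whether the cross compiler accepts the
# given flag and, on success, writes a variable definition to fd 3, which the
# rule redirects into config-cc.mak.  A successful probe typically leaves a
# line along the lines of
#
#   CROSS_CC_HAS_SVE=y
#
# (illustrative only; the exact helper lives in the generic tests/tcg make
# machinery), and the ifneq guards below simply test that the variable is
# non-empty.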
-include config-cc.mak
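# dcpop exercises the v8.2 DC CVAP (clean to Point of Persistence) instruction
# and dcpodp its v8.5 DC CVADP (Point of Deep Persistence) counterpart, hence
# the different -march requirements.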
ifneq ($(CROSS_CC_HAS_ARMV8_2),)
AARCH64_TESTS += dcpop
dcpop: CFLAGS += -march=armv8.2-a
endif
ifneq ($(CROSS_CC_HAS_ARMV8_5),)
AARCH64_TESTS += dcpodp
dcpodp: CFLAGS += -march=armv8.5-a
endif
# Pauth Tests
ifneq ($(CROSS_CC_HAS_ARMV8_3),)
AARCH64_TESTS += pauth-1 pauth-2 pauth-4 pauth-5
pauth-%: CFLAGS += -march=armv8.3-a
run-pauth-%: QEMU_OPTS += -cpu max
endif
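# (-cpu max is requested for the pauth runs above because pointer
# authentication is only present on a sufficiently capable CPU model.)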
# BTI Tests
# bti-1 tests the ELF notes, so we require special compiler support.
ifneq ($(CROSS_CC_HAS_ARMV8_BTI),)
AARCH64_TESTS += bti-1 bti-3
bti-1 bti-3: CFLAGS += -mbranch-protection=standard
bti-1 bti-3: LDFLAGS += -nostdlib
endif
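# (bti-1 and bti-3 link with -nostdlib so that the GNU property notes seen by
# the loader are the ones the tests themselves provide, not the C runtime's;
# at least that appears to be the intent.)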
# bti-2 tests PROT_BTI, so no special compiler support is required.
AARCH64_TESTS += bti-2
# MTE Tests
ifneq ($(CROSS_CC_HAS_ARMV8_MTE),)
AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6 mte-7
mte-%: CFLAGS += -march=armv8.5-a+memtag
endif
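# (The memtag flag above only affects code generation; the mte-* tests enable
# and probe MTE at run time themselves, via the usual prctl() interface.)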
# SME Tests
ifneq ($(CROSS_AS_HAS_ARMV9_SME),)
AARCH64_TESTS += sme-outprod1
endif
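# (sme-outprod1 is gated on assembler support only, CROSS_AS_HAS_ARMV9_SME,
# since its SME code appears to be hand-written assembly rather than
# compiler-generated.)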
# System Register Tests
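# sysregs sanity-checks the ID register fields that QEMU exposes to userspace,
# which are expected to track what the Linux kernel exposes.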
AARCH64_TESTS += sysregs
ifneq ($(CROSS_CC_HAS_SVE),)
# SVE ioctl test
AARCH64_TESTS += sve-ioctls
sve-ioctls: CFLAGS+=-march=armv8.1-a+sve
AARCH64_TESTS += test-aes
test-aes: CFLAGS += -O -march=armv8-a+aes
test-aes: test-aes-main.c.inc
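# test-aes-main.c.inc is the architecture-independent AES test driver shared
# with the other targets that provide AES instructions; this file only adds
# the AArch64-specific build flags.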
# Vector SHA1
sha1-vector: CFLAGS=-O3
sha1-vector: sha1.c
	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
run-sha1-vector: sha1-vector run-sha1
	$(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<)
	$(call diff-out, sha1-vector, sha1.out)
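# The vectorised binary must produce output identical to the plain multiarch
# sha1 test, so run-sha1-vector depends on run-sha1 and diffs against its
# captured output (sha1.out).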
TESTS += sha1-vector
# Vector versions of sha512 (-O3 triggers vectorisation)
sha512-vector: CFLAGS=-O3
sha512-vector: sha512.c
	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
TESTS += sha512-vector
ifneq ($(CROSS_CC_HAS_SVE),)
sha512-sve: CFLAGS=-O3 -march=armv8.1-a+sve
sha512-sve: sha512.c
	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
TESTS += sha512-sve
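# sha512-sve rebuilds the same sha512.c with SVE enabled so the
# autovectoriser emits SVE loads and stores; this is handy for exercising SVE
# memory paths (for example under TCG plugins) without hand-written assembly.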
endif
ifeq ($(HOST_GDB_SUPPORTS_ARCH),y)
GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py
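# These targets drive QEMU's gdbstub from the host gdb via the generic
# run-test.py harness; they are only attempted when the host gdb actually
# understands the aarch64 architecture (HOST_GDB_SUPPORTS_ARCH above).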
run-gdbstub-sysregs: sysregs
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(HAVE_GDB_BIN) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(AARCH64_SRC)/gdbstub/test-sve.py, \
	basic gdbstub SVE support)
run-gdbstub-sve-ioctls: sve-ioctls
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(HAVE_GDB_BIN) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(AARCH64_SRC)/gdbstub/test-sve-ioctl.py, \
	basic gdbstub SVE ZLEN support)
EXTRA_RUNS += run-gdbstub-sysregs run-gdbstub-sve-ioctls
endif
endif
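# test-826 is, presumably, a regression test named after the issue-tracker
# entry it reproduces; it needs SVE2 support in the cross compiler to build.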
ifneq ($(CROSS_CC_HAS_SVE2),)
AARCH64_TESTS += test-826
test-826: CFLAGS+=-march=armv8.1-a+sve2
endif
TESTS += $(AARCH64_TESTS)