From 8491026a08b417b2d4070f7c373dcb43134c5312 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 5 Nov 2024 16:15:24 +0100 Subject: [PATCH 01/15] linux-user: Fix setreuid and setregid to use direct syscalls The commit fd6f7798ac30 ("linux-user: Use direct syscalls for setuid(), etc") added direct syscall wrappers for setuid(), setgid(), etc since the system calls have different semantics than the libc functions. Add and use the corresponding wrappers for setreuid and setregid which were missed in that commit. This fixes the build of the debian package of the uid_wrapper library (https://cwrap.org/uid_wrapper.html) when running linux-user. Cc: qemu-stable@nongnu.org Signed-off-by: Helge Deller Reviewed-by: Richard Henderson Reviewed-by: Ilya Leoshkevich Message-ID: Signed-off-by: Richard Henderson --- linux-user/syscall.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 59b2080b98..0279f23576 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -7233,12 +7233,24 @@ static inline int tswapid(int id) #else #define __NR_sys_setgroups __NR_setgroups #endif +#ifdef __NR_sys_setreuid32 +#define __NR_sys_setreuid __NR_setreuid32 +#else +#define __NR_sys_setreuid __NR_setreuid +#endif +#ifdef __NR_sys_setregid32 +#define __NR_sys_setregid __NR_setregid32 +#else +#define __NR_sys_setregid __NR_setregid +#endif _syscall1(int, sys_setuid, uid_t, uid) _syscall1(int, sys_setgid, gid_t, gid) _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist) +_syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid); +_syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid); void syscall_init(void) { @@ -11932,9 +11944,9 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return get_errno(high2lowgid(getegid())); #endif case TARGET_NR_setreuid: - return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); + return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2))); case TARGET_NR_setregid: - return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); + return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2))); case TARGET_NR_getgroups: { /* the same code as for TARGET_NR_getgroups32 */ int gidsetsize = arg1; @@ -12264,11 +12276,11 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #endif #ifdef TARGET_NR_setreuid32 case TARGET_NR_setreuid32: - return get_errno(setreuid(arg1, arg2)); + return get_errno(sys_setreuid(arg1, arg2)); #endif #ifdef TARGET_NR_setregid32 case TARGET_NR_setregid32: - return get_errno(setregid(arg1, arg2)); + return get_errno(sys_setregid(arg1, arg2)); #endif #ifdef TARGET_NR_getgroups32 case TARGET_NR_getgroups32: From f27550804688da43c6e0d87b2f9e143adbf76271 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 12 Nov 2024 06:12:32 -0800 Subject: [PATCH 02/15] target/arm: Drop user-only special case in sve_stN_r This path is reachable with plugins enabled, and provoked with run-plugin-catch-syscalls-with-libinline.so. 
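As an aside on the setreuid()/setregid() change in patch 01 above: the reason raw syscalls are needed at all is that glibc's set*id() wrappers implement the POSIX per-process semantics (the credential change is broadcast to every thread), while the kernel syscall changes only the calling thread, which is what a guest expects from its own syscall. Below is a minimal, hypothetical standalone demo of the two call styles, not part of any patch in this series; note that the SYS_setreuid name/number varies by architecture (e.g. setreuid32 on 32-bit targets), which is exactly why the patch selects between __NR_setreuid and __NR_setreuid32.

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
        uid_t uid = getuid();

        /* libc wrapper: POSIX semantics, applied to all threads of the process. */
        if (setreuid(-1, uid) != 0) {
            perror("setreuid (libc)");
        }

        /* Raw syscall: kernel semantics, calling thread only -- roughly what
         * the sys_setreuid() wrapper added by the patch boils down to. */
        if (syscall(SYS_setreuid, (uid_t)-1, uid) != 0) {
            perror("setreuid (raw syscall)");
        }
        return 0;
    }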
Cc: qemu-stable@nongnu.org Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson Message-ID: <20241112141232.321354-1-richard.henderson@linaro.org> --- target/arm/tcg/sve_helper.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index f1ee0e060f..904296705c 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -6317,9 +6317,6 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, flags = info.page[0].flags | info.page[1].flags; if (unlikely(flags != 0)) { -#ifdef CONFIG_USER_ONLY - g_assert_not_reached(); -#else /* * At least one page includes MMIO. * Any bus operation can fail with cpu_transaction_failed, @@ -6350,7 +6347,6 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, } while (reg_off & 63); } while (reg_off <= reg_last); return; -#endif } mem_off = info.mem_off_first[0]; From 2a339fee450638b512c5122281cb5ab49331cfb8 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 11 Nov 2024 06:45:40 -0800 Subject: [PATCH 03/15] accel/tcg: Fix user-only probe_access_internal plugin check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The acc_flag check for write should have been against PAGE_WRITE_ORG, not PAGE_WRITE. But it is better to combine two acc_flag checks to a single check against access_type. This matches the system code in cputlb.c. Cc: qemu-stable@nongnu.org Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2647 Signed-off-by: Richard Henderson Message-Id: 20241111145002.144995-1-richard.henderson@linaro.org Reviewed-by: Alex Bennée --- accel/tcg/user-exec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index aa8af52cc3..06016eb030 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -800,7 +800,7 @@ static int probe_access_internal(CPUArchState *env, vaddr addr, if (guest_addr_valid_untagged(addr)) { int page_flags = page_get_flags(addr); if (page_flags & acc_flag) { - if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE) + if (access_type != MMU_INST_FETCH && cpu_plugin_mem_cbs_enabled(env_cpu(env))) { return TLB_MMIO; } From fb7f3572b111ffb6c2dd2c7f6c5b4dc57dd8a3f5 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 23 Oct 2024 02:24:31 +0200 Subject: [PATCH 04/15] linux-user: Tolerate CONFIG_LSM_MMAP_MIN_ADDR Running qemu-i386 on a system running with SELinux in enforcing mode (more precisely: s390x trixie container on Fedora 40) fails with: qemu-i386: tests/tcg/i386-linux-user/sigreturn-sigmask: Unable to find a guest_base to satisfy all guest address mapping requirements 00000000-ffffffff The reason is that main() determines mmap_min_addr from /proc/sys/vm/mmap_min_addr, but SELinux additionally defines CONFIG_LSM_MMAP_MIN_ADDR, which is normally larger: 32K or 64K, but, in general, can be anything. There is no portable way to query its value: /boot/config, /proc/config and /proc/config.gz are distro- and environment-specific. Once the identity map fails, the magnitude of guest_base does not matter, so fix by starting the search from 1M or 1G. 
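To make the last point concrete: the generic floor is easy to read back from procfs, but the additional LSM floor is not exposed there, so a mapping attempt that respects vm.mmap_min_addr can still be refused by the kernel. A small illustrative reader follows; it assumes only the documented /proc/sys/vm/mmap_min_addr file, the same file QEMU's main() consults, and is not part of the patch.

    #include <stdio.h>

    int main(void)
    {
        unsigned long min_addr = 0;
        FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");

        if (f) {
            if (fscanf(f, "%lu", &min_addr) != 1) {
                min_addr = 0;
            }
            fclose(f);
        }
        /* CONFIG_LSM_MMAP_MIN_ADDR (commonly 32K or 64K under SELinux) is
         * not reflected in this value, so low mappings at or above it may
         * still be refused. */
        printf("vm.mmap_min_addr = %lu\n", min_addr);
        return 0;
    }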
Cc: qemu-stable@nongnu.org Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2598 Suggested-by: Richard Henderson Signed-off-by: Ilya Leoshkevich Message-ID: <20241023002558.34589-1-iii@linux.ibm.com> Signed-off-by: Richard Henderson --- linux-user/elfload.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 6cef8db3b5..d6ad77d27d 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -2898,7 +2898,7 @@ static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base, static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root, uintptr_t align, uintptr_t brk) { - uintptr_t last = mmap_min_addr; + uintptr_t last = sizeof(uintptr_t) == 4 ? MiB : GiB; uintptr_t base, skip; while (true) { From ef7e76a2cdc116719ad9c67d4f44dee0016f923c Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Fri, 8 Nov 2024 15:50:04 +0100 Subject: [PATCH 05/15] tests/tcg: Test that sigreturn() does not corrupt the signal mask Add a small test to prevent regressions. Signed-off-by: Ilya Leoshkevich Message-ID: <20241108145237.37377-2-iii@linux.ibm.com> Signed-off-by: Richard Henderson --- tests/tcg/multiarch/Makefile.target | 3 ++ tests/tcg/multiarch/sigreturn-sigmask.c | 51 +++++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 tests/tcg/multiarch/sigreturn-sigmask.c diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target index 78b83d5575..18d3cf4ae0 100644 --- a/tests/tcg/multiarch/Makefile.target +++ b/tests/tcg/multiarch/Makefile.target @@ -42,6 +42,9 @@ munmap-pthread: LDFLAGS+=-pthread vma-pthread: CFLAGS+=-pthread vma-pthread: LDFLAGS+=-pthread +sigreturn-sigmask: CFLAGS+=-pthread +sigreturn-sigmask: LDFLAGS+=-pthread + # The vma-pthread seems very sensitive on gitlab and we currently # don't know if its exposing a real bug or the test is flaky. ifneq ($(GITLAB_CI),) diff --git a/tests/tcg/multiarch/sigreturn-sigmask.c b/tests/tcg/multiarch/sigreturn-sigmask.c new file mode 100644 index 0000000000..e6cc904898 --- /dev/null +++ b/tests/tcg/multiarch/sigreturn-sigmask.c @@ -0,0 +1,51 @@ +/* + * Test that sigreturn() does not corrupt the signal mask. + * Block SIGUSR2 and handle SIGUSR1. + * Then sigwait() SIGUSR2, which relies on it remaining blocked. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include +#include + +int seen_sig = -1; + +static void signal_func(int sig) +{ + seen_sig = sig; +} + +static void *thread_func(void *arg) +{ + kill(getpid(), SIGUSR2); + return NULL; +} + +int main(void) +{ + struct sigaction act = { + .sa_handler = signal_func, + }; + pthread_t thread; + sigset_t set; + int sig; + + assert(sigaction(SIGUSR1, &act, NULL) == 0); + + assert(sigemptyset(&set) == 0); + assert(sigaddset(&set, SIGUSR2) == 0); + assert(sigprocmask(SIG_BLOCK, &set, NULL) == 0); + + kill(getpid(), SIGUSR1); + assert(seen_sig == SIGUSR1); + + assert(pthread_create(&thread, NULL, thread_func, NULL) == 0); + assert(sigwait(&set, &sig) == 0); + assert(sig == SIGUSR2); + assert(pthread_join(thread, NULL) == 0); + + return EXIT_SUCCESS; +} From 7ba055b49b74c4d2f4a338c5198485bdff373fb1 Mon Sep 17 00:00:00 2001 From: Pierrick Bouvier Date: Fri, 25 Oct 2024 10:58:56 -0700 Subject: [PATCH 06/15] target/i386: fix hang when using slow path for ptw_setl When instrumenting memory accesses for plugin, we force memory accesses to use the slow path for mmu [1]. This create a situation where we end up calling ptw_setl_slow. 
This was fixed recently in [2] but the issue still could appear out of plugins use case. Since this function gets called during a cpu_exec, start_exclusive then hangs. This exclusive section was introduced initially for security reasons [3]. I suspect this code path was never triggered, because ptw_setl_slow would always be called transitively from cpu_exec, resulting in a hang. [1] https://gitlab.com/qemu-project/qemu/-/commit/6d03226b42247b68ab2f0b3663e0f624335a4055 [2] https://gitlab.com/qemu-project/qemu/-/commit/115ade42d50144c15b74368d32dc734ea277d853 [3] https://gitlab.com/qemu-project/qemu/-/issues/279 Fixes: https://gitlab.com/qemu-project/qemu/-/issues/2566 Signed-off-by: Pierrick Bouvier Reviewed-by: Richard Henderson Message-ID: <20241025175857.2554252-2-pierrick.bouvier@linaro.org> Signed-off-by: Richard Henderson --- target/i386/tcg/sysemu/excp_helper.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index 02d3486421..b1f40040f8 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -107,6 +107,10 @@ static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new) { uint32_t cmp; + CPUState *cpu = env_cpu(in->env); + /* We are in cpu_exec, and start_exclusive can't be called directly.*/ + g_assert(cpu->running); + cpu_exec_end(cpu); /* Does x86 really perform a rmw cycle on mmio for ptw? */ start_exclusive(); cmp = cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0); @@ -114,6 +118,7 @@ static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new) cpu_stl_mmuidx_ra(in->env, in->gaddr, new, in->ptw_idx, 0); } end_exclusive(); + cpu_exec_start(cpu); return cmp == old; } From 779f30a01af8566780cefc8639505b758950afb3 Mon Sep 17 00:00:00 2001 From: Pierrick Bouvier Date: Fri, 25 Oct 2024 10:58:57 -0700 Subject: [PATCH 07/15] cpu: ensure we don't call start_exclusive from cpu_exec MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Richard Henderson Signed-off-by: Pierrick Bouvier Reviewed-by: Philippe Mathieu-Daudé Message-ID: <20241025175857.2554252-3-pierrick.bouvier@linaro.org> Signed-off-by: Richard Henderson --- cpu-common.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cpu-common.c b/cpu-common.c index 6b262233a3..0d607bbe49 100644 --- a/cpu-common.c +++ b/cpu-common.c @@ -194,6 +194,9 @@ void start_exclusive(void) CPUState *other_cpu; int running_cpus; + /* Ensure we are not running, or start_exclusive will be blocked. */ + g_assert(!current_cpu->running); + if (current_cpu->exclusive_context_count) { current_cpu->exclusive_context_count++; return; From c81d1fafa6233448bcc2d8fcd2ba63a4ae834f3a Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 12 Nov 2024 11:32:01 -0800 Subject: [PATCH 08/15] linux-user: Honor elf alignment when placing images Most binaries don't actually depend on more than page alignment, but any binary can request it. Not honoring this was a bug. This became obvious when gdb reported Failed to read a valid object file image from memory when examining some vdso which are marked as needing more than page alignment. 
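The mechanism in the change below is worth spelling out: without MAP_FIXED there is no way to ask the host kernel for a reservation more aligned than its page size, so the loader over-reserves by align - 1 bytes and then unmaps the unaligned head and tail. Here is a stripped-down sketch of that trick using plain mmap()/munmap() (the patch itself goes through target_mmap()/target_munmap() and abi_ulong arithmetic); it assumes align is a nonzero power of two and reserve_aligned() is just an illustrative name.

    #define _DEFAULT_SOURCE
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void *reserve_aligned(size_t size, size_t align)
    {
        size_t full = size + align - 1;
        uint8_t *base = mmap(NULL, full, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        uint8_t *aligned;

        if (base == MAP_FAILED) {
            return NULL;
        }
        aligned = (uint8_t *)(((uintptr_t)base + align - 1) & -(uintptr_t)align);

        /* Trim the unaligned head ... */
        if (aligned != base) {
            munmap(base, aligned - base);
        }
        /* ... and whatever is left over past the aligned reservation. */
        if (aligned + size != base + full) {
            munmap(aligned + size, (base + full) - (aligned + size));
        }
        return aligned;
    }

    int main(void)
    {
        /* e.g. a 64K-aligned 1M PROT_NONE reservation on a 4K-page host */
        return reserve_aligned(1 << 20, 64 * 1024) ? 0 : 1;
    }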
Signed-off-by: Richard Henderson --- linux-user/elfload.c | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/linux-user/elfload.c b/linux-user/elfload.c index d6ad77d27d..90e79a01b4 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -3179,7 +3179,8 @@ static void load_elf_image(const char *image_name, const ImageSource *src, char **pinterp_name) { g_autofree struct elf_phdr *phdr = NULL; - abi_ulong load_addr, load_bias, loaddr, hiaddr, error; + abi_ulong load_addr, load_bias, loaddr, hiaddr, error, align; + size_t reserve_size, align_size; int i, prot_exec; Error *err = NULL; @@ -3263,6 +3264,9 @@ static void load_elf_image(const char *image_name, const ImageSource *src, load_addr = loaddr; + align = pow2ceil(info->alignment); + info->alignment = align; + if (pinterp_name != NULL) { if (ehdr->e_type == ET_EXEC) { /* @@ -3271,8 +3275,6 @@ static void load_elf_image(const char *image_name, const ImageSource *src, */ probe_guest_base(image_name, loaddr, hiaddr); } else { - abi_ulong align; - /* * The binary is dynamic, but we still need to * select guest_base. In this case we pass a size. @@ -3290,10 +3292,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src, * Since we do not have complete control over the guest * address space, we prefer the kernel to choose some address * rather than force the use of LOAD_ADDR via MAP_FIXED. - * But without MAP_FIXED we cannot guarantee alignment, - * only suggest it. */ - align = pow2ceil(info->alignment); if (align) { load_addr &= -align; } @@ -3317,13 +3316,35 @@ static void load_elf_image(const char *image_name, const ImageSource *src, * In both cases, we will overwrite pages in this range with mappings * from the executable. */ - load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE, + reserve_size = (size_t)hiaddr - loaddr + 1; + align_size = reserve_size; + + if (ehdr->e_type != ET_EXEC && align > qemu_real_host_page_size()) { + align_size += align - 1; + } + + load_addr = target_mmap(load_addr, align_size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0), -1, 0); if (load_addr == -1) { goto exit_mmap; } + + if (align_size != reserve_size) { + abi_ulong align_addr = ROUND_UP(load_addr, align); + abi_ulong align_end = align_addr + reserve_size; + abi_ulong load_end = load_addr + align_size; + + if (align_addr != load_addr) { + target_munmap(load_addr, align_addr - load_addr); + } + if (align_end != load_end) { + target_munmap(align_end, load_end - align_end); + } + load_addr = align_addr; + } + load_bias = load_addr - loaddr; if (elf_is_fdpic(ehdr)) { From dff406754efdb13f401e2b39f869541286903619 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 12 Nov 2024 11:33:38 -0800 Subject: [PATCH 09/15] linux-user: Drop image_info.alignment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This field is write-only. Use only the function-local variable within load_elf_image. Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- linux-user/elfload.c | 7 +++---- linux-user/qemu.h | 1 - 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 90e79a01b4..ef9cffbe4a 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -3220,7 +3220,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src, * amount of memory to handle that. 
Locate the interpreter, if any. */ loaddr = -1, hiaddr = 0; - info->alignment = 0; + align = 0; info->exec_stack = EXSTACK_DEFAULT; for (i = 0; i < ehdr->e_phnum; ++i) { struct elf_phdr *eppnt = phdr + i; @@ -3234,7 +3234,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src, hiaddr = a; } ++info->nsegs; - info->alignment |= eppnt->p_align; + align |= eppnt->p_align; } else if (eppnt->p_type == PT_INTERP && pinterp_name) { g_autofree char *interp_name = NULL; @@ -3264,8 +3264,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src, load_addr = loaddr; - align = pow2ceil(info->alignment); - info->alignment = align; + align = pow2ceil(align); if (pinterp_name != NULL) { if (ehdr->e_type == ET_EXEC) { diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 895bdd722a..67bc81b149 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -44,7 +44,6 @@ struct image_info { abi_ulong file_string; uint32_t elf_flags; int personality; - abi_ulong alignment; bool exec_stack; /* Generic semihosting knows about these pointers. */ From f19ec28ddc383143573e828d34f190cdda7e9669 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 12 Nov 2024 11:50:39 -0800 Subject: [PATCH 10/15] linux-user/aarch64: Reduce vdso alignment to 4k MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reduce vdso alignment to minimum page size. Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- linux-user/aarch64/Makefile.vdso | 5 +++-- linux-user/aarch64/vdso-be.so | Bin 3224 -> 3224 bytes linux-user/aarch64/vdso-le.so | Bin 3224 -> 3224 bytes 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/linux-user/aarch64/Makefile.vdso b/linux-user/aarch64/Makefile.vdso index 599958116b..c33a679c0f 100644 --- a/linux-user/aarch64/Makefile.vdso +++ b/linux-user/aarch64/Makefile.vdso @@ -5,8 +5,9 @@ VPATH += $(SUBDIR) all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so -LDFLAGS = -nostdlib -shared -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \ - -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld +LDFLAGS = -nostdlib -shared -Wl,-h,linux-vdso.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both \ + -Wl,-z,max-page-size=4096 -Wl,-T,$(SUBDIR)/vdso.ld $(SUBDIR)/vdso-be.so: vdso.S vdso.ld $(CC) -o $@ $(LDFLAGS) -mbig-endian $< diff --git a/linux-user/aarch64/vdso-be.so b/linux-user/aarch64/vdso-be.so index 808206ade824b09d786f6cc34f7cddf80b63130e..d43c3b19cdf6588757f2039f2308a8bce21aed9c 100755 GIT binary patch delta 50 zcmV-20L}lH8JHQ6tpWfLk+0bl4vUoZ1rE*XtsXbvz!6Y1qE4ha#lpM HaR(;_22&J+ delta 49 zcmV-10M7rI8JHQ6sR952k*nPl^~i=KtbcHFtl42<9TUsw2PpfHvz!6Y1qD@cX=ik^ HaR(;_8o3m& From f7150b2151398c9274686d06c2c1e24618aa4cd6 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 12 Nov 2024 11:51:00 -0800 Subject: [PATCH 11/15] linux-user/arm: Reduce vdso alignment to 4k MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reduce vdso alignment to minimum page size. 
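For this and the neighbouring vdso realignment patches, the observable effect is simply that each prebuilt vdso's PT_LOAD program header now advertises p_align = 0x1000 instead of the linker's default maximum page size; readelf -l shows that directly. The sketch below prints the same field in C purely as an illustration: it assumes a native-endian ELF image with e_phentsize equal to the header struct size and does only minimal error checking (for the cross-endian vdso-be images, just use readelf).

    #include <elf.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        unsigned char ident[EI_NIDENT];
        int fd, i;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <vdso.so>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || pread(fd, ident, sizeof(ident), 0) != (ssize_t)sizeof(ident)) {
            fprintf(stderr, "cannot read %s\n", argv[1]);
            return 1;
        }
        if (ident[EI_CLASS] == ELFCLASS64) {
            Elf64_Ehdr eh;
            Elf64_Phdr ph;

            pread(fd, &eh, sizeof(eh), 0);
            for (i = 0; i < eh.e_phnum; i++) {
                pread(fd, &ph, sizeof(ph), eh.e_phoff + (off_t)i * sizeof(ph));
                if (ph.p_type == PT_LOAD) {
                    printf("PT_LOAD p_align = 0x%llx\n",
                           (unsigned long long)ph.p_align);
                }
            }
        } else {
            Elf32_Ehdr eh;
            Elf32_Phdr ph;

            pread(fd, &eh, sizeof(eh), 0);
            for (i = 0; i < eh.e_phnum; i++) {
                pread(fd, &ph, sizeof(ph), eh.e_phoff + (off_t)i * sizeof(ph));
                if (ph.p_type == PT_LOAD) {
                    printf("PT_LOAD p_align = 0x%lx\n",
                           (unsigned long)ph.p_align);
                }
            }
        }
        close(fd);
        return 0;
    }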
Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- linux-user/arm/Makefile.vdso | 2 +- linux-user/arm/vdso-be.so | Bin 2648 -> 2648 bytes linux-user/arm/vdso-le.so | Bin 2648 -> 2648 bytes 3 files changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-user/arm/Makefile.vdso b/linux-user/arm/Makefile.vdso index 2d098a5748..8a24b0e534 100644 --- a/linux-user/arm/Makefile.vdso +++ b/linux-user/arm/Makefile.vdso @@ -6,7 +6,7 @@ VPATH += $(SUBDIR) all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so # Adding -use-blx disables unneeded interworking without actually using blx. -LDFLAGS = -nostdlib -shared -Wl,-use-blx \ +LDFLAGS = -nostdlib -shared -Wl,-use-blx -Wl,-z,max-page-size=4096 \ -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \ -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld diff --git a/linux-user/arm/vdso-be.so b/linux-user/arm/vdso-be.so index 69cafbb956e283e2975bac59a10491c0cbafca57..bed02804a4bd367eb9fd8ca54d0c980103c02245 100755 GIT binary patch delta 49 zcmV-10M7r|6xbAyaRLAkk#lhrGQ`BrU>NUTo0WUr&~YvSTwestvG{WZ25D?qS41(h HaR%oD3DgvN delta 49 zcmV-10M7r|6xbAyaRLDVk#lhrweyTc_Z*p@&&2@VLR1?$m|vtIvG{WZ23l}Oc}8xt HaR%oD4;B=< diff --git a/linux-user/arm/vdso-le.so b/linux-user/arm/vdso-le.so index ad05a1251875ac0c76685e1f9190a7307a8444d1..38d3d51047372391b3125c3f9f6ea5401f04bba1 100755 GIT binary patch delta 49 zcmV-10M7r|6xbAyaRLwkk#lhr8{q*880U1i=t7z4dQ|b*DDU}VvG{WZ25CxDZDwh+ HaR%oD5d#(3 delta 49 zcmV-10M7r|6xbAyaRLAVk#lhrkUMV7Jk`NO!6O#urC$6IB@6+uvG{WZ26R_8MpaO= HaR%oD?$r}} From 399c8082ca0d410a84b8367a8569cee48f1440dd Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 12 Nov 2024 11:51:22 -0800 Subject: [PATCH 12/15] linux-user/loongarch64: Reduce vdso alignment to 4k MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reduce vdso alignment to minimum page size. Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- linux-user/loongarch64/Makefile.vdso | 3 ++- linux-user/loongarch64/vdso.so | Bin 3560 -> 3560 bytes 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/linux-user/loongarch64/Makefile.vdso b/linux-user/loongarch64/Makefile.vdso index 369de13344..1d760b1e47 100644 --- a/linux-user/loongarch64/Makefile.vdso +++ b/linux-user/loongarch64/Makefile.vdso @@ -8,4 +8,5 @@ all: $(SUBDIR)/vdso.so $(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h $(CC) -o $@ -nostdlib -shared -fpic -Wl,-h,linux-vdso.so.1 \ -Wl,--build-id=sha1 -Wl,--hash-style=both \ - -Wl,--no-warn-rwx-segments -Wl,-T,$(SUBDIR)/vdso.ld $< + -Wl,--no-warn-rwx-segments -Wl,-z,max-page-size=4096 \ + -Wl,-T,$(SUBDIR)/vdso.ld $< diff --git a/linux-user/loongarch64/vdso.so b/linux-user/loongarch64/vdso.so index bfaa26f2bfe1aaa01d9a349b8b030ef6323e1f8e..7c2de6c50e706164225e82f652d4becc04c71ff0 100755 GIT binary patch delta 37 tcmaDM{X%-eN=AW+tM-YA3hb&jk8@2 Date: Tue, 12 Nov 2024 11:51:36 -0800 Subject: [PATCH 13/15] linux-user/ppc: Reduce vdso alignment to 4k MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reduce vdso alignment to minimum page size. 
Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- linux-user/ppc/Makefile.vdso | 6 ++++-- linux-user/ppc/vdso-32.so | Bin 3020 -> 3020 bytes linux-user/ppc/vdso-64.so | Bin 3896 -> 3896 bytes linux-user/ppc/vdso-64le.so | Bin 3896 -> 3896 bytes 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/linux-user/ppc/Makefile.vdso b/linux-user/ppc/Makefile.vdso index 3ca3c6b83e..e2b8facbb5 100644 --- a/linux-user/ppc/Makefile.vdso +++ b/linux-user/ppc/Makefile.vdso @@ -6,9 +6,11 @@ VPATH += $(SUBDIR) all: $(SUBDIR)/vdso-32.so $(SUBDIR)/vdso-64.so $(SUBDIR)/vdso-64le.so LDFLAGS32 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-32.ld \ - -Wl,-h,linux-vdso32.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1 + -Wl,-h,linux-vdso32.so.1 -Wl,--hash-style=both \ + -Wl,--build-id=sha1 -Wl,-z,max-page-size=4096 LDFLAGS64 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-64.ld \ - -Wl,-h,linux-vdso64.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1 + -Wl,-h,linux-vdso64.so.1 -Wl,--hash-style=both \ + -Wl,--build-id=sha1 -Wl,-z,max-page-size=4096 $(SUBDIR)/vdso-32.so: vdso.S vdso-32.ld vdso-asmoffset.h $(CC) -o $@ $(LDFLAGS32) -m32 $< diff --git a/linux-user/ppc/vdso-32.so b/linux-user/ppc/vdso-32.so index b19baafb0d38e15b4a24def5c44a6d684714be45..0dc55e0dddff618b954dbb939335e99956daf64a 100755 GIT binary patch delta 42 zcmV+_0M-A@7t9xsCINtvCrSbU5Rr3n6lq1YQP%3b&XBjV4sl%JXqmbIL$UbO3Tx34 A(f|Me delta 42 xcmX>jenxzP8Y9C*buT6$SzIX6c=(tjbAN``v6&a|RP{JtzgDQRW#f-4TmVmR5>WsE diff --git a/linux-user/ppc/vdso-64.so b/linux-user/ppc/vdso-64.so index 913c831b3819fc09912b9b31f7fbe9ee311ae12f..ac1ab2582e4675979ffca3ce90dce17df579ab2a 100755 GIT binary patch delta 38 wcmV+>0NMYz9=INmtpWfLk*~QFqejR%tq=sGFts+qF9Cf{%e>^#vwQ)(4KYR#WB>pF delta 38 wcmV+>0NMYz9=INmtpWi6k*~QF<@TF=qRS8+wHM`Qf0n_&>m=ZivwQ)(4PLhrs{jB1 diff --git a/linux-user/ppc/vdso-64le.so b/linux-user/ppc/vdso-64le.so index 258a03b807c4eca23547d978c16d1ad5ebd08bc5..424abb4290b7d3100e9dede2f3059483608ba703 100755 GIT binary patch delta 38 wcmV+>0NMYz9=INmsR9rHk*mEFj|Vri9^_Z(nV0Nw;)4VN>*aZovwQ)(4Mvj@kpKVy delta 38 wcmV+>0NMYz9=INmsR952k*mEF`$9%&`;)~jB!C2C?=itIoQ(CfvwQ)(4MbHDMgRZ+ From 95c9e2209cc09453cfd49e91321df254ccbf466f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 13 Nov 2024 08:59:54 -0800 Subject: [PATCH 14/15] linux-user/arm: Select vdso for be8 and be32 modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In be8 mode, instructions are little-endian. In be32 mode, instructions are big-endian. 
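To make the distinction concrete: in both be8 and be32 the data sections are big-endian; what differs is the byte order of the instruction stream, so a single big-endian vdso image cannot serve both modes. The following is a hypothetical illustration (the "mov r0, #0" example and the byte layouts are mine, not taken from the patch).

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t insn = 0xe3a00000;   /* A32 encoding of "mov r0, #0" */
        unsigned char be8_text[4], be32_text[4];

        for (int i = 0; i < 4; i++) {
            be8_text[i]  = (insn >> (8 * i)) & 0xff;        /* LE code bytes */
            be32_text[i] = (insn >> (8 * (3 - i))) & 0xff;  /* BE code bytes */
        }
        printf("be8  .text: %02x %02x %02x %02x\n",
               be8_text[0], be8_text[1], be8_text[2], be8_text[3]);
        printf("be32 .text: %02x %02x %02x %02x\n",
               be32_text[0], be32_text[1], be32_text[2], be32_text[3]);
        return 0;
    }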
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2333 Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- linux-user/arm/Makefile.vdso | 9 ++++-- linux-user/arm/meson.build | 13 +++++++-- linux-user/arm/vdso-be32.so | Bin 0 -> 2648 bytes linux-user/arm/{vdso-be.so => vdso-be8.so} | Bin 2648 -> 2648 bytes linux-user/elfload.c | 31 +++++++++++++++++---- 5 files changed, 41 insertions(+), 12 deletions(-) create mode 100755 linux-user/arm/vdso-be32.so rename linux-user/arm/{vdso-be.so => vdso-be8.so} (95%) diff --git a/linux-user/arm/Makefile.vdso b/linux-user/arm/Makefile.vdso index 8a24b0e534..ede489e236 100644 --- a/linux-user/arm/Makefile.vdso +++ b/linux-user/arm/Makefile.vdso @@ -3,15 +3,18 @@ include $(BUILD_DIR)/tests/tcg/arm-linux-user/config-target.mak SUBDIR = $(SRC_PATH)/linux-user/arm VPATH += $(SUBDIR) -all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so +all: $(SUBDIR)/vdso-be8.so $(SUBDIR)/vdso-be32.so $(SUBDIR)/vdso-le.so # Adding -use-blx disables unneeded interworking without actually using blx. LDFLAGS = -nostdlib -shared -Wl,-use-blx -Wl,-z,max-page-size=4096 \ -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \ -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld -$(SUBDIR)/vdso-be.so: vdso.S vdso.ld vdso-asmoffset.h - $(CC) -o $@ $(LDFLAGS) -mbig-endian $< +$(SUBDIR)/vdso-be8.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS) -mbig-endian -mbe8 $< + +$(SUBDIR)/vdso-be32.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS) -mbig-endian -mbe32 $< $(SUBDIR)/vdso-le.so: vdso.S vdso.ld vdso-asmoffset.h $(CC) -o $@ $(LDFLAGS) -mlittle-endian $< diff --git a/linux-user/arm/meson.build b/linux-user/arm/meson.build index c4bb9af5b8..348ffb810d 100644 --- a/linux-user/arm/meson.build +++ b/linux-user/arm/meson.build @@ -10,10 +10,17 @@ syscall_nr_generators += { # is always true as far as source_set.apply() is concerned. Always build # both header files and include the right one via #if. 
-vdso_be_inc = gen_vdso.process('vdso-be.so', - extra_args: ['-s', 'sigreturn_codes']) +vdso_be8_inc = gen_vdso.process('vdso-be8.so', + extra_args: ['-s', 'sigreturn_codes', + '-p', 'vdso_be8']) + +vdso_be32_inc = gen_vdso.process('vdso-be32.so', + extra_args: ['-s', 'sigreturn_codes', + '-p', 'vdso_be32']) vdso_le_inc = gen_vdso.process('vdso-le.so', extra_args: ['-s', 'sigreturn_codes']) -linux_user_ss.add(when: 'TARGET_ARM', if_true: [vdso_be_inc, vdso_le_inc]) +linux_user_ss.add(when: 'TARGET_ARM', if_true: [ + vdso_be8_inc, vdso_be32_inc, vdso_le_inc +]) diff --git a/linux-user/arm/vdso-be32.so b/linux-user/arm/vdso-be32.so new file mode 100755 index 0000000000000000000000000000000000000000..b896d3d545ebf91942038831a9535b023137a86b GIT binary patch literal 2648 zcmbtVO-x)>6h3eM80atz#?V$w#FU7s?MrPY6@OwJ7zS#9;uIpr#K$o6U?zom&CCPA z_(K)jYPEI2#x#u>*BWAK(oG}X7}JGu(U>kYb%7gWj2bmA)bac7dzTK##(3a+=YHp$ zd+&Sqo^$4vzQK$UFl1ALazDn}kQ8ZH3rj#GDIF4quT>K8M*#RXv5GMf@}cL0QbMa9 zq(RhlE+~K0VF_bG%`dp$jfe!`*N`-Pg!23P2DC;e#zejUPViv=@REPVMjCuphrI44sC?HyR7jO@IUJ0uoNG_9y)7(0_pk zpnrn8jPd`K6kebEMDGxZK_3LSp__c(2L5av#~Fv7s^g4R(keB6AJ11y5Ch&BHHGIy z!MCcr=6z^k4DXK!hxdkb4CjQ`+OJFT(1mRCl`nhCy&vqo@3Z~q#IoiKRmaNDIQbLS zwC%d3S$jw0k>tP36}zf!{!C}8kXw|QQh9FSAlEhV|DX&W9UdFE4x2~t7C7+DKLhWH zzvdr5eIIt-`P~5TRezZg{AliXF#0I!oQ(G1qZmH{ASaD+Xuz0+J_$VoeHMBidKLO* zXxy{00z?7Y4#38I5#xod1YV3A?XhN3?z0Er`8P=_9z7E{&$Fcc5b*sD_5s+w-oZWr z+ZRBzZk;4J_dJeGTe#`OQsPw0tMS*Gmt(K*I^A@p@oe;shBqVU!f%D%4xSHO;N}qS z{}^x@Xp{cB6gDM(KY0%G>-$p$oVBF; z6Y^?Hf4T&IyFcagBjZl~;aSs>PFFe$Vx^xMP7NLHm1=3aV!LydvXyrVsvDqbTYUpo zCYu`Svj);xvHAy(^`r)^W0}nHzENv5)ic;<)wKk_0c~j@+nZ8dquJ7^cIZi}e~DRL zoON@P&~C-kMLjAzt}SL^v7DPN<<$VO&^b3J=5%?^Eas|3&+b+{%e-BwmYnhyVim-+ zi`G;nH)~tP0wOgN)3p~|F;m$gGv~UM(&U_rqxnyQ`&N0%+-F@7oq!fU=jC1)nBPb{ zl`qWMCBV;dq?4E5`=Oa7Gyy{nqnbxuFe zn6iBG_8EYNR`J*aXyF= EF_ARM_EABI_VER4 + && (elf_flags & EF_ARM_BE8) + ? &vdso_be8_image_info + : &vdso_be32_image_info); +} +#define vdso_image_info vdso_image_info +#else +# define VDSO_HEADER "vdso-le.c.inc" +#endif + #else /* 64 bit ARM definitions */ @@ -958,14 +975,14 @@ const char *elf_hwcap2_str(uint32_t bit) #undef GET_FEATURE_ID -#endif /* not TARGET_AARCH64 */ - #if TARGET_BIG_ENDIAN # define VDSO_HEADER "vdso-be.c.inc" #else # define VDSO_HEADER "vdso-le.c.inc" #endif +#endif /* not TARGET_AARCH64 */ + #endif /* TARGET_ARM */ #ifdef TARGET_SPARC @@ -3524,12 +3541,14 @@ static void load_elf_interp(const char *filename, struct image_info *info, load_elf_image(filename, &src, info, &ehdr, NULL); } +#ifndef vdso_image_info #ifdef VDSO_HEADER #include VDSO_HEADER -#define vdso_image_info() &vdso_image_info +#define vdso_image_info(flags) &vdso_image_info #else -#define vdso_image_info() NULL -#endif +#define vdso_image_info(flags) NULL +#endif /* VDSO_HEADER */ +#endif /* vdso_image_info */ static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso) { @@ -3860,7 +3879,7 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) * Load a vdso if available, which will amongst other things contain the * signal trampolines. Otherwise, allocate a separate page for them. 
*/ - const VdsoImageInfo *vdso = vdso_image_info(); + const VdsoImageInfo *vdso = vdso_image_info(info->elf_flags); if (vdso) { load_elf_vdso(&vdso_info, vdso); info->vdso = vdso_info.load_bias; From 8377e3fb854d126ba10e61cb6b60885af8443ad4 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 15 Nov 2024 17:25:15 +0000 Subject: [PATCH 15/15] tcg: Allow top bit of SIMD_DATA_BITS to be set in simd_desc() In simd_desc() we create a SIMD descriptor from various pieces including an arbitrary data value from the caller. We try to sanitize these to make sure everything will fit: the 'data' value needs to fit in the SIMD_DATA_BITS (== 22) sized field. However we do that sanitizing with: tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS)); This works for the case where the data is supposed to be considered as a signed integer (which can then be returned via simd_data()). However, some callers want to treat the data value as unsigned. Specifically, for the Arm SVE operations, make_svemte_desc() assembles a data value as a collection of fields, and it needs to use all 22 bits. Currently if MTE is enabled then its MTEDESC SIZEM1 field may have the most significant bit set, and then it will trip this assertion. Loosen the assertion so that we only check that the data value will fit into the field in some way, either as a signed or as an unsigned value. This means we will fail to detect some kinds of bug in the callers, but we won't spuriously assert for intentional use of the data field as unsigned. Cc: qemu-stable@nongnu.org Fixes: db432672dc50e ("tcg: Add generic vector expanders") Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2601 Signed-off-by: Peter Maydell Message-ID: <20241115172515.1229393-1-peter.maydell@linaro.org> Reviewed-by: Richard Henderson Signed-off-by: Richard Henderson --- tcg/tcg-op-gvec.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c index 78ee1ced80..97e4df221a 100644 --- a/tcg/tcg-op-gvec.c +++ b/tcg/tcg-op-gvec.c @@ -88,7 +88,20 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data) uint32_t desc = 0; check_size_align(oprsz, maxsz, 0); - tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS)); + + /* + * We want to check that 'data' will fit into SIMD_DATA_BITS. + * However, some callers want to treat the data as a signed + * value (which they can later get back with simd_data()) + * and some want to treat it as an unsigned value. + * So here we assert only that the data will fit into the + * field in at least one way. This means that some invalid + * values from the caller will not be detected, e.g. if the + * caller wants to handle the value as a signed integer but + * incorrectly passes us 1 << (SIMD_DATA_BITS - 1). + */ + tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS) || + data == extract32(data, 0, SIMD_DATA_BITS)); oprsz = (oprsz / 8) - 1; maxsz = (maxsz / 8) - 1;
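A worked example of the relaxed assertion, with the two extraction helpers re-implemented locally so the snippet stands alone: extract_u()/extract_s() are stand-in names mirroring QEMU's extract32()/sextract32() bitops helpers, restricted here to start == 0 and 0 < len < 32.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SIMD_DATA_BITS 22

    static uint32_t extract_u(uint32_t v, int len)
    {
        return v & ((1u << len) - 1);
    }

    static int32_t extract_s(uint32_t v, int len)
    {
        uint32_t field = v & ((1u << len) - 1);
        uint32_t sign = 1u << (len - 1);

        return (int32_t)((field ^ sign) - sign);   /* sign-extend the field */
    }

    int main(void)
    {
        int32_t data = 1 << (SIMD_DATA_BITS - 1);   /* top bit of the field set */

        /* Old check (signed only): fails, sign-extension gives a negative value. */
        printf("signed fit:   %s\n",
               data == extract_s(data, SIMD_DATA_BITS) ? "yes" : "no");
        /* New check also accepts an unsigned fit. */
        printf("unsigned fit: %s\n",
               (uint32_t)data == extract_u((uint32_t)data, SIMD_DATA_BITS)
               ? "yes" : "no");
        assert(data == extract_s(data, SIMD_DATA_BITS) ||
               (uint32_t)data == extract_u((uint32_t)data, SIMD_DATA_BITS));
        return 0;
    }

With data = 1 << (SIMD_DATA_BITS - 1), the signed round-trip fails but the unsigned one succeeds, so the loosened tcg_debug_assert() accepts the MTE descriptor layout while still rejecting values that fit neither interpretation.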