qemu/atomic128: Add runtime test for FEAT_LSE2

With FEAT_LSE2, loads and stores of Int128 are directly supported.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit e45fb74ddb
parent b35b812567
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2023-05-19 19:22:25 -07:00
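The runtime test in the header below reads a cpuinfo word that must be populated once at startup; the header only assumes that cpuinfo and the CPUINFO_LSE2 bit exist. A minimal sketch of how such a probe could look on Linux, assuming the kernel's HWCAP_USCAT flag (which advertises FEAT_LSE2's single-copy atomicity guarantees); the bit value and constructor wiring here are illustrative, not QEMU's actual cpuinfo implementation:

    #include <sys/auxv.h>       /* getauxval, AT_HWCAP */
    #include <asm/hwcap.h>      /* HWCAP_USCAT on aarch64 Linux */

    /* Illustrative bit assignment; only the name is assumed by the header. */
    enum { CPUINFO_LSE2 = 1u << 0 };

    unsigned cpuinfo;

    /* Probe once before any atomic16_* call, e.g. from a constructor. */
    static void __attribute__((constructor)) cpuinfo_init(void)
    {
        unsigned info = 0;
    #ifdef HWCAP_USCAT
        /* HWCAP_USCAT => FEAT_LSE2: aligned 16-byte LDP/STP are atomic. */
        if (getauxval(AT_HWCAP) & HWCAP_USCAT) {
            info |= CPUINFO_LSE2;
        }
    #endif
        cpuinfo = info;
    }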


@@ -11,27 +11,48 @@
 #ifndef AARCH64_ATOMIC128_LDST_H
 #define AARCH64_ATOMIC128_LDST_H
 
+#include "host/cpuinfo.h"
+#include "tcg/debug-assert.h"
+
 /*
  * Through gcc 10, aarch64 has no support for 128-bit atomics.
  * Through clang 16, without -march=armv8.4-a, __atomic_load_16
  * is incorrectly expanded to a read-write operation.
+ *
+ * Anyway, this method allows runtime detection of FEAT_LSE2.
  */
 
-#define HAVE_ATOMIC128_RO 0
+#define HAVE_ATOMIC128_RO (cpuinfo & CPUINFO_LSE2)
 #define HAVE_ATOMIC128_RW 1
 
-Int128 QEMU_ERROR("unsupported atomic") atomic16_read_ro(const Int128 *ptr);
+static inline Int128 atomic16_read_ro(const Int128 *ptr)
+{
+    uint64_t l, h;
+
+    tcg_debug_assert(HAVE_ATOMIC128_RO);
+    /* With FEAT_LSE2, 16-byte aligned LDP is atomic. */
+    asm("ldp %[l], %[h], %[mem]"
+        : [l] "=r"(l), [h] "=r"(h) : [mem] "m"(*ptr));
+
+    return int128_make128(l, h);
+}
 
 static inline Int128 atomic16_read_rw(Int128 *ptr)
 {
     uint64_t l, h;
     uint32_t tmp;
 
-    /* The load must be paired with the store to guarantee not tearing. */
-    asm("0: ldxp %[l], %[h], %[mem]\n\t"
-        "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
-        "cbnz %w[tmp], 0b"
-        : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h));
+    if (cpuinfo & CPUINFO_LSE2) {
+        /* With FEAT_LSE2, 16-byte aligned LDP is atomic. */
+        asm("ldp %[l], %[h], %[mem]"
+            : [l] "=r"(l), [h] "=r"(h) : [mem] "m"(*ptr));
+    } else {
+        /* The load must be paired with the store to guarantee not tearing. */
+        asm("0: ldxp %[l], %[h], %[mem]\n\t"
+            "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
+            "cbnz %w[tmp], 0b"
+            : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), [l] "=&r"(l), [h] "=&r"(h));
+    }
 
     return int128_make128(l, h);
 }
@@ -41,12 +62,18 @@ static inline void atomic16_set(Int128 *ptr, Int128 val)
     uint64_t l = int128_getlo(val), h = int128_gethi(val);
     uint64_t t1, t2;
 
-    /* Load into temporaries to acquire the exclusive access lock. */
-    asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
-        "stxp %w[t1], %[l], %[h], %[mem]\n\t"
-        "cbnz %w[t1], 0b"
-        : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
-        : [l] "r"(l), [h] "r"(h));
+    if (cpuinfo & CPUINFO_LSE2) {
+        /* With FEAT_LSE2, 16-byte aligned STP is atomic. */
+        asm("stp %[l], %[h], %[mem]"
+            : [mem] "=m"(*ptr) : [l] "r"(l), [h] "r"(h));
+    } else {
+        /* Load into temporaries to acquire the exclusive access lock. */
+        asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
+            "stxp %w[t1], %[l], %[h], %[mem]\n\t"
+            "cbnz %w[t1], 0b"
+            : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
+            : [l] "r"(l), [h] "r"(h));
+    }
 }
 
 #endif /* AARCH64_ATOMIC128_LDST_H */
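For contrast with the hand-written LDP above: the comment at the top of the header rules out the compiler builtin because clang through 16, without -march=armv8.4-a, expands __atomic_load_16 into a read-write operation. A sketch of that rejected portable form, assuming Int128 is the 16-byte scalar type (atomic16_read_builtin is a name invented here):

    /* Portable form avoided by the header: clang through 16 expands
     * this as a read-write operation, so it faults on read-only memory. */
    static inline Int128 atomic16_read_builtin(Int128 *ptr)
    {
        Int128 val;
        __atomic_load(ptr, &val, __ATOMIC_RELAXED);
        return val;
    }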
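With HAVE_ATOMIC128_RO now a runtime test instead of a constant 0, callers can choose between the two loads: atomic16_read_ro asserts HAVE_ATOMIC128_RO and never writes, while atomic16_read_rw always works but may store the loaded value back, so the page must be writable. A hypothetical caller sketch (load16 is not part of this patch):

    /* Select the read path from the runtime FEAT_LSE2 test. */
    static Int128 load16(Int128 *ptr)
    {
        if (HAVE_ATOMIC128_RO) {
            /* Plain LDP; safe even on a read-only mapping. */
            return atomic16_read_ro(ptr);
        }
        /* Without FEAT_LSE2 this is the LDXP/STXP loop, which
         * writes the value back; *ptr must be writable. */
        return atomic16_read_rw(ptr);
    }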