diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 13986820fe..f35c5f359b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2691,7 +2691,7 @@ static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
     case MO_ATOM_WITHIN16_PAIR:
         /* Since size > 8, this is the half that must be atomic. */
-        if (!HAVE_ATOMIC128_RW) {
+        if (!HAVE_CMPXCHG128) {
             cpu_loop_exit_atomic(cpu, ra);
         }
         return store_whole_le16(p->haddr, p->size, val_le);
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index e8f97506fa..33a04dec52 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -825,7 +825,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
     int sh = o * 8;
     Int128 m, v;
 
-    qemu_build_assert(HAVE_ATOMIC128_RW);
+    qemu_build_assert(HAVE_CMPXCHG128);
 
     /* Like MAKE_64BIT_MASK(0, sz), but larger. */
     if (sz <= 64) {
@@ -887,7 +887,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
             return;
         }
     } else if ((pi & 15) == 7) {
-        if (HAVE_ATOMIC128_RW) {
+        if (HAVE_CMPXCHG128) {
             Int128 v = int128_lshift(int128_make64(val), 56);
             Int128 m = int128_lshift(int128_make64(0xffff), 56);
             store_atom_insert_al16(pv - 7, v, m);
@@ -956,7 +956,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
             return;
         }
     } else {
-        if (HAVE_ATOMIC128_RW) {
+        if (HAVE_CMPXCHG128) {
             store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val)));
             return;
         }
@@ -1021,7 +1021,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
         }
         break;
     case MO_64:
-        if (HAVE_ATOMIC128_RW) {
+        if (HAVE_CMPXCHG128) {
             store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val)));
             return;
         }
@@ -1076,7 +1076,7 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
         }
         break;
     case -MO_64:
-        if (HAVE_ATOMIC128_RW) {
+        if (HAVE_CMPXCHG128) {
             uint64_t val_le;
             int s2 = pi & 15;
             int s1 = 16 - s2;
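
The switch from HAVE_ATOMIC128_RW to HAVE_CMPXCHG128 matches how these store slow
paths are actually implemented: store_whole_le16() and the misaligned cases above
all funnel into store_atom_insert_al16(), which updates the 16-byte quantity with
a compare-and-swap loop rather than issuing a plain atomic 128-bit store, so the
host only needs a 128-bit cmpxchg. For illustration only, below is a minimal
sketch of that kind of masked insert, assuming GCC/Clang __atomic builtins on
unsigned __int128 (on x86-64 this needs -mcx16 so the builtin can lower to
cmpxchg16b); the function name is hypothetical, and QEMU's real helper uses its
own Int128 type plus host-specific variants.

/*
 * Sketch, not QEMU's implementation: atomically replace the bits of
 * *ps selected by msk with the corresponding bits of val, using a
 * 128-bit compare-and-swap loop.  Requires only cmpxchg16b-style
 * support from the host, not atomic 128-bit loads and stores.
 */
static inline void cas_insert_al16(unsigned __int128 *ps,
                                   unsigned __int128 val,
                                   unsigned __int128 msk)
{
    /*
     * The initial read need not be atomic: if it is torn, the first
     * compare-and-swap fails and refreshes 'old' with the real value.
     */
    unsigned __int128 old = *ps, new;

    do {
        new = (old & ~msk) | (val & msk);   /* merge under the mask */
    } while (!__atomic_compare_exchange_n(ps, &old, new, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}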