From 00406e6d3f3e4d3f852af9991a0e03df66362de6 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Wed, 19 Apr 2023 16:21:55 +0200
Subject: [PATCH] tcg/s390x: Use atom_and_align_for_opc

Reviewed-by: Peter Maydell
Signed-off-by: Richard Henderson
---
 tcg/s390x/tcg-target.c.inc | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 22f0206b5a..8e34b214fc 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -1572,6 +1572,7 @@ typedef struct {
     TCGReg base;
     TCGReg index;
     int disp;
+    TCGAtomAlign aa;
 } HostAddress;
 
 bool tcg_target_has_memory_bswap(MemOp memop)
@@ -1733,8 +1734,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
 {
     TCGLabelQemuLdst *ldst = NULL;
     MemOp opc = get_memop(oi);
-    unsigned a_bits = get_alignment_bits(opc);
-    unsigned a_mask = (1u << a_bits) - 1;
+    unsigned a_mask;
+
+    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
+    a_mask = (1 << h->aa.align) - 1;
 
 #ifdef CONFIG_SOFTMMU
     unsigned s_bits = opc & MO_SIZE;
@@ -1764,7 +1767,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
      * bits within the address. For unaligned access, we check that we don't
      * cross pages using the address of the last byte of the access.
      */
-    a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
+    a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
     tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
     if (a_off == 0) {
         tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
@@ -1806,7 +1809,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         ldst->addrlo_reg = addr_reg;
 
         /* We are expecting a_bits to max out at 7, much lower than TMLL. */
-        tcg_debug_assert(a_bits < 16);
+        tcg_debug_assert(a_mask <= 0xffff);
         tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
         tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
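
Note (not part of the patch): a minimal standalone sketch of the mask arithmetic the diff relies on, i.e. turning an alignment stored as a log2 exponent (as in the aa.align field used above) into the byte mask that the TMLL path tests. The struct and function names below are illustrative stand-ins, not the real TCG definitions.

/* Standalone illustration; compile with any C99 compiler. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    unsigned align;  /* required alignment, as log2 of the byte count */
    unsigned atom;   /* atomicity, likewise log2 (unused in this sketch) */
} ExampleAtomAlign;

/* Return true if addr satisfies the alignment encoded in aa. */
static bool addr_is_aligned(uint64_t addr, ExampleAtomAlign aa)
{
    unsigned a_mask = (1u << aa.align) - 1;   /* e.g. align=3 -> mask 0x7 */
    return (addr & a_mask) == 0;
}

int main(void)
{
    ExampleAtomAlign aa = { .align = 3, .atom = 0 };  /* 8-byte alignment */

    printf("0x1000 aligned: %d\n", addr_is_aligned(0x1000, aa));  /* prints 1 */
    printf("0x1004 aligned: %d\n", addr_is_aligned(0x1004, aa));  /* prints 0 */
    return 0;
}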