target/arm: Hoist finalize_memop out of do_gpr_{ld, st}

We are going to need the complete memop beforehand,
so let's not compute it twice.
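
Illustrative sketch only (not part of this patch): the pattern here is to hoist the finalize step out of the helper and into the caller, so the finished value is computed once and can be reused. The names below (mem_op_t, finalize_op, emit_store_*) are invented stand-ins for QEMU's MemOp, finalize_memop() and do_gpr_st(); the bit values are arbitrary.

    #include <stdio.h>

    typedef unsigned mem_op_t;              /* stand-in for MemOp */

    static mem_op_t finalize_op(mem_op_t op)
    {
        return op | 0x10;                   /* pretend to fold in endianness/alignment */
    }

    /* Old shape: the helper finalized the memop internally, so a caller that
     * also wanted the complete value would have to finalize it a second time. */
    static void emit_store_old(mem_op_t op)
    {
        op = finalize_op(op);
        printf("store, memop=%#x\n", op);
    }

    /* New shape: the caller finalizes once and passes the complete memop down. */
    static void emit_store_new(mem_op_t op)
    {
        printf("store, memop=%#x\n", op);
    }

    int main(void)
    {
        mem_op_t op = finalize_op(0x3);     /* computed once by the caller...     */
        printf("check, memop=%#x\n", op);   /* ...reusable, e.g. for a MTE check  */
        emit_store_new(op);                 /* ...and passed down already complete */

        emit_store_old(0x3);                /* old shape, for comparison */
        return 0;
    }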

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230530191438.411344-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Authored by Richard Henderson on 2023-06-06 10:19:37 +01:00; committed by Peter Maydell
parent 6f47e7c189
commit a75b66f617
1 changed file with 35 additions and 26 deletions

@@ -838,7 +838,6 @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                              unsigned int iss_srt,
                              bool iss_sf, bool iss_ar)
 {
-    memop = finalize_memop(s, memop);
     tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
 
     if (iss_valid) {
@@ -873,7 +872,6 @@ static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                              bool iss_valid, unsigned int iss_srt,
                              bool iss_sf, bool iss_ar)
 {
-    memop = finalize_memop(s, memop);
     tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
 
     if (extend && (memop & MO_SIGN)) {
@@ -2625,6 +2623,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
     int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
     int size = extract32(insn, 30, 2);
     TCGv_i64 clean_addr;
+    MemOp memop;
 
     switch (o2_L_o1_o0) {
     case 0x0: /* STXR */
@@ -2661,10 +2660,11 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             gen_check_sp_alignment(s);
         }
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+        /* TODO: ARMv8.4-LSE SCTLR.nAA */
+        memop = finalize_memop(s, size | MO_ALIGN);
         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                     true, rn != 31, size);
-        /* TODO: ARMv8.4-LSE SCTLR.nAA */
-        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
+        do_gpr_st(s, cpu_reg(s, rt), clean_addr, memop, true, rt,
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         return;
 
@@ -2679,10 +2679,11 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (rn == 31) {
             gen_check_sp_alignment(s);
         }
+        /* TODO: ARMv8.4-LSE SCTLR.nAA */
+        memop = finalize_memop(s, size | MO_ALIGN);
         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                     false, rn != 31, size);
-        /* TODO: ARMv8.4-LSE SCTLR.nAA */
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, memop, false, true,
                   rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
         return;
@@ -2790,9 +2791,9 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
     } else {
         /* Only unsigned 32bit loads target 32bit registers. */
         bool iss_sf = opc != 0;
+        MemOp memop = finalize_memop(s, size + is_signed * MO_SIGN);
 
-        do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
-                  false, true, rt, iss_sf, false);
+        do_gpr_ld(s, tcg_rt, clean_addr, memop, false, true, rt, iss_sf, false);
     }
 }
 
@@ -3046,7 +3047,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     bool post_index;
     bool writeback;
     int memidx;
-
+    MemOp memop;
     TCGv_i64 clean_addr, dirty_addr;
 
     if (is_vector) {
@@ -3073,7 +3074,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
             return;
         }
         is_store = (opc == 0);
-        is_signed = extract32(opc, 1, 1);
+        is_signed = !is_store && extract32(opc, 1, 1);
         is_extended = (size < 3) && extract32(opc, 0, 1);
     }
 
@@ -3107,6 +3108,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     }
 
     memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+
+    memop = finalize_memop(s, size + is_signed * MO_SIGN);
     clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                        writeback || rn != 31,
                                        size, is_unpriv, memidx);
@@ -3122,10 +3125,10 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
         bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
 
         if (is_store) {
-            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
+            do_gpr_st_memidx(s, tcg_rt, clean_addr, memop, memidx,
                              iss_valid, rt, iss_sf, false);
         } else {
-            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+            do_gpr_ld_memidx(s, tcg_rt, clean_addr, memop,
                              is_extended, memidx,
                              iss_valid, rt, iss_sf, false);
         }
@@ -3174,8 +3177,8 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
     bool is_signed = false;
     bool is_store = false;
     bool is_extended = false;
-
     TCGv_i64 tcg_rm, clean_addr, dirty_addr;
+    MemOp memop;
 
     if (extract32(opt, 1, 1) == 0) {
         unallocated_encoding(s);
@@ -3202,7 +3205,7 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
             return;
         }
         is_store = (opc == 0);
-        is_signed = extract32(opc, 1, 1);
+        is_signed = !is_store && extract32(opc, 1, 1);
         is_extended = (size < 3) && extract32(opc, 0, 1);
     }
 
@@ -3215,6 +3218,8 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
     ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
 
     tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
+
+    memop = finalize_memop(s, size + is_signed * MO_SIGN);
     clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);
 
     if (is_vector) {
@@ -3226,11 +3231,12 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
         bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+
         if (is_store) {
-            do_gpr_st(s, tcg_rt, clean_addr, size,
+            do_gpr_st(s, tcg_rt, clean_addr, memop,
                       true, rt, iss_sf, false);
         } else {
-            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+            do_gpr_ld(s, tcg_rt, clean_addr, memop,
                       is_extended, true, rt, iss_sf, false);
         }
     }
@@ -3262,12 +3268,11 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
     int rn = extract32(insn, 5, 5);
     unsigned int imm12 = extract32(insn, 10, 12);
     unsigned int offset;
-
     TCGv_i64 clean_addr, dirty_addr;
-
     bool is_store;
     bool is_signed = false;
     bool is_extended = false;
+    MemOp memop;
 
     if (is_vector) {
         size |= (opc & 2) << 1;
@@ -3289,7 +3294,7 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
             return;
         }
         is_store = (opc == 0);
-        is_signed = extract32(opc, 1, 1);
+        is_signed = !is_store && extract32(opc, 1, 1);
         is_extended = (size < 3) && extract32(opc, 0, 1);
     }
 
@@ -3299,6 +3304,8 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
     dirty_addr = read_cpu_reg_sp(s, rn, 1);
     offset = imm12 << size;
     tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+
+    memop = finalize_memop(s, size + is_signed * MO_SIGN);
     clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);
 
     if (is_vector) {
@@ -3311,10 +3318,9 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
         bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
         if (is_store) {
-            do_gpr_st(s, tcg_rt, clean_addr, size,
-                      true, rt, iss_sf, false);
+            do_gpr_st(s, tcg_rt, clean_addr, memop, true, rt, iss_sf, false);
         } else {
-            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+            do_gpr_ld(s, tcg_rt, clean_addr, memop,
                       is_extended, true, rt, iss_sf, false);
         }
     }
@@ -3344,7 +3350,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
     bool a = extract32(insn, 23, 1);
     TCGv_i64 tcg_rs, tcg_rt, clean_addr;
    AtomicThreeOpFn *fn = NULL;
-    MemOp mop = s->be_data | size | MO_ALIGN;
+    MemOp mop = finalize_memop(s, size | MO_ALIGN);
 
     if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
         unallocated_encoding(s);
@@ -3405,7 +3411,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
          * full load-acquire (we only need "load-acquire processor consistent"),
          * but we choose to implement them as full LDAQ.
          */
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop, false,
                   true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
         return;
@@ -3451,6 +3457,7 @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
     bool use_key_a = !extract32(insn, 23, 1);
     int offset;
     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
+    MemOp memop;
 
     if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
         unallocated_encoding(s);
@@ -3477,12 +3484,14 @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
     offset = sextract32(offset << size, 0, 10 + size);
     tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
 
+    memop = finalize_memop(s, size);
+
     /* Note that "clean" and "dirty" here refer to TBI not PAC. */
     clean_addr = gen_mte_check1(s, dirty_addr, false,
                                 is_wback || rn != 31, size);
 
     tcg_rt = cpu_reg(s, rt);
-    do_gpr_ld(s, tcg_rt, clean_addr, size,
+    do_gpr_ld(s, tcg_rt, clean_addr, memop,
               /* extend */ false, /* iss_valid */ !is_wback,
               /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
 
@@ -3524,7 +3533,7 @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
     }
 
     /* TODO: ARMv8.4-LSE SCTLR.nAA */
-    mop = size | MO_ALIGN;
+    mop = finalize_memop(s, size | MO_ALIGN);
 
     switch (opc) {
     case 0: /* STLURB */