target/arm: Hoist finalize_memop out of do_fp_{ld, st}

We are going to need the complete memop beforehand,
so let's not compute it twice.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230530191438.411344-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
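
For illustration only, a minimal standalone C sketch of the pattern this patch
applies: instead of each helper deriving the memory-op flags from "size"
internally, the caller finalizes the value once and passes the complete result
down, so it can also be reused without being computed twice. The names below
(make_flags, store_half) are hypothetical stand-ins, not QEMU APIs.

    #include <stdio.h>

    typedef unsigned MemOp;                 /* stand-in for QEMU's MemOp type */

    /* Stand-in for finalize_memop_asimd(): pretend this combines size, sign
     * and per-translation state, and should only run once per instruction. */
    static MemOp make_flags(int size, int sign)
    {
        return (MemOp)(size | (sign << 4));
    }

    /* After the change: the helper takes the finished MemOp instead of
     * recomputing it from "size" on every call. */
    static void store_half(const char *which, MemOp mop)
    {
        printf("store %s with mop=%#x\n", which, mop);
    }

    int main(void)
    {
        MemOp mop = make_flags(3, 0);       /* computed once in the caller */

        store_half("lo", mop);              /* ...then reused for both halves */
        store_half("hi", mop);
        return 0;
    }

The design point is the same as in the patch below: compute the MemOp once in
the caller and hand the complete value to do_fp_ld()/do_fp_st().
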
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -905,15 +905,14 @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
 /*
  * Store from FP register to memory
  */
-static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
+static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop)
 {
     /* This writes the bottom N bits of a 128 bit wide vector to memory */
     TCGv_i64 tmplo = tcg_temp_new_i64();
-    MemOp mop = finalize_memop_asimd(s, size);
 
     tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
 
-    if (size < MO_128) {
+    if ((mop & MO_SIZE) < MO_128) {
         tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
     } else {
         TCGv_i64 tmphi = tcg_temp_new_i64();
@@ -929,14 +928,13 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
 /*
  * Load from memory to FP register
  */
-static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
+static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop)
 {
     /* This always zero-extends and writes to a full 128 bit wide vector */
     TCGv_i64 tmplo = tcg_temp_new_i64();
     TCGv_i64 tmphi = NULL;
-    MemOp mop = finalize_memop_asimd(s, size);
 
-    if (size < MO_128) {
+    if ((mop & MO_SIZE) < MO_128) {
         tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
     } else {
         TCGv_i128 t16 = tcg_temp_new_i128();
@@ -2763,6 +2761,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
     bool is_signed = false;
     int size = 2;
     TCGv_i64 tcg_rt, clean_addr;
+    MemOp memop;
 
     if (is_vector) {
         if (opc == 3) {
@@ -2773,6 +2772,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
         if (!fp_access_check(s)) {
             return;
         }
+        memop = finalize_memop_asimd(s, size);
     } else {
         if (opc == 3) {
             /* PRFM (literal) : prefetch */
@@ -2780,19 +2780,19 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
         }
         size = 2 + extract32(opc, 0, 1);
         is_signed = extract32(opc, 1, 1);
+        memop = finalize_memop(s, size + is_signed * MO_SIGN);
     }
 
     tcg_rt = cpu_reg(s, rt);
 
     clean_addr = tcg_temp_new_i64();
     gen_pc_plus_diff(s, clean_addr, imm);
     if (is_vector) {
-        do_fp_ld(s, rt, clean_addr, size);
+        do_fp_ld(s, rt, clean_addr, memop);
     } else {
         /* Only unsigned 32bit loads target 32bit registers. */
         bool iss_sf = opc != 0;
-        MemOp memop = finalize_memop(s, size + is_signed * MO_SIGN);
 
         do_gpr_ld(s, tcg_rt, clean_addr, memop, false, true, rt, iss_sf, false);
     }
 }
@@ -2929,16 +2929,18 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
                                 (wback || rn != 31) && !set_tag, 2 << size);
 
     if (is_vector) {
+        MemOp mop = finalize_memop_asimd(s, size);
+
         if (is_load) {
-            do_fp_ld(s, rt, clean_addr, size);
+            do_fp_ld(s, rt, clean_addr, mop);
         } else {
-            do_fp_st(s, rt, clean_addr, size);
+            do_fp_st(s, rt, clean_addr, mop);
         }
         tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
         if (is_load) {
-            do_fp_ld(s, rt2, clean_addr, size);
+            do_fp_ld(s, rt2, clean_addr, mop);
         } else {
-            do_fp_st(s, rt2, clean_addr, size);
+            do_fp_st(s, rt2, clean_addr, mop);
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
@@ -3060,6 +3062,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
         if (!fp_access_check(s)) {
             return;
         }
+        memop = finalize_memop_asimd(s, size);
     } else {
         if (size == 3 && opc == 2) {
             /* PRFM - prefetch */
@@ -3076,6 +3079,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
         is_store = (opc == 0);
         is_signed = !is_store && extract32(opc, 1, 1);
         is_extended = (size < 3) && extract32(opc, 0, 1);
+        memop = finalize_memop(s, size + is_signed * MO_SIGN);
     }
 
     switch (idx) {
@@ -3108,7 +3112,6 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     }
 
     memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
-    memop = finalize_memop(s, size + is_signed * MO_SIGN);
 
     clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                        writeback || rn != 31,
@@ -3116,9 +3119,9 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
 
     if (is_vector) {
         if (is_store) {
-            do_fp_st(s, rt, clean_addr, size);
+            do_fp_st(s, rt, clean_addr, memop);
         } else {
-            do_fp_ld(s, rt, clean_addr, size);
+            do_fp_ld(s, rt, clean_addr, memop);
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
@@ -3224,9 +3227,9 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
 
     if (is_vector) {
         if (is_store) {
-            do_fp_st(s, rt, clean_addr, size);
+            do_fp_st(s, rt, clean_addr, memop);
         } else {
-            do_fp_ld(s, rt, clean_addr, size);
+            do_fp_ld(s, rt, clean_addr, memop);
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
@@ -3310,9 +3313,9 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
 
     if (is_vector) {
         if (is_store) {
-            do_fp_st(s, rt, clean_addr, size);
+            do_fp_st(s, rt, clean_addr, memop);
         } else {
-            do_fp_ld(s, rt, clean_addr, size);
+            do_fp_ld(s, rt, clean_addr, memop);
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);