target/arm: Avoid an extra temporary for store_exclusive

Instead of copying addr to a local temp, reuse the value (which we
have just compared as equal) already saved in cpu_exclusive_addr.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
Message-id: 20170908163859.29820-1-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Author:    Richard Henderson, 2017-09-14 18:43:18 +01:00
Committer: Peter Maydell
parent     dddbba9943
commit     37e29a6425
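
A rough sketch of the change (illustrative only, not part of the patch; the cmpxchg and the fail/done labels are elided): ordinary TCG temps are not preserved across the branch that ends the basic block, which is why the old code first copied the incoming address into a local temp. cpu_exclusive_addr is a TCG global (backed by env->exclusive_addr), so it does survive the branch, and once the brcond has established that it equals the incoming address, the code after the branch can simply use the global:

    /* Before: keep a branch-surviving copy of the incoming address.  */
    TCGv_i64 addr = tcg_temp_local_new_i64();
    tcg_gen_mov_i64(addr, inaddr);
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    /* ... cmpxchg against [addr] ... */
    tcg_temp_free_i64(addr);

    /* After: the branch proved addr == cpu_exclusive_addr, and the global
     * survives the basic-block end, so use it directly.
     */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    /* ... cmpxchg against [cpu_exclusive_addr] ... */

This removes the local temp along with its mov and free, as the diff below shows.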

@@ -1894,7 +1894,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
 }
 
 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
-                                TCGv_i64 inaddr, int size, int is_pair)
+                                TCGv_i64 addr, int size, int is_pair)
 {
     /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
      *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
@@ -1910,13 +1910,8 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
      */
     TCGLabel *fail_label = gen_new_label();
     TCGLabel *done_label = gen_new_label();
-    TCGv_i64 addr = tcg_temp_local_new_i64();
     TCGv_i64 tmp;
 
-    /* Copy input into a local temp so it is not trashed when the
-     * basic block ends at the branch insn.
-     */
-    tcg_gen_mov_i64(addr, inaddr);
     tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
 
     tmp = tcg_temp_new_i64();
@@ -1927,27 +1922,24 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
             } else {
                 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
             }
-            tcg_gen_atomic_cmpxchg_i64(tmp, addr, cpu_exclusive_val, tmp,
+            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
+                                       cpu_exclusive_val, tmp,
                                        get_mem_index(s),
                                        MO_64 | MO_ALIGN | s->be_data);
             tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
         } else if (s->be_data == MO_LE) {
-            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, addr, cpu_reg(s, rt),
-                                           cpu_reg(s, rt2));
+            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
+                                           cpu_reg(s, rt), cpu_reg(s, rt2));
         } else {
-            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, addr, cpu_reg(s, rt),
-                                           cpu_reg(s, rt2));
+            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
+                                           cpu_reg(s, rt), cpu_reg(s, rt2));
         }
     } else {
-        TCGv_i64 val = cpu_reg(s, rt);
-        tcg_gen_atomic_cmpxchg_i64(tmp, addr, cpu_exclusive_val, val,
-                                   get_mem_index(s),
+        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
+                                   cpu_reg(s, rt), get_mem_index(s),
                                    size | MO_ALIGN | s->be_data);
         tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
     }
-
-    tcg_temp_free_i64(addr);
-
     tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
     tcg_temp_free_i64(tmp);
     tcg_gen_br(done_label);