tcg/mips: Use full load/store helpers in user-only mode
Instead of using helper_unaligned_{ld,st}, use the full load/store
helpers.  This will allow the fast path to increase alignment to
implement atomicity while not immediately raising an alignment
exception.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent b6ee2453f6
commit 30feb7ee43
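To make the motivation concrete, the following stand-alone C sketch contrasts the two kinds of helper (illustration only, not QEMU code: the demo_* names are invented for this example). With only helper_unaligned_{ld,st} available, a slow path entered on a misaligned address can do nothing but raise the fault; a full load/store helper can complete the access itself, so the fast path is free to demand stricter alignment, e.g. to get an atomic access, without turning every misaligned guest access into an exception.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a full load helper: completes the access
   with a byte copy, so any alignment is acceptable. */
static uint32_t demo_full_ld32(const void *host_addr)
{
    uint32_t val;
    memcpy(&val, host_addr, sizeof(val));
    return val;
}

/* Hypothetical stand-in for helper_unaligned_ld: it cannot perform the
   access, only deliver the alignment fault to the guest. */
static uint32_t demo_unaligned_ld32(const void *host_addr)
{
    (void)host_addr;
    raise(SIGBUS);
    return 0;
}

/* Fast path: a naturally aligned access is a single (atomic) load;
   anything else falls through to the slow path. */
static uint32_t demo_ld32(const void *host_addr, int have_full_helpers)
{
    if (((uintptr_t)host_addr & 3) == 0) {
        return *(const uint32_t *)host_addr;
    }
    return have_full_helpers ? demo_full_ld32(host_addr)
                             : demo_unaligned_ld32(host_addr);
}

int main(void)
{
    uint32_t storage[2] = { 0x04030201u, 0x08070605u };
    const uint8_t *p = (const uint8_t *)storage + 1;   /* misaligned */

    /* Completes via the full helper; would raise SIGBUS with 0. */
    printf("0x%08x\n", demo_ld32(p, 1));
    return 0;
}

In the patch itself this amounts to deleting the MIPS-specific tcg_out_fail_alignment() tail call and letting the user-only build share the softmmu slow path, which marshals its arguments through ldst_helper_param and calls the regular load/store helpers.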
@@ -1075,7 +1075,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
     tcg_out_nop(s);
 }
 
-#if defined(CONFIG_SOFTMMU)
 /* We have four temps, we might as well expose three of them. */
 static const TCGLdstHelperParam ldst_helper_param = {
     .ntmp = 3, .tmp = { TCG_TMP0, TCG_TMP1, TCG_TMP2 }
@@ -1088,8 +1087,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     /* resolve label address */
     if (!reloc_pc16(l->label_ptr[0], tgt_rx)
-        || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
-            && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
+        || (l->label_ptr[1] && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
         return false;
     }
 
@@ -1118,8 +1116,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     /* resolve label address */
     if (!reloc_pc16(l->label_ptr[0], tgt_rx)
-        || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
-            && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
+        || (l->label_ptr[1] && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
         return false;
     }
 
@@ -1139,56 +1136,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     return true;
 }
 
-#else
-static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    void *target;
-
-    if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
-        return false;
-    }
-
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */
-        TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg;
-        TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg;
-
-        if (a3 != TCG_REG_A2) {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
-        } else if (a2 != TCG_REG_A3) {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0);
-        }
-    } else {
-        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
-    }
-    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
-
-    /*
-     * Tail call to the helper, with the return address back inline.
-     * We have arrived here via BNEL, so $31 is already set.
-     */
-    target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st);
-    tcg_out_call_int(s, target, true);
-    return true;
-}
-
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    return tcg_out_fail_alignment(s, l);
-}
-
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    return tcg_out_fail_alignment(s, l);
-}
-#endif /* SOFTMMU */
-
 typedef struct {
     TCGReg base;
     MemOp align;