tcg/sparc64: Rename tcg_out_movi_imm13 to tcg_out_movi_s13
Emphasize that the constant is signed.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 8b14f8627c
parent 33982b890b
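For context: the renamed helper materializes a SPARC simm13, a signed 13-bit immediate (range -4096..4095) that the emitted "or %g0, simm13, rd" sign-extends into the full 64-bit register. The standalone sketch below models that range check and sign-extension; it is illustrative only and not QEMU code (fits_signed and model_movi_s13 are hypothetical names standing in for the backend's check_fit_*() helpers and tcg_out_movi_s13()).

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does 'val' fit in a signed immediate field of 'bits' bits?
   For bits == 13 this is the SPARC simm13 range, -4096 .. 4095. */
static bool fits_signed(int64_t val, unsigned bits)
{
    int64_t lo = -((int64_t)1 << (bits - 1));
    int64_t hi = ((int64_t)1 << (bits - 1)) - 1;
    return val >= lo && val <= hi;
}

/* Model of what the emitted "or %g0, simm13, rd" leaves in rd:
   %g0 is always zero, so rd = 0 | sign_extend(simm13). */
static int64_t model_movi_s13(int32_t simm13)
{
    assert(fits_signed(simm13, 13));
    return (int64_t)simm13;
}

int main(void)
{
    printf("4095 fits: %d\n", fits_signed(4095, 13));    /* 1 */
    printf("4096 fits: %d\n", fits_signed(4096, 13));    /* 0 */
    printf("-4096 fits: %d\n", fits_signed(-4096, 13));  /* 1 */
    printf("movi_s13(-1) = %lld\n", (long long)model_movi_s13(-1)); /* -1 */
    return 0;
}

This is why the title stresses "s13": a value such as 4095 fits, while 4096 does not and must take the sethi-based path visible in the second hunk below.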
@@ -399,7 +399,8 @@ static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
     tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
 }
 
-static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
+/* A 13-bit constant sign-extended to 64 bits.  */
+static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
 {
     tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
 }
@@ -408,7 +409,7 @@ static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
 {
     if (check_fit_i32(arg, 13)) {
         /* A 13-bit constant sign-extended to 64-bits.  */
-        tcg_out_movi_imm13(s, ret, arg);
+        tcg_out_movi_s13(s, ret, arg);
     } else {
         /* A 32-bit constant zero-extended to 64 bits.  */
         tcg_out_sethi(s, ret, arg);
@@ -433,7 +434,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
 
     /* A 13-bit constant sign-extended to 64-bits.  */
     if (check_fit_tl(arg, 13)) {
-        tcg_out_movi_imm13(s, ret, arg);
+        tcg_out_movi_s13(s, ret, arg);
         return;
     }
 
@@ -767,7 +768,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
 
     default:
         tcg_out_cmp(s, c1, c2, c2const);
-        tcg_out_movi_imm13(s, ret, 0);
+        tcg_out_movi_s13(s, ret, 0);
         tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
         return;
     }
@@ -803,11 +804,11 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
     /* For 64-bit signed comparisons vs zero, we can avoid the compare
        if the input does not overlap the output.  */
     if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
-        tcg_out_movi_imm13(s, ret, 0);
+        tcg_out_movi_s13(s, ret, 0);
         tcg_out_movr(s, cond, ret, c1, 1, 1);
     } else {
         tcg_out_cmp(s, c1, c2, c2const);
-        tcg_out_movi_imm13(s, ret, 0);
+        tcg_out_movi_s13(s, ret, 0);
         tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
     }
 }
@@ -844,7 +845,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
     if (use_vis3_instructions && !is_sub) {
         /* Note that ADDXC doesn't accept immediates.  */
         if (bhconst && bh != 0) {
-            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
+            tcg_out_movi_s13(s, TCG_REG_T2, bh);
             bh = TCG_REG_T2;
         }
         tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
@@ -866,7 +867,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
          * so the adjustment fits 12 bits.
          */
         if (bhconst) {
-            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
+            tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
         } else {
             tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                            is_sub ? ARITH_SUB : ARITH_ADD);
@@ -1036,7 +1037,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
     tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
     /* delay slot */
-    tcg_out_movi_imm13(s, TCG_REG_O0, 0);
+    tcg_out_movi_s13(s, TCG_REG_O0, 0);
 
     build_trampolines(s);
 }
@@ -1430,7 +1431,7 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 {
     if (check_fit_ptr(a0, 13)) {
         tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
-        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
+        tcg_out_movi_s13(s, TCG_REG_O0, a0);
         return;
     } else {
         intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);