tcg: Remove TCG_TARGET_HAS_new_ldst
Since all backends have been converted, remove the compatibility code.

Acked-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Richard Henderson <rth@twiddle.net>
parent 76782fab1c
commit 3d1b2ff62c

@@ -99,8 +99,6 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 1
 #define TCG_TARGET_HAS_mulsh_i64 1
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
     __builtin___clear_cache((char *)start, (char *)stop);

@@ -86,8 +86,6 @@ extern bool use_idiv_instructions;
 #define TCG_TARGET_HAS_div_i32 use_idiv_instructions
 #define TCG_TARGET_HAS_rem_i32 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 extern bool tcg_target_deposit_valid(int ofs, int len);
 #define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
 

@@ -130,8 +130,6 @@ extern bool have_bmi1;
 #define TCG_TARGET_HAS_mulsh_i64 0
 #endif
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_TARGET_deposit_i32_valid(ofs, len) \
     (((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \
      ((ofs) == 0 && (len) == 16))

@@ -160,8 +160,6 @@ typedef enum {
 #define TCG_TARGET_HAS_mulsh_i64 0
 #define TCG_TARGET_HAS_trunc_shr_i32 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
 #define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
 

@@ -118,8 +118,6 @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
 #define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 /* optional instructions automatically implemented */
 #define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */
 #define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */

@@ -889,17 +889,12 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
         break;
 
     CASE_OP_32_64(ld8u):
-    case INDEX_op_qemu_ld8u:
         mask = 0xff;
         break;
     CASE_OP_32_64(ld16u):
-    case INDEX_op_qemu_ld16u:
         mask = 0xffff;
         break;
     case INDEX_op_ld32u_i64:
-#if TCG_TARGET_REG_BITS == 64
-    case INDEX_op_qemu_ld32u:
-#endif
         mask = 0xffffffffu;
         break;
 

@@ -100,8 +100,6 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_AREG0 TCG_REG_R27
 
 #define tcg_qemu_tb_exec(env, tb_ptr) \

@@ -124,8 +124,6 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 1
 #define TCG_TARGET_HAS_mulsh_i64 1
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_AREG0 TCG_REG_R27
 
 #define TCG_TARGET_EXTEND_ARGS 1

@@ -100,8 +100,6 @@ typedef enum TCGReg {
 #define TCG_TARGET_HAS_muluh_i64 0
 #define TCG_TARGET_HAS_mulsh_i64 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 extern bool tcg_target_deposit_valid(int ofs, int len);
 #define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
 #define TCG_TARGET_deposit_i64_valid tcg_target_deposit_valid

@@ -140,8 +140,6 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 0
 #define TCG_TARGET_HAS_mulsh_i64 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_AREG0 TCG_REG_I0
 
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)

tcg/tcg-opc.h (110 changed lines)
@@ -185,106 +185,20 @@ DEF(debug_insn_start, 0, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END)
 DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END)
 
-#define IMPL_NEW_LDST \
-    (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS \
-     | IMPL(TCG_TARGET_HAS_new_ldst))
+#define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2)
+#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
 
-#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
-DEF(qemu_ld_i32, 1, 1, 2, IMPL_NEW_LDST)
-DEF(qemu_st_i32, 0, 2, 2, IMPL_NEW_LDST)
-# if TCG_TARGET_REG_BITS == 64
-DEF(qemu_ld_i64, 1, 1, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-DEF(qemu_st_i64, 0, 2, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-# else
-DEF(qemu_ld_i64, 2, 1, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-DEF(qemu_st_i64, 0, 3, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-# endif
-#else
-DEF(qemu_ld_i32, 1, 2, 2, IMPL_NEW_LDST)
-DEF(qemu_st_i32, 0, 3, 2, IMPL_NEW_LDST)
-DEF(qemu_ld_i64, 2, 2, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-DEF(qemu_st_i64, 0, 4, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-#endif
-
-#undef IMPL_NEW_LDST
-
-#define IMPL_OLD_LDST \
-    (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS \
-     | IMPL(!TCG_TARGET_HAS_new_ldst))
-
-#if TCG_TARGET_REG_BITS == 32
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld8u, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld8u, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld8s, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld8s, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld16u, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld16u, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld16s, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld16s, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld32, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld32, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld64, 2, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-#else
-DEF(qemu_ld64, 2, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-#endif
-
-#if TARGET_LONG_BITS == 32
-DEF(qemu_st8, 0, 2, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_st8, 0, 3, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_st16, 0, 2, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_st16, 0, 3, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_st32, 0, 2, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_st32, 0, 3, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_st64, 0, 3, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-#else
-DEF(qemu_st64, 0, 4, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-#endif
-
-#else /* TCG_TARGET_REG_BITS == 32 */
-
-DEF(qemu_ld8u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld8s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld16u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld16s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld32, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld32u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld32s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld64, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-
-DEF(qemu_st8, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_st16, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_st32, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_st64, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-
-#endif /* TCG_TARGET_REG_BITS != 32 */
-
-#undef IMPL_OLD_LDST
+DEF(qemu_ld_i32, 1, TLADDR_ARGS, 2,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 2,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 2,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 2,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
 
+#undef TLADDR_ARGS
+#undef DATA64_ARGS
 #undef IMPL
 #undef IMPL64
 #undef DEF

tcg/tcg.c (123 changed lines)
@@ -941,97 +941,26 @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
     return op;
 }
 
-static const TCGOpcode old_ld_opc[8] = {
-    [MO_UB] = INDEX_op_qemu_ld8u,
-    [MO_SB] = INDEX_op_qemu_ld8s,
-    [MO_UW] = INDEX_op_qemu_ld16u,
-    [MO_SW] = INDEX_op_qemu_ld16s,
-#if TCG_TARGET_REG_BITS == 32
-    [MO_UL] = INDEX_op_qemu_ld32,
-    [MO_SL] = INDEX_op_qemu_ld32,
-#else
-    [MO_UL] = INDEX_op_qemu_ld32u,
-    [MO_SL] = INDEX_op_qemu_ld32s,
-#endif
-    [MO_Q] = INDEX_op_qemu_ld64,
-};
-
-static const TCGOpcode old_st_opc[4] = {
-    [MO_UB] = INDEX_op_qemu_st8,
-    [MO_UW] = INDEX_op_qemu_st16,
-    [MO_UL] = INDEX_op_qemu_st32,
-    [MO_Q] = INDEX_op_qemu_st64,
-};
-
 void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
     memop = tcg_canonicalize_memop(memop, 0, 0);
 
-    if (TCG_TARGET_HAS_new_ldst) {
-        *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
-        tcg_add_param_i32(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = memop;
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-        return;
-    }
-
-    /* The old opcodes only support target-endian memory operations. */
-    assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
-    assert(old_ld_opc[memop & MO_SSIZE] != 0);
-
-    if (TCG_TARGET_REG_BITS == 32) {
-        *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
-        tcg_add_param_i32(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-    } else {
-        TCGv_i64 val64 = tcg_temp_new_i64();
-
-        *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
-        tcg_add_param_i64(val64);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-
-        tcg_gen_trunc_i64_i32(val, val64);
-        tcg_temp_free_i64(val64);
-    }
+    *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
+    tcg_add_param_i32(val);
+    tcg_add_param_tl(addr);
+    *tcg_ctx.gen_opparam_ptr++ = memop;
+    *tcg_ctx.gen_opparam_ptr++ = idx;
 }
 
 void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
     memop = tcg_canonicalize_memop(memop, 0, 1);
 
-    if (TCG_TARGET_HAS_new_ldst) {
-        *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i32;
-        tcg_add_param_i32(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = memop;
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-        return;
-    }
-
-    /* The old opcodes only support target-endian memory operations. */
-    assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
-    assert(old_st_opc[memop & MO_SIZE] != 0);
-
-    if (TCG_TARGET_REG_BITS == 32) {
-        *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
-        tcg_add_param_i32(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-    } else {
-        TCGv_i64 val64 = tcg_temp_new_i64();
-
-        tcg_gen_extu_i32_i64(val64, val);
-
-        *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
-        tcg_add_param_i64(val64);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-
-        tcg_temp_free_i64(val64);
-    }
+    *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i32;
+    tcg_add_param_i32(val);
+    tcg_add_param_tl(addr);
+    *tcg_ctx.gen_opparam_ptr++ = memop;
+    *tcg_ctx.gen_opparam_ptr++ = idx;
 }
 
 void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)

@@ -1050,22 +979,10 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
     }
 #endif
 
-    if (TCG_TARGET_HAS_new_ldst) {
-        *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
-        tcg_add_param_i64(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = memop;
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-        return;
-    }
-
-    /* The old opcodes only support target-endian memory operations. */
-    assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
-    assert(old_ld_opc[memop & MO_SSIZE] != 0);
-
-    *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
+    *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
     tcg_add_param_i64(val);
     tcg_add_param_tl(addr);
+    *tcg_ctx.gen_opparam_ptr++ = memop;
     *tcg_ctx.gen_opparam_ptr++ = idx;
 }
 
@@ -1080,22 +997,10 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
     }
 #endif
 
-    if (TCG_TARGET_HAS_new_ldst) {
-        *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i64;
-        tcg_add_param_i64(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = memop;
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-        return;
-    }
-
-    /* The old opcodes only support target-endian memory operations. */
-    assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
-    assert(old_st_opc[memop & MO_SIZE] != 0);
-
-    *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
+    *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i64;
     tcg_add_param_i64(val);
     tcg_add_param_tl(addr);
+    *tcg_ctx.gen_opparam_ptr++ = memop;
     *tcg_ctx.gen_opparam_ptr++ = idx;
 }
 
@@ -118,8 +118,6 @@
 #define TCG_TARGET_HAS_mulu2_i32 1
 #endif /* TCG_TARGET_REG_BITS == 64 */
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 /* Number of registers available.
    For 32 bit hosts, we need more than 8 registers (call arguments). */
 /* #define TCG_TARGET_NB_REGS 8 */