tcg/tci: Split out constraint sets to tcg-target-con-set.h

This requires finishing the conversion to tcg_target_op_def.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 63b29fda4e
parent 0d11dc7c97
Richard Henderson <richard.henderson@linaro.org>  2020-10-17 11:35:13 -07:00
3 changed files with 161 additions and 145 deletions

diff --git a/tcg/tci/tcg-target-con-set.h b/tcg/tci/tcg-target-con-set.h
new file mode 100644
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * TCI target-specific constraint sets.
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
+ * Each operand should be a sequence of constraint letters as defined by
+ * tcg-target-con-str.h; the constraint combination is inclusive or.
+ */
+C_O0_I2(r, r)
+C_O0_I2(r, ri)
+C_O0_I3(r, r, r)
+C_O0_I4(r, r, ri, ri)
+C_O0_I4(r, r, r, r)
+C_O1_I1(r, r)
+C_O1_I2(r, 0, r)
+C_O1_I2(r, ri, ri)
+C_O1_I2(r, r, r)
+C_O1_I2(r, r, ri)
+C_O1_I4(r, r, r, ri, ri)
+C_O2_I1(r, r, r)
+C_O2_I2(r, r, r, r)
+C_O2_I4(r, r, r, r, r, r)

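Note: each line in the new header is an invocation of a C_On_Im() macro that the common tcg code defines before including the file, so a single list of constraint sets can expand into several parallel structures. The following standalone sketch illustrates that X-macro idea; the macro bodies, struct layout, and the two-entry set list are invented for the example, and the real definitions in tcg/tcg.c differ in detail.

    #include <stdio.h>

    /* First expansion: one enum index per constraint set. */
    #define C_O0_I2(i1, i2)      C_o0_i2_##i1##_##i2,
    #define C_O1_I2(o1, i1, i2)  C_o1_i2_##o1##_##i1##_##i2,
    typedef enum {
        C_O0_I2(r, ri)        /* in tcg these lines come from the header */
        C_O1_I2(r, ri, ri)
    } ConstraintSetIndex;
    #undef C_O0_I2
    #undef C_O1_I2

    /* Second expansion: a parallel table of constraint strings. */
    #define C_O0_I2(i1, i2)      { 0, 2, { #i1, #i2 } },
    #define C_O1_I2(o1, i1, i2)  { 1, 2, { #o1, #i1, #i2 } },
    static const struct {
        int nb_oargs, nb_iargs;
        const char *args[4];
    } con_sets[] = {
        C_O0_I2(r, ri)
        C_O1_I2(r, ri, ri)
    };

    int main(void)
    {
        /* The enum value indexes the parallel table: prints "r ri ri". */
        const char *const *a = con_sets[C_o1_i2_r_ri_ri].args;
        printf("%s %s %s\n", a[0], a[1], a[2]);
        return 0;
    }

A further expansion of the same names lets backend code such as tcg_target_op_def() in the tcg-target.c hunk below name a set by writing its constraint letters directly, e.g. return C_O1_I2(r, ri, ri); the exact plumbing lives in tcg/tcg.c.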
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
@@ -37,154 +37,143 @@
 /* Bitfield n...m (in 32 bit value). */
 #define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m)
 
-/* Macros used in tcg_target_op_defs. */
-#define R "r"
-#define RI "ri"
-#if TCG_TARGET_REG_BITS == 32
-# define R64 "r", "r"
-#else
-# define R64 "r"
-#endif
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-# define L "r", "r"
-# define S "r", "r"
-#else
-# define L "r"
-# define S "r"
-#endif
-
-/* TODO: documentation. */
-static const TCGTargetOpDef tcg_target_op_defs[] = {
-    { INDEX_op_exit_tb, { NULL } },
-    { INDEX_op_goto_tb, { NULL } },
-    { INDEX_op_br, { NULL } },
-
-    { INDEX_op_ld8u_i32, { R, R } },
-    { INDEX_op_ld8s_i32, { R, R } },
-    { INDEX_op_ld16u_i32, { R, R } },
-    { INDEX_op_ld16s_i32, { R, R } },
-    { INDEX_op_ld_i32, { R, R } },
-    { INDEX_op_st8_i32, { R, R } },
-    { INDEX_op_st16_i32, { R, R } },
-    { INDEX_op_st_i32, { R, R } },
-
-    { INDEX_op_add_i32, { R, RI, RI } },
-    { INDEX_op_sub_i32, { R, RI, RI } },
-    { INDEX_op_mul_i32, { R, RI, RI } },
-    { INDEX_op_div_i32, { R, R, R } },
-    { INDEX_op_divu_i32, { R, R, R } },
-    { INDEX_op_rem_i32, { R, R, R } },
-    { INDEX_op_remu_i32, { R, R, R } },
-    /* TODO: Does R, RI, RI result in faster code than R, R, RI?
-       If both operands are constants, we can optimize. */
-    { INDEX_op_and_i32, { R, RI, RI } },
-    { INDEX_op_andc_i32, { R, RI, RI } },
-    { INDEX_op_eqv_i32, { R, RI, RI } },
-    { INDEX_op_nand_i32, { R, RI, RI } },
-    { INDEX_op_nor_i32, { R, RI, RI } },
-    { INDEX_op_or_i32, { R, RI, RI } },
-    { INDEX_op_orc_i32, { R, RI, RI } },
-    { INDEX_op_xor_i32, { R, RI, RI } },
-    { INDEX_op_shl_i32, { R, RI, RI } },
-    { INDEX_op_shr_i32, { R, RI, RI } },
-    { INDEX_op_sar_i32, { R, RI, RI } },
-    { INDEX_op_rotl_i32, { R, RI, RI } },
-    { INDEX_op_rotr_i32, { R, RI, RI } },
-    { INDEX_op_deposit_i32, { R, "0", R } },
-
-    { INDEX_op_brcond_i32, { R, RI } },
-
-    { INDEX_op_setcond_i32, { R, R, RI } },
-    { INDEX_op_setcond_i64, { R, R, RI } },
-
-    /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
-    { INDEX_op_add2_i32, { R, R, R, R, R, R } },
-    { INDEX_op_sub2_i32, { R, R, R, R, R, R } },
-    { INDEX_op_brcond2_i32, { R, R, RI, RI } },
-    { INDEX_op_mulu2_i32, { R, R, R, R } },
-    { INDEX_op_setcond2_i32, { R, R, R, RI, RI } },
-
-    { INDEX_op_not_i32, { R, R } },
-    { INDEX_op_neg_i32, { R, R } },
-
-    { INDEX_op_ld8u_i64, { R, R } },
-    { INDEX_op_ld8s_i64, { R, R } },
-    { INDEX_op_ld16u_i64, { R, R } },
-    { INDEX_op_ld16s_i64, { R, R } },
-    { INDEX_op_ld32u_i64, { R, R } },
-    { INDEX_op_ld32s_i64, { R, R } },
-    { INDEX_op_ld_i64, { R, R } },
-
-    { INDEX_op_st8_i64, { R, R } },
-    { INDEX_op_st16_i64, { R, R } },
-    { INDEX_op_st32_i64, { R, R } },
-    { INDEX_op_st_i64, { R, R } },
-
-    { INDEX_op_add_i64, { R, RI, RI } },
-    { INDEX_op_sub_i64, { R, RI, RI } },
-    { INDEX_op_mul_i64, { R, RI, RI } },
-    { INDEX_op_div_i64, { R, R, R } },
-    { INDEX_op_divu_i64, { R, R, R } },
-    { INDEX_op_rem_i64, { R, R, R } },
-    { INDEX_op_remu_i64, { R, R, R } },
-    { INDEX_op_and_i64, { R, RI, RI } },
-    { INDEX_op_andc_i64, { R, RI, RI } },
-    { INDEX_op_eqv_i64, { R, RI, RI } },
-    { INDEX_op_nand_i64, { R, RI, RI } },
-    { INDEX_op_nor_i64, { R, RI, RI } },
-    { INDEX_op_or_i64, { R, RI, RI } },
-    { INDEX_op_orc_i64, { R, RI, RI } },
-    { INDEX_op_xor_i64, { R, RI, RI } },
-    { INDEX_op_shl_i64, { R, RI, RI } },
-    { INDEX_op_shr_i64, { R, RI, RI } },
-    { INDEX_op_sar_i64, { R, RI, RI } },
-    { INDEX_op_rotl_i64, { R, RI, RI } },
-    { INDEX_op_rotr_i64, { R, RI, RI } },
-    { INDEX_op_deposit_i64, { R, "0", R } },
-    { INDEX_op_brcond_i64, { R, RI } },
-
-    { INDEX_op_ext8s_i64, { R, R } },
-    { INDEX_op_ext16s_i64, { R, R } },
-    { INDEX_op_ext32s_i64, { R, R } },
-    { INDEX_op_ext8u_i64, { R, R } },
-    { INDEX_op_ext16u_i64, { R, R } },
-    { INDEX_op_ext32u_i64, { R, R } },
-    { INDEX_op_ext_i32_i64, { R, R } },
-    { INDEX_op_extu_i32_i64, { R, R } },
-    { INDEX_op_bswap16_i64, { R, R } },
-    { INDEX_op_bswap32_i64, { R, R } },
-    { INDEX_op_bswap64_i64, { R, R } },
-
-    { INDEX_op_not_i64, { R, R } },
-    { INDEX_op_neg_i64, { R, R } },
-
-    { INDEX_op_qemu_ld_i32, { R, L } },
-    { INDEX_op_qemu_ld_i64, { R64, L } },
-
-    { INDEX_op_qemu_st_i32, { R, S } },
-    { INDEX_op_qemu_st_i64, { R64, S } },
-
-    { INDEX_op_ext8s_i32, { R, R } },
-    { INDEX_op_ext16s_i32, { R, R } },
-    { INDEX_op_ext8u_i32, { R, R } },
-    { INDEX_op_ext16u_i32, { R, R } },
-
-    { INDEX_op_bswap16_i32, { R, R } },
-    { INDEX_op_bswap32_i32, { R, R } },
-
-    { INDEX_op_mb, { } },
-    { -1 },
-};
-
-static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
 {
-    int i, n = ARRAY_SIZE(tcg_target_op_defs);
-
-    for (i = 0; i < n; ++i) {
-        if (tcg_target_op_defs[i].op == op) {
-            return &tcg_target_op_defs[i];
-        }
-    }
-    return NULL;
+    switch (op) {
+    case INDEX_op_ld8u_i32:
+    case INDEX_op_ld8s_i32:
+    case INDEX_op_ld16u_i32:
+    case INDEX_op_ld16s_i32:
+    case INDEX_op_ld_i32:
+    case INDEX_op_ld8u_i64:
+    case INDEX_op_ld8s_i64:
+    case INDEX_op_ld16u_i64:
+    case INDEX_op_ld16s_i64:
+    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld_i64:
+    case INDEX_op_not_i32:
+    case INDEX_op_not_i64:
+    case INDEX_op_neg_i32:
+    case INDEX_op_neg_i64:
+    case INDEX_op_ext8s_i32:
+    case INDEX_op_ext8s_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
+    case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
+    case INDEX_op_extu_i32_i64:
+    case INDEX_op_bswap16_i32:
+    case INDEX_op_bswap16_i64:
+    case INDEX_op_bswap32_i32:
+    case INDEX_op_bswap32_i64:
+    case INDEX_op_bswap64_i64:
+        return C_O1_I1(r, r);
+
+    case INDEX_op_st8_i32:
+    case INDEX_op_st16_i32:
+    case INDEX_op_st_i32:
+    case INDEX_op_st8_i64:
+    case INDEX_op_st16_i64:
+    case INDEX_op_st32_i64:
+    case INDEX_op_st_i64:
+        return C_O0_I2(r, r);
+
+    case INDEX_op_div_i32:
+    case INDEX_op_div_i64:
+    case INDEX_op_divu_i32:
+    case INDEX_op_divu_i64:
+    case INDEX_op_rem_i32:
+    case INDEX_op_rem_i64:
+    case INDEX_op_remu_i32:
+    case INDEX_op_remu_i64:
+        return C_O1_I2(r, r, r);
+
+    case INDEX_op_add_i32:
+    case INDEX_op_add_i64:
+    case INDEX_op_sub_i32:
+    case INDEX_op_sub_i64:
+    case INDEX_op_mul_i32:
+    case INDEX_op_mul_i64:
+    case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
+    case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
+    case INDEX_op_eqv_i32:
+    case INDEX_op_eqv_i64:
+    case INDEX_op_nand_i32:
+    case INDEX_op_nand_i64:
+    case INDEX_op_nor_i32:
+    case INDEX_op_nor_i64:
+    case INDEX_op_or_i32:
+    case INDEX_op_or_i64:
+    case INDEX_op_orc_i32:
+    case INDEX_op_orc_i64:
+    case INDEX_op_xor_i32:
+    case INDEX_op_xor_i64:
+    case INDEX_op_shl_i32:
+    case INDEX_op_shl_i64:
+    case INDEX_op_shr_i32:
+    case INDEX_op_shr_i64:
+    case INDEX_op_sar_i32:
+    case INDEX_op_sar_i64:
+    case INDEX_op_rotl_i32:
+    case INDEX_op_rotl_i64:
+    case INDEX_op_rotr_i32:
+    case INDEX_op_rotr_i64:
+        /* TODO: Does R, RI, RI result in faster code than R, R, RI? */
+        return C_O1_I2(r, ri, ri);
+
+    case INDEX_op_deposit_i32:
+    case INDEX_op_deposit_i64:
+        return C_O1_I2(r, 0, r);
+
+    case INDEX_op_brcond_i32:
+    case INDEX_op_brcond_i64:
+        return C_O0_I2(r, ri);
+
+    case INDEX_op_setcond_i32:
+    case INDEX_op_setcond_i64:
+        return C_O1_I2(r, r, ri);
+
+#if TCG_TARGET_REG_BITS == 32
+    /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
+    case INDEX_op_add2_i32:
+    case INDEX_op_sub2_i32:
+        return C_O2_I4(r, r, r, r, r, r);
+    case INDEX_op_brcond2_i32:
+        return C_O0_I4(r, r, ri, ri);
+    case INDEX_op_mulu2_i32:
+        return C_O2_I2(r, r, r, r);
+    case INDEX_op_setcond2_i32:
+        return C_O1_I4(r, r, r, ri, ri);
+#endif
+
+    case INDEX_op_qemu_ld_i32:
+        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
+                ? C_O1_I1(r, r)
+                : C_O1_I2(r, r, r));
+    case INDEX_op_qemu_ld_i64:
+        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r)
+                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, r)
+                : C_O2_I2(r, r, r, r));
+    case INDEX_op_qemu_st_i32:
+        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
+                ? C_O0_I2(r, r)
+                : C_O0_I3(r, r, r));
+    case INDEX_op_qemu_st_i64:
+        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r)
+                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(r, r, r)
+                : C_O0_I4(r, r, r, r));
+
+    default:
+        g_assert_not_reached();
+    }
 }
 
 static const int tcg_target_reg_alloc_order[] = {

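Note: the shape of the change above is a sparse table plus linear search being replaced by a switch that returns a small enum index, which drops the { -1 } sentinel and the risk of the table drifting out of sync with the opcode list. A self-contained sketch of that before/after lookup pattern, with invented opcode and set names standing in for the tcg ones:

    #include <stdio.h>
    #include <stdlib.h>

    typedef enum { OP_ADD, OP_LOAD, OP_STORE } Opcode;
    typedef enum { SET_O1_I2_r_ri_ri, SET_O1_I1_r_r, SET_O0_I2_r_r } SetIndex;

    /* Old shape: a sparse table keyed by opcode, scanned linearly. */
    static const struct { Opcode op; SetIndex set; } op_defs[] = {
        { OP_ADD,   SET_O1_I2_r_ri_ri },
        { OP_LOAD,  SET_O1_I1_r_r },
        { OP_STORE, SET_O0_I2_r_r },
    };

    static SetIndex lookup_old(Opcode op)
    {
        for (size_t i = 0; i < sizeof(op_defs) / sizeof(op_defs[0]); ++i) {
            if (op_defs[i].op == op) {
                return op_defs[i].set;
            }
        }
        abort();    /* missing entry; the old tci code returned NULL */
    }

    /* New shape: a direct switch, with nothing to scan or keep in sync. */
    static SetIndex lookup_new(Opcode op)
    {
        switch (op) {
        case OP_ADD:
            return SET_O1_I2_r_ri_ri;
        case OP_LOAD:
            return SET_O1_I1_r_r;
        case OP_STORE:
            return SET_O0_I2_r_r;
        default:
            abort();    /* stands in for g_assert_not_reached() */
        }
    }

    int main(void)
    {
        /* Both lookups agree; prints "2 2". */
        printf("%d %d\n", lookup_old(OP_STORE), lookup_new(OP_STORE));
        return 0;
    }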
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
@@ -207,4 +207,6 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
     /* no need to flush icache explicitly */
 }
 
+#define TCG_TARGET_CON_SET_H
+
 #endif /* TCG_TARGET_H */