target/arm: Convert CMGT, CMHI, CMGE, CMHS, CMTST, CMEQ to decodetree

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20240528203044.612851-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Richard Henderson 2024-05-28 13:30:28 -07:00 committed by Peter Maydell
parent 6c9bccf52f
commit 649005fd3a
2 changed files with 60 additions and 84 deletions

View File

@@ -767,6 +767,12 @@ UQRSHL_s 0111 1110 ..1 ..... 01011 1 ..... ..... @rrr_e
ADD_s 0101 1110 111 ..... 10000 1 ..... ..... @rrr_d ADD_s 0101 1110 111 ..... 10000 1 ..... ..... @rrr_d
SUB_s 0111 1110 111 ..... 10000 1 ..... ..... @rrr_d SUB_s 0111 1110 111 ..... 10000 1 ..... ..... @rrr_d
CMGT_s 0101 1110 111 ..... 00110 1 ..... ..... @rrr_d
CMHI_s 0111 1110 111 ..... 00110 1 ..... ..... @rrr_d
CMGE_s 0101 1110 111 ..... 00111 1 ..... ..... @rrr_d
CMHS_s 0111 1110 111 ..... 00111 1 ..... ..... @rrr_d
CMTST_s 0101 1110 111 ..... 10001 1 ..... ..... @rrr_d
CMEQ_s 0111 1110 111 ..... 10001 1 ..... ..... @rrr_d
### Advanced SIMD scalar pairwise ### Advanced SIMD scalar pairwise
@@ -900,6 +906,12 @@ UQRSHL_v 0.10 1110 ..1 ..... 01011 1 ..... ..... @qrrr_e
ADD_v 0.00 1110 ..1 ..... 10000 1 ..... ..... @qrrr_e ADD_v 0.00 1110 ..1 ..... 10000 1 ..... ..... @qrrr_e
SUB_v 0.10 1110 ..1 ..... 10000 1 ..... ..... @qrrr_e SUB_v 0.10 1110 ..1 ..... 10000 1 ..... ..... @qrrr_e
CMGT_v 0.00 1110 ..1 ..... 00110 1 ..... ..... @qrrr_e
CMHI_v 0.10 1110 ..1 ..... 00110 1 ..... ..... @qrrr_e
CMGE_v 0.00 1110 ..1 ..... 00111 1 ..... ..... @qrrr_e
CMHS_v 0.10 1110 ..1 ..... 00111 1 ..... ..... @qrrr_e
CMTST_v 0.00 1110 ..1 ..... 10001 1 ..... ..... @qrrr_e
CMEQ_v 0.10 1110 ..1 ..... 10001 1 ..... ..... @qrrr_e
### Advanced SIMD scalar x indexed element ### Advanced SIMD scalar x indexed element

View File

@@ -5180,6 +5180,24 @@ static const ENVScalar2 f_scalar_uqrshl = {
}; };
TRANS(UQRSHL_s, do_env_scalar2, a, &f_scalar_uqrshl) TRANS(UQRSHL_s, do_env_scalar2, a, &f_scalar_uqrshl)
static bool do_cmop_d(DisasContext *s, arg_rrr_e *a, TCGCond cond)
{
    /*
     * Scalar 64-bit integer compare: write -1 to Rd if the comparison
     * of the low doublewords of Rn and Rm holds, else 0.
     */
    if (!fp_access_check(s)) {
        return true;
    }
    TCGv_i64 lhs = read_fp_dreg(s, a->rn);
    TCGv_i64 rhs = read_fp_dreg(s, a->rm);

    tcg_gen_negsetcond_i64(cond, lhs, lhs, rhs);
    write_fp_dreg(s, a->rd, lhs);
    return true;
}
TRANS(CMGT_s, do_cmop_d, a, TCG_COND_GT)
TRANS(CMHI_s, do_cmop_d, a, TCG_COND_GTU)
TRANS(CMGE_s, do_cmop_d, a, TCG_COND_GE)
TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU)
TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ)
TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE)
static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a,
gen_helper_gvec_3_ptr * const fns[3]) gen_helper_gvec_3_ptr * const fns[3])
{ {
@@ -5437,6 +5455,28 @@ TRANS(UQRSHL_v, do_gvec_fn3, a, gen_neon_uqrshl)
TRANS(ADD_v, do_gvec_fn3, a, tcg_gen_gvec_add) TRANS(ADD_v, do_gvec_fn3, a, tcg_gen_gvec_add)
TRANS(SUB_v, do_gvec_fn3, a, tcg_gen_gvec_sub) TRANS(SUB_v, do_gvec_fn3, a, tcg_gen_gvec_sub)
static bool do_cmop_v(DisasContext *s, arg_qrrr_e *a, TCGCond cond)
{
    /*
     * Vector integer compare: each element of Rd becomes -1 if the
     * comparison of the corresponding Rn/Rm elements holds, else 0.
     * 64-bit elements are only valid in the 128-bit (Q=1) form.
     */
    if (a->esz == MO_64 && !a->q) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }
    unsigned opr_sz = a->q ? 16 : 8;

    tcg_gen_gvec_cmp(cond, a->esz,
                     vec_full_reg_offset(s, a->rd),
                     vec_full_reg_offset(s, a->rn),
                     vec_full_reg_offset(s, a->rm),
                     opr_sz, vec_full_reg_size(s));
    return true;
}
TRANS(CMGT_v, do_cmop_v, a, TCG_COND_GT)
TRANS(CMHI_v, do_cmop_v, a, TCG_COND_GTU)
TRANS(CMGE_v, do_cmop_v, a, TCG_COND_GE)
TRANS(CMHS_v, do_cmop_v, a, TCG_COND_GEU)
TRANS(CMEQ_v, do_cmop_v, a, TCG_COND_EQ)
TRANS(CMTST_v, do_gvec_fn3, a, gen_gvec_cmtst)
/* /*
* Advanced SIMD scalar/vector x indexed element * Advanced SIMD scalar/vector x indexed element
*/ */
@@ -9421,45 +9461,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
} }
} }
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x6: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        break;
    case 0x11: /* CMTST, CMEQ */
        if (!u) {
            /* CMTST: set each result bit lane to -1 if (rn & rm) != 0 */
            gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
            return;
        }
        cond = TCG_COND_EQ;
        break;
    /* The remaining opcodes are handled by decodetree, never here.  */
    default:
    case 0x1: /* SQADD / UQADD */
    case 0x5: /* SQSUB / UQSUB */
    case 0x8: /* SSHL, USHL */
    case 0x9: /* SQSHL, UQSHL */
    case 0xa: /* SRSHL, URSHL */
    case 0xb: /* SQRSHL, UQRSHL */
    case 0x10: /* ADD, SUB */
        g_assert_not_reached();
    }

    /* 64 bit integer comparison, result = test ? -1 : 0. */
    tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
}
/* AdvSIMD scalar three same /* AdvSIMD scalar three same
* 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
* +-----+---+-----------+------+---+------+--------+---+------+------+ * +-----+---+-----------+------+---+------+--------+---+------+------+
@@ -9477,14 +9478,6 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
TCGv_i64 tcg_rd; TCGv_i64 tcg_rd;
switch (opcode) { switch (opcode) {
case 0x6: /* CMGT, CMHI */
case 0x7: /* CMGE, CMHS */
case 0x11: /* CMTST, CMEQ */
if (size != 3) {
unallocated_encoding(s);
return;
}
break;
case 0x16: /* SQDMULH, SQRDMULH (vector) */ case 0x16: /* SQDMULH, SQRDMULH (vector) */
if (size != 1 && size != 2) { if (size != 1 && size != 2) {
unallocated_encoding(s); unallocated_encoding(s);
@@ -9494,11 +9487,14 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
default: default:
case 0x1: /* SQADD, UQADD */ case 0x1: /* SQADD, UQADD */
case 0x5: /* SQSUB, UQSUB */ case 0x5: /* SQSUB, UQSUB */
case 0x6: /* CMGT, CMHI */
case 0x7: /* CMGE, CMHS */
case 0x8: /* SSHL, USHL */ case 0x8: /* SSHL, USHL */
case 0x9: /* SQSHL, UQSHL */ case 0x9: /* SQSHL, UQSHL */
case 0xa: /* SRSHL, URSHL */ case 0xa: /* SRSHL, URSHL */
case 0xb: /* SQRSHL, UQRSHL */ case 0xb: /* SQRSHL, UQRSHL */
case 0x10: /* ADD, SUB (vector) */ case 0x10: /* ADD, SUB (vector) */
case 0x11: /* CMTST, CMEQ */
unallocated_encoding(s); unallocated_encoding(s);
return; return;
} }
@@ -9510,10 +9506,7 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
tcg_rd = tcg_temp_new_i64(); tcg_rd = tcg_temp_new_i64();
if (size == 3) { if (size == 3) {
TCGv_i64 tcg_rn = read_fp_dreg(s, rn); g_assert_not_reached();
TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
} else { } else {
/* Do a single operation on the lowest element in the vector. /* Do a single operation on the lowest element in the vector.
* We use the standard Neon helpers and rely on 0 OP 0 == 0 with * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
@@ -10919,7 +10912,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
int rn = extract32(insn, 5, 5); int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5); int rd = extract32(insn, 0, 5);
int pass; int pass;
TCGCond cond;
switch (opcode) { switch (opcode) {
case 0x13: /* MUL, PMUL */ case 0x13: /* MUL, PMUL */
@@ -10956,11 +10948,14 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
case 0x01: /* SQADD, UQADD */ case 0x01: /* SQADD, UQADD */
case 0x05: /* SQSUB, UQSUB */ case 0x05: /* SQSUB, UQSUB */
case 0x06: /* CMGT, CMHI */
case 0x07: /* CMGE, CMHS */
case 0x08: /* SSHL, USHL */ case 0x08: /* SSHL, USHL */
case 0x09: /* SQSHL, UQSHL */ case 0x09: /* SQSHL, UQSHL */
case 0x0a: /* SRSHL, URSHL */ case 0x0a: /* SRSHL, URSHL */
case 0x0b: /* SQRSHL, UQRSHL */ case 0x0b: /* SQRSHL, UQRSHL */
case 0x10: /* ADD, SUB */ case 0x10: /* ADD, SUB */
case 0x11: /* CMTST, CMEQ */
unallocated_encoding(s); unallocated_encoding(s);
return; return;
} }
@@ -11021,41 +11016,10 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]); gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
} }
return; return;
case 0x11:
if (!u) { /* CMTST */
gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
return;
}
/* else CMEQ */
cond = TCG_COND_EQ;
goto do_gvec_cmp;
case 0x06: /* CMGT, CMHI */
cond = u ? TCG_COND_GTU : TCG_COND_GT;
goto do_gvec_cmp;
case 0x07: /* CMGE, CMHS */
cond = u ? TCG_COND_GEU : TCG_COND_GE;
do_gvec_cmp:
tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
vec_full_reg_offset(s, rm),
is_q ? 16 : 8, vec_full_reg_size(s));
return;
} }
if (size == 3) { if (size == 3) {
assert(is_q); g_assert_not_reached();
for (pass = 0; pass < 2; pass++) {
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
TCGv_i64 tcg_op2 = tcg_temp_new_i64();
TCGv_i64 tcg_res = tcg_temp_new_i64();
read_vec_element(s, tcg_op1, rn, pass, MO_64);
read_vec_element(s, tcg_op2, rm, pass, MO_64);
handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
write_vec_element(s, tcg_res, rd, pass, MO_64);
}
} else { } else {
for (pass = 0; pass < (is_q ? 4 : 2); pass++) { for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
TCGv_i32 tcg_op1 = tcg_temp_new_i32(); TCGv_i32 tcg_op1 = tcg_temp_new_i32();