target/arm: Convert scalar [US]QSHRN, [US]QRSHRN, SQSHRUN to decodetree

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20240912024114.1097832-30-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Author: Richard Henderson, 2024-09-11 19:41:14 -07:00; committed by Peter Maydell
Commit: f21b07e272 (parent: a3b6578f38)
2 changed files with 63 additions and 127 deletions

target/arm/tcg/a64.decode

@@ -1328,6 +1328,12 @@ SQRSHRUN_v 0.10 11110 .... ... 10001 1 ..... ..... @q_shri_s
# Advanced SIMD scalar shift by immediate
@shri_b .... ..... 0001 ... ..... . rn:5 rd:5 \
&rri_e esz=0 imm=%neon_rshift_i3
@shri_h .... ..... 001 .... ..... . rn:5 rd:5 \
&rri_e esz=1 imm=%neon_rshift_i4
@shri_s .... ..... 01 ..... ..... . rn:5 rd:5 \
&rri_e esz=2 imm=%neon_rshift_i5
@shri_d .... ..... 1 ...... ..... . rn:5 rd:5 \
&rri_e esz=3 imm=%neon_rshift_i6
@@ -1363,3 +1369,27 @@ SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_b
SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_h
SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_s
SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_d
SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_b
SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_h
SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_s
UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_b
UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_h
UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_s
SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_b
SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_h
SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_s
SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_b
SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_h
SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_s
UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_b
UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_h
UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_s
SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_b
SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_h
SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_s
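
The new @shri_b/@shri_h/@shri_s formats recover the right-shift amount from immh:immb through the %neon_rshift_* fields. As an aside (not part of the patch), the sketch below spells out the arithmetic those extractions encode, matching the shift = (2 * esize) - immhb computation in the legacy code removed further down; the helper name is made up for illustration:

```c
#include <assert.h>

/*
 * Sketch only: derive the narrowing right-shift amount from immh:immb.
 * A source element of 2*esize bits is narrowed to esize bits, and the
 * encoded shift is (2 * esize) - immhb, i.e. an "rsub"-style extraction.
 */
static int narrowing_shift(int immh, int immb)
{
    int immhb = (immh << 3) | immb;
    int esize;

    assert(immh != 0 && immh < 8);   /* immh<3> set is not a narrowing encoding */
    if (immh & 4) {
        esize = 32;                  /* @shri_s: 32-bit result, 64-bit source */
    } else if (immh & 2) {
        esize = 16;                  /* @shri_h */
    } else {
        esize = 8;                   /* @shri_b */
    }
    return (2 * esize) - immhb;      /* yields 1 .. esize */
}
```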

target/arm/tcg/translate-a64.c

@@ -7456,6 +7456,35 @@ TRANS(SQSHL_si, do_scalar_shift_imm, a, f_scalar_sqshli[a->esz], false, 0)
TRANS(UQSHL_si, do_scalar_shift_imm, a, f_scalar_uqshli[a->esz], false, 0)
TRANS(SQSHLU_si, do_scalar_shift_imm, a, f_scalar_sqshlui[a->esz], false, 0)
static bool do_scalar_shift_imm_narrow(DisasContext *s, arg_rri_e *a,
                                       WideShiftImmFn * const fns[3],
                                       MemOp sign, bool zext)
{
    MemOp esz = a->esz;

    tcg_debug_assert(esz >= MO_8 && esz <= MO_32);

    if (fp_access_check(s)) {
        TCGv_i64 rd = tcg_temp_new_i64();
        TCGv_i64 rn = tcg_temp_new_i64();

        read_vec_element(s, rn, a->rn, 0, (esz + 1) | sign);
        fns[esz](rd, rn, a->imm);
        if (zext) {
            tcg_gen_ext_i64(rd, rd, esz);
        }
        write_fp_dreg(s, a->rd, rd);
    }
    return true;
}

TRANS(SQSHRN_si, do_scalar_shift_imm_narrow, a, sqshrn_fns, MO_SIGN, true)
TRANS(SQRSHRN_si, do_scalar_shift_imm_narrow, a, sqrshrn_fns, MO_SIGN, true)
TRANS(UQSHRN_si, do_scalar_shift_imm_narrow, a, uqshrn_fns, 0, false)
TRANS(UQRSHRN_si, do_scalar_shift_imm_narrow, a, uqrshrn_fns, 0, false)
TRANS(SQSHRUN_si, do_scalar_shift_imm_narrow, a, sqshrun_fns, MO_SIGN, false)
TRANS(SQRSHRUN_si, do_scalar_shift_imm_narrow, a, sqrshrun_fns, MO_SIGN, false)
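
For orientation (not part of the patch): the new scalar path reads one widened source element, applies the per-size narrowing function, and for the unsigned-input forms zero-extends the result to the destination element size before writing it with the high bits cleared. A minimal standalone sketch of what, for example, SQSHRN Bd, Hn, #shift computes numerically; the function name is hypothetical, and the real translation goes through the sqshrn_fns helpers, which also set the QC flag on saturation:

```c
#include <stdint.h>

/* Sketch of the SQSHRN arithmetic for a 16-bit source narrowed to 8 bits:
 * arithmetic shift right, then signed saturation to the narrow range.
 * The rounding variant (SQRSHRN) would add (1 << (shift - 1)) before the
 * shift, computed at wider precision. */
static int8_t sqshrn_h_to_b(int16_t src, unsigned shift /* 1..8 */)
{
    int32_t v = src >> shift;
    if (v > INT8_MAX) {
        return INT8_MAX;    /* saturated high */
    }
    if (v < INT8_MIN) {
        return INT8_MIN;    /* saturated low */
    }
    return (int8_t)v;
}
```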
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
@@ -9635,119 +9664,6 @@ static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
    }
}
/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting code and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    bool round, bool accumulate,
                                    bool is_u, int size, int shift)
{
    if (!round) {
        if (is_u) {
            gen_ushr_d(tcg_src, tcg_src, shift);
        } else {
            gen_sshr_d(tcg_src, tcg_src, shift);
        }
    } else if (size == MO_64) {
        if (is_u) {
            gen_urshr_d(tcg_src, tcg_src, shift);
        } else {
            gen_srshr_d(tcg_src, tcg_src, shift);
        }
    } else {
        if (is_u) {
            gen_urshr_bhs(tcg_src, tcg_src, shift);
        } else {
            gen_srshr_bhs(tcg_src, tcg_src, shift);
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }
}
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;

    static NeonGenOne64OpEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenOne64OpEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenOne64OpEnvFn *narrowfn;
    int i;

    assert(size < 4);

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd, tcg_env, tcg_rd);
        if (i == 0) {
            tcg_gen_extract_i64(tcg_final, tcg_rd, 0, esize);
        } else {
            tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
        }
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
@@ -10013,20 +9929,6 @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
@@ -10039,6 +9941,10 @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
    case 0x0a: /* SHL / SLI */
    case 0x0c: /* SQSHLU */
    case 0x0e: /* SQSHL, UQSHL */
    case 0x10: /* SQSHRUN */
    case 0x11: /* SQRSHRUN */
    case 0x12: /* SQSHRN, UQSHRN */
    case 0x13: /* SQRSHRN, UQRSHRN */
        unallocated_encoding(s);
        break;
    }