target/arm: Convert handle_vec_simd_shri to decodetree

This includes SSHR, USHR, SSRA, USRA, SRSHR, URSHR, SRSRA, URSRA, SRI.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20240912024114.1097832-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Richard Henderson, 2024-09-11 19:41:01 -07:00, committed by Peter Maydell
parent da457c9356
commit 6e74165564
2 changed files with 89 additions and 60 deletions

--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode

@@ -34,6 +34,7 @@
 &rrx_e          rd rn rm idx esz
 &rrrr_e         rd rn rm ra esz
 &qrr_e          q rd rn esz
+&qrri_e         q rd rn imm esz
 &qrrr_e         q rd rn rm esz
 &qrrx_e         q rd rn rm idx esz
 &qrrrr_e        q rd rn rm ra esz
@@ -1185,11 +1186,71 @@ FMINV_s         0110 1110 10 11000 01111 10 ..... ..... @rr_q1e2
 
 FMOVI_s         0001 1110 .. 1 imm:8 100 00000 rd:5 esz=%esz_hsd
 
-# Advanced SIMD Modified Immediate
+# Advanced SIMD Modified Immediate / Shift by Immediate
 
 %abcdefgh       16:3 5:5
 
+# Right shifts are encoded as N - shift, where N is the element size in bits.
+%neon_rshift_i6 16:6 !function=rsub_64
+%neon_rshift_i5 16:5 !function=rsub_32
+%neon_rshift_i4 16:4 !function=rsub_16
+%neon_rshift_i3 16:3 !function=rsub_8
+
+@q_shri_b       . q:1 .. ..... 0001 ... ..... . rn:5 rd:5 \
+                &qrri_e esz=0 imm=%neon_rshift_i3
+@q_shri_h       . q:1 .. ..... 001 .... ..... . rn:5 rd:5 \
+                &qrri_e esz=1 imm=%neon_rshift_i4
+@q_shri_s       . q:1 .. ..... 01 ..... ..... . rn:5 rd:5 \
+                &qrri_e esz=2 imm=%neon_rshift_i5
+@q_shri_d       . 1 .. ..... 1 ...... ..... . rn:5 rd:5 \
+                &qrri_e esz=3 imm=%neon_rshift_i6 q=1
+
 FMOVI_v_h       0 q:1 00 1111 00000 ... 1111 11 ..... rd:5 %abcdefgh
 
 # MOVI, MVNI, ORR, BIC, FMOV are all intermixed via cmode.
 Vimm            0 q:1 op:1 0 1111 00000 ... cmode:4 01 ..... rd:5 %abcdefgh
+
+SSHR_v          0.00 11110 .... ... 00000 1 ..... ..... @q_shri_b
+SSHR_v          0.00 11110 .... ... 00000 1 ..... ..... @q_shri_h
+SSHR_v          0.00 11110 .... ... 00000 1 ..... ..... @q_shri_s
+SSHR_v          0.00 11110 .... ... 00000 1 ..... ..... @q_shri_d
+
+USHR_v          0.10 11110 .... ... 00000 1 ..... ..... @q_shri_b
+USHR_v          0.10 11110 .... ... 00000 1 ..... ..... @q_shri_h
+USHR_v          0.10 11110 .... ... 00000 1 ..... ..... @q_shri_s
+USHR_v          0.10 11110 .... ... 00000 1 ..... ..... @q_shri_d
+
+SSRA_v          0.00 11110 .... ... 00010 1 ..... ..... @q_shri_b
+SSRA_v          0.00 11110 .... ... 00010 1 ..... ..... @q_shri_h
+SSRA_v          0.00 11110 .... ... 00010 1 ..... ..... @q_shri_s
+SSRA_v          0.00 11110 .... ... 00010 1 ..... ..... @q_shri_d
+
+USRA_v          0.10 11110 .... ... 00010 1 ..... ..... @q_shri_b
+USRA_v          0.10 11110 .... ... 00010 1 ..... ..... @q_shri_h
+USRA_v          0.10 11110 .... ... 00010 1 ..... ..... @q_shri_s
+USRA_v          0.10 11110 .... ... 00010 1 ..... ..... @q_shri_d
+
+SRSHR_v         0.00 11110 .... ... 00100 1 ..... ..... @q_shri_b
+SRSHR_v         0.00 11110 .... ... 00100 1 ..... ..... @q_shri_h
+SRSHR_v         0.00 11110 .... ... 00100 1 ..... ..... @q_shri_s
+SRSHR_v         0.00 11110 .... ... 00100 1 ..... ..... @q_shri_d
+
+URSHR_v         0.10 11110 .... ... 00100 1 ..... ..... @q_shri_b
+URSHR_v         0.10 11110 .... ... 00100 1 ..... ..... @q_shri_h
+URSHR_v         0.10 11110 .... ... 00100 1 ..... ..... @q_shri_s
+URSHR_v         0.10 11110 .... ... 00100 1 ..... ..... @q_shri_d
+
+SRSRA_v         0.00 11110 .... ... 00110 1 ..... ..... @q_shri_b
+SRSRA_v         0.00 11110 .... ... 00110 1 ..... ..... @q_shri_h
+SRSRA_v         0.00 11110 .... ... 00110 1 ..... ..... @q_shri_s
+SRSRA_v         0.00 11110 .... ... 00110 1 ..... ..... @q_shri_d
+
+URSRA_v         0.10 11110 .... ... 00110 1 ..... ..... @q_shri_b
+URSRA_v         0.10 11110 .... ... 00110 1 ..... ..... @q_shri_h
+URSRA_v         0.10 11110 .... ... 00110 1 ..... ..... @q_shri_s
+URSRA_v         0.10 11110 .... ... 00110 1 ..... ..... @q_shri_d
+
+SRI_v           0.10 11110 .... ... 01000 1 ..... ..... @q_shri_b
+SRI_v           0.10 11110 .... ... 01000 1 ..... ..... @q_shri_h
+SRI_v           0.10 11110 .... ... 01000 1 ..... ..... @q_shri_s
+SRI_v           0.10 11110 .... ... 01000 1 ..... ..... @q_shri_d
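
The !function=rsub_* converters used by the @q_shri_* formats turn the encoded
immediate back into a shift count at decode time. A minimal sketch of the shape
such a converter takes (the real rsub_64/rsub_32/rsub_16/rsub_8 helpers live
with the target/arm translate code; this is shown only for illustration):

    /*
     * Decodetree field converter: the instruction encodes 64 - shift
     * in the six-bit immediate, so recover the actual shift amount
     * for 64-bit elements.
     */
    static int rsub_64(DisasContext *s, int x)
    {
        return 64 - x;
    }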

--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c

@@ -6959,6 +6959,28 @@ static bool trans_Vimm(DisasContext *s, arg_Vimm *a)
     return true;
 }
 
+/*
+ * Advanced SIMD Shift by Immediate
+ */
+
+static bool do_vec_shift_imm(DisasContext *s, arg_qrri_e *a, GVecGen2iFn *fn)
+{
+    if (fp_access_check(s)) {
+        gen_gvec_fn2i(s, a->q, a->rd, a->rn, a->imm, fn, a->esz);
+    }
+    return true;
+}
+
+TRANS(SSHR_v, do_vec_shift_imm, a, gen_gvec_sshr)
+TRANS(USHR_v, do_vec_shift_imm, a, gen_gvec_ushr)
+TRANS(SSRA_v, do_vec_shift_imm, a, gen_gvec_ssra)
+TRANS(USRA_v, do_vec_shift_imm, a, gen_gvec_usra)
+TRANS(SRSHR_v, do_vec_shift_imm, a, gen_gvec_srshr)
+TRANS(URSHR_v, do_vec_shift_imm, a, gen_gvec_urshr)
+TRANS(SRSRA_v, do_vec_shift_imm, a, gen_gvec_srsra)
+TRANS(URSRA_v, do_vec_shift_imm, a, gen_gvec_ursra)
+TRANS(SRI_v, do_vec_shift_imm, a, gen_gvec_sri)
+
 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
  * Note that it is the caller's responsibility to ensure that the
  * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
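
The TRANS() lines above are the usual decodetree glue in translate-a64.c: each
one generates a trans_<INSN> function that forwards to the shared helper with
the chosen gvec expander. Roughly, the first line expands to something like
this (arg_SSHR_v is the decodetree-generated argument struct, an alias of
arg_qrri_e):

    /* Approximate expansion of TRANS(SSHR_v, do_vec_shift_imm, a, gen_gvec_sshr) */
    static bool trans_SSHR_v(DisasContext *s, arg_SSHR_v *a)
    {
        return do_vec_shift_imm(s, a, gen_gvec_sshr);
    }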
@@ -10423,53 +10445,6 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
     }
 }
 
-/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
-static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
-                                 int immh, int immb, int opcode, int rn, int rd)
-{
-    int size = 32 - clz32(immh) - 1;
-    int immhb = immh << 3 | immb;
-    int shift = 2 * (8 << size) - immhb;
-    GVecGen2iFn *gvec_fn;
-
-    if (extract32(immh, 3, 1) && !is_q) {
-        unallocated_encoding(s);
-        return;
-    }
-    tcg_debug_assert(size <= 3);
-
-    if (!fp_access_check(s)) {
-        return;
-    }
-
-    switch (opcode) {
-    case 0x02: /* SSRA / USRA (accumulate) */
-        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
-        break;
-
-    case 0x08: /* SRI */
-        gvec_fn = gen_gvec_sri;
-        break;
-
-    case 0x00: /* SSHR / USHR */
-        gvec_fn = is_u ? gen_gvec_ushr : gen_gvec_sshr;
-        break;
-
-    case 0x04: /* SRSHR / URSHR (rounding) */
-        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
-        break;
-
-    case 0x06: /* SRSRA / URSRA (accum + rounding) */
-        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
-        break;
-
-    default:
-        g_assert_not_reached();
-    }
-
-    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
-}
-
 /* SHL/SLI - Vector shift left */
 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                  int immh, int immb, int opcode, int rn, int rd)
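
The removed arithmetic and the new decodetree fields agree: for 64-bit
elements immh<3> is set, so immhb = 64 + imm6 and the old shift =
2 * (8 << 3) - immhb = 64 - imm6, which is exactly what %neon_rshift_i6
(rsub_64 applied to insn[21:16]) produces. A standalone sketch checking that
equivalence (a hypothetical test program, not part of the patch):

    #include <assert.h>

    /* Old decode: shift = 2 * (8 << size) - immh:immb. */
    static int old_shift(int immh, int immb)
    {
        int size = 31 - __builtin_clz(immh);   /* highest set bit of immh */
        int immhb = immh << 3 | immb;
        return 2 * (8 << size) - immhb;
    }

    int main(void)
    {
        /* 64-bit elements: immh = 1xxx; the new decode reads imm6 = insn[21:16]. */
        for (int imm6 = 0; imm6 < 64; imm6++) {
            int immh = 8 | (imm6 >> 3);
            int immb = imm6 & 7;
            assert(old_shift(immh, immb) == 64 - imm6);  /* rsub_64(imm6) */
        }
        return 0;
    }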
@@ -10610,18 +10585,6 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
     }
 
     switch (opcode) {
-    case 0x08: /* SRI */
-        if (!is_u) {
-            unallocated_encoding(s);
-            return;
-        }
-        /* fall through */
-    case 0x00: /* SSHR / USHR */
-    case 0x02: /* SSRA / USRA (accumulate) */
-    case 0x04: /* SRSHR / URSHR (rounding) */
-    case 0x06: /* SRSRA / URSRA (accum + rounding) */
-        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
-        break;
     case 0x0a: /* SHL / SLI */
         handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
         break;
@@ -10660,6 +10623,11 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
         handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
         return;
     default:
+    case 0x00: /* SSHR / USHR */
+    case 0x02: /* SSRA / USRA (accumulate) */
+    case 0x04: /* SRSHR / URSHR (rounding) */
+    case 0x06: /* SRSRA / URSRA (accum + rounding) */
+    case 0x08: /* SRI */
         unallocated_encoding(s);
         return;
     }