target/arm: Implement MVE shifts by immediate

Implement the MVE shifts by immediate, which perform shifts
on a single general-purpose register.

These patterns overlap with the long-shift-by-immediates,
so we have to rearrange the grouping a little here.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-18-peter.maydell@linaro.org
commit 46321d47a9
parent 0aa4b4c358
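
(Background, not part of the patch: the regrouping works because decodetree tries the patterns inside a "{ ... }" overlap group in order, and a pattern whose trans_* function returns false simply falls through to the next candidate. The stand-alone C sketch below, with invented names, models that behaviour; the real groups are in the t32.decode hunk further down.)

/*
 * Illustration only -- not QEMU code.  Models how a decodetree
 * "{ ... }" overlap group behaves: try each pattern in order; a
 * pattern only claims the instruction if its fixed bits match AND
 * its trans_* function returns true, otherwise fall through.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef bool (*trans_fn)(uint32_t insn);

struct pattern {
    uint32_t fixedmask;  /* bits the encoding pins down */
    uint32_t fixedbits;  /* required values of those bits */
    trans_fn trans;      /* translator; false means "not really mine" */
};

static bool try_overlap_group(const struct pattern *pats, size_t n,
                              uint32_t insn)
{
    for (size_t i = 0; i < n; i++) {
        if ((insn & pats[i].fixedmask) == pats[i].fixedbits &&
            pats[i].trans(insn)) {
            return true;    /* first successful pattern wins */
        }
    }
    return false;           /* nothing claimed it: outer group continues */
}

Inside each new group the single-register pattern is listed first, so it is tried before the long-shift pattern it overlaps with.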
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -458,3 +458,6 @@ DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
+
+DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1628,3 +1628,13 @@ uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
 {
     return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
 }
+
+uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
+
+uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
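
(Illustration, not part of the patch: the two new helpers delegate to the existing do_uqrshl_bhs/do_sqrshl_bhs routines, which this diff does not show. Roughly, what they compute for the immediate forms is a 32-bit left shift that saturates instead of wrapping, setting the sticky QF flag on saturation; a plain-C model with made-up names might look like this.)

/* Illustration only: approximate semantics of the new UQSHL/SQSHL helpers,
 * i.e. a 32-bit left shift that saturates instead of wrapping and records
 * saturation in a cumulative flag (QF in CPUARMState). */
#include <stdbool.h>
#include <stdint.h>

static uint32_t model_uqshl32(uint32_t n, unsigned sh, bool *qf)
{
    uint64_t r = (uint64_t)n << sh;      /* sh expected in 1..32 */
    if (r > UINT32_MAX) {
        *qf = true;                      /* saturated: set the sticky flag */
        return UINT32_MAX;
    }
    return (uint32_t)r;
}

static int32_t model_sqshl32(int32_t n, unsigned sh, bool *qf)
{
    /* multiply instead of left-shifting a negative value (UB in C) */
    int64_t r = (int64_t)n * ((int64_t)1 << sh);
    if (r > INT32_MAX) {
        *qf = true;
        return INT32_MAX;
    }
    if (r < INT32_MIN) {
        *qf = true;
        return INT32_MIN;
    }
    return (int32_t)r;
}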
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -50,6 +50,7 @@
 
 &mve_shl_ri      rdalo rdahi shim
 &mve_shl_rr      rdalo rdahi rm
+&mve_sh_ri       rda shim
 
 # rdahi: bits [3:1] from insn, bit 0 is 1
 # rdalo: bits [3:1] from insn, bit 0 is 0
@@ -71,6 +72,8 @@
                  &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
 @mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
                  &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
+@mve_sh_ri  ....... .... . rda:4 . ... ... . .. .. .... \
+                 &mve_sh_ri shim=%imm5_12_6
 
 {
   TST_xrri       1110101 0000 1 .... 0 ... 1111 .... ....    @S_xrr_shi
@@ -86,14 +89,28 @@ BIC_rrri         1110101 0001 . .... 0 ... .... .... ....    @s_rrr_shi
 # the rest fall through (where ORR_rrri and MOV_rxri will end up
 # handling them as r13 and r15 accesses with the same semantics as A32).
 [
+  {
+    UQSHL_ri     1110101 0010 1 .... 0 ... 1111 .. 00 1111    @mve_sh_ri
   LSLL_ri        1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111  @mve_shl_ri
-  LSRL_ri        1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111  @mve_shl_ri
-  ASRL_ri        1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111  @mve_shl_ri
-
   UQSHLL_ri      1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111  @mve_shl_ri
+  }
+
+  {
+    URSHR_ri     1110101 0010 1 .... 0 ... 1111 .. 01 1111    @mve_sh_ri
+    LSRL_ri      1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111  @mve_shl_ri
   URSHRL_ri      1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111  @mve_shl_ri
+  }
+
+  {
+    SRSHR_ri     1110101 0010 1 .... 0 ... 1111 .. 10 1111    @mve_sh_ri
+    ASRL_ri      1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111  @mve_shl_ri
   SRSHRL_ri      1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111  @mve_shl_ri
+  }
+
+  {
+    SQSHL_ri     1110101 0010 1 .... 0 ... 1111 .. 11 1111    @mve_sh_ri
   SQSHLL_ri      1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111  @mve_shl_ri
+  }
 
   LSLL_rr        1110101 0010 1 ... 0 .... ... 1 0000 1101    @mve_shl_rr
   ASRL_rr        1110101 0010 1 ... 0 .... ... 1 0010 1101    @mve_shl_rr
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3218,8 +3218,14 @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
 
 static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;
 
+    /* Handle shift by the input size for the benefit of trans_SRSHR_ri */
+    if (sh == 32) {
+        tcg_gen_movi_i32(d, 0);
+        return;
+    }
+    t = tcg_temp_new_i32();
     tcg_gen_extract_i32(t, a, sh - 1, 1);
     tcg_gen_sari_i32(d, a, sh);
     tcg_gen_add_i32(d, d, t);
@@ -3419,8 +3425,14 @@ static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
 
 static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;
 
+    /* Handle shift by the input size for the benefit of trans_URSHR_ri */
+    if (sh == 32) {
+        tcg_gen_extract_i32(d, a, sh - 1, 1);
+        return;
+    }
+    t = tcg_temp_new_i32();
     tcg_gen_extract_i32(t, a, sh - 1, 1);
     tcg_gen_shri_i32(d, a, sh);
     tcg_gen_add_i32(d, d, t);
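
(Aside, not part of the patch: both functions above now special-case a shift count of 32, which the new @mve_sh_ri encoding can produce because do_mve_sh_ri in the next hunk maps shim == 0 to 32. A rough plain-C restatement of the rounding shifts the generated TCG ops compute, with invented names and assuming the usual arithmetic behaviour of >> on signed values:)

/* Illustration only: C model of the rounding right shifts that
 * gen_urshr32_i32/gen_srshr32_i32 emit as TCG ops -- shift right by
 * sh (1..32) and add back the last bit shifted out. */
#include <stdint.h>

static uint32_t model_urshr32(uint32_t n, unsigned sh)
{
    if (sh == 32) {
        return n >> 31;                     /* only the rounding bit survives */
    }
    uint32_t round = (n >> (sh - 1)) & 1;   /* bit that falls off the end */
    return (n >> sh) + round;
}

static int32_t model_srshr32(int32_t n, unsigned sh)
{
    if (sh == 32) {
        return 0;                           /* rounds to zero for any input */
    }
    uint32_t round = ((uint32_t)n >> (sh - 1)) & 1;
    return (n >> sh) + (int32_t)round;      /* assumes arithmetic >> on int */
}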
@@ -5861,6 +5873,58 @@ static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
     return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
 }
 
+static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15) {
+        /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (a->shim == 0) {
+        a->shim = 32;
+    }
+    fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
+
+    return true;
+}
+
+static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_urshr32_i32);
+}
+
+static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_srshr32_i32);
+}
+
+static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_sqshl);
+}
+
+static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_uqshl);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -466,6 +466,7 @@ typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
 typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
 typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
+typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
 
 /**
  * arm_tbflags_from_tb: