target/arm: Implement MVE VMLAS
Implement the MVE VMLAS insn, which multiplies a vector by a vector and adds a scalar.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
c386443b16
commit
6b895bf8fb
@ -347,6 +347,10 @@ DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i3
|
||||
DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
|
||||
DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
|
||||
|
||||
/*
 * MVE VMLAS: multiply vector by vector and add scalar, one helper per
 * element size (b = 8-bit, h = 16-bit, w = 32-bit).
 */
DEF_HELPER_FLAGS_4(mve_vmlasb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vmlash, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vmlasw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
|
||||
DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
|
||||
DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
|
||||
|
@ -345,6 +345,9 @@ VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
|
||||
VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
|
||||
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
|
||||
|
||||
# The U bit (28) is don't-care because it does not affect the result
VMLAS 111- 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
|
||||
|
||||
# Vector add across vector
|
||||
{
|
||||
VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
|
||||
|
@ -948,6 +948,22 @@ DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
|
||||
mve_advance_vpt(env); \
|
||||
}
|
||||
|
||||
/*
 * "Accumulating" version of the 2-op-scalar expander: unlike
 * DO_2OP_SCALAR, here FN is passed the current destination element d
 * as well as n and the scalar m, so insns like VMLAS can compute
 * d = FN(d, n, m) per element, merged under the predication mask.
 */
#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask);     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
|
||||
|
||||
/* provide unsigned 2-op scalar helpers for all sizes */
|
||||
#define DO_2OP_SCALAR_U(OP, FN) \
|
||||
DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
|
||||
@ -958,6 +974,11 @@ DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
|
||||
DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
|
||||
DO_2OP_SCALAR(OP##w, 4, int32_t, FN)
|
||||
|
||||
/* Provide unsigned accumulating 2-op scalar helpers for all element sizes */
#define DO_2OP_ACC_SCALAR_U(OP, FN)             \
    DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN)    \
    DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN)   \
    DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)
|
||||
|
||||
DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
|
||||
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
|
||||
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
|
||||
@ -987,6 +1008,11 @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
|
||||
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
|
||||
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
|
||||
|
||||
/*
 * Vector by vector plus scalar: result element is (n * d) + m,
 * i.e. VMLAS computes Qda = Qda * Qn + Rm per element.
 */
#define DO_VMLAS(D, N, M) ((N) * (D) + (M))
|
||||
|
||||
/* Instantiate the mve_vmlasb/h/w helpers over unsigned element types */
DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)
|
||||
|
||||
/*
|
||||
* Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
|
||||
* input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
|
||||
|
@ -596,6 +596,7 @@ DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar)
|
||||
DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
|
||||
DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
|
||||
DO_2OP_SCALAR(VBRSR, vbrsr)
|
||||
/* VMLAS: multiply vector by vector and add scalar */
DO_2OP_SCALAR(VMLAS, vmlas)
|
||||
|
||||
static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user