qemu/target/arm/mve_helper.c
commit 39f2ec8592 (Peter Maydell): target/arm: Implement MVE VQADD and VQSUB

Implement the MVE VQADD and VQSUB insns, which perform saturating
addition or subtraction of a scalar to or from each element.  Note
that individual bytes of each result element are used or discarded
according to the predicate mask, but FPSCR.QC is only set if the
predicate mask for the lowest byte of the element is set.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-28-peter.maydell@linaro.org
2021-06-24 14:58:47 +01:00

/*
* M-profile MVE Operations
*
* Copyright (c) 2021 Linaro, Ltd.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        /* Avoid MAKE_64BIT_MASK(0, 0), which is undefined, when LR is 0 */
        mask &= masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
    }
    if ((env->condexec_bits & 0xf) == 0) {
        /*
         * ECI bits indicate which beats are already executed;
         * we handle this by effectively predicating them out.
         */
        int eci = env->condexec_bits >> 4;
        switch (eci) {
        case ECI_NONE:
            break;
        case ECI_A0:
            mask &= 0xfff0;
            break;
        case ECI_A0A1:
            mask &= 0xff00;
            break;
        case ECI_A0A1A2:
        case ECI_A0A1A2B0:
            mask &= 0xf000;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return mask;
}
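
/*
 * Editor's illustration (not part of the original file): the lowest
 * predicate bit for element e of an ESIZE-byte-element vector is bit
 * (e * ESIZE) of the mask returned above (so 32-bit ops consult bits
 * 0, 4, 8 and 12); the remaining bits of each ESIZE-bit slice predicate
 * the individual bytes of that element.  A minimal sketch of the
 * lowest-bit test used by the load/store helpers below (hypothetical
 * helper name):
 */
#if 0 /* illustrative only */
static bool example_element_lowest_bit(uint16_t mask, unsigned e,
                                       unsigned esize)
{
    return (mask >> (e * esize)) & 1;
}
#endif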

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    if (mask01 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff;
    }
    if (mask23 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff00;
    }
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}
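
/*
 * Editor's illustration (not part of the original file): a mechanical
 * trace of mve_advance_vpt() for one half of the mask.  If MASK01 is
 * 0b1100, the high bit is set but the value is not 0b1000, so the low
 * byte of P0 is inverted and MASK01 shifts to 0b1000; on the next
 * advance MASK01 is exactly 0b1000, so P0 is left alone and MASK01
 * shifts out to 0, which disables VPT predication for that half.
 */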

#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned   \
         * beats so we don't care if we update part of the dest and     \
         * then take an exception.                                      \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)
DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)
DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)
DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
#undef DO_VLDR
#undef DO_VSTR
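
/*
 * Editor's illustration (not part of the original file): in the widening
 * forms above, MSIZE is the stride through memory while ESIZE is the size
 * of the destination element, so e.g. vldrb_uh loads 8 consecutive bytes
 * and zero-extends each into a halfword lane, testing predicate bit
 * (e * ESIZE) for lane e.  A minimal sketch of that address/lane pairing,
 * using hypothetical names:
 */
#if 0 /* illustrative only */
static void example_vldrb_uh_layout(uint32_t addr, uint16_t mask)
{
    unsigned e;
    for (e = 0; e < 8; e++) {                 /* 8 halfword lanes */
        uint32_t byte_addr = addr + e;        /* MSIZE == 1 */
        bool active = mask & (1 << (e * 2));  /* ESIZE == 2 */
        (void)byte_addr;
        (void)active;
    }
}
#endif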

/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * storing only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */

static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M)                      \
    _Generic(D,                                 \
             uint8_t *:  mergemask_ub,          \
             int8_t *:   mergemask_sb,          \
             uint16_t *: mergemask_uh,          \
             int16_t *:  mergemask_sh,          \
             uint32_t *: mergemask_uw,          \
             int32_t *:  mergemask_sw,          \
             uint64_t *: mergemask_uq,          \
             int64_t *:  mergemask_sq)(D, R, M)
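
/*
 * Editor's illustration (not part of the original file): a minimal
 * mergemask() usage sketch (hypothetical function name).  With mask
 * bits 2 and 3 set (0xc), expand_pred_b_data[0xc] is 0xffff0000, so the
 * low half of *d is kept and the high half comes from r.
 */
#if 0 /* illustrative only */
static void example_mergemask_usage(void)
{
    uint32_t d = 0x11223344;
    mergemask(&d, 0xaabbccdd, 0xc);   /* d becomes 0xaabb3344 */
}
#endif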

void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}

#define DO_1OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask);       \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
#define DO_CLS_B(N) (clrsb32(N) - 24)
#define DO_CLS_H(N) (clrsb32(N) - 16)
DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)
#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)
DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)
DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)
#define DO_NOT(N) (~(N))
DO_1OP(vmvn, 8, uint64_t, DO_NOT)
#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))
DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)
/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)
#define DO_NEG(N) (-(N))
#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))
DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)
/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
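
/*
 * Editor's illustration (not part of the original file): the FP abs/neg
 * ops only mask or flip the sign bit of each lane, so they can be done
 * on 64-bit chunks regardless of element size, e.g.
 *     DO_FABSH(0x80007fff80010123) == 0x00007fff00010123
 * and byte-level predication still applies because mergemask_uq expands
 * each of the 8 mask bits in the slice to one byte lane.
 */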

#define DO_2OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask);        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN)                        \
    DO_2OP(OP##b, 1, uint8_t, FN)               \
    DO_2OP(OP##h, 2, uint16_t, FN)              \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN)                        \
    DO_2OP(OP##b, 1, int8_t, FN)                \
    DO_2OP(OP##h, 2, int16_t, FN)               \
    DO_2OP(OP##w, 4, int32_t, FN)

/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)               \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)],              \
                         m[H##ESIZE(le * 2 + TOP)]);                    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
#define DO_AND(N, M) ((N) & (M))
#define DO_BIC(N, M) ((N) & ~(M))
#define DO_ORR(N, M) ((N) | (M))
#define DO_ORN(N, M) ((N) | ~(M))
#define DO_EOR(N, M) ((N) ^ (M))
DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)
#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))
DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)
DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)
DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)
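
/*
 * Editor's illustration (not part of the original file): in DO_2OP_L the
 * index (le * 2 + TOP) selects which half-width inputs feed output lane
 * le, so the "bottom" forms (TOP == 0) consume input elements 0, 2, 4, ...
 * while the "top" forms (TOP == 1) consume elements 1, 3, 5, ...
 */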

/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}
DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)
DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
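
/*
 * Editor's illustration (not part of the original file): the widening in
 * the helpers above is what makes one helper serve both signednesses;
 * the caller's element TYPE decides whether the arguments arrive sign-
 * or zero-extended.  A minimal check sketch (hypothetical function name):
 */
#if 0 /* illustrative only */
static void example_mulh_signedness(void)
{
    /* unsigned bytes: 0xff * 0xff == 0xfe01, high byte 0xfe */
    assert(do_mulh_b((uint8_t)0xff, (uint8_t)0xff) == 0xfe);
    /* signed bytes: -1 * -1 == 1, high byte 0x00 */
    assert(do_mulh_b((int8_t)0xff, (int8_t)0xff) == 0x00);
}
#endif
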
#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))
DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)
#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))
DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)

static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    if (val > max) {
        *s = true;
        return max;
    } else if (val < min) {
        *s = true;
        return min;
    }
    return val;
}
#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)
#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)
#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)
#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask);    \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat),     \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN)       \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)

#define DO_2OP_SCALAR_S(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN)         \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN)        \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)
DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)
DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)
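
/*
 * Editor's illustration (not part of the original file): the saturating
 * scalar ops clamp each element's result to the type's range and report
 * saturation through *s; DO_2OP_SAT_SCALAR only latches FPSCR.QC when
 * the predicate bit for the lowest byte of the element is set
 * (qc |= sat & mask & 1).  A minimal check sketch for the byte-sized
 * unsigned add (hypothetical function name):
 */
#if 0 /* illustrative only */
static void example_uqadd_b(void)
{
    bool sat = false;
    uint8_t r = DO_UQADD_B(250, 10, &sat);  /* 260 clamps to UINT8_MAX */
    assert(r == 255 && sat);
}
#endif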

static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}
DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
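
/*
 * Editor's illustration (not part of the original file): VBRSR reverses
 * the low m bits of each element and zeroes the rest, so for bytes
 * do_vbrsrb(0b00000001, 3) == 0b00000100.  A minimal check sketch
 * (hypothetical function name):
 */
#if 0 /* illustrative only */
static void example_vbrsr(void)
{
    assert(do_vbrsrb(0x01, 3) == 0x04);   /* reverse of the low 3 bits */
    assert(do_vbrsrb(0x01, 8) == 0x80);   /* full 8-bit reversal */
}
#endif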

/*
 * Multiply add long dual accumulate ops.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                 \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else {                                                \
                    a EVENACC                                           \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }
DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)
DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)
DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

/*
 * Rounding multiply add long dual accumulate high: we must keep
 * a 72-bit internal accumulator value and return the top 64 bits.
 * ESIZE/TYPE describe the 32-bit inputs; LTYPE is the 64-bit type used
 * to form the full products before they are widened to 128 bits.
 */
#define DO_LDAVH(OP, ESIZE, TYPE, LTYPE, XCHG, EVENACC, ODDACC, TO128)  \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        Int128 acc = int128_lshift(TO128(a), 8);                        \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    acc = ODDACC(acc, TO128((LTYPE)n[H##ESIZE(e - 1 * XCHG)] * \
                                            m[H##ESIZE(e)]));           \
                } else {                                                \
                    acc = EVENACC(acc, TO128((LTYPE)n[H##ESIZE(e + 1 * XCHG)] * \
                                             m[H##ESIZE(e)]));          \
                }                                                       \
                acc = int128_add(acc, int128_make64(1 << 7));           \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return int128_getlo(int128_rshift(acc, 8));                     \
    }

DO_LDAVH(vrmlaldavhsw, 4, int32_t, int64_t, false, int128_add, int128_add, int128_makes64)
DO_LDAVH(vrmlaldavhxsw, 4, int32_t, int64_t, true, int128_add, int128_add, int128_makes64)

DO_LDAVH(vrmlaldavhuw, 4, uint32_t, uint64_t, false, int128_add, int128_add, int128_make64)

DO_LDAVH(vrmlsldavhsw, 4, int32_t, int64_t, false, int128_add, int128_sub, int128_makes64)
DO_LDAVH(vrmlsldavhxsw, 4, int32_t, int64_t, true, int128_add, int128_sub, int128_makes64)
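
/*
 * Editor's note (not part of the original file): concretely, for an
 * incoming accumulator a and the products p of the active elements,
 * the DO_LDAVH loop computes, in 128-bit arithmetic,
 *
 *     acc = (a << 8) + sum over active elements of (+/- p + (1 << 7))
 *
 * and returns acc >> 8, i.e. the top 64 bits of the 72-bit rounded
 * accumulation described in the comment before the macro.
 */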