/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"

static uint16_t mve_eci_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector correspond
     * to beats being executed. The mask has 1 bits for executed lanes
     * and 0 bits where ECI says this beat was already executed.
     */
    int eci;

    if ((env->condexec_bits & 0xf) != 0) {
        return 0xffff;
    }

    eci = env->condexec_bits >> 4;
    switch (eci) {
    case ECI_NONE:
        return 0xffff;
    case ECI_A0:
        return 0xfff0;
    case ECI_A0A1:
        return 0xff00;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return 0xf000;
    default:
        g_assert_not_reached();
    }
}
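
/*
 * For example, with ECI_A0A1 the first two beats have already been
 * executed, so the returned mask is 0xff00: only the predicate bits for
 * beats 2 and 3 (bits [15:8]) remain set.
 */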

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
        mask &= ltpmask;
    }

    /*
     * ECI bits indicate which beats are already executed;
     * we handle this by effectively predicating them out.
     */
    mask &= mve_eci_mask(env);
    return mask;
}
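
/*
 * For example, when both VPR mask fields are zero the two |= statements
 * above set all 16 bits, so (absent tail predication and ECI) every lane
 * is active regardless of P0. When a VPT block is active, only the P0
 * bits survive, e.g. P0 = 0x00f0 lets a 32-bit op update lane 1 only.
 */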
static void mve_advance_vpt(CPUARMState *env)
|
|
|
|
{
|
|
|
|
/* Advance the VPT and ECI state if necessary */
|
|
|
|
uint32_t vpr = env->v7m.vpr;
|
|
|
|
unsigned mask01, mask23;
|
2021-08-13 19:11:49 +03:00
|
|
|
uint16_t inv_mask;
|
|
|
|
uint16_t eci_mask = mve_eci_mask(env);
|
2021-06-17 15:15:45 +03:00
|
|
|
|
|
|
|
if ((env->condexec_bits & 0xf) == 0) {
|
|
|
|
env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
|
|
|
|
(ECI_A0 << 4) : (ECI_NONE << 4);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
|
|
|
|
/* VPT not enabled, nothing to do */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-08-13 19:11:49 +03:00
|
|
|
/* Invert P0 bits if needed, but only for beats we actually executed */
|
2021-06-17 15:15:45 +03:00
|
|
|
mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
|
|
|
|
mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
|
2021-08-13 19:11:49 +03:00
|
|
|
/* Start by assuming we invert all bits corresponding to executed beats */
|
|
|
|
inv_mask = eci_mask;
|
|
|
|
if (mask01 <= 8) {
|
|
|
|
/* MASK01 says don't invert low half of P0 */
|
|
|
|
inv_mask &= ~0xff;
|
2021-06-17 15:15:45 +03:00
|
|
|
}
|
2021-08-13 19:11:49 +03:00
|
|
|
if (mask23 <= 8) {
|
|
|
|
/* MASK23 says don't invert high half of P0 */
|
|
|
|
inv_mask &= ~0xff00;
|
2021-06-17 15:15:45 +03:00
|
|
|
}
|
2021-08-13 19:11:49 +03:00
|
|
|
vpr ^= inv_mask;
|
|
|
|
/* Only update MASK01 if beat 1 executed */
|
|
|
|
if (eci_mask & 0xf0) {
|
|
|
|
vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
|
|
|
|
}
|
|
|
|
/* Beat 3 always executes, so update MASK23 */
|
2021-06-17 15:15:45 +03:00
|
|
|
vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
|
|
|
|
env->v7m.vpr = vpr;
|
|
|
|
}
|
|
|
|

/* For loads, predicated lanes are zeroed instead of keeping their old values */
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        unsigned b, e; \
        /* \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
         * beats so we don't care if we update part of the dest and \
         * then take an exception. \
         */ \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
            if (eci_mask & (1 << b)) { \
                d[H##ESIZE(e)] = (mask & (1 << b)) ? \
                    cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
            } \
            addr += MSIZE; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned b, e; \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
            if (mask & (1 << b)) { \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            } \
            addr += MSIZE; \
        } \
        mve_advance_vpt(env); \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR
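
/*
 * Each DO_VLDR/DO_VSTR invocation above expands to one helper; for
 * example DO_VLDR(vldrb, 1, ldub, 1, uint8_t) defines HELPER(mve_vldrb),
 * which loads 16 bytes with per-byte predication.
 */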

/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * storing only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */

static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M) \
    _Generic(D, \
             uint8_t *:  mergemask_ub, \
             int8_t *:   mergemask_sb, \
             uint16_t *: mergemask_uh, \
             int16_t *:  mergemask_sh, \
             uint32_t *: mergemask_uw, \
             int32_t *:  mergemask_sw, \
             uint64_t *: mergemask_uq, \
             int64_t *:  mergemask_sq)(D, R, M)
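
/*
 * For example, mergemask(&d[H4(e)], r, mask) with a uint32_t *d picks
 * mergemask_uw, which uses the low four bits of mask to decide which
 * bytes of d[e] are written; this is how the helpers below honour
 * byte-granular predication.
 */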

void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}

#define DO_1OP(OP, ESIZE, TYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_CLS_B(N) (clrsb32(N) - 24)
#define DO_CLS_H(N) (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

#define DO_NEG(N) (-(N))
#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)

/*
 * 1 operand immediates: Vda is destination and possibly also one source.
 * All these insns work at 64-bit widths.
 */
#define DO_1OP_IMM(OP, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
    { \
        uint64_t *da = vda; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_MOVI(N, I) (I)
#define DO_ANDI(N, I) ((N) & (I))
#define DO_ORRI(N, I) ((N) | (I))

DO_1OP_IMM(vmovi, DO_MOVI)
DO_1OP_IMM(vandi, DO_ANDI)
DO_1OP_IMM(vorri, DO_ORRI)

#define DO_2OP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN) \
    DO_2OP(OP##b, 1, uint8_t, FN) \
    DO_2OP(OP##h, 2, uint16_t, FN) \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN) \
    DO_2OP(OP##b, 1, int8_t, FN) \
    DO_2OP(OP##h, 2, int16_t, FN) \
    DO_2OP(OP##w, 4, int32_t, FN)

/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \
                         m[H##ESIZE(le * 2 + TOP)]); \
            mergemask(&d[H##LESIZE(le)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_SAT_U(OP, FN) \
    DO_2OP_SAT(OP##b, 1, uint8_t, FN) \
    DO_2OP_SAT(OP##h, 2, uint16_t, FN) \
    DO_2OP_SAT(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_SAT_S(OP, FN) \
    DO_2OP_SAT(OP##b, 1, int8_t, FN) \
    DO_2OP_SAT(OP##h, 2, int16_t, FN) \
    DO_2OP_SAT(OP##w, 4, int32_t, FN)

#define DO_AND(N, M) ((N) & (M))
#define DO_BIC(N, M) ((N) & ~(M))
#define DO_ORR(N, M) ((N) | (M))
#define DO_ORN(N, M) ((N) | ~(M))
#define DO_EOR(N, M) ((N) ^ (M))

DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)

/*
 * Polynomial multiply. We can always do this generating 64 bits
 * of the result at a time, so we don't need to use DO_2OP_L.
 */
#define VMULLPH_MASK 0x00ff00ff00ff00ffULL
#define VMULLPW_MASK 0x0000ffff0000ffffULL
#define DO_VMULLPBH(N, M) pmull_h((N) & VMULLPH_MASK, (M) & VMULLPH_MASK)
#define DO_VMULLPTH(N, M) DO_VMULLPBH((N) >> 8, (M) >> 8)
#define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK)
#define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16)

DO_2OP(vmullpbh, 8, uint64_t, DO_VMULLPBH)
DO_2OP(vmullpth, 8, uint64_t, DO_VMULLPTH)
DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW)
DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW)

/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}

DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
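
/*
 * For example, do_rmulh_b(0x40, 0x40) computes (0x1000 + 0x80) >> 8 = 0x10,
 * i.e. the rounded high byte of the 16-bit product.
 */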

#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)
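
/*
 * For example, do_vhadd_u(0xffffffff, 1) evaluates the sum in 64 bits as
 * 0x100000000 and returns 0x80000000, so the halving add cannot wrap.
 */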

#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

DO_2OP_S(vshls, DO_VSHLS)
DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)

DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)

static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
                    uint32_t inv, uint32_t carry_in, bool update_flags)
{
    uint16_t mask = mve_element_mask(env);
    unsigned e;

    /* If any additions trigger, we will update flags. */
    if (mask & 0x1111) {
        update_flags = true;
    }

    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        uint64_t r = carry_in;
        r += n[H4(e)];
        r += m[H4(e)] ^ inv;
        if (mask & 1) {
            carry_in = r >> 32;
        }
        mergemask(&d[H4(e)], r, mask);
    }

    if (update_flags) {
        /* Store C, clear NZV. */
        env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
        env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, -1, carry_in, false);
}

void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, 0, 0, true);
}

void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, -1, 1, true);
}
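
/*
 * Note that the VADCI/VSBCI forms ignore the incoming FPSCR.C value:
 * VADCI starts with a carry-in of 0, while VSBCI (inv = -1) starts with
 * a carry-in of 1, since an initial subtract needs no borrow.
 */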

#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE r[16 / ESIZE]; \
        /* Calculate all results first to avoid overwriting inputs */ \
        for (e = 0; e < 16 / ESIZE; e++) { \
            if (!(e & 1)) { \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \
            } else { \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \
            } \
        } \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], r[e], mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VCADD_ALL(OP, FN0, FN1) \
    DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \
    DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \
    DO_VCADD(OP##w, 4, int32_t, FN0, FN1)

DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)

static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    if (val > max) {
        *s = true;
        return max;
    } else if (val < min) {
        *s = true;
        return min;
    }
    return val;
}

#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)

#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)

#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)

#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

/*
 * For QDMULH and QRDMULH we simplify "double and shift by esize" into
 * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
 */
#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
                                        INT8_MIN, INT8_MAX, s)
#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
                                        INT16_MIN, INT16_MAX, s)
#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
                                        INT32_MIN, INT32_MAX, s)

#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
                                         INT8_MIN, INT8_MAX, s)
#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
                                         INT16_MIN, INT16_MAX, s)
#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
                                         INT32_MIN, INT32_MAX, s)
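
/*
 * This works because (2 * n * m) >> esize == (n * m) >> (esize - 1);
 * halving both the value and the shift also halves the QRDMULH rounding
 * constant, from 1 << (esize - 1) down to 1 << (esize - 2).
 */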

DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)

DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)

DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)

/*
 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
 * and friends wanting a uint32_t* sat and our needing a bool*.
 */
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
    ({ \
        uint32_t su32 = 0; \
        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
        if (su32) { \
            *satp = true; \
        } \
        r; \
    })

#define DO_SQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
#define DO_UQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
#define DO_SQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
#define DO_SUQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)

DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)

/*
 * Multiply add dual returning high half
 * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
 * whether to add the rounding constant, and the pointer to the
 * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
 * saturate to twice the input size and return the high half; or
 * (A * B - C * D) etc for VQDMLSDH.
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            if ((e & 1) == XCHG) { \
                TYPE r = FN(n[H##ESIZE(e)], \
                            m[H##ESIZE(e - XCHG)], \
                            n[H##ESIZE(e + (1 - 2 * XCHG))], \
                            m[H##ESIZE(e + (1 - XCHG))], \
                            ROUND, &sat); \
                mergemask(&d[H##ESIZE(e)], r, mask); \
                qc |= sat & mask & 1; \
            } \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)

DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)

DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)

DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)

#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \
                      mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* "accumulating" version where FN takes d as well as n and m */
#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat), \
                      mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN) \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN) \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN) \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

#define DO_2OP_ACC_SCALAR_U(OP, FN) \
    DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN) \
    DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN) \
    DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant and half the "c << esize"
     * before doubling rather than adding the rounding constant after
     * the doubling.
     */
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c << 31;
    int64_t r;
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

/*
 * The *MLAH insns are vector * scalar + vector;
 * the *MLASH insns are vector * vector + scalar
 */
#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)

#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)

DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)

DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)

/* Vector by scalar plus vector */
#define DO_VMLA(D, N, M) ((N) * (M) + (D))

DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA)

/* Vector by vector plus scalar */
#define DO_VMLAS(D, N, M) ((N) * (D) + (M))

DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)

/*
 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
 * SATMASK specifies which bits of the predicate mask matter for determining
 * whether to propagate a saturation indication into FPSCR.QC -- for
 * the 16x16->32 case we must check only the bit corresponding to the T or B
 * half that we used, but for the 32x32->64 case we propagate if the mask
 * bit is set for either half.
 */
#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \
            mergemask(&d[H##LESIZE(le)], r, mask); \
            qc |= sat && (mask & SATMASK); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    int64_t r = ((int64_t)n * m) * 2;
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
}

static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /* The multiply can't overflow, but the doubling might */
    int64_t r = (int64_t)n * m;
    if (r > INT64_MAX / 2) {
        *sat = true;
        return INT64_MAX;
    } else if (r < INT64_MIN / 2) {
        *sat = true;
        return INT64_MIN;
    } else {
        return r * 2;
    }
}
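
/*
 * For example, do_qdmullw(INT32_MIN, INT32_MIN) computes the product
 * 2^62, which is greater than INT64_MAX / 2, so the doubling saturates
 * and the helper returns INT64_MAX with *sat set.
 */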

#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)

DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)

/*
 * Long saturating ops
 */
#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                void *vm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \
            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \
            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \
            qc |= sat && (mask & SATMASK); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)

static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
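
/*
 * These implement VBRSR's "bit reverse the bottom m bits" behaviour:
 * for example, do_vbrsrb(0x06, 3) reverses the low three bits 0b110
 * to give 0b011, i.e. 0x03, and zeroes the remaining bits.
 */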

/*
 * Multiply add long dual accumulate ops.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint64_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                if (e & 1) { \
                    a ODDACC \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else { \
                    a EVENACC \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                } \
            } \
        } \
        mve_advance_vpt(env); \
        return a; \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

/*
 * Multiply add dual accumulate ops
 */
#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint32_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                if (e & 1) { \
                    a ODDACC \
                        n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else { \
                    a EVENACC \
                        n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                } \
            } \
        } \
        mve_advance_vpt(env); \
        return a; \
    }

#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)

#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)

DO_DAV_S(vmladavs, false, +=, +=)
DO_DAV_U(vmladavu, false, +=, +=)
DO_DAV_S(vmlsdav, false, +=, -=)
DO_DAV_S(vmladavsx, true, +=, +=)
DO_DAV_S(vmlsdavx, true, +=, -=)

/*
 * Rounding multiply add long dual accumulate high. In the pseudocode
 * this is implemented with a 72-bit internal accumulator value of which
 * the top 64 bits are returned. We optimize this to avoid having to
 * use 128-bit arithmetic -- we can do this because the 72-bit accumulator
 * is squashed back into 64-bits after each beat.
 */
#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint64_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
            if (mask & 1) { \
                LTYPE mul; \
                if (e & 1) { \
                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \
                    if (SUB) { \
                        mul = -mul; \
                    } \
                } else { \
                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \
                } \
                mul = (mul >> 8) + ((mul >> 7) & 1); \
                a += mul; \
            } \
        } \
        mve_advance_vpt(env); \
        return a; \
    }

DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)

/* Vector add across vector */
#define DO_VADDV(OP, ESIZE, TYPE) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                ra += m[H##ESIZE(e)]; \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    } \

DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)
2021-06-28 16:58:24 +03:00
|
|
|
|
2021-08-13 19:11:52 +03:00
|
|
|
/*
 * Vector max/min across vector. Unlike VADDV, we must
 * read ra as the element size, not its full width.
 * We work with int64_t internally for simplicity.
 */
#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra_in) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm; \
        int64_t ra = (RATYPE)ra_in; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                ra = FN(ra, m[H##ESIZE(e)]); \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    } \

#define DO_VMAXMINV_U(INSN, FN) \
    DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN) \
    DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN) \
    DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
#define DO_VMAXMINV_S(INSN, FN) \
    DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN) \
    DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN) \
    DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)
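
/*
 * Illustrative note (added): the (RATYPE)ra_in cast is what implements
 * "read ra as the element size": for vmaxvs with byte elements, an
 * incoming Ra of 0x000000ff is reinterpreted as int8_t -1, not 255,
 * before it is widened to the int64_t working value.
 */
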
/*
 * Helpers for max and min of absolute values across vector:
 * note that we only take the absolute value of 'm', not 'n'
 */
static int64_t do_maxa(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MAX(n, m);
}

static int64_t do_mina(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MIN(n, m);
}

DO_VMAXMINV_S(vmaxvs, DO_MAX)
DO_VMAXMINV_U(vmaxvu, DO_MAX)
DO_VMAXMINV_S(vminvs, DO_MIN)
DO_VMAXMINV_U(vminvu, DO_MIN)
/*
 * VMAXAV, VMINAV treat the general purpose input as unsigned
 * and the vector elements as signed.
 */
DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)
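
/*
 * Illustrative note (added): for VMAXAV the general purpose value is
 * treated as unsigned and only the vector element is made absolute,
 * so e.g. do_maxa(200, -100) folds -100 to 100 and returns 200.
 */
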
#define DO_VABAV(OP, ESIZE, TYPE) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint32_t ra) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm, *n = vn; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                int64_t n0 = n[H##ESIZE(e)]; \
                int64_t m0 = m[H##ESIZE(e)]; \
                uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0); \
                ra += r; \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    }

DO_VABAV(vabavsb, 1, int8_t)
DO_VABAV(vabavsh, 2, int16_t)
DO_VABAV(vabavsw, 4, int32_t)
DO_VABAV(vabavub, 1, uint8_t)
DO_VABAV(vabavuh, 2, uint16_t)
DO_VABAV(vabavuw, 4, uint32_t)
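
/*
 * Illustrative note (added): widening both operands to int64_t before
 * subtracting makes the absolute difference safe for both the signed
 * and unsigned variants; e.g. vabavsb with n0 = 100 and m0 = -100
 * accumulates 200 even though that difference overflows int8_t.
 */
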
#define DO_VADDLV(OP, TYPE, LTYPE) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint64_t ra) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm; \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
            if (mask & 1) { \
                ra += (LTYPE)m[H4(e)]; \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    } \

DO_VADDLV(vaddlv_s, int32_t, int64_t)
DO_VADDLV(vaddlv_u, uint32_t, uint64_t)

/* Shifts by immediate */
#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], \
                      FN(m[H##ESIZE(e)], shift), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], \
                      FN(m[H##ESIZE(e)], shift, &sat), mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }
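
/*
 * Illustrative note (added): in DO_2SHIFT_SAT, "qc |= sat & mask & 1"
 * only records saturation when the lane's lowest predicate bit is set,
 * so masked-out lanes never disturb the sticky flag; a non-zero qc is
 * then folded into env->vfp.qc[0], which QEMU later reports as FPSCR.QC.
 */
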
/* provide unsigned 2-op shift helpers for all sizes */
#define DO_2SHIFT_U(OP, FN) \
    DO_2SHIFT(OP##b, 1, uint8_t, FN) \
    DO_2SHIFT(OP##h, 2, uint16_t, FN) \
    DO_2SHIFT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_S(OP, FN) \
    DO_2SHIFT(OP##b, 1, int8_t, FN) \
    DO_2SHIFT(OP##h, 2, int16_t, FN) \
    DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN) \
    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
    DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
    DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_SAT_S(OP, FN) \
    DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
    DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

DO_2SHIFT_U(vshli_u, DO_VSHLU)
DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)

/* Shift-and-insert; we always work with 64 bits at a time */
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        uint64_t *d = vd, *m = vm; \
        uint16_t mask; \
        uint64_t shiftmask; \
        unsigned e; \
        if (shift == ESIZE * 8) { \
            /* \
             * Only VSRI can shift by <dt>; it should mean "don't \
             * update the destination". The generic logic can't handle \
             * this because it would try to shift by an out-of-range \
             * amount, so special case it here. \
             */ \
            goto done; \
        } \
        assert(shift < ESIZE * 8); \
        mask = mve_element_mask(env); \
        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \
        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
                         (d[H8(e)] & ~shiftmask); \
            mergemask(&d[H8(e)], r, mask); \
        } \
    done: \
        mve_advance_vpt(env); \
    }

#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))

DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
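
/*
 * Illustrative note (added): for VSLI.8 with shift == 3,
 * SHL_MASK(8, 3) is 0xf8 and dup_const(MO_8, 0xf8) replicates it to
 * 0xf8f8f8f8f8f8f8f8, so bits [7:3] of each byte come from the shifted
 * source while bits [2:0] keep the old destination value.
 */
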
/*
 * Long shifts taking half-sized inputs from top or bottom of the input
 * vector and producing a double-width result. ESIZE, TYPE are for
 * the input, and LESIZE, LTYPE for the output.
 * Unlike the normal shift helpers, we do not handle negative shift counts,
 * because the long shift is strictly left-only.
 */
#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *d = vd; \
        TYPE *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        assert(shift <= 16); \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
            mergemask(&d[H##LESIZE(le)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHLL_ALL(OP, TOP) \
    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \
    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \
    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \
    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)
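
/*
 * Illustrative note (added): the "le * 2 + TOP" indexing is what selects
 * the bottom or top half of the input: the vshllb helpers widen the
 * even-numbered input elements 0, 2, 4, ... while the vshllt helpers
 * widen the odd-numbered ones.
 */
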
/*
 * Narrowing right shifts, taking a double sized input, shifting it
 * and putting the result in either the top or bottom half of the output.
 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
 */
#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            TYPE r = FN(m[H##LESIZE(le)], shift); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHRN_ALL(OP, FN) \
    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)

static inline uint64_t do_urshr(uint64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else if (sh == 64) {
        return x >> 63;
    } else {
        return 0;
    }
}

static inline int64_t do_srshr(int64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else {
        /* Rounding the sign bit always produces 0. */
        return 0;
    }
}

DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)
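
/*
 * Illustrative note (added): do_urshr() adds back the last bit shifted
 * out, i.e. it rounds to nearest with ties upwards: do_urshr(0x1234, 8)
 * is 0x12 but do_urshr(0x1280, 8) is 0x13.
 */
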
static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
                                 bool *satp)
{
    if (val > max) {
        *satp = true;
        return max;
    } else if (val < min) {
        *satp = true;
        return min;
    } else {
        return val;
    }
}

/* Saturating narrowing right shifts */
#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        bool qc = false; \
        unsigned le; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

#define DO_SHRN_SB(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
#define DO_SHRN_UB(N, M, SATP) \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
#define DO_SHRUN_B(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)

#define DO_SHRN_SH(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
#define DO_SHRN_UH(N, M, SATP) \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
#define DO_SHRUN_H(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)

#define DO_RSHRN_SB(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
#define DO_RSHRN_UB(N, M, SATP) \
    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
#define DO_RSHRUN_B(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)

#define DO_RSHRN_SH(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
#define DO_RSHRN_UH(N, M, SATP) \
    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
#define DO_RSHRUN_H(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)

DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)

DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
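
/*
 * Illustrative note (added): e.g. DO_SHRN_SB(0x1234, 4, &sat) shifts to
 * 0x123 (291), which is above INT8_MAX, so the lane is written as 127
 * and 'sat' is set; the saturation only reaches FPSCR.QC if the lane's
 * predicate bit was set.
 */
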
#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], \
                      m[H##LESIZE(le)], mask); \
        } \
        mve_advance_vpt(env); \
    }

DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t)
DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t)

#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        bool qc = false; \
        unsigned le; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            TYPE r = FN(m[H##LESIZE(le)], &sat); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VMOVN_SAT_UB(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VMOVN_SAT_UH(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VMOVN_SAT_SB(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
    DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VMOVN_SAT_SH(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
    DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

#define DO_VQMOVN_SB(N, SATP) \
    do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQMOVN_UB(N, SATP) \
    do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP)
#define DO_VQMOVUN_B(N, SATP) \
    do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP)

#define DO_VQMOVN_SH(N, SATP) \
    do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQMOVN_UH(N, SATP) \
    do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP)
#define DO_VQMOVUN_H(N, SATP) \
    do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP)

DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB)
DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH)
DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB)
DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH)
DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B)
DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H)
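
/*
 * Illustrative note (added): in the narrowing helpers above, the initial
 * "mask >>= ESIZE * TOP" lines the predicate mask up with the destination
 * elements: a "top" op writes element le * 2 + 1, whose predicate bits
 * start ESIZE bits higher than those of the corresponding "bottom" element.
 */
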
uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
                           uint32_t shift)
{
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    uint32_t r;

    /*
     * For each 32-bit element, we shift it left, bringing in the
     * low 'shift' bits of rdm at the bottom. Bits shifted out at
     * the top become the new rdm, if the predicate mask permits.
     * The final rdm value is returned to update the register.
     * shift == 0 here means "shift by 32 bits".
     */
    if (shift == 0) {
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = rdm;
            if (mask & 1) {
                rdm = d[H4(e)];
            }
            mergemask(&d[H4(e)], r, mask);
        }
    } else {
        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);

        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = (d[H4(e)] << shift) | (rdm & shiftmask);
            if (mask & 1) {
                rdm = d[H4(e)] >> (32 - shift);
            }
            mergemask(&d[H4(e)], r, mask);
        }
    }
    mve_advance_vpt(env);
    return rdm;
}
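
/*
 * Illustrative note (added): with shift == 8 and an incoming rdm of
 * 0xab, an element holding 0x11223344 is rewritten as 0x223344ab and
 * its old top byte 0x11 becomes the rdm fed into the next element, so
 * the whole vector behaves like one long left shift through Rdm.
 */
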
uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
}
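
/*
 * Illustrative note (added): these long-shift helpers take the shift
 * count as a uint32_t and cast it to int8_t so that the same helpers
 * can also serve the shift-by-register forms, where the count may be
 * negative or greater than 32; the right-shift variants simply negate
 * the count before handing it to the sqrshl/uqrshl primitives.
 */
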
/* Operate on 64-bit values, but saturate at 48 bits */
static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
                                    bool round, uint32_t *sat)
{
    int64_t val, extval;

    if (shift <= -48) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        return src >> 63;
    } else if (shift < 0) {
        if (round) {
            src >>= -shift - 1;
            val = (src >> 1) + (src & 1);
        } else {
            val = src >> -shift;
        }
        extval = sextract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        int64_t extval = sextract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17);
}

/* Operate on 64-bit values, but saturate at 48 bits */
static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
                                     bool round, uint32_t *sat)
{
    uint64_t val, extval;

    if (shift <= -(48 + round)) {
        return 0;
    } else if (shift < 0) {
        if (round) {
            val = src >> (-shift - 1);
            val = (val >> 1) + (val & 1);
        } else {
            val = src >> -shift;
        }
        extval = extract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        uint64_t extval = extract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return MAKE_64BIT_MASK(0, 48);
}

uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
}
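
/*
 * Illustrative note (added): when the 48-bit check fails, the signed
 * helper above saturates to MAKE_64BIT_MASK(0, 47) == 2^47 - 1 for
 * positive inputs and to MAKE_64BIT_MASK(47, 17), i.e. -2^47 as an
 * int64_t, for negative ones; the unsigned helper saturates to 2^48 - 1.
 */
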
uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
}

uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
}

#define DO_VIDUP(OP, ESIZE, TYPE, FN) \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
                              uint32_t offset, uint32_t imm) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], offset, mask); \
            offset = FN(offset, imm); \
        } \
        mve_advance_vpt(env); \
        return offset; \
    }

#define DO_VIWDUP(OP, ESIZE, TYPE, FN) \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
                              uint32_t offset, uint32_t wrap, \
                              uint32_t imm) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], offset, mask); \
            offset = FN(offset, wrap, imm); \
        } \
        mve_advance_vpt(env); \
        return offset; \
    }

#define DO_VIDUP_ALL(OP, FN) \
    DO_VIDUP(OP##b, 1, int8_t, FN) \
    DO_VIDUP(OP##h, 2, int16_t, FN) \
    DO_VIDUP(OP##w, 4, int32_t, FN)

#define DO_VIWDUP_ALL(OP, FN) \
    DO_VIWDUP(OP##b, 1, int8_t, FN) \
    DO_VIWDUP(OP##h, 2, int16_t, FN) \
    DO_VIWDUP(OP##w, 4, int32_t, FN)

static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    offset += imm;
    if (offset == wrap) {
        offset = 0;
    }
    return offset;
}

static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    if (offset == 0) {
        offset = wrap;
    }
    offset -= imm;
    return offset;
}

DO_VIDUP_ALL(vidup, DO_ADD)
DO_VIWDUP_ALL(viwdup, do_add_wrap)
DO_VIWDUP_ALL(vdwdup, do_sub_wrap)
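
/*
 * Illustrative note (added): for VIWDUP with offset 6, wrap 8 and
 * imm 2, do_add_wrap() produces the element sequence 6, 0, 2, 4, 6, ...
 * since 6 + 2 hits the wrap value and restarts at 0; the final offset
 * is returned so the general purpose register can be updated.
 */
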
/*
 * Vector comparison.
 * P0 bits for non-executed beats (where eci_mask is 0) are unchanged.
 * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
 * P0 bits otherwise are updated with the results of the comparisons.
 * We must also keep unchanged the MASK fields at the top of v7m.vpr.
 */
#define DO_VCMP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \
    { \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        uint16_t beatpred = 0; \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++) { \
            bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask; \
            emask <<= ESIZE; \
        } \
        beatpred &= mask; \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
                       (beatpred & eci_mask); \
        mve_advance_vpt(env); \
    }

#define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *n = vn; \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        uint16_t beatpred = 0; \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++) { \
            bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask; \
            emask <<= ESIZE; \
        } \
        beatpred &= mask; \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
                       (beatpred & eci_mask); \
        mve_advance_vpt(env); \
    }

#define DO_VCMP_S(OP, FN) \
    DO_VCMP(OP##b, 1, int8_t, FN) \
    DO_VCMP(OP##h, 2, int16_t, FN) \
    DO_VCMP(OP##w, 4, int32_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN)

#define DO_VCMP_U(OP, FN) \
    DO_VCMP(OP##b, 1, uint8_t, FN) \
    DO_VCMP(OP##h, 2, uint16_t, FN) \
    DO_VCMP(OP##w, 4, uint32_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN)

#define DO_EQ(N, M) ((N) == (M))
#define DO_NE(N, M) ((N) != (M))
#define DO_GE(N, M) ((N) >= (M))
#define DO_LT(N, M) ((N) < (M))
#define DO_GT(N, M) ((N) > (M))
#define DO_LE(N, M) ((N) <= (M))

DO_VCMP_U(vcmpeq, DO_EQ)
DO_VCMP_U(vcmpne, DO_NE)
DO_VCMP_U(vcmpcs, DO_GE)
DO_VCMP_U(vcmphi, DO_GT)
DO_VCMP_S(vcmpge, DO_GE)
DO_VCMP_S(vcmplt, DO_LT)
DO_VCMP_S(vcmpgt, DO_GT)
DO_VCMP_S(vcmple, DO_LE)
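
/*
 * Illustrative note (added): in the VCMP helpers each element that
 * compares true sets ESIZE adjacent predicate bits (one per byte of the
 * element), e.g. a true halfword compare in element 1 sets P0 bits [3:2];
 * the "& mask" then zeroes the bits of predicated lanes, while eci_mask
 * keeps the P0 bits of already-executed beats unchanged.
 */
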
void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    /*
     * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n]
     * but note that whether bytes are written to Qd is still subject
     * to (all forms of) predication in the usual way.
     */
    uint64_t *d = vd, *n = vn, *m = vm;
    uint16_t mask = mve_element_mask(env);
    uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
    unsigned e;
    for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) {
        uint64_t r = m[H8(e)];
        mergemask(&r, n[H8(e)], p0);
        mergemask(&d[H8(e)], r, mask);
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vpnot)(CPUARMState *env)
{
    /*
     * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged.
     * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
     * P0 bits otherwise are inverted.
     * (This is the same logic as VCMP.)
     * This insn is itself subject to predication and to beat-wise execution,
     * and after it executes VPT state advances in the usual way.
     */
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t beatpred = ~env->v7m.vpr & mask;
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask);
    mve_advance_vpt(env);
}

#define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VQABS_B(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQABS_H(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQABS_W(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP)

#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP)
#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP)
#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP)

DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B)
DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H)
DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)

DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)
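
/*
 * Illustrative note (added): widening to int64_t before the abs/negate
 * is what makes the INT_MIN case well defined: VQABS.S8 of -128
 * computes DO_ABS((int64_t)-128) == 128, which do_sat_bhs() clamps to
 * 127 and flags as saturated.
 */
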
/*
 * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its
 * absolute value; we then do an unsigned comparison.
 */
#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        UTYPE *d = vd; \
        STYPE *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            UTYPE r = DO_ABS(m[H##ESIZE(e)]); \
            r = FN(d[H##ESIZE(e)], r); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX)
DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX)
DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX)
DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN)
DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN)
DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)
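
/*
 * Illustrative note (added): for VMAXA.S8 a source element of -5 is
 * folded to 5 and then compared unsigned against the destination, so a
 * destination byte of 0x80 (128) wins even though it would be negative
 * if read as signed.
 */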