target/arm: Implement SVE floating-point complex add

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180627043328.11531-29-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Richard Henderson, 2018-06-29 15:11:11 +01:00 (committed by Peter Maydell)
parent a21035822e
commit 76a9d9cdc4
4 changed files with 135 additions and 0 deletions
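
FCADD adds each complex-number element pair of the first source vector to the corresponding pair of the second source after rotating the second operand by 90 or 270 degrees in the complex plane (the single rot bit selects which). As a rough scalar model of the per-pair arithmetic, here is my own sketch, not part of the patch; it ignores element sizes, predication, and the float_status rounding/NaN handling that the real helpers use:

#include <stdbool.h>
#include <stdio.h>

typedef struct { float re, im; } cpx;

/* Reference-only model of one FCADD element pair.
 * rot_270 == false selects #90, rot_270 == true selects #270,
 * mirroring what the helpers' neg_real/neg_imag sign masks achieve.
 */
cpx fcadd_ref(cpx n, cpx m, bool rot_270)
{
    cpx d;
    if (!rot_270) {              /* #90: m becomes (-m.im, m.re) */
        d.re = n.re - m.im;
        d.im = n.im + m.re;
    } else {                     /* #270: m becomes (m.im, -m.re) */
        d.re = n.re + m.im;
        d.im = n.im - m.re;
    }
    return d;
}

int main(void)
{
    cpx r = fcadd_ref((cpx){1, 2}, (cpx){3, 4}, false);
    printf("%g %g\n", r.re, r.im);   /* (1+2i) + i*(3+4i) = -3 + 5i */
    return 0;
}

The helpers in sve_helper.c below get the same effect without branching on rot, by XOR-ing a sign mask into one of the two addends.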

target/arm/helper-sve.h

@@ -1092,6 +1092,13 @@ DEF_HELPER_FLAGS_6(sve_facgt_s, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_6(sve_facgt_d, TCG_CALL_NO_RWG,
                   void, ptr, ptr, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_6(sve_fcadd_h, TCG_CALL_NO_RWG,
                   void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(sve_fcadd_s, TCG_CALL_NO_RWG,
                   void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(sve_fcadd_d, TCG_CALL_NO_RWG,
                   void, ptr, ptr, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32)

target/arm/sve.decode

@@ -725,6 +725,10 @@ UMIN_zzi 00100101 .. 101 011 110 ........ ..... @rdn_i8u
# SVE integer multiply immediate (unpredicated)
MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s

# SVE floating-point complex add (predicated)
FCADD 01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \
      rn=%reg_movprfx

### SVE FP Multiply-Add Indexed Group

# SVE floating-point multiply-add (indexed)

target/arm/sve_helper.c

@@ -3657,6 +3657,106 @@ void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
    }
}

/*
 * FP Complex Add
 */

void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg,
                         void *vs, uint32_t desc)
{
    intptr_t j, i = simd_oprsz(desc);
    uint64_t *g = vg;
    float16 neg_imag = float16_set_sign(0, simd_data(desc));
    float16 neg_real = float16_chs(neg_imag);

    do {
        uint64_t pg = g[(i - 1) >> 6];
        do {
            float16 e0, e1, e2, e3;

            /* I holds the real index; J holds the imag index.  */
            j = i - sizeof(float16);
            i -= 2 * sizeof(float16);

            e0 = *(float16 *)(vn + H1_2(i));
            e1 = *(float16 *)(vm + H1_2(j)) ^ neg_real;
            e2 = *(float16 *)(vn + H1_2(j));
            e3 = *(float16 *)(vm + H1_2(i)) ^ neg_imag;

            if (likely((pg >> (i & 63)) & 1)) {
                *(float16 *)(vd + H1_2(i)) = float16_add(e0, e1, vs);
            }
            if (likely((pg >> (j & 63)) & 1)) {
                *(float16 *)(vd + H1_2(j)) = float16_add(e2, e3, vs);
            }
        } while (i & 63);
    } while (i != 0);
}

void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg,
                         void *vs, uint32_t desc)
{
    intptr_t j, i = simd_oprsz(desc);
    uint64_t *g = vg;
    float32 neg_imag = float32_set_sign(0, simd_data(desc));
    float32 neg_real = float32_chs(neg_imag);

    do {
        uint64_t pg = g[(i - 1) >> 6];
        do {
            float32 e0, e1, e2, e3;

            /* I holds the real index; J holds the imag index.  */
            j = i - sizeof(float32);
            i -= 2 * sizeof(float32);

            e0 = *(float32 *)(vn + H1_4(i));
            e1 = *(float32 *)(vm + H1_4(j)) ^ neg_real;
            e2 = *(float32 *)(vn + H1_4(j));
            e3 = *(float32 *)(vm + H1_4(i)) ^ neg_imag;

            if (likely((pg >> (i & 63)) & 1)) {
                *(float32 *)(vd + H1_4(i)) = float32_add(e0, e1, vs);
            }
            if (likely((pg >> (j & 63)) & 1)) {
                *(float32 *)(vd + H1_4(j)) = float32_add(e2, e3, vs);
            }
        } while (i & 63);
    } while (i != 0);
}

void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg,
                         void *vs, uint32_t desc)
{
    intptr_t j, i = simd_oprsz(desc);
    uint64_t *g = vg;
    float64 neg_imag = float64_set_sign(0, simd_data(desc));
    float64 neg_real = float64_chs(neg_imag);

    do {
        uint64_t pg = g[(i - 1) >> 6];
        do {
            float64 e0, e1, e2, e3;

            /* I holds the real index; J holds the imag index.  */
            j = i - sizeof(float64);
            i -= 2 * sizeof(float64);

            e0 = *(float64 *)(vn + i);
            e1 = *(float64 *)(vm + j) ^ neg_real;
            e2 = *(float64 *)(vn + j);
            e3 = *(float64 *)(vm + i) ^ neg_imag;

            if (likely((pg >> (i & 63)) & 1)) {
                *(float64 *)(vd + i) = float64_add(e0, e1, vs);
            }
            if (likely((pg >> (j & 63)) & 1)) {
                *(float64 *)(vd + j) = float64_add(e2, e3, vs);
            }
        } while (i & 63);
    } while (i != 0);
}

/*
 * Load contiguous data, protected by a governing predicate.
 */
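
For reference, the neg_real/neg_imag values in the helpers above are plain sign masks: float16_chs(+0.0) (and its wider counterparts) is just the sign bit, so XOR-ing an operand with it negates that operand, while XOR with +0.0 is a no-op. A standalone illustration of the bit trick on the half-precision encoding, my own example rather than part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t one      = 0x3c00;   /* 1.0 in IEEE binary16 */
    uint16_t neg_real = 0x8000;   /* sign bit only, i.e. -0.0 */

    /* XOR with -0.0 flips the sign: 0x3c00 ^ 0x8000 == 0xbc00 == -1.0 */
    printf("%#06x\n", (unsigned)(uint16_t)(one ^ neg_real));
    /* XOR with +0.0 leaves the value unchanged */
    printf("%#06x\n", (unsigned)(uint16_t)(one ^ 0x0000));
    return 0;
}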

target/arm/translate-sve.c

@@ -3895,6 +3895,30 @@ DO_FPCMP(FACGT, facgt)

#undef DO_FPCMP

static bool trans_FCADD(DisasContext *s, arg_FCADD *a, uint32_t insn)
{
    static gen_helper_gvec_4_ptr * const fns[3] = {
        gen_helper_sve_fcadd_h,
        gen_helper_sve_fcadd_s,
        gen_helper_sve_fcadd_d
    };

    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = get_fpstatus_ptr(a->esz == MO_16);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, a->rot, fns[a->esz - 1]);
        tcg_temp_free_ptr(status);
    }
    return true;
}

typedef void gen_helper_sve_fmla(TCGv_env, TCGv_ptr, TCGv_i32);

static bool do_fmla(DisasContext *s, arg_rprrr_esz *a, gen_helper_sve_fmla *fn)