/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch LSX helper functions.
 *
 * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

#define DO_ADD(a, b) (a + b)
#define DO_SUB(a, b) (a - b)

/*
 * "Horizontal" widening ops: destination lane i combines the odd-numbered
 * source lane of vj with the even-numbered source lane of vk, with both
 * operands widened to the destination lane type before the operation.
 */
#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(CPULoongArchState *env,                            \
                  uint32_t vd, uint32_t vj, uint32_t vk)             \
{                                                                    \
    int i;                                                           \
    VReg *Vd = &(env->fpr[vd].vreg);                                 \
    VReg *Vj = &(env->fpr[vj].vreg);                                 \
    VReg *Vk = &(env->fpr[vk].vreg);                                 \
    typedef __typeof(Vd->E1(0)) TD;                                  \
                                                                     \
    for (i = 0; i < LSX_LEN/BIT; i++) {                              \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
    }                                                                \
}

DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)

void HELPER(vhaddw_q_d)(CPULoongArchState *env,
                        uint32_t vd, uint32_t vj, uint32_t vk)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);
    VReg *Vk = &(env->fpr[vk].vreg);

    Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
}

DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)

void HELPER(vhsubw_q_d)(CPULoongArchState *env,
                        uint32_t vd, uint32_t vj, uint32_t vk)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);
    VReg *Vk = &(env->fpr[vk].vreg);

    Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
}

DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)

void HELPER(vhaddw_qu_du)(CPULoongArchState *env,
                          uint32_t vd, uint32_t vj, uint32_t vk)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);
    VReg *Vk = &(env->fpr[vk].vreg);

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
                          int128_make64((uint64_t)Vk->D(0)));
}

DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)

void HELPER(vhsubw_qu_du)(CPULoongArchState *env,
                          uint32_t vd, uint32_t vj, uint32_t vk)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);
    VReg *Vk = &(env->fpr[vk].vreg);

    Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
                          int128_make64((uint64_t)Vk->D(0)));
}
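/*
 * Even/odd widening ops: DO_EVEN reads source lanes 2 * i, DO_ODD reads
 * lanes 2 * i + 1.  For example, DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
 * below expands to a helper computing, for each 16-bit lane,
 *     Vd->H(i) = (int16_t)Vj->B(2 * i) + (int16_t)Vk->B(2 * i)
 * so the addition happens at the widened precision and cannot wrap.
 */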
#define DO_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)      \
{                                                                \
    int i;                                                       \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    VReg *Vk = (VReg *)vk;                                       \
    typedef __typeof(Vd->E1(0)) TD;                              \
    for (i = 0; i < LSX_LEN/BIT; i++) {                          \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                            \
}

#define DO_ODD(NAME, BIT, E1, E2, DO_OP)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)              \
{                                                                        \
    int i;                                                               \
    VReg *Vd = (VReg *)vd;                                               \
    VReg *Vj = (VReg *)vj;                                               \
    VReg *Vk = (VReg *)vk;                                               \
    typedef __typeof(Vd->E1(0)) TD;                                      \
    for (i = 0; i < LSX_LEN/BIT; i++) {                                  \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
    }                                                                    \
}

void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
}

DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)

void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
}

DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)

void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_sub(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
}

DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)

void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
}

DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)

void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
                          int128_make64((uint64_t)Vk->D(0)));
}

DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)

void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
                          int128_make64((uint64_t)Vk->D(1)));
}

DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)

void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(0)),
                          int128_make64((uint64_t)Vk->D(0)));
}

DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)

void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
                          int128_make64((uint64_t)Vk->D(1)));
}

DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)
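/*
 * Mixed-signedness widening ops ("_u_s" forms): the vj lane is
 * zero-extended as unsigned while the vk lane is sign-extended as signed
 * before the operation, matching the *_bu_b / *_hu_h / *_wu_w encodings.
 */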
#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)             \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)           \
{                                                                     \
    int i;                                                            \
    VReg *Vd = (VReg *)vd;                                            \
    VReg *Vj = (VReg *)vj;                                            \
    VReg *Vk = (VReg *)vk;                                            \
    typedef __typeof(Vd->ES1(0)) TDS;                                 \
    typedef __typeof(Vd->EU1(0)) TDU;                                 \
    for (i = 0; i < LSX_LEN/BIT; i++) {                               \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i), (TDS)Vk->ES2(2 * i)); \
    }                                                                 \
}

#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)           \
{                                                                     \
    int i;                                                            \
    VReg *Vd = (VReg *)vd;                                            \
    VReg *Vj = (VReg *)vj;                                            \
    VReg *Vk = (VReg *)vk;                                            \
    typedef __typeof(Vd->ES1(0)) TDS;                                 \
    typedef __typeof(Vd->EU1(0)) TDU;                                 \
    for (i = 0; i < LSX_LEN/BIT; i++) {                               \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1),                   \
                           (TDS)Vk->ES2(2 * i + 1));                  \
    }                                                                 \
}

void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
                          int128_makes64(Vk->D(0)));
}

DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)

void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
                          int128_makes64(Vk->D(1)));
}

DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD)

/*
 * Overflow-safe averaging: (a >> 1) + (b >> 1) plus a carry term equals
 * (a + b) / 2 without ever forming a + b at full width.  DO_VAVG
 * truncates (carry only when both low bits are set), DO_VAVGR rounds
 * (carry when either low bit is set).
 */
#define DO_VAVG(a, b)  ((a >> 1) + (b >> 1) + (a & b & 1))
#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1))

#define DO_3OP(NAME, BIT, E, DO_OP)                         \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));               \
    }                                                       \
}

DO_3OP(vavg_b, 8, B, DO_VAVG)
DO_3OP(vavg_h, 16, H, DO_VAVG)
DO_3OP(vavg_w, 32, W, DO_VAVG)
DO_3OP(vavg_d, 64, D, DO_VAVG)
DO_3OP(vavgr_b, 8, B, DO_VAVGR)
DO_3OP(vavgr_h, 16, H, DO_VAVGR)
DO_3OP(vavgr_w, 32, W, DO_VAVGR)
DO_3OP(vavgr_d, 64, D, DO_VAVGR)
DO_3OP(vavg_bu, 8, UB, DO_VAVG)
DO_3OP(vavg_hu, 16, UH, DO_VAVG)
DO_3OP(vavg_wu, 32, UW, DO_VAVG)
DO_3OP(vavg_du, 64, UD, DO_VAVG)
DO_3OP(vavgr_bu, 8, UB, DO_VAVGR)
DO_3OP(vavgr_hu, 16, UH, DO_VAVGR)
DO_3OP(vavgr_wu, 32, UW, DO_VAVGR)
DO_3OP(vavgr_du, 64, UD, DO_VAVGR)

#define DO_VABSD(a, b) ((a > b) ? (a - b) : (b - a))

DO_3OP(vabsd_b, 8, B, DO_VABSD)
DO_3OP(vabsd_h, 16, H, DO_VABSD)
DO_3OP(vabsd_w, 32, W, DO_VABSD)
DO_3OP(vabsd_d, 64, D, DO_VABSD)
DO_3OP(vabsd_bu, 8, UB, DO_VABSD)
DO_3OP(vabsd_hu, 16, UH, DO_VABSD)
DO_3OP(vabsd_wu, 32, UW, DO_VABSD)
DO_3OP(vabsd_du, 64, UD, DO_VABSD)

#define DO_VABS(a) ((a < 0) ? (-a) : (a))

#define DO_VADDA(NAME, BIT, E, DO_OP)                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E(i) = DO_OP(Vj->E(i)) + DO_OP(Vk->E(i));       \
    }                                                       \
}

DO_VADDA(vadda_b, 8, B, DO_VABS)
DO_VADDA(vadda_h, 16, H, DO_VABS)
DO_VADDA(vadda_w, 32, W, DO_VABS)
DO_VADDA(vadda_d, 64, D, DO_VABS)

#define DO_MIN(a, b) (a < b ? a : b)
#define DO_MAX(a, b) (a > b ? a : b)
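/*
 * Min/max against an immediate: the 64-bit immediate is converted to the
 * lane type TD before comparing, so the signed variants see a negative
 * immediate as negative rather than as a large unsigned value.
 */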
#define VMINMAXI(NAME, BIT, E, DO_OP)                           \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    typedef __typeof(Vd->E(0)) TD;                              \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = DO_OP(Vj->E(i), (TD)imm);                    \
    }                                                           \
}

VMINMAXI(vmini_b, 8, B, DO_MIN)
VMINMAXI(vmini_h, 16, H, DO_MIN)
VMINMAXI(vmini_w, 32, W, DO_MIN)
VMINMAXI(vmini_d, 64, D, DO_MIN)
VMINMAXI(vmaxi_b, 8, B, DO_MAX)
VMINMAXI(vmaxi_h, 16, H, DO_MAX)
VMINMAXI(vmaxi_w, 32, W, DO_MAX)
VMINMAXI(vmaxi_d, 64, D, DO_MAX)
VMINMAXI(vmini_bu, 8, UB, DO_MIN)
VMINMAXI(vmini_hu, 16, UH, DO_MIN)
VMINMAXI(vmini_wu, 32, UW, DO_MIN)
VMINMAXI(vmini_du, 64, UD, DO_MIN)
VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
VMINMAXI(vmaxi_du, 64, UD, DO_MAX)

/*
 * High half of a widening multiply: the product is formed in the wider
 * type T (taken from E1) and shifted right by BIT to keep only the high
 * part.  The DO_OP argument is never expanded, so DO_MUH below need not
 * be defined; it is kept for symmetry with the other macro tables.
 */
#define DO_VMUH(NAME, BIT, E1, E2, DO_OP)                   \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    typedef __typeof(Vd->E1(0)) T;                          \
                                                            \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \
    }                                                       \
}

void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    uint64_t l, h1, h2;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    /* Only the high 64 bits are kept; the low half l is discarded. */
    muls64(&l, &h1, Vj->D(0), Vk->D(0));
    muls64(&l, &h2, Vj->D(1), Vk->D(1));

    Vd->D(0) = h1;
    Vd->D(1) = h2;
}

DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
DO_VMUH(vmuh_w, 32, D, W, DO_MUH)

void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    uint64_t l, h1, h2;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    mulu64(&l, &h1, Vj->D(0), Vk->D(0));
    mulu64(&l, &h2, Vj->D(1), Vk->D(1));

    Vd->D(0) = h1;
    Vd->D(1) = h2;
}

DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)

#define DO_MUL(a, b) (a * b)

DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)

DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL)
DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL)
DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL)

DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL)
DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL)
DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL)

DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL)
DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL)
DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL)

DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define DO_MADD(a, b, c) (a + b * c)
#define DO_MSUB(a, b, c) (a - b * c)

#define VMADDSUB(NAME, BIT, E, DO_OP)                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                   \
    VReg *Vk = (VReg *)vk;                                  \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i), Vk->E(i));     \
    }                                                       \
}

VMADDSUB(vmadd_b, 8, B, DO_MADD)
VMADDSUB(vmadd_h, 16, H, DO_MADD)
VMADDSUB(vmadd_w, 32, W, DO_MADD)
VMADDSUB(vmadd_d, 64, D, DO_MADD)
VMADDSUB(vmsub_b, 8, B, DO_MSUB)
VMADDSUB(vmsub_h, 16, H, DO_MSUB)
VMADDSUB(vmsub_w, 32, W, DO_MSUB)
VMADDSUB(vmsub_d, 64, D, DO_MSUB)
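/*
 * Widening multiply-accumulate: each destination lane accumulates the
 * product of two narrower source lanes, e.g. vmaddwev_h_b computes
 *     Vd->H(i) += (int16_t)Vj->B(2 * i) * (int16_t)Vk->B(2 * i)
 * VMADDWEV takes the even source lanes, VMADDWOD the odd ones.
 */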
#define VMADDWEV(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)       \
{                                                                 \
    int i;                                                        \
    VReg *Vd = (VReg *)vd;                                        \
    VReg *Vj = (VReg *)vj;                                        \
    VReg *Vk = (VReg *)vk;                                        \
    typedef __typeof(Vd->E1(0)) TD;                               \
                                                                  \
    for (i = 0; i < LSX_LEN/BIT; i++) {                           \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                             \
}

VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL)
VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL)
VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL)
VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL)
VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL)
VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWOD(NAME, BIT, E1, E2, DO_OP)                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    typedef __typeof(Vd->E1(0)) TD;                         \
                                                            \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1),           \
                           (TD)Vk->E2(2 * i + 1));          \
    }                                                       \
}

VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL)
VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL)
VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL)
VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL)
VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL)
VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    typedef __typeof(Vd->ES1(0)) TS1;                       \
    typedef __typeof(Vd->EU1(0)) TU1;                       \
                                                            \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i),            \
                            (TS1)Vk->ES2(2 * i));           \
    }                                                       \
}

VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    typedef __typeof(Vd->ES1(0)) TS1;                       \
    typedef __typeof(Vd->EU1(0)) TU1;                       \
                                                            \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1),        \
                            (TS1)Vk->ES2(2 * i + 1));       \
    }                                                       \
}

VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)

/*
 * Division helpers guard the two undefined cases: division by zero
 * yields 0, and signed overflow (the most negative value divided by -1,
 * detected via N == -N) yields N for the quotient and 0 for the
 * remainder instead of trapping.
 */
#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M)
#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M)
#define DO_DIV(N, M)  (unlikely(M == 0) ? 0 :                       \
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
#define DO_REM(N, M)  (unlikely(M == 0) ? 0 :                       \
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)

#define VDIV(NAME, BIT, E, DO_OP)                        \
void HELPER(NAME)(CPULoongArchState *env,                \
                  uint32_t vd, uint32_t vj, uint32_t vk) \
{                                                        \
    int i;                                               \
    VReg *Vd = &(env->fpr[vd].vreg);                     \
    VReg *Vj = &(env->fpr[vj].vreg);                     \
    VReg *Vk = &(env->fpr[vk].vreg);                     \
    for (i = 0; i < LSX_LEN/BIT; i++) {                  \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));            \
    }                                                    \
}

VDIV(vdiv_b, 8, B, DO_DIV)
VDIV(vdiv_h, 16, H, DO_DIV)
VDIV(vdiv_w, 32, W, DO_DIV)
VDIV(vdiv_d, 64, D, DO_DIV)
VDIV(vdiv_bu, 8, UB, DO_DIVU)
VDIV(vdiv_hu, 16, UH, DO_DIVU)
VDIV(vdiv_wu, 32, UW, DO_DIVU)
VDIV(vdiv_du, 64, UD, DO_DIVU)
VDIV(vmod_b, 8, B, DO_REM)
VDIV(vmod_h, 16, H, DO_REM)
VDIV(vmod_w, 32, W, DO_REM)
VDIV(vmod_d, 64, D, DO_REM)
VDIV(vmod_bu, 8, UB, DO_REMU)
VDIV(vmod_hu, 16, UH, DO_REMU)
VDIV(vmod_wu, 32, UW, DO_REMU)
VDIV(vmod_du, 64, UD, DO_REMU)
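/*
 * Saturation helpers: "max" holds the upper bound for the lane type.
 * For the signed forms, ~max is the matching lower bound -(max + 1);
 * e.g. with max = 0x7f, (TD)~max is -0x80 for an 8-bit lane.
 */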
#define VSAT_S(NAME, BIT, E)                                    \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    typedef __typeof(Vd->E(0)) TD;                              \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max :               \
                   Vj->E(i) < (TD)~max ? (TD)~max : Vj->E(i);   \
    }                                                           \
}

VSAT_S(vsat_b, 8, B)
VSAT_S(vsat_h, 16, H)
VSAT_S(vsat_w, 32, W)
VSAT_S(vsat_d, 64, D)

#define VSAT_U(NAME, BIT, E)                                    \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    typedef __typeof(Vd->E(0)) TD;                              \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i);     \
    }                                                           \
}

VSAT_U(vsat_bu, 8, UB)
VSAT_U(vsat_hu, 16, UH)
VSAT_U(vsat_wu, 32, UW)
VSAT_U(vsat_du, 64, UD)

/*
 * Sign/zero-extend the high half of the source vector: destination
 * lane i is the widened source lane i + LSX_LEN/BIT.
 */
#define VEXTH(NAME, BIT, E1, E2)                                    \
void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
{                                                                   \
    int i;                                                          \
    VReg *Vd = &(env->fpr[vd].vreg);                                \
    VReg *Vj = &(env->fpr[vj].vreg);                                \
                                                                    \
    for (i = 0; i < LSX_LEN/BIT; i++) {                             \
        Vd->E1(i) = Vj->E2(i + LSX_LEN/BIT);                        \
    }                                                               \
}

void HELPER(vexth_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    Vd->Q(0) = int128_makes64(Vj->D(1));
}

void HELPER(vexth_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    Vd->Q(0) = int128_make64((uint64_t)Vj->D(1));
}

VEXTH(vexth_h_b, 16, H, B)
VEXTH(vexth_w_h, 32, W, H)
VEXTH(vexth_d_w, 64, D, W)
VEXTH(vexth_hu_bu, 16, UH, UB)
VEXTH(vexth_wu_hu, 32, UW, UH)
VEXTH(vexth_du_wu, 64, UD, UW)

/* Sign-propagating copy: 0 if a is zero, -b if a is negative, else b. */
#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)

DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV)
DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV)
DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV)
DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV)