target-ppc: Altivec 2.07: Multiply Even/Odd Word Instructions

This patch adds the Multiply Even/Odd Word instructions introduced in
Power ISA Version 2.07:

  - Vector Multiply Even Unsigned Word (vmuleuw)
  - Vector Multiply Even Signed Word (vmulesw)
  - Vector Multiply Odd Unsigned Word (vmulouw)
  - Vector Multiply Odd Signed Word (vmulosw)

Signed-off-by: Tom Musta <tommusta@gmail.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Commit 63be09365a (parent aa9e930c88)
Authored by Tom Musta, 2014-02-12 15:23:01 -06:00; committed by Alexander Graf
3 changed files with 14 additions and 0 deletions
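For reference, the semantics of the four new instructions are simple: each multiplies the even-numbered or odd-numbered 32-bit word elements of the two source vectors and writes the two 64-bit products to the destination. A minimal C model of the unsigned pair, based only on the ISA description (the avr128 type and model_* names below are illustrative, not QEMU code; element 0 is the most significant word, following the ISA's big-endian element numbering):

    #include <stdint.h>

    /* Illustrative 128-bit vector register: four 32-bit words, with
     * element 0 the most significant word (ISA numbering). */
    typedef struct { uint32_t w[4]; } avr128;

    /* vmuleuw: multiply the even-numbered unsigned words (elements 0 and 2),
     * producing two 64-bit products. */
    static void model_vmuleuw(uint64_t d[2], const avr128 *a, const avr128 *b)
    {
        d[0] = (uint64_t)a->w[0] * b->w[0];
        d[1] = (uint64_t)a->w[2] * b->w[2];
    }

    /* vmulouw: the same for the odd-numbered words (elements 1 and 3). */
    static void model_vmulouw(uint64_t d[2], const avr128 *a, const avr128 *b)
    {
        d[0] = (uint64_t)a->w[1] * b->w[1];
        d[1] = (uint64_t)a->w[3] * b->w[3];
    }

The signed forms (vmulesw/vmulosw) are identical except that the operands are treated as int32_t and the products as int64_t.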

target-ppc/helper.h

@@ -166,12 +166,16 @@ DEF_HELPER_3(vmrghh, void, avr, avr, avr)
 DEF_HELPER_3(vmrghw, void, avr, avr, avr)
 DEF_HELPER_3(vmulesb, void, avr, avr, avr)
 DEF_HELPER_3(vmulesh, void, avr, avr, avr)
+DEF_HELPER_3(vmulesw, void, avr, avr, avr)
 DEF_HELPER_3(vmuleub, void, avr, avr, avr)
 DEF_HELPER_3(vmuleuh, void, avr, avr, avr)
+DEF_HELPER_3(vmuleuw, void, avr, avr, avr)
 DEF_HELPER_3(vmulosb, void, avr, avr, avr)
 DEF_HELPER_3(vmulosh, void, avr, avr, avr)
+DEF_HELPER_3(vmulosw, void, avr, avr, avr)
 DEF_HELPER_3(vmuloub, void, avr, avr, avr)
 DEF_HELPER_3(vmulouh, void, avr, avr, avr)
+DEF_HELPER_3(vmulouw, void, avr, avr, avr)
 DEF_HELPER_3(vsrab, void, avr, avr, avr)
 DEF_HELPER_3(vsrah, void, avr, avr, avr)
 DEF_HELPER_3(vsraw, void, avr, avr, avr)

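For context (not part of the diff): QEMU's DEF_HELPER_3 machinery turns each declaration above into a helper prototype, with the "avr" argument type mapping to a pointer to the Altivec register union. For the new word forms the generated prototypes should be equivalent to the following (presumed, derived from the declarations above rather than quoted from the generated headers):

    void helper_vmulesw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b);
    void helper_vmuleuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b);
    void helper_vmulosw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b);
    void helper_vmulouw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b);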
target-ppc/int_helper.c

@@ -1005,8 +1005,10 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
 VMUL_DO(mulo##suffix, mul_element, prod_element, cast, 0)
 VMUL(sb, s8, s16, int16_t)
 VMUL(sh, s16, s32, int32_t)
+VMUL(sw, s32, s64, int64_t)
 VMUL(ub, u8, u16, uint16_t)
 VMUL(uh, u16, u32, uint32_t)
+VMUL(uw, u32, u64, uint64_t)
 #undef VMUL_DO
 #undef VMUL

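The VMUL/VMUL_DO macro pair (defined above this hunk and unchanged by the patch) stamps out one even-element and one odd-element helper per element size, so the two new VMUL() lines are all that is needed for the word forms. Ignoring the host-endianness index handling the real macro has to perform, the effect of the new instantiation should be roughly as below (sketch_vmuleuw is a hypothetical name; the u32[]/u64[] fields refer to the ppc_avr_t union, assuming a big-endian host layout so that u32[0] is architectural element 0):

    /* Hand-expanded sketch of VMUL(uw, u32, u64, uint64_t) for the "even"
     * case: each 64-bit result element is the product of the corresponding
     * even-numbered 32-bit elements of the two sources. */
    static void sketch_vmuleuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
    {
        int i;

        for (i = 0; i < 2; i++) {
            r->u64[i] = (uint64_t)a->u32[2 * i] * b->u32[2 * i];
        }
    }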
target-ppc/translate.c

@@ -6963,12 +6963,16 @@ GEN_VXFORM(vmrglh, 6, 5);
 GEN_VXFORM(vmrglw, 6, 6);
 GEN_VXFORM(vmuloub, 4, 0);
 GEN_VXFORM(vmulouh, 4, 1);
+GEN_VXFORM(vmulouw, 4, 2);
 GEN_VXFORM(vmulosb, 4, 4);
 GEN_VXFORM(vmulosh, 4, 5);
+GEN_VXFORM(vmulosw, 4, 6);
 GEN_VXFORM(vmuleub, 4, 8);
 GEN_VXFORM(vmuleuh, 4, 9);
+GEN_VXFORM(vmuleuw, 4, 10);
 GEN_VXFORM(vmulesb, 4, 12);
 GEN_VXFORM(vmulesh, 4, 13);
+GEN_VXFORM(vmulesw, 4, 14);
 GEN_VXFORM(vslb, 2, 4);
 GEN_VXFORM(vslh, 2, 5);
 GEN_VXFORM(vslw, 2, 6);
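The GEN_VXFORM() uses in this first hunk instantiate the per-instruction translation functions. Going by the existing GEN_VXFORM macro in translate.c, each new line should expand to approximately the following (an approximation of the expansion, not a quote; exact temporaries and checks may differ):

    /* Approximate expansion of GEN_VXFORM(vmulouw, 4, 2): take the
     * VPU-unavailable exception if Altivec is disabled, otherwise build
     * pointers to the three vector registers and call the new helper. */
    static void gen_vmulouw(DisasContext *ctx)
    {
        TCGv_ptr ra, rb, rd;

        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
        ra = gen_avr_ptr(rA(ctx->opcode));
        rb = gen_avr_ptr(rB(ctx->opcode));
        rd = gen_avr_ptr(rD(ctx->opcode));
        gen_helper_vmulouw(rd, ra, rb);
        tcg_temp_free_ptr(ra);
        tcg_temp_free_ptr(rb);
        tcg_temp_free_ptr(rd);
    }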
@@ -10371,12 +10375,16 @@ GEN_VXFORM(vmrglh, 6, 5),
 GEN_VXFORM(vmrglw, 6, 6),
 GEN_VXFORM(vmuloub, 4, 0),
 GEN_VXFORM(vmulouh, 4, 1),
+GEN_VXFORM_207(vmulouw, 4, 2),
 GEN_VXFORM(vmulosb, 4, 4),
 GEN_VXFORM(vmulosh, 4, 5),
+GEN_VXFORM_207(vmulosw, 4, 6),
 GEN_VXFORM(vmuleub, 4, 8),
 GEN_VXFORM(vmuleuh, 4, 9),
+GEN_VXFORM_207(vmuleuw, 4, 10),
 GEN_VXFORM(vmulesb, 4, 12),
 GEN_VXFORM(vmulesh, 4, 13),
+GEN_VXFORM_207(vmulesw, 4, 14),
 GEN_VXFORM(vslb, 2, 4),
 GEN_VXFORM(vslh, 2, 5),
 GEN_VXFORM(vslw, 2, 6),
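In this second hunk, which populates the opcode table, the four new entries use GEN_VXFORM_207 rather than plain GEN_VXFORM: the word-sized multiplies only exist from ISA 2.07 onward, so their handlers must be registered conditionally while the byte and halfword forms remain available everywhere. The macro itself is not shown in this diff; assuming it follows the pattern of the other 2.07-gated table macros, it should amount to something like:

    /* Assumed definition (outside this diff): register a VX-form handler
     * under major opcode 4, but only for CPUs whose instruction flags
     * include PPC2_ALTIVEC_207. */
    #define GEN_VXFORM_207(name, opc2, opc3)                              \
        GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE,       \
                      PPC2_ALTIVEC_207)

Keeping the gate in the opcode table, rather than inside the gen_* functions, means older CPU models simply never decode the new encodings.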