/*
 * PowerPC integer and vector emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "crypto/aes.h"
#include "fpu/softfloat.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "tcg/tcg-gvec-desc.h"

#include "helper_regs.h"

/*****************************************************************************/
/* Fixed point operations helpers */

static inline void helper_update_ov_legacy(CPUPPCState *env, int ov)
{
    if (unlikely(ov)) {
        env->so = env->ov = env->ov32 = 1;
    } else {
        env->ov = env->ov32 = 0;
    }
}

target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
                           uint32_t oe)
{
    uint64_t rt = 0;
    int overflow = 0;

    uint64_t dividend = (uint64_t)ra << 32;
    uint64_t divisor = (uint32_t)rb;

    if (unlikely(divisor == 0)) {
        overflow = 1;
    } else {
        rt = dividend / divisor;
        overflow = rt > UINT32_MAX;
    }

    if (unlikely(overflow)) {
        rt = 0; /* Undefined */
    }

    if (oe) {
        helper_update_ov_legacy(env, overflow);
    }

    return (target_ulong)rt;
}
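
/*
 * Worked example (editor's illustration, not from the original source):
 * divweu divides (RA || 32 zero bits) by the low 32 bits of RB.
 *   ra = 1, rb = 2:  dividend = 0x1_0000_0000, quotient = 0x8000_0000,
 *                    which fits in 32 bits, so rt = 0x8000_0000.
 *   ra = 1, rb = 1:  quotient = 0x1_0000_0000 > UINT32_MAX, so the
 *                    result is treated as undefined and rt is forced to 0.
 */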

target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb,
                          uint32_t oe)
{
    int64_t rt = 0;
    int overflow = 0;

    int64_t dividend = (int64_t)ra << 32;
    int64_t divisor = (int64_t)((int32_t)rb);

    if (unlikely((divisor == 0) ||
                 ((divisor == -1ull) && (dividend == INT64_MIN)))) {
        overflow = 1;
    } else {
        rt = dividend / divisor;
        overflow = rt != (int32_t)rt;
    }

    if (unlikely(overflow)) {
        rt = 0; /* Undefined */
    }

    if (oe) {
        helper_update_ov_legacy(env, overflow);
    }

    return (target_ulong)rt;
}

#if defined(TARGET_PPC64)

uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
{
    uint64_t rt = 0;
    int overflow = 0;

    if (unlikely(rb == 0 || ra >= rb)) {
        overflow = 1;
        rt = 0; /* Undefined */
    } else {
        divu128(&rt, &ra, rb);
    }

    if (oe) {
        helper_update_ov_legacy(env, overflow);
    }

    return rt;
}
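
/*
 * Worked example (editor's illustration): divdeu divides the 128-bit
 * value (RA || 64 zero bits) by RB.  With ra = 1 and rb = 2 the dividend
 * is 2^64 and the quotient is 2^63 (0x8000_0000_0000_0000).  The
 * ra >= rb check above rejects exactly the cases where the quotient
 * would not fit in 64 bits (or where rb == 0).
 */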

uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
{
    uint64_t rt = 0;
    int64_t ra = (int64_t)rau;
    int64_t rb = (int64_t)rbu;
    int overflow = 0;

    if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
        overflow = 1;
        rt = 0; /* Undefined */
    } else {
        divs128(&rt, &ra, rb);
    }

    if (oe) {
        helper_update_ov_legacy(env, overflow);
    }

    return rt;
}

#endif

#if defined(TARGET_PPC64)
/* if x = 0xab, returns 0xabababababababab */
#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))

/*
 * subtract 1 from each byte, and with inverse, check if MSB is set at each
 * byte.
 * i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
 *      (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
 */
#define haszero(v) (((v) - pattern(0x01)) & ~(v) & pattern(0x80))

/* When you XOR the pattern and there is a match, that byte will be zero */
#define hasvalue(x, n)  (haszero((x) ^ pattern(n)))
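
/*
 * Worked example (editor's illustration): searching for byte 0x4e.
 * pattern(0x4e) = 0x4e4e_4e4e_4e4e_4e4e.  XOR-ing it into a word turns
 * every byte that equals 0x4e into 0x00; haszero() then flags that byte:
 * per byte, ((0x00 - 0x01) & ~0x00) & 0x80 = 0x80, while any non-zero
 * byte yields 0.  So hasvalue(rb, ra) is non-zero exactly when some byte
 * of rb equals the low byte of ra, which is what cmpeqb records in CR.
 */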

uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb)
{
    return hasvalue(rb, ra) ? CRF_GT : 0;
}

#undef pattern
#undef haszero
#undef hasvalue

/*
 * Return a random number.
 */
uint64_t helper_darn32(void)
{
    Error *err = NULL;
    uint32_t ret;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        qemu_log_mask(LOG_UNIMP, "darn: Crypto failure: %s",
                      error_get_pretty(err));
        error_free(err);
        return -1;
    }

    return ret;
}

uint64_t helper_darn64(void)
{
    Error *err = NULL;
    uint64_t ret;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        qemu_log_mask(LOG_UNIMP, "darn: Crypto failure: %s",
                      error_get_pretty(err));
        error_free(err);
        return -1;
    }

    return ret;
}

uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
{
    int i;
    uint64_t ra = 0;

    for (i = 0; i < 8; i++) {
        int index = (rs >> (i * 8)) & 0xFF;
        if (index < 64) {
            if (rb & PPC_BIT(index)) {
                ra |= 1 << i;
            }
        }
    }
    return ra;
}
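
/*
 * Worked example (editor's illustration of the loop above): each index
 * byte of rs selects one bit of rb via big-endian bit numbering
 * (PPC_BIT(n) is bit 63 - n).  With rs = 0x3f and rb = 0x1, the least
 * significant byte of rs is 63 and selects PPC_BIT(63) -- the set low
 * bit of rb -- so bit 0 of ra becomes 1; the other seven index bytes
 * are 0 and select the clear high bit, giving ra = 1.  Indices of 64
 * or more always contribute a 0 bit.
 */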

#endif

target_ulong helper_cmpb(target_ulong rs, target_ulong rb)
{
    target_ulong mask = 0xff;
    target_ulong ra = 0;
    int i;

    for (i = 0; i < sizeof(target_ulong); i++) {
        if ((rs & mask) == (rb & mask)) {
            ra |= mask;
        }
        mask <<= 8;
    }
    return ra;
}
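
/*
 * Worked example (editor's illustration): cmpb sets each result byte to
 * 0xff where the corresponding bytes of rs and rb are equal, else 0x00.
 * On a 64-bit target, rs = 0x1122334455667788 and rb = 0x1100330055007700
 * match in every other byte, so ra = 0xff00ff00ff00ff00.
 */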

/* shift right arithmetic helper */
target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
                         target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->ca32 = env->ca = 0;
            } else {
                env->ca32 = env->ca = 1;
            }
        } else {
            ret = (int32_t)value;
            env->ca32 = env->ca = 0;
        }
    } else {
        ret = (int32_t)value >> 31;
        env->ca32 = env->ca = (ret != 0);
    }
    return (target_long)ret;
}
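
/*
 * Worked example (editor's illustration): sraw of value = 0xfffffff5
 * (-11) by shift = 1 yields ret = -6 and sets CA/CA32, because the
 * result is negative and a 1 bit (value & 0x1) was shifted out; adding
 * CA back turns the arithmetic shift into a division that truncates
 * toward zero (-6 + 1 = -5 = -11 / 2).  Shift amounts of 32-63
 * (shift & 0x20) reduce the value to its sign bit, with CA set only
 * for negative inputs.
 */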

#if defined(TARGET_PPC64)
target_ulong helper_srad(CPUPPCState *env, target_ulong value,
                         target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->ca32 = env->ca = 0;
            } else {
                env->ca32 = env->ca = 1;
            }
        } else {
            ret = (int64_t)value;
            env->ca32 = env->ca = 0;
        }
    } else {
        ret = (int64_t)value >> 63;
        env->ca32 = env->ca = (ret != 0);
    }
    return ret;
}
#endif

#if defined(TARGET_PPC64)
target_ulong helper_popcntb(target_ulong val)
{
    /* Note that we don't fold past bytes */
    val = (val & 0x5555555555555555ULL) + ((val >> 1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    return val;
}
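
/*
 * Worked example (editor's illustration) of the bit-slice fold for one
 * byte, val = 0xff:
 *   after the 1-bit step:  0xaa  (four 2-bit fields, each holding 2)
 *   after the 2-bit step:  0x44  (two 4-bit fields, each holding 4)
 *   after the 4-bit step:  0x08  (one byte holding the popcount, 8)
 * Because the folds stop at byte width, every byte of the result holds
 * the population count of the corresponding input byte, as popcntb
 * requires.
 */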

target_ulong helper_popcntw(target_ulong val)
{
    /* Note that we don't fold past words. */
    val = (val & 0x5555555555555555ULL) + ((val >> 1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    return val;
}
#else
target_ulong helper_popcntb(target_ulong val)
{
    /* Note that we don't fold past bytes */
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}
#endif

uint64_t helper_CFUGED(uint64_t src, uint64_t mask)
{
    /*
     * Instead of processing the mask bit-by-bit from the most significant to
     * the least significant bit, as described in PowerISA, we'll handle it in
     * blocks of 'n' zeros/ones from LSB to MSB. To avoid the decision to use
     * ctz or cto, we negate the mask at the end of the loop.
     */
    target_ulong m, left = 0, right = 0;
    unsigned int n, i = 64;
    bool bit = false; /* tracks if we are processing zeros or ones */

    if (mask == 0 || mask == -1) {
        return src;
    }

    /* Processes the mask in blocks, from LSB to MSB */
    while (i) {
        /* Find how many bits we should take */
        n = ctz64(mask);
        if (n > i) {
            n = i;
        }

        /*
         * Extracts 'n' trailing bits of src and puts them on the leading 'n'
         * bits of 'right' or 'left', pushing down the previously extracted
         * values.
         */
        m = (1ll << n) - 1;
        if (bit) {
            right = ror64(right | (src & m), n);
        } else {
            left = ror64(left | (src & m), n);
        }

        /*
         * Discards the processed bits from 'src' and 'mask'. Note that we are
         * removing 'n' trailing zeros from 'mask', but the logical shift will
         * add 'n' leading zeros back, so the population count of 'mask' is
         * kept the same.
         */
        src >>= n;
        mask >>= n;
        i -= n;
        bit = !bit;
        mask = ~mask;
    }

    /*
     * At the end, 'right' was ror'ed ctpop(mask) times. To put it back in
     * place, we'll shift it 64 - ctpop(mask) more times.
     */
    if (bit) {
        n = ctpop64(mask);
    } else {
        n = 64 - ctpop64(mask);
    }

    return left | (right >> n);
}
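
/*
 * Editor's illustrative sketch (not part of the original file): a naive
 * bit-at-a-time reference of the centrifuge operation that the block-based
 * loop above implements.  Bits of src whose mask bit is 1 gather,
 * order-preserving, at the low end; bits whose mask bit is 0 gather
 * above them.  E.g. src = 0xb2, mask = 0xf0 gives 0x2b: the four mask=1
 * bits (0xb) end up right-justified below the mask=0 bits (0x2).
 */
#if 0 /* example only, kept out of the build */
static uint64_t cfuged_reference(uint64_t src, uint64_t mask)
{
    uint64_t lo = 0, hi = 0;
    unsigned nlo = 0, nhi = 0;

    for (int i = 0; i < 64; i++) {
        uint64_t bit = (src >> i) & 1;
        if ((mask >> i) & 1) {
            lo |= bit << nlo++;   /* mask=1 bits, right-justified */
        } else {
            hi |= bit << nhi++;   /* mask=0 bits, above them */
        }
    }
    return nlo == 64 ? lo : (hi << nlo) | lo;
}
#endif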

uint64_t helper_PDEPD(uint64_t src, uint64_t mask)
{
    int i, o;
    uint64_t result = 0;

    if (mask == -1) {
        return src;
    }

    for (i = 0; mask != 0; i++) {
        o = ctz64(mask);
        mask &= mask - 1;
        result |= ((src >> i) & 1) << o;
    }

    return result;
}

uint64_t helper_PEXTD(uint64_t src, uint64_t mask)
{
    int i, o;
    uint64_t result = 0;

    if (mask == -1) {
        return src;
    }

    for (o = 0; mask != 0; o++) {
        i = ctz64(mask);
        mask &= mask - 1;
        result |= ((src >> i) & 1) << o;
    }

    return result;
}
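
/*
 * Worked example (editor's illustration): PDEPD scatters the low bits
 * of src into the set-bit positions of mask; PEXTD is its inverse and
 * gathers them back.  With mask = 0x16 (bits 1, 2, 4 set):
 *   PDEPD(0x5, 0x16)  = 0x12   (src bits 0,1,2 land in mask bits 1,2,4)
 *   PEXTD(0x12, 0x16) = 0x5    (mask bits 1,2,4 hold 1,0,1)
 * The 'mask &= mask - 1' step clears the lowest set bit, so ctz64()
 * walks the set bits from least to most significant.
 */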

/*****************************************************************************/
/* Altivec extension helpers */
#if HOST_BIG_ENDIAN
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--)
#endif

/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
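
/*
 * Worked example (editor's illustration): cvtsdsw clamps an int64_t to
 * the int32_t range, so cvtsdsw(0x1_0000_0000, &sat) returns INT32_MAX
 * and sets sat = 1; cvtshub(-5, &sat) clamps a signed halfword to an
 * unsigned byte, returning 0 and setting sat = 1.  'sat' is only ever
 * set, never cleared, so one flag can accumulate over a whole vector.
 */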

void helper_mtvscr(CPUPPCState *env, uint32_t vscr)
{
    ppc_store_vscr(env, vscr);
}

uint32_t helper_mfvscr(CPUPPCState *env)
{
    return ppc_get_vscr(env);
}

static inline void set_vscr_sat(CPUPPCState *env)
{
    /* The choice of non-zero value is arbitrary. */
    env->vscr_sat.u32[0] = 1;
}

/* vprtybq */
void helper_VPRTYBQ(ppc_avr_t *r, ppc_avr_t *b, uint32_t v)
{
    uint64_t res = b->u64[0] ^ b->u64[1];
    res ^= res >> 32;
    res ^= res >> 16;
    res ^= res >> 8;
    r->VsrD(1) = res & 1;
    r->VsrD(0) = 0;
}

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b)                                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f32); i++) {                      \
            r->f32[i] = func(a->f32[i], b->f32[i], &env->vec_status);   \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
VARITHFP(minfp, float32_min)
VARITHFP(maxfp, float32_max)
#undef VARITHFP

#define VARITHFPFMA(suffix, type)                                       \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b, ppc_avr_t *c)                   \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f32); i++) {                      \
            r->f32[i] = float32_muladd(a->f32[i], c->f32[i], b->f32[i], \
                                       type, &env->vec_status);         \
        }                                                               \
    }
VARITHFPFMA(maddfp, 0);
VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
#undef VARITHFPFMA

#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *vscr_sat,              \
                        ppc_avr_t *a, ppc_avr_t *b, uint32_t desc)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            VARITHSAT_CASE(optype, op, cvt, element);                   \
        }                                                               \
        if (sat) {                                                      \
            vscr_sat->u32[0] = 1;                                       \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED

#define VAVG(name, element, etype)                                          \
    void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t v)\
    {                                                                       \
        int i;                                                              \
                                                                            \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;      \
            r->element[i] = x >> 1;                                         \
        }                                                                   \
    }

VAVG(VAVGSB, s8, int16_t)
VAVG(VAVGUB, u8, uint16_t)
VAVG(VAVGSH, s16, int32_t)
VAVG(VAVGUH, u16, uint32_t)
VAVG(VAVGSW, s32, int64_t)
VAVG(VAVGUW, u32, uint64_t)
#undef VAVG
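
/*
 * Worked example (editor's illustration): the widened element type keeps
 * the intermediate sum exact and the '+ 1' rounds the average up:
 *   VAVGUB(1, 2)     -> (1 + 2 + 1) >> 1     = 2   (1.5 rounds to 2)
 *   VAVGUB(255, 255) -> (255 + 255 + 1) >> 1 = 255 (no 8-bit overflow)
 */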

#define VABSDU(name, element)                                               \
    void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t v)\
    {                                                                       \
        int i;                                                              \
                                                                            \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
            r->element[i] = (a->element[i] > b->element[i]) ?               \
                            (a->element[i] - b->element[i]) :               \
                            (b->element[i] - a->element[i]);                \
        }                                                                   \
    }

/*
 * VABSDU - Vector absolute difference unsigned
 *   name    - instruction mnemonic suffix (b: byte, h: halfword, w: word)
 *   element - element type to access from vector
 */
VABSDU(VABSDUB, u8)
VABSDU(VABSDUH, u16)
VABSDU(VABSDUW, u32)
#undef VABSDU

#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f32); i++) {                      \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f32[i] = float32_scalbn(t, -uim, &env->vec_status);      \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF

#define VCMPNEZ(NAME, ELEM) \
void helper_##NAME(ppc_vsr_t *t, ppc_vsr_t *a, ppc_vsr_t *b, uint32_t desc) \
{                                                                            \
    for (int i = 0; i < ARRAY_SIZE(t->ELEM); i++) {                          \
        t->ELEM[i] = ((a->ELEM[i] == 0) || (b->ELEM[i] == 0) ||              \
                      (a->ELEM[i] != b->ELEM[i])) ? -1 : 0;                  \
    }                                                                        \
}
VCMPNEZ(VCMPNEZB, u8)
VCMPNEZ(VCMPNEZH, u16)
VCMPNEZ(VCMPNEZW, u32)
#undef VCMPNEZ

#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f32); i++) {                      \
            uint32_t result;                                            \
            FloatRelation rel =                                         \
                float32_compare_quiet(a->f32[i], b->f32[i],             \
                                      &env->vec_status);                \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)          \
    VCMPFP_DO(suffix, compare, order, 0)        \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP

static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
                                    ppc_avr_t *a, ppc_avr_t *b, int record)
{
    int i;
    int all_in = 0;

    for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
        FloatRelation le_rel = float32_compare_quiet(a->f32[i], b->f32[i],
                                                     &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            all_in = 1;
        } else {
            float32 bneg = float32_chs(b->f32[i]);
            FloatRelation ge_rel = float32_compare_quiet(a->f32[i], bneg,
                                                         &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;

            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 0);
}

void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                        ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 1);
}

#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
                                                                        \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f32); i++) {                      \
            if (float32_is_any_nan(b->f32[i])) {                        \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f32[i], &s);          \
                int64_t j;                                              \
                                                                        \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            set_vscr_sat(env);                                          \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT

typedef int64_t do_ger(uint32_t, uint32_t, uint32_t);

static int64_t ger_rank8(uint32_t a, uint32_t b, uint32_t mask)
{
    int64_t psum = 0;
    for (int i = 0; i < 8; i++, mask >>= 1) {
        if (mask & 1) {
            psum += (int64_t)sextract32(a, 4 * i, 4) * sextract32(b, 4 * i, 4);
        }
    }
    return psum;
}

static int64_t ger_rank4(uint32_t a, uint32_t b, uint32_t mask)
{
    int64_t psum = 0;
    for (int i = 0; i < 4; i++, mask >>= 1) {
        if (mask & 1) {
            psum += sextract32(a, 8 * i, 8) * (int64_t)extract32(b, 8 * i, 8);
        }
    }
    return psum;
}

static int64_t ger_rank2(uint32_t a, uint32_t b, uint32_t mask)
{
    int64_t psum = 0;
    for (int i = 0; i < 2; i++, mask >>= 1) {
        if (mask & 1) {
            psum += (int64_t)sextract32(a, 16 * i, 16) *
                             sextract32(b, 16 * i, 16);
        }
    }
    return psum;
}

static void xviger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, ppc_acc_t *at,
                   uint32_t mask, bool sat, bool acc, do_ger ger)
{
    uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK),
            xmsk = FIELD_EX32(mask, GER_MSK, XMSK),
            ymsk = FIELD_EX32(mask, GER_MSK, YMSK);
    uint8_t xmsk_bit, ymsk_bit;
    int64_t psum;
    int i, j;
    for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
        for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
            if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
                psum = ger(a->VsrW(i), b->VsrW(j), pmsk);
                if (acc) {
                    psum += at[i].VsrSW(j);
                }
                if (sat && psum > INT32_MAX) {
                    set_vscr_sat(env);
                    at[i].VsrSW(j) = INT32_MAX;
                } else if (sat && psum < INT32_MIN) {
                    set_vscr_sat(env);
                    at[i].VsrSW(j) = INT32_MIN;
                } else {
                    at[i].VsrSW(j) = (int32_t) psum;
                }
            } else {
                at[i].VsrSW(j) = 0;
            }
        }
    }
}
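
/*
 * Editor's illustration: xviger computes a 4x4 grid of dot products.
 * Row i of the accumulator comes from word i of 'a', column j from
 * word j of 'b'; pmsk selects which of the rank-2/4/8 partial products
 * participate, while xmsk/ymsk zero out disabled rows and columns.
 * E.g. for XVI8GER4 with all masks enabled, at[i][j] accumulates the
 * 4-term dot product of the signed bytes of a->VsrW(i) with the
 * unsigned bytes of b->VsrW(j); only the S/SPP variants saturate the
 * result to the int32 range.
 */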

QEMU_FLATTEN
void helper_XVI4GER8(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, false, false, ger_rank8);
}

QEMU_FLATTEN
void helper_XVI4GER8PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, false, true, ger_rank8);
}

QEMU_FLATTEN
void helper_XVI8GER4(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, false, false, ger_rank4);
}

QEMU_FLATTEN
void helper_XVI8GER4PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, false, true, ger_rank4);
}

QEMU_FLATTEN
void helper_XVI8GER4SPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, true, true, ger_rank4);
}

QEMU_FLATTEN
void helper_XVI16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                      ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, false, false, ger_rank2);
}

QEMU_FLATTEN
void helper_XVI16GER2S(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, true, false, ger_rank2);
}

QEMU_FLATTEN
void helper_XVI16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, false, true, ger_rank2);
}

QEMU_FLATTEN
void helper_XVI16GER2SPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    xviger(env, a, b, at, mask, true, true, ger_rank2);
}

target_ulong helper_vclzlsbb(ppc_avr_t *r)
{
    target_ulong count = 0;
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        if (r->VsrB(i) & 0x01) {
            break;
        }
        count++;
    }
    return count;
}

target_ulong helper_vctzlsbb(ppc_avr_t *r)
{
    target_ulong count = 0;
    int i;
    for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
        if (r->VsrB(i) & 0x01) {
            break;
        }
        count++;
    }
    return count;
}

void helper_VMHADDSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                      ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);

        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        set_vscr_sat(env);
    }
}

void helper_VMHRADDSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                       ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        set_vscr_sat(env);
    }
}

void helper_VMLADDUHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c,
                      uint32_t v)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}

#define VMRG_DO(name, element, access, ofs)                             \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i, half = ARRAY_SIZE(r->element) / 2;                       \
                                                                        \
        for (i = 0; i < half; i++) {                                    \
            result.access(i * 2 + 0) = a->access(i + ofs);              \
            result.access(i * 2 + 1) = b->access(i + ofs);              \
        }                                                               \
        *r = result;                                                    \
    }

#define VMRG(suffix, element, access)                  \
    VMRG_DO(mrgl##suffix, element, access, half)       \
    VMRG_DO(mrgh##suffix, element, access, 0)
VMRG(b, u8, VsrB)
VMRG(h, u16, VsrH)
VMRG(w, u32, VsrW)
#undef VMRG_DO
#undef VMRG

void helper_VMSUMMBM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
                    prod[4 * i + 2] + prod[4 * i + 3];
    }
}

void helper_VMSUMSHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

void helper_VMSUMSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        set_vscr_sat(env);
    }
}

void helper_VMSUMUBM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
                    prod[4 * i + 2] + prod[4 * i + 3];
    }
}

void helper_VMSUMUHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

void helper_VMSUMUHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        set_vscr_sat(env);
    }
}

#define VMUL_DO_EVN(name, mul_element, mul_access, prod_access, cast)   \
    void helper_V##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) {           \
            r->prod_access(i >> 1) = (cast)a->mul_access(i) *           \
                                     (cast)b->mul_access(i);            \
        }                                                               \
    }

#define VMUL_DO_ODD(name, mul_element, mul_access, prod_access, cast)   \
    void helper_V##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) {           \
            r->prod_access(i >> 1) = (cast)a->mul_access(i + 1) *       \
                                     (cast)b->mul_access(i + 1);        \
        }                                                               \
    }

#define VMUL(suffix, mul_element, mul_access, prod_access, cast)            \
    VMUL_DO_EVN(MULE##suffix, mul_element, mul_access, prod_access, cast)   \
    VMUL_DO_ODD(MULO##suffix, mul_element, mul_access, prod_access, cast)
VMUL(SB, s8, VsrSB, VsrSH, int16_t)
VMUL(SH, s16, VsrSH, VsrSW, int32_t)
VMUL(SW, s32, VsrSW, VsrSD, int64_t)
VMUL(UB, u8, VsrB, VsrH, uint16_t)
VMUL(UH, u16, VsrH, VsrW, uint32_t)
VMUL(UW, u32, VsrW, VsrD, uint64_t)
#undef VMUL_DO_EVN
#undef VMUL_DO_ODD
#undef VMUL
|
|
|
|
|
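/*
 * xxpermx selects bytes from the 32-byte concatenation s0:s1. For each
 * result byte the permute control vector supplies a 3-bit selector
 * (bits 7:5), which must match uim, and a 5-bit source index (bits 4:0);
 * bytes whose selector does not match are left zero.
 */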
void helper_XXPERMX(ppc_vsr_t *t, ppc_vsr_t *s0, ppc_vsr_t *s1, ppc_vsr_t *pcv,
                    target_ulong uim)
{
    int i, idx;
    ppc_vsr_t tmp = { .u64 = {0, 0} };

    for (i = 0; i < ARRAY_SIZE(t->u8); i++) {
        if ((pcv->VsrB(i) >> 5) == uim) {
            idx = pcv->VsrB(i) & 0x1f;
            if (idx < ARRAY_SIZE(t->u8)) {
                tmp.VsrB(i) = s0->VsrB(idx);
            } else {
                tmp.VsrB(i) = s1->VsrB(idx - ARRAY_SIZE(t->u8));
            }
        }
    }

    *t = tmp;
}

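/*
 * 128-bit integer division. Division by zero and INT128_MIN / -1 give an
 * architecturally undefined result; this implementation returns the
 * dividend unchanged in those cases.
 */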
void helper_VDIVSQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    Int128 neg1 = int128_makes64(-1);
    Int128 int128_min = int128_make128(0, INT64_MIN);
    if (likely(int128_nz(b->s128) &&
               (int128_ne(a->s128, int128_min) || int128_ne(b->s128, neg1)))) {
        t->s128 = int128_divs(a->s128, b->s128);
    } else {
        t->s128 = a->s128; /* Undefined behavior */
    }
}

void helper_VDIVUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    if (int128_nz(b->s128)) {
        t->s128 = int128_divu(a->s128, b->s128);
    } else {
        t->s128 = a->s128; /* Undefined behavior */
    }
}

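/*
 * Divide-extended helpers: the dividend is the source element shifted
 * left by the element width (high = a, low = 0), and the quotient is
 * taken from the low half of the double-width division.
 */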
void helper_VDIVESD(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    int64_t high;
    uint64_t low;
    for (i = 0; i < 2; i++) {
        high = a->s64[i];
        low = 0;
        if (unlikely((high == INT64_MIN && b->s64[i] == -1) || !b->s64[i])) {
            t->s64[i] = a->s64[i]; /* Undefined behavior */
        } else {
            divs128(&low, &high, b->s64[i]);
            t->s64[i] = low;
        }
    }
}

void helper_VDIVEUD(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    uint64_t high, low;
    for (i = 0; i < 2; i++) {
        high = a->u64[i];
        low = 0;
        if (unlikely(!b->u64[i])) {
            t->u64[i] = a->u64[i]; /* Undefined behavior */
        } else {
            divu128(&low, &high, b->u64[i]);
            t->u64[i] = low;
        }
    }
}

void helper_VDIVESQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    Int128 high, low;
    Int128 int128_min = int128_make128(0, INT64_MIN);
    Int128 neg1 = int128_makes64(-1);

    high = a->s128;
    low = int128_zero();
    if (unlikely(!int128_nz(b->s128) ||
                 (int128_eq(b->s128, neg1) && int128_eq(high, int128_min)))) {
        t->s128 = a->s128; /* Undefined behavior */
    } else {
        divs256(&low, &high, b->s128);
        t->s128 = low;
    }
}

void helper_VDIVEUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    Int128 high, low;

    high = a->s128;
    low = int128_zero();
    if (unlikely(!int128_nz(b->s128))) {
        t->s128 = a->s128; /* Undefined behavior */
    } else {
        divu256(&low, &high, b->s128);
        t->s128 = low;
    }
}

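/*
 * 128-bit modulo: the same operand combinations that make the quadword
 * divides undefined apply here; zero is returned in those cases.
 */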
void helper_VMODSQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    Int128 neg1 = int128_makes64(-1);
    Int128 int128_min = int128_make128(0, INT64_MIN);
    if (likely(int128_nz(b->s128) &&
               (int128_ne(a->s128, int128_min) || int128_ne(b->s128, neg1)))) {
        t->s128 = int128_rems(a->s128, b->s128);
    } else {
        t->s128 = int128_zero(); /* Undefined behavior */
    }
}

void helper_VMODUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
    if (likely(int128_nz(b->s128))) {
        t->s128 = int128_remu(a->s128, b->s128);
    } else {
        t->s128 = int128_zero(); /* Undefined behavior */
    }
}

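/*
 * vperm selects each result byte from the 32-byte concatenation of a and
 * b, indexed by the low 5 bits of the corresponding byte of c; vpermr
 * indexes from the opposite end of each source register.
 */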
void helper_VPERM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int s = c->VsrB(i) & 0x1f;
        int index = s & 0xf;

        if (s & 0x10) {
            result.VsrB(i) = b->VsrB(index);
        } else {
            result.VsrB(i) = a->VsrB(index);
        }
    }
    *r = result;
}

void helper_VPERMR(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int s = c->VsrB(i) & 0x1f;
        int index = 15 - (s & 0xf);

        if (s & 0x10) {
            result.VsrB(i) = a->VsrB(index);
        } else {
            result.VsrB(i) = b->VsrB(index);
        }
    }
    *r = result;
}

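/*
 * xxgenpcv*m: generate a permute control vector from a mask. Elements
 * whose most significant bit is set are assigned consecutive byte
 * indices; the four variants cover big- and little-endian byte numbering
 * in both expanded and compressed layouts.
 */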
#define XXGENPCV_BE_EXP(NAME, SZ) \
void glue(helper_, glue(NAME, _be_exp))(ppc_vsr_t *t, ppc_vsr_t *b) \
{                                                                   \
    ppc_vsr_t tmp;                                                  \
                                                                    \
    /* Initialize tmp with the result of an all-zeros mask */       \
    tmp.VsrD(0) = 0x1011121314151617;                               \
    tmp.VsrD(1) = 0x18191A1B1C1D1E1F;                               \
                                                                    \
    /* Iterate over the most significant byte of each element */    \
    for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) {        \
        if (b->VsrB(i) & 0x80) {                                    \
            /* Update each byte of the element */                   \
            for (int k = 0; k < SZ; k++) {                          \
                tmp.VsrB(i + k) = j + k;                            \
            }                                                       \
            j += SZ;                                                \
        }                                                           \
    }                                                               \
                                                                    \
    *t = tmp;                                                       \
}

#define XXGENPCV_BE_COMP(NAME, SZ) \
void glue(helper_, glue(NAME, _be_comp))(ppc_vsr_t *t, ppc_vsr_t *b)\
{                                                                   \
    ppc_vsr_t tmp = { .u64 = { 0, 0 } };                            \
                                                                    \
    /* Iterate over the most significant byte of each element */    \
    for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) {        \
        if (b->VsrB(i) & 0x80) {                                    \
            /* Update each byte of the element */                   \
            for (int k = 0; k < SZ; k++) {                          \
                tmp.VsrB(j + k) = i + k;                            \
            }                                                       \
            j += SZ;                                                \
        }                                                           \
    }                                                               \
                                                                    \
    *t = tmp;                                                       \
}

#define XXGENPCV_LE_EXP(NAME, SZ) \
void glue(helper_, glue(NAME, _le_exp))(ppc_vsr_t *t, ppc_vsr_t *b) \
{                                                                   \
    ppc_vsr_t tmp;                                                  \
                                                                    \
    /* Initialize tmp with the result of an all-zeros mask */       \
    tmp.VsrD(0) = 0x1F1E1D1C1B1A1918;                               \
    tmp.VsrD(1) = 0x1716151413121110;                               \
                                                                    \
    /* Iterate over the most significant byte of each element */    \
    for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) {        \
        /* Reverse indexing of "i" */                               \
        const int idx = ARRAY_SIZE(b->u8) - i - SZ;                 \
        if (b->VsrB(idx) & 0x80) {                                  \
            /* Update each byte of the element */                   \
            for (int k = 0, rk = SZ - 1; k < SZ; k++, rk--) {       \
                tmp.VsrB(idx + rk) = j + k;                         \
            }                                                       \
            j += SZ;                                                \
        }                                                           \
    }                                                               \
                                                                    \
    *t = tmp;                                                       \
}

#define XXGENPCV_LE_COMP(NAME, SZ) \
void glue(helper_, glue(NAME, _le_comp))(ppc_vsr_t *t, ppc_vsr_t *b)\
{                                                                   \
    ppc_vsr_t tmp = { .u64 = { 0, 0 } };                            \
                                                                    \
    /* Iterate over the most significant byte of each element */    \
    for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) {        \
        if (b->VsrB(ARRAY_SIZE(b->u8) - i - SZ) & 0x80) {           \
            /* Update each byte of the element */                   \
            for (int k = 0, rk = SZ - 1; k < SZ; k++, rk--) {       \
                /* Reverse indexing of "j" */                       \
                const int idx = ARRAY_SIZE(b->u8) - j - SZ;         \
                tmp.VsrB(idx + rk) = i + k;                         \
            }                                                       \
            j += SZ;                                                \
        }                                                           \
    }                                                               \
                                                                    \
    *t = tmp;                                                       \
}

#define XXGENPCV(NAME, SZ)          \
    XXGENPCV_BE_EXP(NAME, SZ)       \
    XXGENPCV_BE_COMP(NAME, SZ)      \
    XXGENPCV_LE_EXP(NAME, SZ)       \
    XXGENPCV_LE_COMP(NAME, SZ)

XXGENPCV(XXGENPCVBM, 1)
XXGENPCV(XXGENPCVHM, 2)
XXGENPCV(XXGENPCVWM, 4)
XXGENPCV(XXGENPCVDM, 8)

#undef XXGENPCV_BE_EXP
#undef XXGENPCV_BE_COMP
#undef XXGENPCV_LE_EXP
#undef XXGENPCV_LE_COMP
#undef XXGENPCV

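/*
 * Bit-permute helpers: each index byte of b selects one bit of a;
 * selected bits are gathered into the result, per doubleword for
 * vbpermd and into the high halfword of doubleword 0 for vbpermq.
 */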
#if HOST_BIG_ENDIAN
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
#define VBPERMD_INDEX(i) (i)
#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
#else
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)])
#define VBPERMD_INDEX(i) (1 - i)
#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
#endif
#define EXTRACT_BIT(avr, i, index) \
        (extract64((avr)->VsrD(i), 63 - index, 1))

void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result = { .u64 = { 0, 0 } };
    VECTOR_FOR_INORDER_I(i, u64) {
        for (j = 0; j < 8; j++) {
            int index = VBPERMQ_INDEX(b, (i * 8) + j);
            if (index < 64 && EXTRACT_BIT(a, i, index)) {
                result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j);
            }
        }
    }
    *r = result;
}

void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    uint64_t perm = 0;

    VECTOR_FOR_INORDER_I(i, u8) {
        int index = VBPERMQ_INDEX(b, i);

        if (index < 128) {
            uint64_t mask = (1ull << (63 - (index & 0x3F)));
            if (a->u64[VBPERMQ_DW(index)] & mask) {
                perm |= (0x8000 >> i);
            }
        }
    }

    r->VsrD(0) = perm;
    r->VsrD(1) = 0;
}

#undef VBPERMQ_INDEX
#undef VBPERMQ_DW

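/*
 * Polynomial (carry-less) multiply-sum: partial products are combined
 * with XOR instead of addition, i.e. multiplication in GF(2)[x], and
 * adjacent double-width products are XORed into each result element.
 */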
#define PMSUM(name, srcfld, trgfld, trgtyp)                   \
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
{                                                             \
    int i, j;                                                 \
    trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])];    \
                                                              \
    VECTOR_FOR_INORDER_I(i, srcfld) {                         \
        prod[i] = 0;                                          \
        for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) {      \
            if (a->srcfld[i] & (1ull << j)) {                 \
                prod[i] ^= ((trgtyp)b->srcfld[i] << j);       \
            }                                                 \
        }                                                     \
    }                                                         \
                                                              \
    VECTOR_FOR_INORDER_I(i, trgfld) {                         \
        r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1];         \
    }                                                         \
}

PMSUM(vpmsumb, u8, u16, uint16_t)
PMSUM(vpmsumh, u16, u32, uint32_t)
PMSUM(vpmsumw, u32, u64, uint64_t)

void helper_VPMSUMD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    Int128 tmp, prod[2] = {int128_zero(), int128_zero()};

    for (j = 0; j < 64; j++) {
        for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
            if (a->VsrD(i) & (1ull << j)) {
                tmp = int128_make64(b->VsrD(i));
                tmp = int128_lshift(tmp, j);
                prod[i] = int128_xor(prod[i], tmp);
            }
        }
    }

    r->s128 = int128_xor(prod[0], prod[1]);
}

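/*
 * Pack helpers: vpkpx narrows 8:8:8:8 pixels to a 1:5:5:5 layout, and
 * the VPK variants narrow each source element either modulo or with
 * saturation through the cvt* conversion functions (which record
 * clamping in the sticky saturation flag).
 */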
#if HOST_BIG_ENDIAN
#define PKBIG 1
#else
#define PKBIG 0
#endif
void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if HOST_BIG_ENDIAN
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I(i, u64) {
        VECTOR_FOR_INORDER_I(j, u32) {
            uint32_t e = x[i]->u32[j];

            result.u16[4 * i + j] = (((e >> 9) & 0xfc00) |
                                     ((e >> 6) & 0x3e0) |
                                     ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}

#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *a, ppc_avr_t *b)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
                                                                        \
        VECTOR_FOR_INORDER_I(i, from) {                                 \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            set_vscr_sat(env);                                          \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(sdss, s64, s32, cvtsdsw, 1)
VPK(sdus, s64, u32, cvtsduw, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(udus, u64, u32, cvtuduw, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
VPK(udum, u64, u32, I, 0)
#undef I
#undef VPK
#undef PKBIG

void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
        r->f32[i] = float32_div(float32_one, b->f32[i], &env->vec_status);
    }
}

#define VRFI(suffix, rounding)                                  \
    void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r,    \
                             ppc_avr_t *b)                      \
    {                                                           \
        int i;                                                  \
        float_status s = env->vec_status;                       \
                                                                \
        set_float_rounding_mode(rounding, &s);                  \
        for (i = 0; i < ARRAY_SIZE(r->f32); i++) {              \
            r->f32[i] = float32_round_to_int(b->f32[i], &s);    \
        }                                                       \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI

void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
        float32 t = float32_sqrt(b->f32[i], &env->vec_status);

        r->f32[i] = float32_div(float32_one, t, &env->vec_status);
    }
}

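/*
 * Rotate-left then mask: each element of b packs a shift amount plus
 * begin/end bit positions describing a mask. The *MI forms insert the
 * rotated value under the mask into the existing target element; the
 * *NM forms just AND the rotated value with the mask.
 */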
#define VRLMI(name, size, element, insert)                                  \
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \
{                                                                           \
    int i;                                                                  \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                          \
        uint##size##_t src1 = a->element[i];                                \
        uint##size##_t src2 = b->element[i];                                \
        uint##size##_t src3 = r->element[i];                                \
        uint##size##_t begin, end, shift, mask, rot_val;                    \
                                                                            \
        shift = extract##size(src2, 0, 6);                                  \
        end   = extract##size(src2, 8, 6);                                  \
        begin = extract##size(src2, 16, 6);                                 \
        rot_val = rol##size(src1, shift);                                   \
        mask = mask_u##size(begin, end);                                    \
        if (insert) {                                                       \
            r->element[i] = (rot_val & mask) | (src3 & ~mask);              \
        } else {                                                            \
            r->element[i] = (rot_val & mask);                               \
        }                                                                   \
    }                                                                       \
}

VRLMI(VRLDMI, 64, u64, 1);
VRLMI(VRLWMI, 32, u32, 1);
VRLMI(VRLDNM, 64, u64, 0);
VRLMI(VRLWNM, 32, u32, 0);

void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
        r->f32[i] = float32_exp2(b->f32[i], &env->vec_status);
    }
}

void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
        r->f32[i] = float32_log2(b->f32[i], &env->vec_status);
    }
}

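/*
 * vextu{b,h,w}{l,r}x: extract an unsigned element from the vector at a
 * byte offset taken from a GPR; the left-indexed forms count the offset
 * from the most significant end of the quadword.
 */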
#define VEXTU_X_DO(name, size, left)                            \
target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \
{                                                               \
    int index = (a & 0xf) * 8;                                  \
    if (left) {                                                 \
        index = 128 - index - size;                             \
    }                                                           \
    return int128_getlo(int128_rshift(b->s128, index)) &        \
        MAKE_64BIT_MASK(0, size);                               \
}
VEXTU_X_DO(vextublx, 8, 1)
VEXTU_X_DO(vextuhlx, 16, 1)
VEXTU_X_DO(vextuwlx, 32, 1)
VEXTU_X_DO(vextubrx, 8, 0)
VEXTU_X_DO(vextuhrx, 16, 0)
VEXTU_X_DO(vextuwrx, 32, 0)
#undef VEXTU_X_DO

void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    unsigned int shift, bytes, size;

    size = ARRAY_SIZE(r->u8);
    for (i = 0; i < size; i++) {
        shift = b->VsrB(i) & 0x7;               /* extract shift value */
        bytes = (a->VsrB(i) << 8) +             /* extract adjacent bytes */
            (((i + 1) < size) ? a->VsrB(i + 1) : 0);
        r->VsrB(i) = (bytes << shift) >> 8;     /* shift and store result */
    }
}

void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    unsigned int shift, bytes;

    /*
     * Use reverse order, as the destination and source registers can be
     * the same. The register is modified in place, saving a temporary;
     * processing in reverse order guarantees that a computed result is
     * not fed back into the remaining iterations.
     */
    for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
        shift = b->VsrB(i) & 0x7;               /* extract shift value */
        bytes = ((i ? a->VsrB(i - 1) : 0) << 8) + a->VsrB(i);
                                                /* extract adjacent bytes */
        r->VsrB(i) = (bytes >> shift) & 0xFF;   /* shift and store result */
    }
}

void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.VsrB(i) = b->VsrB(index - 0x10);
        } else {
            result.VsrB(i) = a->VsrB(index);
        }
    }
    *r = result;
}

void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->VsrB(0xf) >> 3) & 0xf;

#if HOST_BIG_ENDIAN
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16 - sh], 0, sh);
#else
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#endif
}

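/*
 * ELEM_ADDR maps a left-indexed byte offset to a host address within the
 * vector, accounting for host endianness. The VINS*LX helpers use it to
 * insert a GPR value at a variable byte index, logging and ignoring
 * out-of-range indices.
 */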
#if HOST_BIG_ENDIAN
#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[IDX])
#else
#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[15 - (IDX)] - (SIZE) + 1)
#endif

#define VINSX(SUFFIX, TYPE) \
void glue(glue(helper_VINS, SUFFIX), LX)(CPUPPCState *env, ppc_avr_t *t,    \
                                         uint64_t val, target_ulong index)  \
{                                                                           \
    const int maxidx = ARRAY_SIZE(t->u8) - sizeof(TYPE);                    \
    target_long idx = index;                                                \
                                                                            \
    if (idx < 0 || idx > maxidx) {                                          \
        idx = idx < 0 ? sizeof(TYPE) - idx : idx;                           \
        qemu_log_mask(LOG_GUEST_ERROR,                                      \
            "Invalid index for Vector Insert Element after 0x" TARGET_FMT_lx\
            ", RA = " TARGET_FMT_ld " > %d\n", env->nip, idx, maxidx);      \
    } else {                                                                \
        TYPE src = val;                                                     \
        memcpy(ELEM_ADDR(t, idx, sizeof(TYPE)), &src, sizeof(TYPE));        \
    }                                                                       \
}
VINSX(B, uint8_t)
VINSX(H, uint16_t)
VINSX(W, uint32_t)
VINSX(D, uint64_t)
#undef ELEM_ADDR
#undef VINSX
#if HOST_BIG_ENDIAN
#define VEXTDVLX(NAME, SIZE) \
void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
                   target_ulong index)                                         \
{                                                                              \
    const target_long idx = index;                                             \
    ppc_avr_t tmp[2] = { *a, *b };                                             \
    memset(t, 0, sizeof(*t));                                                  \
    if (idx >= 0 && idx + SIZE <= sizeof(tmp)) {                               \
        memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2 - SIZE], (void *)tmp + idx, SIZE); \
    } else {                                                                   \
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x"  \
                      TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n",         \
                      env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE);        \
    }                                                                          \
}
#else
#define VEXTDVLX(NAME, SIZE) \
void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
                   target_ulong index)                                         \
{                                                                              \
    const target_long idx = index;                                             \
    ppc_avr_t tmp[2] = { *b, *a };                                             \
    memset(t, 0, sizeof(*t));                                                  \
    if (idx >= 0 && idx + SIZE <= sizeof(tmp)) {                               \
        memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2],                                  \
               (void *)tmp + sizeof(tmp) - SIZE - idx, SIZE);                  \
    } else {                                                                   \
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x"  \
                      TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n",         \
                      env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE);        \
    }                                                                          \
}
#endif
VEXTDVLX(VEXTDUBVLX, 1)
VEXTDVLX(VEXTDUHVLX, 2)
VEXTDVLX(VEXTDUWVLX, 4)
VEXTDVLX(VEXTDDVLX, 8)
#undef VEXTDVLX
#if HOST_BIG_ENDIAN
#define VEXTRACT(suffix, element)                                            \
    void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
    {                                                                        \
        uint32_t es = sizeof(r->element[0]);                                 \
        memmove(&r->u8[8 - es], &b->u8[index], es);                          \
        memset(&r->u8[8], 0, 8);                                             \
        memset(&r->u8[0], 0, 8 - es);                                        \
    }
#else
#define VEXTRACT(suffix, element)                                            \
    void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
    {                                                                        \
        uint32_t es = sizeof(r->element[0]);                                 \
        uint32_t s = (16 - index) - es;                                      \
        memmove(&r->u8[8], &b->u8[s], es);                                   \
        memset(&r->u8[0], 0, 8);                                             \
        memset(&r->u8[8 + es], 0, 8 - es);                                   \
    }
#endif
VEXTRACT(ub, u8)
VEXTRACT(uh, u16)
VEXTRACT(uw, u32)
VEXTRACT(d, u64)
#undef VEXTRACT

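/*
 * String-isolate helpers: copy elements up to (but excluding) the first
 * zero element, clear the remainder, and return 0b0010 when a zero
 * element was found. The L/R variants scan from the left or right end.
 */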
#define VSTRI(NAME, ELEM, NUM_ELEMS, LEFT)          \
uint32_t helper_##NAME(ppc_avr_t *t, ppc_avr_t *b)  \
{                                                   \
    int i, idx, crf = 0;                            \
                                                    \
    for (i = 0; i < NUM_ELEMS; i++) {               \
        idx = LEFT ? i : NUM_ELEMS - i - 1;         \
        if (b->Vsr##ELEM(idx)) {                    \
            t->Vsr##ELEM(idx) = b->Vsr##ELEM(idx);  \
        } else {                                    \
            crf = 0b0010;                           \
            break;                                  \
        }                                           \
    }                                               \
                                                    \
    for (; i < NUM_ELEMS; i++) {                    \
        idx = LEFT ? i : NUM_ELEMS - i - 1;         \
        t->Vsr##ELEM(idx) = 0;                      \
    }                                               \
                                                    \
    return crf;                                     \
}
VSTRI(VSTRIBL, B, 16, true)
VSTRI(VSTRIBR, B, 16, false)
VSTRI(VSTRIHL, H, 8, true)
VSTRI(VSTRIHR, H, 8, false)
#undef VSTRI

void helper_XXEXTRACTUW(ppc_vsr_t *xt, ppc_vsr_t *xb, uint32_t index)
{
    ppc_vsr_t t = { };
    size_t es = sizeof(uint32_t);
    uint32_t ext_index;
    int i;

    ext_index = index;
    for (i = 0; i < es; i++, ext_index++) {
        t.VsrB(8 - es + i) = xb->VsrB(ext_index % 16);
    }

    *xt = t;
}

void helper_XXINSERTW(ppc_vsr_t *xt, ppc_vsr_t *xb, uint32_t index)
{
    ppc_vsr_t t = *xt;
    size_t es = sizeof(uint32_t);
    int ins_index, i = 0;

    ins_index = index;
    for (i = 0; i < es && ins_index < 16; i++, ins_index++) {
        t.VsrB(ins_index) = xb->VsrB(8 - es + i);
    }

    *xt = t;
}

void helper_XXEVAL(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c,
                   uint32_t desc)
{
    /*
     * Instead of processing imm bit-by-bit, we'll skip the computation of
     * conjunctions whose corresponding bit is unset.
     */
    int bit, imm = simd_data(desc);
    Int128 conj, disj = int128_zero();

    /* Iterate over set bits from the least to the most significant bit */
    while (imm) {
        /*
         * Get the next bit to be processed with ctz64. Invert the result of
         * ctz64 to match the indexing used by PowerISA.
         */
        bit = 7 - ctzl(imm);
        if (bit & 0x4) {
            conj = a->s128;
        } else {
            conj = int128_not(a->s128);
        }
        if (bit & 0x2) {
            conj = int128_and(conj, b->s128);
        } else {
            conj = int128_and(conj, int128_not(b->s128));
        }
        if (bit & 0x1) {
            conj = int128_and(conj, c->s128);
        } else {
            conj = int128_and(conj, int128_not(c->s128));
        }
        disj = int128_or(disj, conj);

        /* Unset the least significant bit that is set */
        imm &= imm - 1;
    }

    t->s128 = disj;
}

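/*
 * xxblendv*: per-element select, taking the element from b when the sign
 * (most significant) bit of the corresponding element of c is set, and
 * from a otherwise.
 */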
#define XXBLEND(name, sz) \
void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b,  \
                                 ppc_avr_t *c, uint32_t desc)               \
{                                                                           \
    for (int i = 0; i < ARRAY_SIZE(t->glue(u, sz)); i++) {                  \
        t->glue(u, sz)[i] = (c->glue(s, sz)[i] >> (sz - 1)) ?               \
            b->glue(u, sz)[i] : a->glue(u, sz)[i];                          \
    }                                                                       \
}
XXBLEND(B, 8)
XXBLEND(H, 16)
XXBLEND(W, 32)
XXBLEND(D, 64)
#undef XXBLEND

void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->VsrB(0xf) >> 3) & 0xf;

#if HOST_BIG_ENDIAN
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#else
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16 - sh], 0, sh);
#endif
}

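/*
 * Sum-across helpers: accumulate source elements into wider signed or
 * unsigned sums, then saturate back to 32 bits; any clamping sets the
 * sticky saturation flag via the cvt* functions.
 */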
void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

    upper = ARRAY_SIZE(r->s32) - 1;
    t = (int64_t)b->VsrSW(upper);
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->VsrSW(i);
        result.VsrSW(i) = 0;
    }
    result.VsrSW(upper) = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        set_vscr_sat(env);
    }
}

void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

    upper = 1;
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->VsrSW(upper + i * 2);

        result.VsrD(i) = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->VsrSW(2 * i + j);
        }
        result.VsrSW(upper + i * 2) = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        set_vscr_sat(env);
    }
}

void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4 * i + j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        set_vscr_sat(env);
    }
}

void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        t += a->s16[2 * i] + a->s16[2 * i + 1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        set_vscr_sat(env);
    }
}

void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];

        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4 * i + j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        set_vscr_sat(env);
    }
}

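/*
 * Unpack helpers: VUPKPX expands 1:5:5:5 pixels back to 8:8:8:8, and the
 * VUPK forms sign-extend the high (UPKHI) or low (UPKLO) half of the
 * packed source into double-width elements.
 */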
#if HOST_BIG_ENDIAN
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {                      \
            uint16_t e = b->u16[hi ? i : i + 4];                        \
            uint8_t a = (e >> 15) ? 0xff : 0;                           \
            uint8_t r = (e >> 10) & 0x1f;                               \
            uint8_t g = (e >> 5) & 0x1f;                                \
            uint8_t b = e & 0x1f;                                       \
                                                                        \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX

#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
                                                                        \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
                 i++) {                                                 \
                result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(hsw, s64, s32, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
VUPK(lsw, s64, s32, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

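/*
 * VGENERIC_DO applies a scalar operation to every element; it is
 * instantiated below for count-leading-zeros, count-trailing-zeros and
 * population count at each element width.
 */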
#define VGENERIC_DO(name, element)                              \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *b)             \
    {                                                           \
        int i;                                                  \
                                                                \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = name(b->element[i]);                \
        }                                                       \
    }

#define clzb(v) ((v) ? clz32((uint32_t)(v) << 24) : 8)
#define clzh(v) ((v) ? clz32((uint32_t)(v) << 16) : 16)

VGENERIC_DO(clzb, u8)
VGENERIC_DO(clzh, u16)

#undef clzb
#undef clzh

#define ctzb(v) ((v) ? ctz32(v) : 8)
#define ctzh(v) ((v) ? ctz32(v) : 16)
#define ctzw(v) ctz32((v))
#define ctzd(v) ctz64((v))

VGENERIC_DO(ctzb, u8)
VGENERIC_DO(ctzh, u16)
VGENERIC_DO(ctzw, u32)
VGENERIC_DO(ctzd, u64)

#undef ctzb
#undef ctzh
#undef ctzw
#undef ctzd

#define popcntb(v) ctpop8(v)
#define popcnth(v) ctpop16(v)
#define popcntw(v) ctpop32(v)
#define popcntd(v) ctpop64(v)

VGENERIC_DO(popcntb, u8)
VGENERIC_DO(popcnth, u16)
VGENERIC_DO(popcntw, u32)
VGENERIC_DO(popcntd, u64)

#undef popcntb
#undef popcnth
#undef popcntw
#undef popcntd

#undef VGENERIC_DO

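/*
 * Quadword (128-bit) add/sub helpers. Carry-out of a + b is computed as
 * b > ~a, i.e. int128_ult(int128_not(a), b); the extended forms take
 * carry-in from the least significant bit of c.
 */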
void helper_VADDUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    r->s128 = int128_add(a->s128, b->s128);
}

void helper_VADDEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->s128 = int128_add(int128_add(a->s128, b->s128),
                         int128_make64(int128_getlo(c->s128) & 1));
}

void helper_VADDCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    r->VsrD(1) = int128_ult(int128_not(a->s128), b->s128);
    r->VsrD(0) = 0;
}

void helper_VADDECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    bool carry_out = int128_ult(int128_not(a->s128), b->s128),
         carry_in = int128_getlo(c->s128) & 1;

    if (!carry_out && carry_in) {
        carry_out = (int128_nz(a->s128) || int128_nz(b->s128)) &&
                    int128_eq(int128_add(a->s128, b->s128), int128_makes64(-1));
    }

    r->VsrD(0) = 0;
    r->VsrD(1) = carry_out;
}

void helper_VSUBUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    r->s128 = int128_sub(a->s128, b->s128);
}

void helper_VSUBEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->s128 = int128_add(int128_add(a->s128, int128_not(b->s128)),
                         int128_make64(int128_getlo(c->s128) & 1));
}

void helper_VSUBCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    Int128 tmp = int128_not(b->s128);

    r->VsrD(1) = int128_ult(int128_not(a->s128), tmp) ||
                 int128_eq(int128_add(a->s128, tmp), int128_makes64(-1));
    r->VsrD(0) = 0;
}

void helper_VSUBECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    Int128 tmp = int128_not(b->s128);
    bool carry_out = int128_ult(int128_not(a->s128), tmp),
         carry_in = int128_getlo(c->s128) & 1;

    r->VsrD(1) = carry_out || (carry_in && int128_eq(int128_add(a->s128, tmp),
                                                     int128_makes64(-1)));
    r->VsrD(0) = 0;
}

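/*
 * Packed BCD helpers. A quadword holds 31 decimal digits plus a sign
 * nibble in the least significant position; BCD_DIG_BYTE maps digit n to
 * its byte within the vector.
 */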
#define BCD_PLUS_PREF_1 0xC
#define BCD_PLUS_PREF_2 0xF
#define BCD_PLUS_ALT_1  0xA
#define BCD_NEG_PREF    0xD
#define BCD_NEG_ALT     0xB
#define BCD_PLUS_ALT_2  0xE
#define NATIONAL_PLUS   0x2B
#define NATIONAL_NEG    0x2D

#define BCD_DIG_BYTE(n) (15 - ((n) / 2))

static int bcd_get_sgn(ppc_avr_t *bcd)
{
    switch (bcd->VsrB(BCD_DIG_BYTE(0)) & 0xF) {
    case BCD_PLUS_PREF_1:
    case BCD_PLUS_PREF_2:
    case BCD_PLUS_ALT_1:
    case BCD_PLUS_ALT_2:
        return 1;

    case BCD_NEG_PREF:
    case BCD_NEG_ALT:
        return -1;

    default:
        return 0;
    }
}

static int bcd_preferred_sgn(int sgn, int ps)
{
    if (sgn >= 0) {
        return (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2;
    } else {
        return BCD_NEG_PREF;
    }
}

static uint8_t bcd_get_digit(ppc_avr_t *bcd, int n, int *invalid)
{
    uint8_t result;
    if (n & 1) {
        result = bcd->VsrB(BCD_DIG_BYTE(n)) >> 4;
    } else {
        result = bcd->VsrB(BCD_DIG_BYTE(n)) & 0xF;
    }

    if (unlikely(result > 9)) {
        *invalid = true;
    }
    return result;
}

static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n)
{
    if (n & 1) {
        bcd->VsrB(BCD_DIG_BYTE(n)) &= 0x0F;
        bcd->VsrB(BCD_DIG_BYTE(n)) |= (digit << 4);
    } else {
        bcd->VsrB(BCD_DIG_BYTE(n)) &= 0xF0;
        bcd->VsrB(BCD_DIG_BYTE(n)) |= digit;
    }
}

static bool bcd_is_valid(ppc_avr_t *bcd)
{
    int i;
    int invalid = 0;

    if (bcd_get_sgn(bcd) == 0) {
        return false;
    }

    for (i = 1; i < 32; i++) {
        bcd_get_digit(bcd, i, &invalid);
        if (unlikely(invalid)) {
            return false;
        }
    }
    return true;
}

static int bcd_cmp_zero(ppc_avr_t *bcd)
{
    if (bcd->VsrD(0) == 0 && (bcd->VsrD(1) >> 4) == 0) {
        return CRF_EQ;
    } else {
        return (bcd_get_sgn(bcd) == 1) ? CRF_GT : CRF_LT;
    }
}

static uint16_t get_national_digit(ppc_avr_t *reg, int n)
{
    return reg->VsrH(7 - n);
}

static void set_national_digit(ppc_avr_t *reg, uint8_t val, int n)
{
    reg->VsrH(7 - n) = val;
}

static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    int invalid = 0;
    for (i = 31; i > 0; i--) {
        uint8_t dig_a = bcd_get_digit(a, i, &invalid);
        uint8_t dig_b = bcd_get_digit(b, i, &invalid);
        if (unlikely(invalid)) {
            return 0; /* doesn't matter */
        } else if (dig_a > dig_b) {
            return 1;
        } else if (dig_a < dig_b) {
            return -1;
        }
    }

    return 0;
}

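/*
 * CR6 is derived from the *unbounded* result of the operation, so the
 * zero indication is computed digit by digit here rather than from the
 * (possibly overflowed and truncated) output.
 */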
static int bcd_add_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid,
                       int *overflow)
{
    int carry = 0;
    int i;
    int is_zero = 1;

    for (i = 1; i <= 31; i++) {
        uint8_t digit = bcd_get_digit(a, i, invalid) +
                        bcd_get_digit(b, i, invalid) + carry;
        is_zero &= (digit == 0);
        if (digit > 9) {
            carry = 1;
            digit -= 10;
        } else {
            carry = 0;
        }

        bcd_put_digit(t, digit, i);
    }

    *overflow = carry;
    return is_zero;
}

static void bcd_sub_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid,
                        int *overflow)
{
    int carry = 0;
    int i;

    for (i = 1; i <= 31; i++) {
        uint8_t digit = bcd_get_digit(a, i, invalid) -
                        bcd_get_digit(b, i, invalid) + carry;
        if (digit & 0x80) {
            carry = -1;
            digit += 10;
        } else {
            carry = 0;
        }

        bcd_put_digit(t, digit, i);
    }

    *overflow = carry;
}

uint32_t helper_bcdadd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
|
|
|
|
{
|
|
|
|
|
|
|
|
int sgna = bcd_get_sgn(a);
|
|
|
|
int sgnb = bcd_get_sgn(b);
|
|
|
|
int invalid = (sgna == 0) || (sgnb == 0);
|
|
|
|
int overflow = 0;
|
target/ppc: Fix bcdsub. emulation when result overflows
The commit d03b174a83 (target/ppc: simplify bcdadd/sub functions)
meant to simplify some of the code but it inadvertently altered the
way the CR6 field is set after the operation has overflowed.
The CR6 bits are set based on the *unbounded* result of the operation,
so we need to look at the result before returning from bcd_add_mag,
otherwise we will look at 0 when it overflows.
Consider the following subtraction:
v0 = 0x9999999999999999999999999999999c (maximum positive BCD value)
v1 = 0x0000000000000000000000000000001d (negative one BCD value)
bcdsub. v0,v0,v1,0
The Power ISA 2.07B says:
If the unbounded result is greater than zero, do the following.
If PS=0, the sign code of the result is set to 0b1100.
If PS=1, the sign code of the result is set to 0b1111.
If the operation overflows, CR field 6 is set to 0b0101. Otherwise,
CR field 6 is set to 0b0100.
POWER9 hardware:
vr0 = 0x0000000000000000000000000000000c (positive zero BCD value)
cr6 = 0b0101 (0x5) (positive, overflow)
QEMU:
vr0 = 0x0000000000000000000000000000000c (positive zero BCD value)
cr6 = 0b0011 (0x3) (zero, overflow) <--- wrong
This patch reverts the part of d03b174a83 that introduced the
problem and adds a test-case to avoid further regressions:
before:
$ make run-tcg-tests-ppc64le-linux-user
(...)
TEST bcdsub on ppc64le
bcdsub: qemu/tests/tcg/ppc64le/bcdsub.c:58: test_bcdsub_gt:
Assertion `(cr >> 4) == ((1 << 2) | (1 << 0))' failed.
Fixes: d03b174a83 (target/ppc: simplify bcdadd/sub functions)
Reported-by: Paul Clarke <pc@us.ibm.com>
Signed-off-by: Fabiano Rosas <farosas@linux.ibm.com>
Message-Id: <20210222194035.2723056-1-farosas@linux.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2021-02-22 22:40:35 +03:00
|
|
|
    int zero = 0;
    uint32_t cr = 0;
    ppc_avr_t result = { .u64 = { 0, 0 } };

    if (!invalid) {
        if (sgna == sgnb) {
            result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgna, ps);
            zero = bcd_add_mag(&result, a, b, &invalid, &overflow);
            cr = (sgna > 0) ? CRF_GT : CRF_LT;
        } else {
            int magnitude = bcd_cmp_mag(a, b);
            if (magnitude > 0) {
                result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgna, ps);
                bcd_sub_mag(&result, a, b, &invalid, &overflow);
                cr = (sgna > 0) ? CRF_GT : CRF_LT;
            } else if (magnitude < 0) {
                result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgnb, ps);
                bcd_sub_mag(&result, b, a, &invalid, &overflow);
                cr = (sgnb > 0) ? CRF_GT : CRF_LT;
            } else {
                result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(0, ps);
                cr = CRF_EQ;
            }
        }
    }

    /*
     * CR6 reflects the *unbounded* result: GT/LT/EQ are derived from the
     * magnitudes above (bcd_add_mag reports a zero result through 'zero'),
     * and an overflow only ORs in the SO bit.
     */
    if (unlikely(invalid)) {
        result.VsrD(0) = result.VsrD(1) = -1;
        cr = CRF_SO;
    } else if (overflow) {
        cr |= CRF_SO;
    } else if (zero) {
        cr |= CRF_EQ;
    }

    *r = result;

    return cr;
}

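/*
 * bcdsub. is implemented in terms of bcdadd.: since a - b == a + (-b),
 * the sign code of the copy of b is inverted and the work is handed to
 * helper_bcdadd.
 */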
uint32_t helper_bcdsub(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
{
    ppc_avr_t bcopy = *b;
    int sgnb = bcd_get_sgn(b);

    if (sgnb < 0) {
        bcd_put_digit(&bcopy, BCD_PLUS_PREF_1, 0);
    } else if (sgnb > 0) {
        bcd_put_digit(&bcopy, BCD_NEG_PREF, 0);
    }
    /* else invalid ... defer to bcdadd code for proper handling */

    return helper_bcdadd(r, a, &bcopy, ps);
}

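/*
 * bcdcfn.: Decimal Convert From National. Each 16-bit national digit of
 * b must be '0'..'9' (0x0030..0x0039); its low nibble becomes one packed
 * BCD digit, and the leading 16-bit code supplies the sign.
 */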
|
2014-02-13 01:23:03 +04:00
|
|
|
|
2016-11-08 19:50:22 +03:00
|
|
|
uint32_t helper_bcdcfn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int cr = 0;
|
|
|
|
uint16_t national = 0;
|
|
|
|
uint16_t sgnb = get_national_digit(b, 0);
|
|
|
|
ppc_avr_t ret = { .u64 = { 0, 0 } };
|
|
|
|
int invalid = (sgnb != NATIONAL_PLUS && sgnb != NATIONAL_NEG);
|
|
|
|
|
|
|
|
for (i = 1; i < 8; i++) {
|
|
|
|
national = get_national_digit(b, i);
|
|
|
|
if (unlikely(national < 0x30 || national > 0x39)) {
|
|
|
|
invalid = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
bcd_put_digit(&ret, national & 0xf, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sgnb == NATIONAL_PLUS) {
|
|
|
|
bcd_put_digit(&ret, (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2, 0);
|
|
|
|
} else {
|
|
|
|
bcd_put_digit(&ret, BCD_NEG_PREF, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
cr = bcd_cmp_zero(&ret);
|
|
|
|
|
|
|
|
if (unlikely(invalid)) {
|
2016-11-23 14:37:11 +03:00
|
|
|
cr = CRF_SO;
|
2016-11-08 19:50:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
*r = ret;
|
|
|
|
|
|
|
|
return cr;
|
|
|
|
}
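/*
 * bcdctn.: Decimal Convert To National, the inverse of bcdcfn. Only the
 * low 7 BCD digits fit in the national form; any nonzero digit above
 * them raises overflow (SO in CR6).
 */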
uint32_t helper_bcdctn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
{
    int i;
    int cr = 0;
    int sgnb = bcd_get_sgn(b);
    int invalid = (sgnb == 0);
    ppc_avr_t ret = { .u64 = { 0, 0 } };

    int ox_flag = (b->VsrD(0) != 0) || ((b->VsrD(1) >> 32) != 0);

    for (i = 1; i < 8; i++) {
        set_national_digit(&ret, 0x30 + bcd_get_digit(b, i, &invalid), i);

        if (unlikely(invalid)) {
            break;
        }
    }
    set_national_digit(&ret, (sgnb == -1) ? NATIONAL_NEG : NATIONAL_PLUS, 0);

    cr = bcd_cmp_zero(b);

    if (ox_flag) {
        cr |= CRF_SO;
    }

    if (unlikely(invalid)) {
        cr = CRF_SO;
    }

    *r = ret;

    return cr;
}

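/*
 * bcdcfz.: Decimal Convert From Zoned. Each source byte is a zone nibble
 * plus a digit nibble; the zone must match the expected lead (0x3, or
 * 0xF for the PS=1 EBCDIC-style encoding) and the sign is taken from the
 * zone of the rightmost byte.
 */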
uint32_t helper_bcdcfz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
{
    int i;
    int cr = 0;
    int invalid = 0;
    int zone_digit = 0;
    int zone_lead = ps ? 0xF : 0x3;
    int digit = 0;
    ppc_avr_t ret = { .u64 = { 0, 0 } };
    int sgnb = b->VsrB(BCD_DIG_BYTE(0)) >> 4;

    if (unlikely((sgnb < 0xA) && ps)) {
        invalid = 1;
    }

    for (i = 0; i < 16; i++) {
        zone_digit = i ? b->VsrB(BCD_DIG_BYTE(i * 2)) >> 4 : zone_lead;
        digit = b->VsrB(BCD_DIG_BYTE(i * 2)) & 0xF;
        if (unlikely(zone_digit != zone_lead || digit > 0x9)) {
            invalid = 1;
            break;
        }

        bcd_put_digit(&ret, digit, i + 1);
    }

    if ((ps && (sgnb == 0xB || sgnb == 0xD)) ||
        (!ps && (sgnb & 0x4))) {
        bcd_put_digit(&ret, BCD_NEG_PREF, 0);
    } else {
        bcd_put_digit(&ret, BCD_PLUS_PREF_1, 0);
    }

    cr = bcd_cmp_zero(&ret);

    if (unlikely(invalid)) {
        cr = CRF_SO;
    }

    *r = ret;

    return cr;
}

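/*
 * bcdctz.: Decimal Convert To Zoned, the inverse of bcdcfz. Only 16
 * digits fit in the zoned form, so any nonzero digit above the low 16
 * raises overflow; the sign is folded into the zone of the rightmost
 * byte.
 */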
uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
{
    int i;
    int cr = 0;
    uint8_t digit = 0;
    int sgnb = bcd_get_sgn(b);
    int zone_lead = (ps) ? 0xF0 : 0x30;
    int invalid = (sgnb == 0);
    ppc_avr_t ret = { .u64 = { 0, 0 } };

    int ox_flag = ((b->VsrD(0) >> 4) != 0);

    for (i = 0; i < 16; i++) {
        digit = bcd_get_digit(b, i + 1, &invalid);

        if (unlikely(invalid)) {
            break;
        }

        ret.VsrB(BCD_DIG_BYTE(i * 2)) = zone_lead + digit;
    }

    if (ps) {
        bcd_put_digit(&ret, (sgnb == 1) ? 0xC : 0xD, 1);
    } else {
        bcd_put_digit(&ret, (sgnb == 1) ? 0x3 : 0x7, 1);
    }

    cr = bcd_cmp_zero(b);

    if (ox_flag) {
        cr |= CRF_SO;
    }

    if (unlikely(invalid)) {
        cr = CRF_SO;
    }

    *r = ret;

    return cr;
}

/**
 * Compare 2 128-bit unsigned integers, passed in as unsigned 64-bit pairs
 *
 * Returns:
 * > 0 if ahi|alo > bhi|blo,
 * 0 if ahi|alo == bhi|blo,
 * < 0 if ahi|alo < bhi|blo
 */
static inline int ucmp128(uint64_t alo, uint64_t ahi,
                          uint64_t blo, uint64_t bhi)
{
    return (ahi == bhi) ?
        (alo > blo ? 1 : (alo == blo ? 0 : -1)) :
        (ahi > bhi ? 1 : -1);
}

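/*
 * bcdcfsq.: Decimal Convert From Signed Quadword. The 128-bit binary
 * value in b becomes up to 31 BCD digits; a magnitude above 10^31 - 1
 * cannot be represented, so such sources flag overflow and leave r
 * untouched.
 */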
uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
{
    int i;
    int cr;
    uint64_t lo_value;
    uint64_t hi_value;
    uint64_t rem;
    ppc_avr_t ret = { .u64 = { 0, 0 } };

    if (b->VsrSD(0) < 0) {
        lo_value = -b->VsrSD(1);
        hi_value = ~b->VsrD(0) + !lo_value;
        bcd_put_digit(&ret, 0xD, 0);

        cr = CRF_LT;
    } else {
        lo_value = b->VsrD(1);
        hi_value = b->VsrD(0);
        bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0);

        if (hi_value == 0 && lo_value == 0) {
            cr = CRF_EQ;
        } else {
            cr = CRF_GT;
        }
    }

    /*
     * Check src limits: abs(src) <= 10^31 - 1
     *
     * 10^31 - 1 = 0x0000007e37be2022 c0914b267fffffff
     */
    if (ucmp128(lo_value, hi_value,
                0xc0914b267fffffffULL, 0x7e37be2022ULL) > 0) {
        cr |= CRF_SO;

        /*
         * According to the ISA, if src wouldn't fit in the destination
         * register, the result is undefined.
         * In that case, we leave r unchanged.
         */
    } else {
        rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);

        for (i = 1; i < 16; rem /= 10, i++) {
            bcd_put_digit(&ret, rem % 10, i);
        }

        for (; i < 32; lo_value /= 10, i++) {
            bcd_put_digit(&ret, lo_value % 10, i);
        }

        *r = ret;
    }

    return cr;
}

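/*
 * bcdctsq.: Decimal Convert To Signed Quadword. The 31 BCD digits are
 * evaluated by a 128-bit Horner loop: multiply the accumulator by ten
 * and add the next digit, starting from the most significant digit.
 */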
uint32_t helper_bcdctsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
{
    uint8_t i;
    int cr;
    uint64_t carry;
    uint64_t unused;
    uint64_t lo_value;
    uint64_t hi_value = 0;
    int sgnb = bcd_get_sgn(b);
    int invalid = (sgnb == 0);

    lo_value = bcd_get_digit(b, 31, &invalid);
    for (i = 30; i > 0; i--) {
        mulu64(&lo_value, &carry, lo_value, 10ULL);
        mulu64(&hi_value, &unused, hi_value, 10ULL);
        lo_value += bcd_get_digit(b, i, &invalid);
        hi_value += carry;

        if (unlikely(invalid)) {
            break;
        }
    }

    if (sgnb == -1) {
        r->VsrSD(1) = -lo_value;
        r->VsrSD(0) = ~hi_value + !r->VsrSD(1);
    } else {
        r->VsrSD(1) = lo_value;
        r->VsrSD(0) = hi_value;
    }

    cr = bcd_cmp_zero(b);

    if (unlikely(invalid)) {
        cr = CRF_SO;
    }

    return cr;
}

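/* bcdcpsgn.: copy the magnitude of a with the sign code taken from b. */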
uint32_t helper_bcdcpsgn(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
{
    int i;
    int invalid = 0;

    if (bcd_get_sgn(a) == 0 || bcd_get_sgn(b) == 0) {
        return CRF_SO;
    }

    *r = *a;
    bcd_put_digit(r, b->VsrB(BCD_DIG_BYTE(0)) & 0xF, 0);

    for (i = 1; i < 32; i++) {
        bcd_get_digit(a, i, &invalid);
        bcd_get_digit(b, i, &invalid);
        if (unlikely(invalid)) {
            return CRF_SO;
        }
    }

    return bcd_cmp_zero(r);
}

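/* bcdsetsgn.: rewrite the sign code of b with the preferred sign for PS. */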
uint32_t helper_bcdsetsgn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
{
    int sgnb = bcd_get_sgn(b);

    *r = *b;
    bcd_put_digit(r, bcd_preferred_sgn(sgnb, ps), 0);

    if (bcd_is_valid(b) == false) {
        return CRF_SO;
    }

    return bcd_cmp_zero(r);
}

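/*
 * bcds.: decimal shift. The shift count is a signed byte taken from a;
 * positive counts shift the digits of b toward the most significant
 * position, negative counts shift right, and the sign nibble is
 * preserved.
 */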
uint32_t helper_bcds(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
{
    int cr;
    int i = a->VsrSB(7);
    bool ox_flag = false;
    int sgnb = bcd_get_sgn(b);
    ppc_avr_t ret = *b;
    ret.VsrD(1) &= ~0xf;

    if (bcd_is_valid(b) == false) {
        return CRF_SO;
    }

    if (unlikely(i > 31)) {
        i = 31;
    } else if (unlikely(i < -31)) {
        i = -31;
    }

    if (i > 0) {
        ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
    } else {
        urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
    }
    bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0);

    *r = ret;

    cr = bcd_cmp_zero(r);
    if (ox_flag) {
        cr |= CRF_SO;
    }

    return cr;
}

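/*
 * bcdus.: decimal unsigned shift, like bcds. but treating all 32 nibbles
 * as digits; there is no sign nibble to preserve, and shifting nonzero
 * digits out of range merely flags overflow.
 */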
uint32_t helper_bcdus(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
{
    int cr;
    int i;
    int invalid = 0;
    bool ox_flag = false;
    ppc_avr_t ret = *b;

    for (i = 0; i < 32; i++) {
        bcd_get_digit(b, i, &invalid);

        if (unlikely(invalid)) {
            return CRF_SO;
        }
    }

    i = a->VsrSB(7);
    if (i >= 32) {
        ox_flag = true;
        ret.VsrD(1) = ret.VsrD(0) = 0;
    } else if (i <= -32) {
        ret.VsrD(1) = ret.VsrD(0) = 0;
    } else if (i > 0) {
        ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
    } else {
        urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
    }
    *r = ret;

    cr = bcd_cmp_zero(r);
    if (ox_flag) {
        cr |= CRF_SO;
    }

    return cr;
}

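/*
 * bcdsr.: decimal shift and round. Right shifts round the magnitude away
 * from zero when the most significant discarded digit is 5 or greater,
 * by adding a BCD one.
 */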
uint32_t helper_bcdsr(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
{
    int cr;
    int unused = 0;
    int invalid = 0;
    bool ox_flag = false;
    int sgnb = bcd_get_sgn(b);
    ppc_avr_t ret = *b;
    ret.VsrD(1) &= ~0xf;

    int i = a->VsrSB(7);
    ppc_avr_t bcd_one;

    bcd_one.VsrD(0) = 0;
    bcd_one.VsrD(1) = 0x10;

    if (bcd_is_valid(b) == false) {
        return CRF_SO;
    }

    if (unlikely(i > 31)) {
        i = 31;
    } else if (unlikely(i < -31)) {
        i = -31;
    }

    if (i > 0) {
        ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
    } else {
        urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);

        if (bcd_get_digit(&ret, 0, &invalid) >= 5) {
            bcd_add_mag(&ret, &ret, &bcd_one, &invalid, &unused);
        }
    }
    bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0);

    cr = bcd_cmp_zero(&ret);
    if (ox_flag) {
        cr |= CRF_SO;
    }
    *r = ret;

    return cr;
}

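/*
 * bcdtrunc.: truncate a signed BCD value to the number of digits given
 * in halfword 3 of a, flagging overflow when nonzero digits are dropped.
 */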
uint32_t helper_bcdtrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
{
    uint64_t mask;
    uint32_t ox_flag = 0;
    int i = a->VsrSH(3) + 1;
    ppc_avr_t ret = *b;

    if (bcd_is_valid(b) == false) {
        return CRF_SO;
    }

    if (i > 16 && i < 32) {
        mask = (uint64_t)-1 >> (128 - i * 4);
        if (ret.VsrD(0) & ~mask) {
            ox_flag = CRF_SO;
        }

        ret.VsrD(0) &= mask;
    } else if (i >= 0 && i <= 16) {
        mask = (uint64_t)-1 >> (64 - i * 4);
        if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) {
            ox_flag = CRF_SO;
        }

        ret.VsrD(1) &= mask;
        ret.VsrD(0) = 0;
    }
    bcd_put_digit(&ret, bcd_preferred_sgn(bcd_get_sgn(b), ps), 0);
    *r = ret;

    return bcd_cmp_zero(&ret) | ox_flag;
}

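/* bcdutrunc.: like bcdtrunc. but for unsigned (32-digit) BCD values. */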
uint32_t helper_bcdutrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
{
    int i;
    uint64_t mask;
    uint32_t ox_flag = 0;
    int invalid = 0;
    ppc_avr_t ret = *b;

    for (i = 0; i < 32; i++) {
        bcd_get_digit(b, i, &invalid);

        if (unlikely(invalid)) {
            return CRF_SO;
        }
    }

    i = a->VsrSH(3);
    if (i > 16 && i < 33) {
        mask = (uint64_t)-1 >> (128 - i * 4);
        if (ret.VsrD(0) & ~mask) {
            ox_flag = CRF_SO;
        }

        ret.VsrD(0) &= mask;
    } else if (i > 0 && i <= 16) {
        mask = (uint64_t)-1 >> (64 - i * 4);
        if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) {
            ox_flag = CRF_SO;
        }

        ret.VsrD(1) &= mask;
        ret.VsrD(0) = 0;
    } else if (i == 0) {
        if (ret.VsrD(0) || ret.VsrD(1)) {
            ox_flag = CRF_SO;
        }
        ret.VsrD(0) = ret.VsrD(1) = 0;
    }

    *r = ret;
    if (r->VsrD(0) == 0 && r->VsrD(1) == 0) {
        return ox_flag | CRF_EQ;
    }

    return ox_flag | CRF_GT;
}

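/*
 * VMX crypto helpers (ISA 2.07): vsbox applies the AES SubBytes step to
 * each byte; vcipher/vcipherlast and vncipher/vncipherlast implement one
 * (inverse) AES round using the usual table-lookup formulation.
 */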
void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a)
{
    int i;

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = AES_sbox[a->u8[i]];
    }
}

void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I(i, u32) {
        result.VsrW(i) = b->VsrW(i) ^
            (AES_Te0[a->VsrB(AES_shifts[4 * i + 0])] ^
             AES_Te1[a->VsrB(AES_shifts[4 * i + 1])] ^
             AES_Te2[a->VsrB(AES_shifts[4 * i + 2])] ^
             AES_Te3[a->VsrB(AES_shifts[4 * i + 3])]);
    }
    *r = result;
}

void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I(i, u8) {
        result.VsrB(i) = b->VsrB(i) ^ (AES_sbox[a->VsrB(AES_shifts[i])]);
    }
    *r = result;
}

void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /*
     * This differs from what is written in ISA V2.07.  The RTL is
     * incorrect and will be fixed in V2.07B.
     */
    int i;
    ppc_avr_t tmp;

    VECTOR_FOR_INORDER_I(i, u8) {
        tmp.VsrB(i) = b->VsrB(i) ^ AES_isbox[a->VsrB(AES_ishifts[i])];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->VsrW(i) =
            AES_imc[tmp.VsrB(4 * i + 0)][0] ^
            AES_imc[tmp.VsrB(4 * i + 1)][1] ^
            AES_imc[tmp.VsrB(4 * i + 2)][2] ^
            AES_imc[tmp.VsrB(4 * i + 3)][3];
    }
}

void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I(i, u8) {
        result.VsrB(i) = b->VsrB(i) ^ (AES_isbox[a->VsrB(AES_ishifts[i])]);
    }
    *r = result;
}

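/*
 * vshasigmaw/vshasigmad: SHA-256 and SHA-512 sigma functions. Bit 4 of
 * st_six selects the upper-case Sigma variants (all rotates), and one
 * bit of 'six' per element selects sigma0 versus sigma1.
 */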
void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
{
    int st = (st_six & 0x10) != 0;
    int six = st_six & 0xF;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        if (st == 0) {
            if ((six & (0x8 >> i)) == 0) {
                r->VsrW(i) = ror32(a->VsrW(i), 7) ^
                             ror32(a->VsrW(i), 18) ^
                             (a->VsrW(i) >> 3);
            } else { /* six.bit[i] == 1 */
                r->VsrW(i) = ror32(a->VsrW(i), 17) ^
                             ror32(a->VsrW(i), 19) ^
                             (a->VsrW(i) >> 10);
            }
        } else { /* st == 1 */
            if ((six & (0x8 >> i)) == 0) {
                r->VsrW(i) = ror32(a->VsrW(i), 2) ^
                             ror32(a->VsrW(i), 13) ^
                             ror32(a->VsrW(i), 22);
            } else { /* six.bit[i] == 1 */
                r->VsrW(i) = ror32(a->VsrW(i), 6) ^
                             ror32(a->VsrW(i), 11) ^
                             ror32(a->VsrW(i), 25);
            }
        }
    }
}

void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
{
    int st = (st_six & 0x10) != 0;
    int six = st_six & 0xF;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        if (st == 0) {
            if ((six & (0x8 >> (2 * i))) == 0) {
                r->VsrD(i) = ror64(a->VsrD(i), 1) ^
                             ror64(a->VsrD(i), 8) ^
                             (a->VsrD(i) >> 7);
            } else { /* six.bit[2*i] == 1 */
                r->VsrD(i) = ror64(a->VsrD(i), 19) ^
                             ror64(a->VsrD(i), 61) ^
                             (a->VsrD(i) >> 6);
            }
        } else { /* st == 1 */
            if ((six & (0x8 >> (2 * i))) == 0) {
                r->VsrD(i) = ror64(a->VsrD(i), 28) ^
                             ror64(a->VsrD(i), 34) ^
                             ror64(a->VsrD(i), 39);
            } else { /* six.bit[2*i] == 1 */
                r->VsrD(i) = ror64(a->VsrD(i), 14) ^
                             ror64(a->VsrD(i), 18) ^
                             ror64(a->VsrD(i), 41);
            }
        }
    }
}

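/*
 * vpermxor: each byte of c selects one nibble-indexed byte from a and
 * one from b; the result byte is their XOR.
 */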
void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int indexA = c->VsrB(i) >> 4;
        int indexB = c->VsrB(i) & 0xF;

        result.VsrB(i) = a->VsrB(indexA) ^ b->VsrB(indexB);
    }
    *r = result;
}

#undef VECTOR_FOR_INORDER_I

/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}

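/*
 * brinc computes a bit-reversed increment, as used for FFT-style
 * bit-reversed addressing: the bits of arg1 selected by the mask in
 * arg2 are incremented in reversed bit order.
 */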
#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}

uint32_t helper_cntlsw32(uint32_t val)
{
    if (val & 0x80000000) {
        return clz32(~val);
    } else {
        return clz32(val);
    }
}

uint32_t helper_cntlzw32(uint32_t val)
{
    return clz32(val);
}

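/*
 * dlmzb: determine the leftmost zero byte in the 8-byte string formed by
 * high:low. The resulting byte count is written to the low bits of XER
 * and, when update_Rc is set, summarised in CR0.
 */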
/* 440 specific */
target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
                          target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    i = 8;
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}