s390x TCG shift instruction fixes

Cherry-pick the following upstream commits:

521130f267 target/s390x: Fix SLDA sign bit index
57556b28af target/s390x: Fix SRDA CC calculation
df103c09bc target/s390x: Fix cc_calc_sla_64() missing overflows
6da170beda target/s390x: Fix shifting 32-bit values for more than 31 bits
Ilya Leoshkevich 2022-01-27 13:09:28 +01:00
parent 5f89981a3c
commit ad984b9366
5 changed files with 51 additions and 85 deletions

@@ -339,36 +339,9 @@ static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
     }
 }
 
-static uint32_t cc_calc_sla_32(uint32_t src, int shift)
-{
-    uint32_t mask = ((1U << shift) - 1U) << (32 - shift);
-    uint32_t sign = 1U << 31;
-    uint32_t match;
-    int32_t r;
-
-    /* Check if the sign bit stays the same. */
-    if (src & sign) {
-        match = mask;
-    } else {
-        match = 0;
-    }
-    if ((src & mask) != match) {
-        /* Overflow. */
-        return 3;
-    }
-
-    r = ((src << shift) & ~sign) | (src & sign);
-    if (r == 0) {
-        return 0;
-    } else if (r < 0) {
-        return 1;
-    }
-    return 2;
-}
-
-static uint32_t cc_calc_sla_64(uint64_t src, int shift)
+static uint32_t cc_calc_sla(uint64_t src, int shift)
 {
-    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
+    uint64_t mask = -1ULL << (63 - shift);
     uint64_t sign = 1ULL << 63;
     uint64_t match;
     int64_t r;
@@ -516,11 +489,8 @@ static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
     case CC_OP_ICM:
         r = cc_calc_icm(src, dst);
         break;
-    case CC_OP_SLA_32:
-        r = cc_calc_sla_32(src, dst);
-        break;
-    case CC_OP_SLA_64:
-        r = cc_calc_sla_64(src, dst);
+    case CC_OP_SLA:
+        r = cc_calc_sla(src, dst);
         break;
     case CC_OP_FLOGR:
         r = cc_calc_flogr(dst);
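
Note on the mask change above: for a shift count n, the old expression ((1ULL << n) - 1ULL) << (64 - n) covers only the top n bits, while the new -1ULL << (63 - n) covers the top n + 1 bits, so the bit that is shifted out through the sign position is checked as well. A minimal standalone sketch of the difference for a shift count of 1 (not part of the commit, for illustration only):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int shift = 1;
        uint64_t old_mask = ((1ULL << shift) - 1ULL) << (64 - shift);
        uint64_t new_mask = -1ULL << (63 - shift);

        /* old: 0x8000000000000000 - the sign bit only, which always matches
         *      itself, so no overflow was ever reported for a shift of 1.
         * new: 0xc000000000000000 - the sign bit plus bit 62, the bit that is
         *      shifted out and therefore has to equal the sign. */
        printf("old 0x%016" PRIx64 "\nnew 0x%016" PRIx64 "\n", old_mask, new_mask);
        return 0;
    }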

@@ -347,8 +347,7 @@ const char *cc_name(enum cc_op cc_op)
     [CC_OP_NZ_F64]   = "CC_OP_NZ_F64",
     [CC_OP_NZ_F128]  = "CC_OP_NZ_F128",
     [CC_OP_ICM]      = "CC_OP_ICM",
-    [CC_OP_SLA_32]   = "CC_OP_SLA_32",
-    [CC_OP_SLA_64]   = "CC_OP_SLA_64",
+    [CC_OP_SLA]      = "CC_OP_SLA",
     [CC_OP_FLOGR]    = "CC_OP_FLOGR",
     [CC_OP_LCBB]     = "CC_OP_LCBB",
     [CC_OP_VC]       = "CC_OP_VC",

@@ -734,8 +734,8 @@
     C(0xb9e1, POPCNT,  RRE,   PC,  0, r2_o, r1, 0, popcnt, nz64)
 
 /* ROTATE LEFT SINGLE LOGICAL */
-    C(0xeb1d, RLL,     RSY_a, Z,   r3_o, sh32, new, r1_32, rll32, 0)
-    C(0xeb1c, RLLG,    RSY_a, Z,   r3_o, sh64, r1, 0, rll64, 0)
+    C(0xeb1d, RLL,     RSY_a, Z,   r3_o, sh, new, r1_32, rll32, 0)
+    C(0xeb1c, RLLG,    RSY_a, Z,   r3_o, sh, r1, 0, rll64, 0)
 
 /* ROTATE THEN INSERT SELECTED BITS */
     C(0xec55, RISBG,   RIE_f, GIE, 0, r2, r1, 0, risbg, s64)
@@ -771,29 +771,29 @@
     C(0x0400, SPM,     RR_a,  Z,   r1, 0, 0, 0, spm, 0)
 
 /* SHIFT LEFT SINGLE */
-    D(0x8b00, SLA,     RS_a,  Z,   r1, sh32, new, r1_32, sla, 0, 31)
-    D(0xebdd, SLAK,    RSY_a, DO,  r3, sh32, new, r1_32, sla, 0, 31)
-    D(0xeb0b, SLAG,    RSY_a, Z,   r3, sh64, r1, 0, sla, 0, 63)
+    D(0x8b00, SLA,     RS_a,  Z,   r1, sh, new, r1_32, sla, 0, 31)
+    D(0xebdd, SLAK,    RSY_a, DO,  r3, sh, new, r1_32, sla, 0, 31)
+    D(0xeb0b, SLAG,    RSY_a, Z,   r3, sh, r1, 0, sla, 0, 63)
 /* SHIFT LEFT SINGLE LOGICAL */
-    C(0x8900, SLL,     RS_a,  Z,   r1_o, sh32, new, r1_32, sll, 0)
-    C(0xebdf, SLLK,    RSY_a, DO,  r3_o, sh32, new, r1_32, sll, 0)
-    C(0xeb0d, SLLG,    RSY_a, Z,   r3_o, sh64, r1, 0, sll, 0)
+    C(0x8900, SLL,     RS_a,  Z,   r1_o, sh, new, r1_32, sll, 0)
+    C(0xebdf, SLLK,    RSY_a, DO,  r3_o, sh, new, r1_32, sll, 0)
+    C(0xeb0d, SLLG,    RSY_a, Z,   r3_o, sh, r1, 0, sll, 0)
 /* SHIFT RIGHT SINGLE */
-    C(0x8a00, SRA,     RS_a,  Z,   r1_32s, sh32, new, r1_32, sra, s32)
-    C(0xebdc, SRAK,    RSY_a, DO,  r3_32s, sh32, new, r1_32, sra, s32)
-    C(0xeb0a, SRAG,    RSY_a, Z,   r3_o, sh64, r1, 0, sra, s64)
+    C(0x8a00, SRA,     RS_a,  Z,   r1_32s, sh, new, r1_32, sra, s32)
+    C(0xebdc, SRAK,    RSY_a, DO,  r3_32s, sh, new, r1_32, sra, s32)
+    C(0xeb0a, SRAG,    RSY_a, Z,   r3_o, sh, r1, 0, sra, s64)
 /* SHIFT RIGHT SINGLE LOGICAL */
-    C(0x8800, SRL,     RS_a,  Z,   r1_32u, sh32, new, r1_32, srl, 0)
-    C(0xebde, SRLK,    RSY_a, DO,  r3_32u, sh32, new, r1_32, srl, 0)
-    C(0xeb0c, SRLG,    RSY_a, Z,   r3_o, sh64, r1, 0, srl, 0)
+    C(0x8800, SRL,     RS_a,  Z,   r1_32u, sh, new, r1_32, srl, 0)
+    C(0xebde, SRLK,    RSY_a, DO,  r3_32u, sh, new, r1_32, srl, 0)
+    C(0xeb0c, SRLG,    RSY_a, Z,   r3_o, sh, r1, 0, srl, 0)
 /* SHIFT LEFT DOUBLE */
-    D(0x8f00, SLDA,    RS_a,  Z,   r1_D32, sh64, new, r1_D32, sla, 0, 31)
+    D(0x8f00, SLDA,    RS_a,  Z,   r1_D32, sh, new, r1_D32, sla, 0, 63)
 /* SHIFT LEFT DOUBLE LOGICAL */
-    C(0x8d00, SLDL,    RS_a,  Z,   r1_D32, sh64, new, r1_D32, sll, 0)
+    C(0x8d00, SLDL,    RS_a,  Z,   r1_D32, sh, new, r1_D32, sll, 0)
 /* SHIFT RIGHT DOUBLE */
-    C(0x8e00, SRDA,    RS_a,  Z,   r1_D32, sh64, new, r1_D32, sra, s64)
+    C(0x8e00, SRDA,    RS_a,  Z,   r1_D32, sh, new, r1_D32, sra, s64)
 /* SHIFT RIGHT DOUBLE LOGICAL */
-    C(0x8c00, SRDL,    RS_a,  Z,   r1_D32, sh64, new, r1_D32, srl, 0)
+    C(0x8c00, SRDL,    RS_a,  Z,   r1_D32, sh, new, r1_D32, srl, 0)
 
 /* SQUARE ROOT */
     F(0xb314, SQEBR,   RRE,   Z,   0, e2, new, e1, sqeb, 0, IF_BFP)
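
Beyond the sh32/sh64 to sh unification, the substantive change in this hunk is the SLDA line: the last argument of D() is the instruction's data field, which op_sla() below turns into the sign bit to preserve (1ull << data). SLDA shifts a 64-bit value held in an even/odd register pair, so its sign lives in bit 63, not bit 31. A rough standalone sketch of the output path that the op_sla() comment describes, with hypothetical names, not part of the commit:

    #include <stdint.h>
    #include <stdio.h>

    /* The arithmetic left shift does not affect the sign bit; it is copied
     * over from the source.  'sign_index' models the D() data field. */
    static uint64_t model_sla_result(uint64_t src, int shift, int sign_index)
    {
        uint64_t sign = 1ULL << sign_index;
        return ((src << shift) & ~sign) | (src & sign);
    }

    int main(void)
    {
        uint64_t pair = 0x8000000000000001ULL;  /* negative value in r1:r1+1 */

        /* With data = 63 the sign of the register pair survives the shift;
         * with the old data = 31 only bit 31 of the low half was preserved. */
        printf("data=63: 0x%016llx\n",
               (unsigned long long)model_sla_result(pair, 1, 63));
        printf("data=31: 0x%016llx\n",
               (unsigned long long)model_sla_result(pair, 1, 31));
        return 0;
    }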

@@ -196,8 +196,7 @@ enum cc_op {
     CC_OP_NZ_F128,       /* FP dst != 0 (128bit) */
 
     CC_OP_ICM,           /* insert characters under mask */
-    CC_OP_SLA_32,        /* Calculate shift left signed (32bit) */
-    CC_OP_SLA_64,        /* Calculate shift left signed (64bit) */
+    CC_OP_SLA,           /* Calculate shift left signed */
     CC_OP_FLOGR,         /* find leftmost one */
     CC_OP_LCBB,          /* load count to block boundary */
     CC_OP_VC,            /* vector compare result */

@@ -658,8 +658,7 @@ static void gen_op_calc_cc(DisasContext *s)
     case CC_OP_LTUGTU_64:
     case CC_OP_TM_32:
     case CC_OP_TM_64:
-    case CC_OP_SLA_32:
-    case CC_OP_SLA_64:
+    case CC_OP_SLA:
     case CC_OP_NZ_F128:
     case CC_OP_VC:
         /* 2 arguments */
@@ -1243,21 +1242,6 @@ struct DisasInsn {
 /* ====================================================================== */
 /* Miscellaneous helpers, used by several operations.  */
 
-static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-
-    int b2 = get_field(s, b2);
-    int d2 = get_field(s, d2);
-
-    if (b2 == 0) {
-        o->in2 = tcg_const_i64(tcg_ctx, d2 & mask);
-    } else {
-        o->in2 = get_address(s, 0, b2, d2);
-        tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, mask);
-    }
-}
-
 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
@@ -4278,10 +4262,19 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
 
 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
 {
+    TCGv_i64 t;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint64_t sign = 1ull << s->insn->data;
-    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
-    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
+
+    if (s->insn->data == 31) {
+        t = tcg_temp_new_i64(tcg_ctx);
+        tcg_gen_shli_i64(tcg_ctx, t, o->in1, 32);
+    } else {
+        t = o->in1;
+    }
+    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
+    if (s->insn->data == 31) {
+        tcg_temp_free_i64(tcg_ctx, t);
+    }
     tcg_gen_shl_i64(tcg_ctx, o->out, o->in1, o->in2);
     /* The arithmetic left shift is curious in that it does not affect
        the sign bit.  Copy that over from the source unchanged.  */
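
With the two CC ops merged, the 32-bit case is handled by widening the operand: op_sla() shifts a 32-bit first operand into the high half of a 64-bit temporary before handing it to CC_OP_SLA, so the single 64-bit helper sees the real sign bit and copes with shift counts up to 63. A hypothetical standalone model of the behaviour, assuming the rest of cc_calc_sla() mirrors the deleted 32-bit helper shown above:

    #include <stdint.h>
    #include <stdio.h>

    /* Standalone model of the unified cc_calc_sla(); the tail follows the
     * deleted cc_calc_sla_32(). */
    static uint32_t model_cc_calc_sla(uint64_t src, int shift)
    {
        uint64_t mask = -1ULL << (63 - shift);
        uint64_t sign = 1ULL << 63;
        uint64_t match = (src & sign) ? mask : 0;
        int64_t r;

        if ((src & mask) != match) {
            return 3;                               /* overflow */
        }
        r = ((src << shift) & ~sign) | (src & sign);
        return r == 0 ? 0 : (r < 0 ? 1 : 2);
    }

    int main(void)
    {
        /* 64-bit SLAG: 0x4000...0 << 1 loses a 1 bit unlike the sign -> CC 3.
         * The old shift-sized mask missed this overflow and gave CC 0. */
        printf("cc=%u\n", model_cc_calc_sla(1ULL << 62, 1));

        /* 32-bit SLA of 1 by 32, modelled as the operand widened into the
         * high half the way op_sla() now does: the 1 is shifted out -> CC 3. */
        printf("cc=%u\n", model_cc_calc_sla((uint64_t)1 << 32, 32));
        return 0;
    }
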
@@ -5648,9 +5641,11 @@ static void wout_r1_D32(DisasContext *s, DisasOps *o)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     int r1 = get_field(s, r1);
+    TCGv_i64 t = tcg_temp_new_i64(tcg_ctx);
 
     store_reg32_i64(tcg_ctx, r1 + 1, o->out);
-    tcg_gen_shri_i64(tcg_ctx, o->out, o->out, 32);
-    store_reg32_i64(tcg_ctx, r1, o->out);
+    tcg_gen_shri_i64(tcg_ctx, t, o->out, 32);
+    store_reg32_i64(tcg_ctx, r1, t);
+    tcg_temp_free_i64(tcg_ctx, t);
 }
 #define SPEC_wout_r1_D32 SPEC_r1_even
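
This is the SRDA CC fix: SRDA's condition code is the s64 handler applied to o->out, and that appears to run only after the write-back hook, so wout_r1_D32() must not clobber o->out while splitting it across the register pair. A rough C sketch of the idea, with hypothetical variable names, not part of the commit:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical 64-bit SRDA result that must also feed the s64 CC. */
        uint64_t out = 0xffffffff80000000ULL;   /* negative -> expect CC 1 */
        uint64_t t = out >> 32;                 /* temp, as in the fixed helper */

        uint32_t r1_odd  = (uint32_t)out;       /* stored to r1 + 1 */
        uint32_t r1_even = (uint32_t)t;         /* stored to r1 */

        /* 'out' still holds the full result, so the sign test yields CC 1;
         * the old code shifted 'out' itself, leaving a positive 0xffffffff. */
        printf("cc=%d regs=%08x:%08x\n",
               (int64_t)out < 0 ? 1 : 2, r1_even, r1_odd);
        return 0;
    }
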
@@ -6190,17 +6185,20 @@ static void in2_ri2(DisasContext *s, DisasOps *o)
 }
 #define SPEC_in2_ri2 0
 
-static void in2_sh32(DisasContext *s, DisasOps *o)
+static void in2_sh(DisasContext *s, DisasOps *o)
 {
-    help_l2_shift(s, o, 31);
-}
-#define SPEC_in2_sh32 0
-
-static void in2_sh64(DisasContext *s, DisasOps *o)
-{
-    help_l2_shift(s, o, 63);
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int b2 = get_field(s, b2);
+    int d2 = get_field(s, d2);
+
+    if (b2 == 0) {
+        o->in2 = tcg_const_i64(tcg_ctx, d2 & 0x3f);
+    } else {
+        o->in2 = get_address(s, 0, b2, d2);
+        tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, 0x3f);
+    }
 }
-#define SPEC_in2_sh64 0
+#define SPEC_in2_sh 0
 
 static void in2_m2_8u(DisasContext *s, DisasOps *o)
 {
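
The replacement in2_sh() masks the shift count with 0x3f for every user, matching the architecture's use of the low six bits of the second-operand address; the 32-bit forms previously went through sh32, whose mask of 31 silently turned a count of 32..63 into 0..31. A small standalone illustration of the visible difference for a 32-bit SRL, not part of the commit (the zero-extension mirrors the r1_32u binding in the table above):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t r1 = 0x12345678;
        unsigned count = 40;                   /* low bits of the shift address */

        /* Old: count & 31 == 8, so SRL by 40 behaved like SRL by 8. */
        uint32_t old_way = r1 >> (count & 31);              /* 0x00123456 */

        /* New: count & 0x3f == 40; the zero-extended 64-bit shift drops
         * every bit of the 32-bit operand. */
        uint32_t new_way = (uint32_t)((uint64_t)r1 >> (count & 0x3f));  /* 0 */

        printf("old 0x%08x new 0x%08x\n", old_way, new_way);
        return 0;
    }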