tci: Mask shift counts to avoid undefined behavior

TCG now requires unspecified behavior rather than a potential crash for
out-of-range shift counts, so bring the C shifts within the letter of the law.

Signed-off-by: Richard Henderson <rth@twiddle.net>
This commit is contained in:
Richard Henderson 2014-03-18 08:44:05 -07:00
parent 50c5c4d125
commit 1976cccec8

20
tci.c
View File

@ -669,32 +669,32 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr); t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr);
tci_write_reg32(t0, t1 << t2); tci_write_reg32(t0, t1 << (t2 & 31));
break; break;
case INDEX_op_shr_i32: case INDEX_op_shr_i32:
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr); t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr);
tci_write_reg32(t0, t1 >> t2); tci_write_reg32(t0, t1 >> (t2 & 31));
break; break;
case INDEX_op_sar_i32: case INDEX_op_sar_i32:
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr); t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr);
tci_write_reg32(t0, ((int32_t)t1 >> t2)); tci_write_reg32(t0, ((int32_t)t1 >> (t2 & 31)));
break; break;
#if TCG_TARGET_HAS_rot_i32 #if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32: case INDEX_op_rotl_i32:
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr); t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr);
tci_write_reg32(t0, rol32(t1, t2)); tci_write_reg32(t0, rol32(t1, t2 & 31));
break; break;
case INDEX_op_rotr_i32: case INDEX_op_rotr_i32:
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr); t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr);
tci_write_reg32(t0, ror32(t1, t2)); tci_write_reg32(t0, ror32(t1, t2 & 31));
break; break;
#endif #endif
#if TCG_TARGET_HAS_deposit_i32 #if TCG_TARGET_HAS_deposit_i32
@ -936,32 +936,32 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr); t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr);
tci_write_reg64(t0, t1 << t2); tci_write_reg64(t0, t1 << (t2 & 63));
break; break;
case INDEX_op_shr_i64: case INDEX_op_shr_i64:
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr); t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr);
tci_write_reg64(t0, t1 >> t2); tci_write_reg64(t0, t1 >> (t2 & 63));
break; break;
case INDEX_op_sar_i64: case INDEX_op_sar_i64:
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr); t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr);
tci_write_reg64(t0, ((int64_t)t1 >> t2)); tci_write_reg64(t0, ((int64_t)t1 >> (t2 & 63)));
break; break;
#if TCG_TARGET_HAS_rot_i64 #if TCG_TARGET_HAS_rot_i64
case INDEX_op_rotl_i64: case INDEX_op_rotl_i64:
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr); t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr);
tci_write_reg64(t0, rol64(t1, t2)); tci_write_reg64(t0, rol64(t1, t2 & 63));
break; break;
case INDEX_op_rotr_i64: case INDEX_op_rotr_i64:
t0 = *tb_ptr++; t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr); t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr);
tci_write_reg64(t0, ror64(t1, t2)); tci_write_reg64(t0, ror64(t1, t2 & 63));
break; break;
#endif #endif
#if TCG_TARGET_HAS_deposit_i64 #if TCG_TARGET_HAS_deposit_i64