diff --git a/target-arm/translate.c b/target-arm/translate.c index bfe0f21353..711676ff7d 100644 --- a/target-arm/translate.c +++ b/target-arm/translate.c @@ -221,8 +221,8 @@ static void store_reg(DisasContext *s, int reg, TCGv var) #define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im) /* Value extensions. */ -#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff) -#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff) +#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var) +#define gen_uxth(var) tcg_gen_ext16u_i32(var, var) #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var) #define gen_sxth(var) tcg_gen_ext16s_i32(var, var) @@ -1446,7 +1446,7 @@ static void gen_op_iwmmxt_setpsr_nz(void) static inline void gen_op_iwmmxt_addl_M0_wRn(int rn) { iwmmxt_load_reg(cpu_V1, rn); - tcg_gen_andi_i64(cpu_V1, cpu_V1, 0xffffffffu); + tcg_gen_ext32u_i64(cpu_V1, cpu_V1); tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1); } @@ -2704,7 +2704,7 @@ static void gen_neon_dup_u8(TCGv var, int shift) TCGv tmp = new_tmp(); if (shift) tcg_gen_shri_i32(var, var, shift); - tcg_gen_andi_i32(var, var, 0xff); + tcg_gen_ext8u_i32(var, var); tcg_gen_shli_i32(tmp, var, 8); tcg_gen_or_i32(var, var, tmp); tcg_gen_shli_i32(tmp, var, 16); @@ -2715,7 +2715,7 @@ static void gen_neon_dup_u8(TCGv var, int shift) static void gen_neon_dup_low16(TCGv var) { TCGv tmp = new_tmp(); - tcg_gen_andi_i32(var, var, 0xffff); + tcg_gen_ext16u_i32(var, var); tcg_gen_shli_i32(tmp, var, 16); tcg_gen_or_i32(var, var, tmp); dead_tmp(tmp); @@ -5862,7 +5862,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s) } else { /* MOVT */ tmp = load_reg(s, rd); - tcg_gen_andi_i32(tmp, tmp, 0xffff); + tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_ori_i32(tmp, tmp, val << 16); } store_reg(s, rd, tmp); @@ -6378,10 +6378,10 @@ static void disas_arm_insn(CPUState * env, DisasContext *s) if (insn & (1 << 6)) { /* pkhtb */ tcg_gen_andi_i32(tmp, tmp, 0xffff0000); - tcg_gen_andi_i32(tmp2, tmp2, 0xffff); + tcg_gen_ext16u_i32(tmp2, tmp2); 
} else { /* pkhbt */ - tcg_gen_andi_i32(tmp, tmp, 0xffff); + tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); } tcg_gen_or_i32(tmp, tmp, tmp2); @@ -7700,7 +7700,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1) if (insn & (1 << 23)) { /* movt */ tmp = load_reg(s, rd); - tcg_gen_andi_i32(tmp, tmp, 0xffff); + tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_ori_i32(tmp, tmp, imm << 16); } else { /* movw */ diff --git a/target-cris/translate.c b/target-cris/translate.c index 72eb48cdf9..89a0533857 100644 --- a/target-cris/translate.c +++ b/target-cris/translate.c @@ -1165,11 +1165,10 @@ static inline void t_gen_sext(TCGv d, TCGv s, int size) static inline void t_gen_zext(TCGv d, TCGv s, int size) { - /* TCG-FIXME: this is not optimal. Many archs have fast zext insns. */ if (size == 1) - tcg_gen_andi_i32(d, s, 0xff); + tcg_gen_ext8u_i32(d, s); else if (size == 2) - tcg_gen_andi_i32(d, s, 0xffff); + tcg_gen_ext16u_i32(d, s); else tcg_gen_mov_tl(d, s); } diff --git a/tcg/README b/tcg/README index c27c5abed9..c47d3b07ac 100644 --- a/tcg/README +++ b/tcg/README @@ -260,10 +260,13 @@ t0 = t1 Move t1 to t0 (both operands must have the same type). * ext8s_i32/i64 t0, t1 +ext8u_i32/i64 t0, t1 ext16s_i32/i64 t0, t1 +ext16u_i32/i64 t0, t1 ext32s_i64 t0, t1 +ext32u_i64 t0, t1 -8, 16 or 32 bit sign extension (both operands must have the same type) +8, 16 or 32 bit sign/zero extension (both operands must have the same type) * bswap16_i32 t0, t1 diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h index 1daf130f17..8ab9536603 100644 --- a/tcg/tcg-op.h +++ b/tcg/tcg-op.h @@ -980,6 +980,18 @@ static inline void tcg_gen_ext16s_i32(TCGv ret, TCGv arg) #endif } +/* These are currently just for convenience. + We assume a target will recognise these automatically.
*/ +static inline void tcg_gen_ext8u_i32(TCGv ret, TCGv arg) +{ + tcg_gen_andi_i32(ret, arg, 0xffu); +} + +static inline void tcg_gen_ext16u_i32(TCGv ret, TCGv arg) +{ + tcg_gen_andi_i32(ret, arg, 0xffffu); +} + /* Note: we assume the two high bytes are set to zero */ static inline void tcg_gen_bswap16_i32(TCGv ret, TCGv arg) { @@ -1040,6 +1052,24 @@ static inline void tcg_gen_ext32s_i64(TCGv ret, TCGv arg) tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31); } +static inline void tcg_gen_ext8u_i64(TCGv ret, TCGv arg) +{ + tcg_gen_ext8u_i32(ret, arg); + tcg_gen_movi_i32(TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_ext16u_i64(TCGv ret, TCGv arg) +{ + tcg_gen_ext16u_i32(ret, arg); + tcg_gen_movi_i32(TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_ext32u_i64(TCGv ret, TCGv arg) +{ + tcg_gen_mov_i32(ret, arg); + tcg_gen_movi_i32(TCGV_HIGH(ret), 0); +} + static inline void tcg_gen_trunc_i64_i32(TCGv ret, TCGv arg) { tcg_gen_mov_i32(ret, arg); @@ -1100,6 +1130,21 @@ static inline void tcg_gen_ext32s_i64(TCGv ret, TCGv arg) #endif } +static inline void tcg_gen_ext8u_i64(TCGv ret, TCGv arg) +{ + tcg_gen_andi_i64(ret, arg, 0xffu); +} + +static inline void tcg_gen_ext16u_i64(TCGv ret, TCGv arg) +{ + tcg_gen_andi_i64(ret, arg, 0xffffu); +} + +static inline void tcg_gen_ext32u_i64(TCGv ret, TCGv arg) +{ + tcg_gen_andi_i64(ret, arg, 0xffffffffu); +} + /* Note: we assume the target supports move between 32 and 64 bit registers. This will probably break MIPS64 targets. */ static inline void tcg_gen_trunc_i64_i32(TCGv ret, TCGv arg) @@ -1111,7 +1156,7 @@ static inline void tcg_gen_trunc_i64_i32(TCGv ret, TCGv arg) registers */ static inline void tcg_gen_extu_i32_i64(TCGv ret, TCGv arg) { - tcg_gen_andi_i64(ret, arg, 0xffffffff); + tcg_gen_andi_i64(ret, arg, 0xffffffffu); } /* Note: we assume the target supports move between 32 and 64 bit