target/ppc: introduce avr_full_offset() function

All TCG vector operations require pointers to the base address of the vector
rather than separate access to the top and bottom 64 bits. Convert the VMX TCG
instructions to use a new avr_full_offset() function instead of avr64_offset(),
which can then itself be written as a simple wrapper onto vsr_full_offset().

This same function can also be reused in cpu_avr_ptr() to avoid having more
than one copy of the offset calculation logic.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Message-Id: <20190307180520.13868-5-mark.cave-ayland@ilande.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Commit c82a8a8542 (parent da7815ef31)
Author: Mark Cave-Ayland, 2019-03-07 18:05:17 +00:00; committed by David Gibson
3 files changed, 22 insertions(+), 17 deletions(-)

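The mapping the patch relies on is visible in the first hunk below (target/ppc/cpu.h): the 32 VMX/Altivec registers occupy entries 32-63 of the unified vsr[] array, so avr_full_offset(i) is simply vsr_full_offset(i + 32), and cpu_avr_ptr() can be rewritten as env plus that offset without changing the address it returns. The following standalone sketch checks that equivalence; MockCPUState is a deliberately simplified stand-in for the real CPUPPCState, which carries many other fields.

/*
 * Standalone sketch (not QEMU code): MockCPUState stands in for the real
 * CPUPPCState.  The point is only to show that the new cpu_avr_ptr()
 * arithmetic lands on the same address as the old &env->vsr[32 + i] form.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef union {
    uint8_t  u8[16];
    uint32_t u32[4];
    uint64_t u64[2];
} ppc_vsr_t;

typedef ppc_vsr_t ppc_avr_t;

typedef struct {
    ppc_vsr_t vsr[64];   /* VSRs 0-31 overlap the FPRs, 32-63 are the AVRs */
} MockCPUState;

/* offsetof() with a runtime index is a GCC/Clang extension QEMU relies on */
static inline int vsr_full_offset(int i)
{
    return offsetof(MockCPUState, vsr[i].u64[0]);
}

static inline int avr_full_offset(int i)
{
    return vsr_full_offset(i + 32);   /* AVR i is stored as VSR 32 + i */
}

static inline ppc_avr_t *cpu_avr_ptr(MockCPUState *env, int i)
{
    return (ppc_avr_t *)((uintptr_t)env + avr_full_offset(i));
}

int main(void)
{
    MockCPUState env;

    for (int i = 0; i < 32; i++) {
        /* new pointer math matches the old &env->vsr[32 + i] form */
        assert(cpu_avr_ptr(&env, i) == &env.vsr[32 + i]);
    }
    printf("avr_full_offset(0) = %d\n", avr_full_offset(0));  /* 512 in this mock */
    return 0;
}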

@@ -2598,14 +2598,24 @@ static inline int vsrl_offset(int i)
     return offsetof(CPUPPCState, vsr[i].u64[1]);
 }
 
+static inline int vsr_full_offset(int i)
+{
+    return offsetof(CPUPPCState, vsr[i].u64[0]);
+}
+
 static inline uint64_t *cpu_vsrl_ptr(CPUPPCState *env, int i)
 {
     return (uint64_t *)((uintptr_t)env + vsrl_offset(i));
 }
 
+static inline int avr_full_offset(int i)
+{
+    return vsr_full_offset(i + 32);
+}
+
 static inline ppc_avr_t *cpu_avr_ptr(CPUPPCState *env, int i)
 {
-    return &env->vsr[32 + i];
+    return (ppc_avr_t *)((uintptr_t)env + avr_full_offset(i));
 }
 
 void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env);

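With the helpers above in cpu.h, the VMX translation code (target/ppc/translate/vmx-impl.inc.c, the hunks that follow) can hand a single per-register base offset straight to the TCG generic-vector expanders. As a rough, hand-written illustration of what a GEN_VXFORM_V-style expansion looks like after this change, here is the shape of a three-operand byte add; it is not part of the patch, only compiles inside QEMU's translator (DisasContext, rD/rA/rB, tcg_gen_gvec_add), and omits the usual altivec-enabled check:

/* Illustrative sketch only, not patch content: a three-operand VMX op
 * expanded over the whole 16-byte register from a single base offset. */
static void gen_vaddubm_sketch(DisasContext *ctx)
{
    tcg_gen_gvec_add(MO_8,                               /* element size */
                     avr_full_offset(rD(ctx->opcode)),   /* destination AVR */
                     avr_full_offset(rA(ctx->opcode)),   /* source AVR */
                     avr_full_offset(rB(ctx->opcode)),   /* source AVR */
                     16, 16);                            /* oprsz, maxsz */
}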

@@ -10,7 +10,7 @@
 static inline TCGv_ptr gen_avr_ptr(int reg)
 {
     TCGv_ptr r = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, vsr[32 + reg].u64[0]));
+    tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
     return r;
 }
 
@@ -205,7 +205,7 @@ static void gen_mtvscr(DisasContext *ctx)
     }
 
     val = tcg_temp_new_i32();
-    bofs = avr64_offset(rB(ctx->opcode), true);
+    bofs = avr_full_offset(rB(ctx->opcode));
 #ifdef HOST_WORDS_BIGENDIAN
     bofs += 3 * 4;
 #endif
@@ -284,9 +284,9 @@ static void glue(gen_, name)(DisasContext *ctx) \
     } \
 \
     tcg_op(vece, \
-           avr64_offset(rD(ctx->opcode), true), \
-           avr64_offset(rA(ctx->opcode), true), \
-           avr64_offset(rB(ctx->opcode), true), \
+           avr_full_offset(rD(ctx->opcode)), \
+           avr_full_offset(rA(ctx->opcode)), \
+           avr_full_offset(rB(ctx->opcode)), \
            16, 16); \
 }
 
@@ -578,10 +578,10 @@ static void glue(gen_, NAME)(DisasContext *ctx) \
         gen_exception(ctx, POWERPC_EXCP_VPU); \
         return; \
     } \
-    tcg_gen_gvec_4(avr64_offset(rD(ctx->opcode), true), \
+    tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)), \
                    offsetof(CPUPPCState, vscr_sat), \
-                   avr64_offset(rA(ctx->opcode), true), \
-                   avr64_offset(rB(ctx->opcode), true), \
+                   avr_full_offset(rA(ctx->opcode)), \
+                   avr_full_offset(rB(ctx->opcode)), \
                    16, 16, &g); \
 }
 
@@ -755,7 +755,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
         return; \
     } \
     simm = SIMM5(ctx->opcode); \
-    tcg_op(avr64_offset(rD(ctx->opcode), true), 16, 16, simm); \
+    tcg_op(avr_full_offset(rD(ctx->opcode)), 16, 16, simm); \
 }
 
 GEN_VXFORM_DUPI(vspltisb, tcg_gen_gvec_dup8i, 6, 12);
@@ -850,8 +850,8 @@ static void gen_vsplt(DisasContext *ctx, int vece)
     }
 
     uimm = UIMM5(ctx->opcode);
-    bofs = avr64_offset(rB(ctx->opcode), true);
-    dofs = avr64_offset(rD(ctx->opcode), true);
+    bofs = avr_full_offset(rB(ctx->opcode));
+    dofs = avr_full_offset(rD(ctx->opcode));
 
     /* Experimental testing shows that hardware masks the immediate. */
     bofs += (uimm << vece) & 15;

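The gen_mtvscr() and gen_vsplt() hunks above also lean on plain byte arithmetic within the 16-byte register: on a big-endian host the least-significant 32-bit word (the VSCR source) is u32[3], hence bofs += 3 * 4, and gen_vsplt() picks element uimm of size 1 << vece with (uimm << vece) & 15, the mask reproducing the hardware's treatment of over-range immediates. A small standalone sketch of that arithmetic (plain C, not QEMU code; the MO_* values mirror TCG's element-size encoding):

#include <stdio.h>

/* log2 of the element size, matching TCG's MO_8/MO_16/MO_32/MO_64 values */
enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 };

/* Byte offset of element 'uimm' of size (1 << vece) within a 16-byte
 * vector register that starts at byte offset 'bofs'; the & 15 keeps an
 * over-long immediate inside the register, as the hardware does. */
static int element_offset(int bofs, int uimm, int vece)
{
    return bofs + ((uimm << vece) & 15);
}

int main(void)
{
    printf("%d\n", element_offset(0, 3, MO_32));  /* word 3 -> byte 12 */
    printf("%d\n", element_offset(0, 5, MO_32));  /* 20 masked back to 4 */
    return 0;
}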

@@ -10,11 +10,6 @@ static inline void set_vsrl(int n, TCGv_i64 src)
     tcg_gen_st_i64(src, cpu_env, vsrl_offset(n));
 }
 
-static inline int vsr_full_offset(int n)
-{
-    return offsetof(CPUPPCState, vsr[n].u64[0]);
-}
-
 static inline void get_cpu_vsrh(TCGv_i64 dst, int n)
 {
     if (n < 32) {