tcg: Add gen_tb to TCGContext

This can replace four other variables that are references
into the TranslationBlock structure.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2022-11-26 18:39:55 -08:00
parent 3a50f424c9
commit b7e4afbd9f
4 changed files with 14 additions and 27 deletions
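
In short, the change collapses four cached views of the current TB into one back-pointer that is dereferenced at each use. Below is a minimal, self-contained sketch of the before/after access pattern; the reduced struct layouts, the CF_PARALLEL value, and the gen_tb_is_parallel() helper are illustrative stand-ins, not QEMU's real definitions:

#include <stdbool.h>
#include <stdint.h>

#define CF_PARALLEL 0x0008u  /* illustrative flag value, not QEMU's real definition */

/* Stand-in for QEMU's TranslationBlock, reduced to the fields this
 * commit cares about. */
typedef struct TranslationBlock {
    uint32_t cflags;               /* was mirrored as tcg_ctx->tb_cflags */
    uint16_t jmp_reset_offset[2];  /* was mirrored as tb_jmp_reset_offset */
    uintptr_t jmp_target_arg[2];   /* was mirrored as tb_jmp_insn_offset or
                                      tb_jmp_target_addr, depending on
                                      TCG_TARGET_HAS_direct_jump */
} TranslationBlock;

/* After this commit, a single back-pointer replaces all four cached
 * fields. */
typedef struct TCGContext {
    TranslationBlock *gen_tb;      /* tb for which code is being generated */
} TCGContext;

/* Hypothetical helper: call sites change from reading a cached copy
 * (s->tb_cflags) to one extra dereference through the TB itself. */
static bool gen_tb_is_parallel(const TCGContext *s)
{
    return (s->gen_tb->cflags & CF_PARALLEL) != 0;
}

The extra pointer chase is confined to translation time, and tb_gen_code no longer has to copy each field into TCGContext before generating code, as the diffs below show.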

accel/tcg/translate-all.c

@@ -350,7 +350,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->trace_vcpu_dstate = *cpu->trace_dstate;
     tb_set_page_addr0(tb, phys_pc);
     tb_set_page_addr1(tb, -1);
-    tcg_ctx->tb_cflags = cflags;
+    tcg_ctx->gen_tb = tb;
  tb_overflow:
 
 #ifdef CONFIG_PROFILER

include/tcg/tcg.h

@@ -552,20 +552,15 @@ struct TCGContext {
     int nb_indirects;
     int nb_ops;
 
-    /* goto_tb support */
-    tcg_insn_unit *code_buf;
-    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
-    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
-    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
-
     TCGRegSet reserved_regs;
-    uint32_t tb_cflags; /* cflags of the current TB */
     intptr_t current_frame_offset;
     intptr_t frame_start;
     intptr_t frame_end;
     TCGTemp *frame_temp;
 
-    tcg_insn_unit *code_ptr;
+    TranslationBlock *gen_tb; /* tb for which code is being generated */
+    tcg_insn_unit *code_buf;  /* pointer for start of tb */
+    tcg_insn_unit *code_ptr;  /* pointer for running end of tb */
 
 #ifdef CONFIG_PROFILER
     TCGProfile prof;

tcg/tcg-op.c

@@ -86,7 +86,7 @@ void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
 
 void tcg_gen_mb(TCGBar mb_type)
 {
-    if (tcg_ctx->tb_cflags & CF_PARALLEL) {
+    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {
         tcg_gen_op1(INDEX_op_mb, mb_type);
     }
 }
@@ -2782,7 +2782,7 @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
 void tcg_gen_goto_tb(unsigned idx)
 {
     /* We tested CF_NO_GOTO_TB in translator_use_goto_tb. */
-    tcg_debug_assert(!(tcg_ctx->tb_cflags & CF_NO_GOTO_TB));
+    tcg_debug_assert(!(tcg_ctx->gen_tb->cflags & CF_NO_GOTO_TB));
     /* We only support two chained exits. */
     tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
 #ifdef CONFIG_DEBUG_TCG
@@ -2798,7 +2798,7 @@ void tcg_gen_lookup_and_goto_ptr(void)
 {
     TCGv_ptr ptr;
 
-    if (tcg_ctx->tb_cflags & CF_NO_GOTO_PTR) {
+    if (tcg_ctx->gen_tb->cflags & CF_NO_GOTO_PTR) {
         tcg_gen_exit_tb(NULL, 0);
         return;
     }
@@ -3165,7 +3165,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
 {
     memop = tcg_canonicalize_memop(memop, 0, 0);
 
-    if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
+    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
         TCGv_i32 t1 = tcg_temp_new_i32();
         TCGv_i32 t2 = tcg_temp_new_i32();
@@ -3203,7 +3203,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
 {
     memop = tcg_canonicalize_memop(memop, 1, 0);
 
-    if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
+    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
         TCGv_i64 t1 = tcg_temp_new_i64();
         TCGv_i64 t2 = tcg_temp_new_i64();
@@ -3364,7 +3364,7 @@ static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \
 void tcg_gen_atomic_##NAME##_i32 \
     (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \
 { \
-    if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
+    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
         do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
     } else { \
         do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
@@ -3374,7 +3374,7 @@ void tcg_gen_atomic_##NAME##_i32 \
 void tcg_gen_atomic_##NAME##_i64 \
     (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \
 { \
-    if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
+    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
         do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
     } else { \
         do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \

tcg/tcg.c

@@ -311,7 +311,7 @@ static void set_jmp_reset_offset(TCGContext *s, int which)
      * We will check for overflow at the end of the opcode loop in
      * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
      */
-    s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
+    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
 }
 
 static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
@@ -321,7 +321,7 @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
      * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
      */
     tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
-    s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
+    s->gen_tb->jmp_target_arg[which] = tcg_current_code_size(s);
 }
 
 static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
@@ -330,7 +330,7 @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
      * Return the read-execute version of the pointer, for the benefit
      * of any pc-relative addressing mode.
      */
-    return (uintptr_t)tcg_splitwx_to_rx(&s->tb_jmp_target_addr[which]);
+    return (uintptr_t)tcg_splitwx_to_rx(s->gen_tb->jmp_target_arg + which);
 }
 
 /* Signal overflow, starting over with fewer guest insns. */
@@ -4668,14 +4668,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
     /* Initialize goto_tb jump offsets. */
     tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
     tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
-    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
-    if (TCG_TARGET_HAS_direct_jump) {
-        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
-        tcg_ctx->tb_jmp_target_addr = NULL;
-    } else {
-        tcg_ctx->tb_jmp_insn_offset = NULL;
-        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
-    }
 
     tcg_reg_alloc_start(s);