tcg: Move tb_phys_invalidate_count to tb_ctx

We can call do_tb_phys_invalidate from an iocontext, which has
no per-thread tcg_ctx.  Move this to tb_ctx, which is global.
The actual update still takes place with a lock held, so only
an atomic set is required, not an atomic increment.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/457
Tested-by: Viktor Ashirov <vashirov@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson
Date:   2021-07-06 20:54:56 -07:00
Commit: a4390647f7
Parent: 834361efd9

4 changed files with 5 additions and 21 deletions
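
The reasoning in the commit message reduces to a common pattern: when every writer of a counter is already serialized by a lock, a plain atomic store of old + 1 is enough, and readers that sample the counter without the lock only need a tear-free load. Below is a minimal, self-contained C sketch of that pattern (illustrative names only; C11 atomics stand in for QEMU's qatomic_set/qatomic_read, and this is not part of the patch itself):

/* Sketch: counter incremented only under a lock, read lock-free. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint invalidate_count;   /* stands in for tb_ctx.tb_phys_invalidate_count */

static void invalidate_one(void)
{
    pthread_mutex_lock(&lock);
    /* Writers are serialized by the lock, so no atomic read-modify-write is
     * needed: a relaxed load plus a relaxed store cannot race with another
     * writer, only with lock-free readers, which merely need a whole value. */
    atomic_store_explicit(&invalidate_count,
                          atomic_load_explicit(&invalidate_count,
                                               memory_order_relaxed) + 1,
                          memory_order_relaxed);
    pthread_mutex_unlock(&lock);
}

static void dump_stats(void)
{
    /* Lock-free reader, as in dump_exec_info(). */
    printf("TB invalidate count %u\n",
           atomic_load_explicit(&invalidate_count, memory_order_relaxed));
}

int main(void)
{
    invalidate_one();
    dump_stats();
    return 0;
}

An atomic increment (qatomic_fetch_inc) would also be correct here, but it is unnecessary overhead when the lock already serializes all writers.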

@@ -34,6 +34,7 @@ struct TBContext {
 
     /* statistics */
     unsigned tb_flush_count;
+    unsigned tb_phys_invalidate_count;
 };
 
 extern TBContext tb_ctx;

@@ -1219,8 +1219,8 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
     /* suppress any remaining jumps to this TB */
     tb_jmp_unlink(tb);
 
-    qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
-                tcg_ctx->tb_phys_invalidate_count + 1);
+    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
+                tb_ctx.tb_phys_invalidate_count + 1);
 }
 
 static void tb_phys_invalidate__locked(TranslationBlock *tb)
@@ -2128,8 +2128,8 @@ void dump_exec_info(void)
     qemu_printf("\nStatistics:\n");
     qemu_printf("TB flush count %u\n",
                 qatomic_read(&tb_ctx.tb_flush_count));
-    qemu_printf("TB invalidate count %zu\n",
-                tcg_tb_phys_invalidate_count());
+    qemu_printf("TB invalidate count %u\n",
+                qatomic_read(&tb_ctx.tb_phys_invalidate_count));
 
     tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
     qemu_printf("TLB full flushes %zu\n", flush_full);

@@ -579,8 +579,6 @@ struct TCGContext {
     /* Threshold to flush the translated code buffer. */
     void *code_gen_highwater;
 
-    size_t tb_phys_invalidate_count;
-
     /* Track which vCPU triggers events */
     CPUState *cpu; /* *_trans */
@@ -815,7 +813,6 @@ size_t tcg_code_capacity(void);
 
 void tcg_tb_insert(TranslationBlock *tb);
 void tcg_tb_remove(TranslationBlock *tb);
-size_t tcg_tb_phys_invalidate_count(void);
 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
 size_t tcg_nb_tbs(void);

@@ -980,17 +980,3 @@ size_t tcg_code_capacity(void)
     return capacity;
 }
 
-size_t tcg_tb_phys_invalidate_count(void)
-{
-    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
-    unsigned int i;
-    size_t total = 0;
-
-    for (i = 0; i < n_ctxs; i++) {
-        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
-
-        total += qatomic_read(&s->tb_phys_invalidate_count);
-    }
-    return total;
-}