accel/tcg: move CF_CLUSTER calculation to curr_cflags
There is nothing special about this compile flag that stops us from simply computing it in curr_cflags(), which is what we should be using when building a new set of flags.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20210224165811.11567-3-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 6f04cb1c8f
commit c0ae396a81
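The gist of the change, as a minimal standalone sketch (not QEMU source): curr_cflags() now folds the CPU's cluster index into the returned cflags word, so the value used for TB hashing and comparison already carries the CF_CLUSTER bits and tb_lookup() no longer has to patch them in on every lookup. The CF_* constants and the local deposit32() below are hard-coded stand-ins for the definitions in QEMU's exec-all.h and qemu/bitops.h, chosen here only for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real definitions live in QEMU's exec-all.h. */
#define CF_USE_ICOUNT    0x00020000u
#define CF_PARALLEL      0x00080000u
#define CF_CLUSTER_MASK  0xff000000u  /* top 8 bits: cluster index */
#define CF_CLUSTER_SHIFT 24

/* Local stand-in for QEMU's deposit32() helper (qemu/bitops.h). */
static uint32_t deposit32(uint32_t value, int start, int length, uint32_t fieldval)
{
    uint32_t mask = (~0u >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

/* New-style curr_cflags(): the cluster index is part of the returned flags. */
static uint32_t curr_cflags_new(uint8_t cluster_index, bool parallel, bool icount)
{
    uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cluster_index);
    cflags |= parallel ? CF_PARALLEL : 0;
    cflags |= icount ? CF_USE_ICOUNT : 0;
    return cflags;
}

/* Old behaviour: tb_lookup() merged the cluster index into cf_mask itself. */
static uint32_t old_lookup_mask(uint32_t cf_mask, uint8_t cluster_index)
{
    cf_mask &= ~CF_CLUSTER_MASK;
    cf_mask |= (uint32_t)cluster_index << CF_CLUSTER_SHIFT;
    return cf_mask;
}

int main(void)
{
    uint8_t cluster = 2;             /* e.g. one cluster of a heterogeneous SoC */
    uint32_t old_base = CF_PARALLEL; /* what the old curr_cflags() would return */

    uint32_t via_new = curr_cflags_new(cluster, true, false);
    uint32_t via_old = old_lookup_mask(old_base, cluster);

    /* Both paths produce the same hashing/comparison value. */
    assert(via_new == via_old);
    printf("cflags = 0x%08x (cluster %u in the top byte)\n",
           (unsigned)via_new, (unsigned)cluster);
    return 0;
}

One caller-side detail visible in the first hunk below: cpu_exec_step_atomic() masks CF_PARALLEL back out of the result, since the atomic step executes under exclusive access and does not want parallel-context code generation.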
@@ -249,8 +249,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
-    uint32_t cflags = 1;
-    uint32_t cf_mask = cflags & CF_HASH_MASK;
+    uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
     int tb_exit;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -260,7 +259,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu->running = true;
 
         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-        tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask);
+        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
 
         if (tb == NULL) {
             mmap_lock();
@@ -497,7 +496,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     if (replay_has_exception()
         && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
         /* Execute just one insn to trigger exception pending in the log */
-        cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1;
+        cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
     }
 #endif
     return false;
@@ -794,7 +793,7 @@ int cpu_exec(CPUState *cpu)
            have CF_INVALID set, -1 is a convenient invalid value that
            does not require tcg headers for cpu_common_reset. */
         if (cflags == -1) {
-            cflags = curr_cflags();
+            cflags = curr_cflags(cpu);
         } else {
             cpu->cflags_next_tb = -1;
         }
@@ -154,7 +154,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 
-    tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags());
+    tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
     if (tb == NULL) {
         return tcg_code_gen_epilogue;
     }
@@ -2194,7 +2194,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
     if (current_tb_modified) {
         page_collection_unlock(pages);
         /* Force execution of one insn next time. */
-        cpu->cflags_next_tb = 1 | curr_cflags();
+        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
         mmap_unlock();
         cpu_loop_exit_noexc(cpu);
     }
@@ -2362,7 +2362,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
 #ifdef TARGET_HAS_PRECISE_SMC
     if (current_tb_modified) {
         /* Force execution of one insn next time. */
-        cpu->cflags_next_tb = 1 | curr_cflags();
+        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
         return true;
     }
 #endif
@@ -2438,7 +2438,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
      * operations only (which execute after completion) so we don't
      * double instrument the instruction.
      */
-    cpu->cflags_next_tb = curr_cflags() | CF_MEMI_ONLY | CF_LAST_IO | n;
+    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
 
     qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                            "cpu_io_recompile: rewound execution of TB to "
@@ -519,10 +519,12 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
 }
 
 /* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(void)
+static inline uint32_t curr_cflags(CPUState *cpu)
 {
-    return (parallel_cpus ? CF_PARALLEL : 0)
-         | (icount_enabled() ? CF_USE_ICOUNT : 0);
+    uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index);
+    cflags |= parallel_cpus ? CF_PARALLEL : 0;
+    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
+    return cflags;
 }
 
 /* TranslationBlock invalidate API */
@@ -27,9 +27,6 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
     hash = tb_jmp_cache_hash_func(pc);
     tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
 
-    cf_mask &= ~CF_CLUSTER_MASK;
-    cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
-
     if (likely(tb &&
                tb->pc == pc &&
                tb->cs_base == cs_base &&
@@ -937,7 +937,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
             cpu_loop_exit_restore(cpu, ra);
         } else {
             /* Force execution of one insn next time. */
-            cpu->cflags_next_tb = 1 | curr_cflags();
+            cpu->cflags_next_tb = 1 | curr_cflags(cpu);
             mmap_unlock();
             if (ra) {
                 cpu_restore_state(cpu, ra, true);