cpu-exec: Move TB chaining into tb_find_fast()
Move the tb_add_jump() call and surrounding code from cpu_exec() into tb_find_fast(). That simplifies cpu_exec() a little by hiding the direct-chaining optimization details inside tb_find_fast(). It also allows the tb_lock()/tb_unlock() pair to move into tb_find_fast(), putting it closer to tb_find_slow(), which also manipulates the lock.

Suggested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
[rth: Fixed rebase typo in nochain test.]
Parent commit: 6f789be56d
This commit:   a0522c7a55
cpu-exec.c — 35 changed lines
@@ -320,7 +320,9 @@ found:
     return tb;
 }
 
-static inline TranslationBlock *tb_find_fast(CPUState *cpu)
+static inline TranslationBlock *tb_find_fast(CPUState *cpu,
+                                             TranslationBlock **last_tb,
+                                             int tb_exit)
 {
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
@@ -331,11 +333,24 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu)
        always be the same before a given translated block
        is executed. */
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+    tb_lock();
     tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
+    if (cpu->tb_flushed) {
+        /* Ensure that no TB jump will be modified as the
+         * translation buffer has been flushed.
+         */
+        *last_tb = NULL;
+        cpu->tb_flushed = false;
+    }
+    /* See if we can patch the calling TB. */
+    if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+        tb_add_jump(*last_tb, tb_exit, tb);
+    }
+    tb_unlock();
     return tb;
 }
 
@@ -441,7 +456,8 @@ int cpu_exec(CPUState *cpu)
             } else if (replay_has_exception()
                        && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                 /* try to cause an exception pending in the log */
-                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
+                last_tb = NULL; /* Avoid chaining TBs */
+                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, &last_tb, 0), true);
                 ret = -1;
                 break;
 #endif
@@ -511,20 +527,7 @@ int cpu_exec(CPUState *cpu)
                 cpu->exception_index = EXCP_INTERRUPT;
                 cpu_loop_exit(cpu);
             }
-            tb_lock();
-            tb = tb_find_fast(cpu);
-            if (cpu->tb_flushed) {
-                /* Ensure that no TB jump will be modified as the
-                 * translation buffer has been flushed.
-                 */
-                last_tb = NULL;
-                cpu->tb_flushed = false;
-            }
-            /* See if we can patch the calling TB. */
-            if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-                tb_add_jump(last_tb, tb_exit, tb);
-            }
-            tb_unlock();
+            tb = tb_find_fast(cpu, &last_tb, tb_exit);
             if (likely(!cpu->exit_request)) {
                 uintptr_t ret;
                 trace_exec_tb(tb, tb->pc);
Loading…
Reference in New Issue
Block a user