accel/tcg: Move jmp-cache CF_PCREL checks to caller
tb-jmp-cache.h contains a few small functions that only exist to hide a
CF_PCREL check; however, the caller often already performs such a check.

This patch moves the CF_PCREL checks from the callee to the caller, and
removes these functions, which now only hide an access of the jmp-cache.

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20230227135202.9710-12-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 2dd5b7a1b9 (parent f6680c5ea4)
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -254,10 +254,13 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
 
     hash = tb_jmp_cache_hash_func(pc);
     jc = cpu->tb_jmp_cache;
-    tb = tb_jmp_cache_get_tb(jc, cflags, hash);
 
+    if (cflags & CF_PCREL) {
+        /* Use acquire to ensure current load of pc from jc. */
+        tb = qatomic_load_acquire(&jc->array[hash].tb);
+
         if (likely(tb &&
-               tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
+                   jc->array[hash].pc == pc &&
                    tb->cs_base == cs_base &&
                    tb->flags == flags &&
                    tb->trace_vcpu_dstate == *cpu->trace_dstate &&
@@ -268,7 +271,29 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
         if (tb == NULL) {
             return NULL;
         }
-    tb_jmp_cache_set(jc, hash, tb, pc);
+        jc->array[hash].pc = pc;
+        /* Use store_release on tb to ensure pc is written first. */
+        qatomic_store_release(&jc->array[hash].tb, tb);
+    } else {
+        /* Use rcu_read to ensure current load of pc from *tb. */
+        tb = qatomic_rcu_read(&jc->array[hash].tb);
+
+        if (likely(tb &&
+                   tb_pc(tb) == pc &&
+                   tb->cs_base == cs_base &&
+                   tb->flags == flags &&
+                   tb->trace_vcpu_dstate == *cpu->trace_dstate &&
+                   tb_cflags(tb) == cflags)) {
+            return tb;
+        }
+        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+        if (tb == NULL) {
+            return NULL;
+        }
+        /* Use the pc value already stored in tb->pc. */
+        qatomic_set(&jc->array[hash].tb, tb);
+    }
+
     return tb;
 }
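Note on the ordering above: in the CF_PCREL path, pc lives in the cache slot itself, separate from the TB, so insertion and lookup form a classic release/acquire publication pair. The stand-alone sketch below models that pairing with C11 atomics standing in for QEMU's qatomic_store_release/qatomic_load_acquire; the jc_entry/jc_publish/jc_lookup names are illustrative, not QEMU API.

/*
 * Minimal model of the CF_PCREL jump-cache publication pattern,
 * with C11 atomics in place of QEMU's qatomic_* wrappers.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef struct TB TB;                    /* stand-in for TranslationBlock */

typedef struct {
    _Atomic(uintptr_t) pc;               /* relaxed; ordered via tb below */
    _Atomic(TB *) tb;
} jc_entry;

/* Writer: store pc first, then release-store tb, so a reader that
 * observes the new tb is guaranteed to observe the matching pc. */
static void jc_publish(jc_entry *e, uintptr_t pc, TB *tb)
{
    atomic_store_explicit(&e->pc, pc, memory_order_relaxed);
    atomic_store_explicit(&e->tb, tb, memory_order_release);
}

/* Reader: acquire-load tb, pairing with the release store above; only
 * then is the pc comparison meaningful. A mismatch is not an error,
 * it merely sends the caller to the slow tb_htable_lookup() path. */
static TB *jc_lookup(jc_entry *e, uintptr_t pc)
{
    TB *tb = atomic_load_explicit(&e->tb, memory_order_acquire);

    if (tb && atomic_load_explicit(&e->pc, memory_order_relaxed) == pc) {
        return tb;
    }
    return NULL;
}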
@@ -957,7 +982,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
          * for the fast lookup
          */
         h = tb_jmp_cache_hash_func(pc);
-        tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
+        /* Use the pc value already stored in tb->pc. */
+        qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
     }
 
 #ifndef CONFIG_USER_ONLY
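The non-CF_PCREL half gets away with weaker ordering because pc is read out of the TB itself: once a reader holds the tb pointer, the pc load is address-dependent on it, which is what qatomic_rcu_read relies on. A hedged sketch of that half follows, again with illustrative names; memory_order_consume is shown where QEMU uses qatomic_rcu_read (implementations promote consume to acquire).

/*
 * Stand-alone model of the !CF_PCREL path. QEMU uses
 * qatomic_set()/qatomic_rcu_read() rather than these exact calls.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
    uintptr_t pc;                        /* fixed once the TB is created */
} TBv;

typedef struct {
    _Atomic(TBv *) tb;
} jc_slot;

static void jc_publish_nonpcrel(jc_slot *e, TBv *tb)
{
    /*
     * QEMU can use a plain qatomic_set() here because the TB
     * (including tb->pc) was already published with ordering via the
     * TB hash table; release is used so this model is self-sufficient.
     */
    atomic_store_explicit(&e->tb, tb, memory_order_release);
}

static TBv *jc_lookup_nonpcrel(jc_slot *e, uintptr_t pc)
{
    /* The later tb->pc load is data-dependent on this pointer load. */
    TBv *tb = atomic_load_explicit(&e->tb, memory_order_consume);

    if (tb && tb->pc == pc) {
        return tb;
    }
    return NULL;
}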
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
@@ -25,40 +25,4 @@ struct CPUJumpCache {
     } array[TB_JMP_CACHE_SIZE];
 };
 
-static inline TranslationBlock *
-tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t cflags, uint32_t hash)
-{
-    if (cflags & CF_PCREL) {
-        /* Use acquire to ensure current load of pc from jc. */
-        return qatomic_load_acquire(&jc->array[hash].tb);
-    } else {
-        /* Use rcu_read to ensure current load of pc from *tb. */
-        return qatomic_rcu_read(&jc->array[hash].tb);
-    }
-}
-
-static inline target_ulong
-tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
-{
-    if (tb_cflags(tb) & CF_PCREL) {
-        return jc->array[hash].pc;
-    } else {
-        return tb_pc(tb);
-    }
-}
-
-static inline void
-tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
-                 TranslationBlock *tb, target_ulong pc)
-{
-    if (tb_cflags(tb) & CF_PCREL) {
-        jc->array[hash].pc = pc;
-        /* Use store_release on tb to ensure pc is written first. */
-        qatomic_store_release(&jc->array[hash].tb, tb);
-    } else{
-        /* Use the pc value already stored in tb->pc. */
-        qatomic_set(&jc->array[hash].tb, tb);
-    }
-}
-
 #endif /* ACCEL_TCG_TB_JMP_CACHE_H */
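For context on the struct being trimmed above: CPUJumpCache is a direct-mapped cache with one (pc, tb) slot per hash bucket, where a collision simply overwrites and a miss falls through to tb_htable_lookup. A minimal single-threaded sketch of that shape follows; the hash function and size are made up (QEMU's tb_jmp_cache_hash_func differs), and the atomics shown earlier are deliberately omitted to isolate the indexing.

#include <stdint.h>

#define JC_BITS 12
#define JC_SIZE (1u << JC_BITS)    /* power of two, like TB_JMP_CACHE_SIZE */

typedef struct {
    uintptr_t pc;
    void *tb;
} jc_demo_slot;

static jc_demo_slot demo_cache[JC_SIZE];

static uint32_t demo_hash(uintptr_t pc)
{
    /* Toy mix of the low pc bits; QEMU's hash folds in more entropy. */
    return (uint32_t)(pc >> 2) & (JC_SIZE - 1);
}

static void *demo_get(uintptr_t pc)
{
    jc_demo_slot *s = &demo_cache[demo_hash(pc)];
    /* A hash collision shows up as a pc mismatch: report a miss and
     * let the caller take the slow hash-table lookup. */
    return s->pc == pc ? s->tb : NULL;
}

static void demo_set(uintptr_t pc, void *tb)
{
    jc_demo_slot *s = &demo_cache[demo_hash(pc)];
    s->pc = pc;                    /* a collision simply evicts */
    s->tb = tb;
}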