accel/tcg: Precompute curr_cflags into cpu->tcg_cflags
The primary motivation is to remove a dozen insns along the fast-path
in tb_lookup.  As a byproduct, this allows us to completely remove
parallel_cpus.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 872ebd884d
commit 6cc9d67c6f
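The saving described in the commit message is on the translation-block lookup fast path: every lookup hashes and compares the current cflags against each candidate TB, so curr_cflags() runs on every block transition. Below is a minimal, self-contained sketch of that pattern. The types, helper names and flag values (CPUStateSketch, TBSketch, the CF_* constants, tb_matches) are illustrative stand-ins, not the real QEMU definitions; it only shows how the old curr_cflags() rebuilt the value from parallel_cpus, icount and the cluster index on every call, while the new one is a single field load.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the real CF_* values and CPU/TB structures. */
#define CF_CLUSTER_SHIFT 24
#define CF_PARALLEL      (1u << 19)
#define CF_USE_ICOUNT    (1u << 17)

typedef struct {
    int cluster_index;
    uint32_t tcg_cflags;        /* new field: filled in once at vcpu start */
} CPUStateSketch;

typedef struct {
    uint64_t pc;
    uint32_t cflags;
} TBSketch;

static bool parallel_cpus;      /* the global this commit removes */
static bool icount_enabled;     /* stand-in for the icount_enabled() query */

/* Old scheme: rebuild the flags from scattered state on every lookup. */
static uint32_t curr_cflags_old(const CPUStateSketch *cpu)
{
    uint32_t cflags = (uint32_t)cpu->cluster_index << CF_CLUSTER_SHIFT;
    cflags |= parallel_cpus ? CF_PARALLEL : 0;
    cflags |= icount_enabled ? CF_USE_ICOUNT : 0;
    return cflags;
}

/* New scheme: curr_cflags() collapses to a single field load. */
static uint32_t curr_cflags_new(const CPUStateSketch *cpu)
{
    return cpu->tcg_cflags;
}

/* Simplified stand-in for the tb_lookup fast-path comparison. */
static bool tb_matches(const TBSketch *tb, uint64_t pc, uint32_t cflags)
{
    return tb->pc == pc && tb->cflags == cflags;
}

int main(void)
{
    CPUStateSketch cpu = { .cluster_index = 1 };

    parallel_cpus = true;
    /* What tcg_cpu_init_cflags() now does once, at vcpu thread start. */
    cpu.tcg_cflags = curr_cflags_old(&cpu);

    TBSketch tb = { .pc = 0x1000, .cflags = cpu.tcg_cflags };
    printf("old lookup hit: %d, new lookup hit: %d\n",
           tb_matches(&tb, 0x1000, curr_cflags_old(&cpu)),
           tb_matches(&tb, 0x1000, curr_cflags_new(&cpu)));
    return 0;
}

Both lookups hit the same TB; the difference is only how many instructions it takes to produce the cflags value being compared.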
@@ -267,8 +267,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
         mmap_unlock();
     }
 
-    /* Since we got here, we know that parallel_cpus must be true. */
-    parallel_cpus = false;
     cpu_exec_enter(cpu);
     /* execute the generated code */
     trace_exec_tb(tb, pc);
@@ -296,7 +294,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
      * the execution.
      */
     g_assert(cpu_in_exclusive_context(cpu));
-    parallel_cpus = true;
     cpu->running = false;
     end_exclusive();
 }
@@ -114,8 +114,7 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
     char thread_name[VCPU_THREAD_NAME_SIZE];
 
     g_assert(tcg_enabled());
-
-    parallel_cpus = (current_machine->smp.max_cpus > 1);
+    tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
 
     cpu->thread = g_malloc0(sizeof(QemuThread));
     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
@@ -269,7 +269,7 @@ void rr_start_vcpu_thread(CPUState *cpu)
     static QemuThread *single_tcg_cpu_thread;
 
     g_assert(tcg_enabled());
-    parallel_cpus = false;
+    tcg_cpu_init_cflags(cpu, false);
 
     if (!single_tcg_cpu_thread) {
         cpu->thread = g_malloc0(sizeof(QemuThread));
@@ -41,6 +41,14 @@
 
 /* common functionality among all TCG variants */
 
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
+{
+    uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
+    cflags |= parallel ? CF_PARALLEL : 0;
+    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
+    cpu->tcg_cflags = cflags;
+}
+
 void tcg_cpus_destroy(CPUState *cpu)
 {
     cpu_thread_signal_destroyed(cpu);
@@ -17,5 +17,6 @@
 void tcg_cpus_destroy(CPUState *cpu);
 int tcg_cpus_exec(CPUState *cpu);
 void tcg_handle_interrupt(CPUState *cpu, int mask);
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);
 
 #endif /* TCG_CPUS_H */
@@ -224,7 +224,6 @@ static void *l1_map[V_L1_MAX_SIZE];
 TCGContext tcg_init_ctx;
 __thread TCGContext *tcg_ctx;
 TBContext tb_ctx;
-bool parallel_cpus;
 
 static void page_table_config_init(void)
 {
@@ -1867,9 +1866,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         cflags = (cflags & ~CF_COUNT_MASK) | 1;
     }
 
-    cflags &= ~CF_CLUSTER_MASK;
-    cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
-
     max_insns = cflags & CF_COUNT_MASK;
     if (max_insns == 0) {
         max_insns = CF_COUNT_MASK;
@@ -513,8 +513,6 @@ struct TranslationBlock {
     uintptr_t jmp_dest[2];
 };
 
-extern bool parallel_cpus;
-
 /* Hide the qatomic_read to make code a little easier on the eyes */
 static inline uint32_t tb_cflags(const TranslationBlock *tb)
 {
@@ -524,10 +522,7 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
 /* current cflags for hashing/comparison */
 static inline uint32_t curr_cflags(CPUState *cpu)
 {
-    uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index);
-    cflags |= parallel_cpus ? CF_PARALLEL : 0;
-    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
-    return cflags;
+    return cpu->tcg_cflags;
 }
 
 /* TranslationBlock invalidate API */
@@ -282,6 +282,7 @@ struct qemu_work_item;
  * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
  * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
  * QOM parent.
+ * @tcg_cflags: Pre-computed cflags for this cpu.
  * @nr_cores: Number of cores within this CPU package.
  * @nr_threads: Number of threads within this CPU.
  * @running: #true if CPU is currently running (lockless).
@@ -412,6 +413,7 @@ struct CPUState {
     /* TODO Move common fields from CPUArchState here. */
     int cpu_index;
     int cluster_index;
+    uint32_t tcg_cflags;
     uint32_t halted;
     uint32_t can_do_io;
     int32_t exception_index;
@@ -205,6 +205,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
     /* Reset non arch specific state */
     cpu_reset(new_cpu);
 
+    new_cpu->tcg_cflags = cpu->tcg_cflags;
     memcpy(new_env, env, sizeof(CPUArchState));
 
     /* Clone all break/watchpoints.
@@ -82,9 +82,11 @@ static abi_ulong get_sigframe(struct target_sigaction *ka,
     return (sp - frame_size) & -8ul;
 }
 
-/* Notice when we're in the middle of a gUSA region and reset.
-   Note that this will only occur for !parallel_cpus, as we will
-   translate such sequences differently in a parallel context. */
+/*
+ * Notice when we're in the middle of a gUSA region and reset.
+ * Note that this will only occur when #CF_PARALLEL is unset, as we
+ * will translate such sequences differently in a parallel context.
+ */
 static void unwind_gusa(CPUSH4State *regs)
 {
     /* If the stack pointer is sufficiently negative, and we haven't
@@ -6481,6 +6481,16 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         /* Grab a mutex so that thread setup appears atomic. */
         pthread_mutex_lock(&clone_lock);
 
+        /*
+         * If this is our first additional thread, we need to ensure we
+         * generate code for parallel execution and flush old translations.
+         * Do this now so that the copy gets CF_PARALLEL too.
+         */
+        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
+            cpu->tcg_cflags |= CF_PARALLEL;
+            tb_flush(cpu);
+        }
+
         /* we create a new CPU instance. */
         new_env = cpu_copy(env);
         /* Init regs that differ from the parent. */
@@ -6521,14 +6531,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
         cpu->random_seed = qemu_guest_random_seed_thread_part1();
 
-        /* If this is our first additional thread, we need to ensure we
-         * generate code for parallel execution and flush old translations.
-         */
-        if (!parallel_cpus) {
-            parallel_cpus = true;
-            tb_flush(cpu);
-        }
-
         ret = pthread_create(&info.thread, &attr, clone_func, &info);
         /* TODO: Free new CPU state if thread creation failed. */
 
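The two do_fork() hunks above move the "first additional thread" handling to before cpu_copy(), and cpu_copy() now propagates tcg_cflags, so the child thread starts with CF_PARALLEL already set rather than relying on the removed global. A small self-contained sketch of that ordering follows; CPUSketch, tb_flush_sketch, cpu_copy_sketch and the CF_PARALLEL value are stand-ins for illustration, not the real QEMU code.

#include <stdint.h>
#include <stdio.h>

#define CF_PARALLEL (1u << 19)          /* illustrative stand-in value */

typedef struct { uint32_t tcg_cflags; } CPUSketch;

/* Stand-in for tb_flush(): the real one discards cached translations. */
static void tb_flush_sketch(CPUSketch *cpu)
{
    (void)cpu;
    puts("tb_flush: old non-parallel TBs dropped");
}

/* Stand-in for cpu_copy(): per this commit it copies tcg_cflags too. */
static CPUSketch cpu_copy_sketch(const CPUSketch *parent)
{
    CPUSketch child = { .tcg_cflags = parent->tcg_cflags };
    return child;
}

int main(void)
{
    CPUSketch parent = { .tcg_cflags = 0 };

    /* New do_fork() ordering: flip to parallel *before* copying ... */
    if (!(parent.tcg_cflags & CF_PARALLEL)) {
        parent.tcg_cflags |= CF_PARALLEL;
        tb_flush_sketch(&parent);
    }

    /* ... so the copy gets CF_PARALLEL too. */
    CPUSketch child = cpu_copy_sketch(&parent);
    printf("child has CF_PARALLEL: %d\n", !!(child.tcg_cflags & CF_PARALLEL));
    return 0;
}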