accel/tcg: fix race in cpu_exec_step_atomic (bug 1863025)
The bug describes a race whereby cpu_exec_step_atomic can acquire a TB
which is invalidated by a tb_flush before we execute it. This doesn't
affect the other cpu_exec modes as a tb_flush by its nature can only
occur on a quiescent system. The race was described as:

  B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
  B3. tcg_tb_alloc obtains a new TB

  C3. TB obtained with tb_lookup__cpu_state or tb_gen_code
      (same TB as B2)

  A3. start_exclusive critical section entered
  A4. do_tb_flush is called, TB memory freed/re-allocated
  A5. end_exclusive exits critical section

  B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
  B3. tcg_tb_alloc reallocates TB from B2

  C4. start_exclusive critical section entered
  C5. cpu_tb_exec executes the TB code that was freed in A4

The simplest fix is to widen the exclusive period to include the TB
lookup. As a result we can drop the complication of checking whether we
are in the exclusive region before we end it.

Cc: Yifan <me@yifanlu.com>
Buglink: https://bugs.launchpad.net/qemu/+bug/1863025
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20200214144952.15502-1-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 886cc68943
parent e0175b7163
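To make the interleaving concrete, here is a minimal, self-contained C model of the race described above. It compiles with any C compiler, but note that start_exclusive()/end_exclusive(), tb_lookup() and do_tb_flush() are stand-in stubs and a single malloc'd block plays the TB pool; none of this is QEMU's real implementation.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for QEMU's exclusive section (cpus-common.c). In QEMU,
     * do_tb_flush only ever runs inside such a section, so a thread that
     * holds the section knows no flush can intervene. */
    static void start_exclusive(void) { }
    static void end_exclusive(void)   { }

    static int *tb_pool;                   /* one cached TB, standing in
                                            * for the translation buffer */

    static int *tb_lookup(void) { return tb_pool; }

    static void do_tb_flush(void)          /* models step A4 */
    {
        free(tb_pool);
        tb_pool = malloc(sizeof *tb_pool); /* memory reused, as in B3 */
    }

    int main(void)
    {
        tb_pool = malloc(sizeof *tb_pool);

        /* Old ordering, replaying the interleaving from the report: */
        int *tb = tb_lookup();             /* C3: TB obtained, no section held */
        start_exclusive();                 /* A3: another vCPU's flush          */
        do_tb_flush();                     /* A4: TB memory freed/re-allocated  */
        end_exclusive();                   /* A5                                */
        start_exclusive();                 /* C4                                */
        /* C5 would now run the TB; we only print the stale pointer. */
        printf("C5 would execute freed TB at %p\n", (void *)tb);
        end_exclusive();

        /* New ordering: the lookup happens inside the exclusive section,
         * so the flush above can only run before the lookup or after the
         * execution, never in between: */
        start_exclusive();
        tb = tb_lookup();                  /* TB is pinned from here on */
        printf("executing live TB at %p\n", (void *)tb);
        end_exclusive();

        free(tb_pool);
        return 0;
    }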
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -240,6 +240,8 @@ void cpu_exec_step_atomic(CPUState *cpu)
     uint32_t cf_mask = cflags & CF_HASH_MASK;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
+        start_exclusive();
+
         tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
         if (tb == NULL) {
             mmap_lock();
@@ -247,8 +249,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
             mmap_unlock();
         }
 
-        start_exclusive();
-
         /* Since we got here, we know that parallel_cpus must be true.  */
         parallel_cpus = false;
         cc->cpu_exec_enter(cpu);
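The two hunks above together just move start_exclusive() from after code generation to before the TB lookup. Below is a stubbed, runnable sketch of the resulting ordering; the names match QEMU's, but every body here is a hypothetical placeholder.

    #include <stdio.h>

    /* Hypothetical stubs; only the ordering matters here. */
    static void start_exclusive(void) { puts("start_exclusive: other vCPUs parked"); }
    static void end_exclusive(void)   { puts("end_exclusive: vCPUs resume"); }
    static void mmap_lock(void)       { puts("mmap_lock"); }
    static void mmap_unlock(void)     { puts("mmap_unlock"); }

    static void *tb_lookup_or_generate(void)
    {
        /* Models tb_lookup__cpu_state() falling back to tb_gen_code()
         * under mmap_lock; all of it now runs inside the exclusive
         * section. */
        mmap_lock();
        puts("tb_gen_code: translating");
        mmap_unlock();
        return (void *)0x1;
    }

    int main(void)
    {
        start_exclusive();                  /* moved ahead of the lookup   */
        void *tb = tb_lookup_or_generate(); /* a tb_flush cannot intervene */
        printf("cpu_tb_exec(%p)\n", tb);
        end_exclusive();
        return 0;
    }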
@@ -271,14 +271,15 @@ void cpu_exec_step_atomic(CPUState *cpu)
         qemu_plugin_disable_mem_helpers(cpu);
     }
 
-    if (cpu_in_exclusive_context(cpu)) {
-        /* We might longjump out of either the codegen or the
-         * execution, so must make sure we only end the exclusive
-         * region if we started it.
-         */
-        parallel_cpus = true;
-        end_exclusive();
-    }
+    /*
+     * As we start the exclusive region before codegen we must still
+     * be in the region if we longjump out of either the codegen or
+     * the execution.
+     */
+    g_assert(cpu_in_exclusive_context(cpu));
+    parallel_cpus = true;
+    end_exclusive();
+
 }
 
 struct tb_desc {
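The final hunk leans on the invariant the reordering creates: start_exclusive() now runs before anything that can siglongjmp back to the sigsetjmp() in cpu_exec_step_atomic(), so every exit path is still inside the exclusive region, and the old conditional cleanup can become g_assert(). A self-contained demo of that invariant, with plain assert() standing in for g_assert():

    #include <assert.h>
    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf jmp_env;      /* stands in for cpu->jmp_env               */
    static int in_exclusive;        /* stands in for cpu_in_exclusive_context() */

    static void start_exclusive(void) { in_exclusive = 1; }
    static void end_exclusive(void)   { in_exclusive = 0; }

    static void codegen_or_exec(void)
    {
        siglongjmp(jmp_env, 1);     /* models cpu_loop_exit() bailing out early */
    }

    int main(void)
    {
        if (sigsetjmp(jmp_env, 0) == 0) {
            start_exclusive();      /* entered before anything can longjmp */
            codegen_or_exec();
        }
        /* Whether we fell out normally or via longjmp, the region was
         * entered first, so the guard can be an assertion. */
        assert(in_exclusive);
        end_exclusive();
        puts("exclusive region ended exactly once");
        return 0;
    }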