tcg: protect translation related stuff with tb_lock.
This protects all translation-related work with tb_lock() to ensure thread safety, which effectively serialises all code generation. In addition to code generation, we also take the lock for TB invalidation. This has the knock-on effect that tb_lock() is held while a non-self thread modifies the SoftMMU TLB, which will be used in later patches.

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
Message-Id: <1439220437-23957-8-git-send-email-fred.konrad@greensocs.com>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[AJB: moved into tree, clean-up history]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Message-Id: <20161027151030.20863-10-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e505a063ba
commit a5e998262f
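The hunks below all apply the same pattern: take tb_lock() around anything that creates or invalidates translated blocks, and release it while the generated code runs. As an illustration only, here is a minimal standalone sketch of that pattern using a plain pthread mutex; the identifiers in it (gen_code, invalidate, vcpu_thread, generated_tbs) are hypothetical stand-ins for QEMU's tb_gen_code()/tb_phys_invalidate() paths, not QEMU source. It builds with gcc -pthread.

/* Sketch of the tb_lock pattern: one global lock serialises code
 * generation and invalidation; execution happens outside the lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tb_lock = PTHREAD_MUTEX_INITIALIZER;
static int generated_tbs;            /* stands in for the shared TB tables */

static void gen_code(int pc)
{
    pthread_mutex_lock(&tb_lock);    /* tb_lock(): serialise generation */
    generated_tbs++;                 /* mutate shared translation state */
    printf("generated TB for pc=%d (total %d)\n", pc, generated_tbs);
    pthread_mutex_unlock(&tb_lock);  /* tb_unlock() before running the code */
}

static void invalidate(int pc)
{
    pthread_mutex_lock(&tb_lock);    /* invalidation takes the same lock */
    generated_tbs--;
    printf("invalidated TB for pc=%d (total %d)\n", pc, generated_tbs);
    pthread_mutex_unlock(&tb_lock);
}

static void *vcpu_thread(void *arg)
{
    int pc = *(int *)arg;
    gen_code(pc);        /* translate under the lock */
    /* ... execute the generated code without holding the lock ... */
    invalidate(pc);      /* e.g. throw away a one-shot block */
    return NULL;
}

int main(void)
{
    pthread_t threads[2];
    int pcs[2] = { 0x100, 0x200 };

    for (int i = 0; i < 2; i++) {
        pthread_create(&threads[i], NULL, vcpu_thread, &pcs[i]);
    }
    for (int i = 0; i < 2; i++) {
        pthread_join(threads[i], NULL);
    }
    return 0;
}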
cpu-exec.c
@@ -211,15 +211,21 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
     if (max_cycles > CF_COUNT_MASK)
         max_cycles = CF_COUNT_MASK;
 
+    tb_lock();
     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                      max_cycles | CF_NOCACHE
                          | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
     tb->orig_tb = orig_tb;
+    tb_unlock();
+
     /* execute the generated code */
     trace_exec_tb_nocache(tb, tb->pc);
     cpu_tb_exec(cpu, tb);
+
+    tb_lock();
     tb_phys_invalidate(tb, -1);
     tb_free(tb);
+    tb_unlock();
 }
 #endif
 
exec.c
@@ -2064,6 +2064,12 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
                     continue;
                 }
                 cpu->watchpoint_hit = wp;
+
+                /* The tb_lock will be reset when cpu_loop_exit or
+                 * cpu_loop_exit_noexc longjmp back into the cpu_exec
+                 * main loop.
+                 */
+                tb_lock();
                 tb_check_watchpoint(cpu);
                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                     cpu->exception_index = EXCP_DEBUG;
hw/i386/kvmvapic.c
@@ -17,6 +17,7 @@
 #include "sysemu/kvm.h"
 #include "hw/i386/apic_internal.h"
 #include "hw/sysbus.h"
+#include "tcg/tcg.h"
 
 #define VAPIC_IO_PORT 0x7e
 
@@ -449,6 +450,9 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
     resume_all_vcpus();
 
     if (!kvm_enabled()) {
+        /* tb_lock will be reset when cpu_loop_exit_noexc longjmps
+         * back into the cpu_exec loop. */
+        tb_lock();
         tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
         cpu_loop_exit_noexc(cs);
     }
translate-all.c
@@ -363,7 +363,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
 {
     TranslationBlock *tb;
+    bool r = false;
 
+    tb_lock();
     tb = tb_find_pc(retaddr);
     if (tb) {
         cpu_restore_state_from_tb(cpu, tb, retaddr);
@@ -372,9 +374,11 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
             tb_phys_invalidate(tb, -1);
             tb_free(tb);
         }
-        return true;
+        r = true;
     }
-    return false;
+    tb_unlock();
+
+    return r;
 }
 
 void page_size_init(void)
@@ -1456,6 +1460,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
     /* we remove all the TBs in the range [start, end[ */
     /* XXX: see if in some cases it could be faster to invalidate all
        the code */
+    tb_lock();
     tb = p->first_tb;
     while (tb != NULL) {
         n = (uintptr_t)tb & 3;
@@ -1515,6 +1520,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
         cpu_loop_exit_noexc(cpu);
     }
 #endif
+    tb_unlock();
 }
 
 #ifdef CONFIG_SOFTMMU
@@ -1584,6 +1590,8 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
     if (!p) {
         return false;
     }
+
+    tb_lock();
     tb = p->first_tb;
 #ifdef TARGET_HAS_PRECISE_SMC
     if (tb && pc != 0) {
@@ -1621,9 +1629,13 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
            modifying the memory. It will ensure that it cannot modify
            itself */
         tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
+         * back into the cpu_exec loop. */
         return true;
     }
 #endif
+    tb_unlock();
+
     return false;
 }
 #endif
@@ -1718,6 +1730,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     target_ulong pc, cs_base;
     uint32_t flags;
 
+    tb_lock();
     tb = tb_find_pc(retaddr);
     if (!tb) {
         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
@@ -1769,11 +1782,16 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     /* FIXME: In theory this could raise an exception. In practice
        we have already translated the block once so it's probably ok. */
     tb_gen_code(cpu, pc, cs_base, flags, cflags);
 
     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
-       the first in the TB) then we end up generating a whole new TB and
-       repeating the fault, which is horribly inefficient.
-       Better would be to execute just this insn uncached, or generate a
-       second new TB. */
+     * the first in the TB) then we end up generating a whole new TB and
+     * repeating the fault, which is horribly inefficient.
+     * Better would be to execute just this insn uncached, or generate a
+     * second new TB.
+     *
+     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
+     * tb_lock gets reset.
+     */
     cpu_loop_exit_noexc(cpu);
 }
 
@@ -1837,6 +1855,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     TranslationBlock *tb;
     struct qht_stats hst;
 
+    tb_lock();
+
     target_code_size = 0;
     max_target_code_size = 0;
     cross_page = 0;
@@ -1898,6 +1918,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
             tcg_ctx.tb_ctx.tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
     tcg_dump_info(f, cpu_fprintf);
+
+    tb_unlock();
 }
 
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)