replace spinlock by QemuMutex.

spinlock is only used in two cases:

 * cpu-exec.c: to protect TranslationBlock
 * mem_helper.c: for lock helper in target-i386 (which seems broken).

It's a pthread_mutex_t in user-mode, so we can use QemuMutex directly,
with an #ifdef. The #ifdef will be removed when multithreaded TCG will
need the mutex as well.

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
Message-Id: <1439220437-23957-5-git-send-email-fred.konrad@greensocs.com>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[Merge Emilio G. Cota's patch to remove volatile. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
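Below is a minimal sketch of the QemuMutex calling pattern the patch adopts. It is an illustration, not part of the commit; it assumes QEMU's "qemu/thread.h" wrappers (qemu_mutex_init/lock/unlock), and QemuMutex is a pthread_mutex_t in user-mode as the message above notes. All other names here are hypothetical.

/* Sketch: the QemuMutex pattern replacing the old spinlock.  Unlike
 * spinlock_t, which had a static initializer (SPIN_LOCK_UNLOCKED), a
 * QemuMutex must be initialized explicitly before first use -- hence the
 * new helper_lock_init() and the qemu_mutex_init() call added to
 * code_gen_alloc() in the diff below. */
#include "qemu/thread.h"

static QemuMutex example_lock;          /* hypothetical lock */

static void example_init(void)
{
    qemu_mutex_init(&example_lock);     /* must run before any lock/unlock */
}

static void example_critical_section(void)
{
    qemu_mutex_lock(&example_lock);
    /* ... touch shared state, e.g. the TranslationBlock caches ... */
    qemu_mutex_unlock(&example_lock);
}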
This commit is contained in:
parent d5f8d61390
commit 677ef6230b
cpu-exec.c | 14 +++-----------
diff --git a/cpu-exec.c b/cpu-exec.c
@@ -357,9 +357,6 @@ int cpu_exec(CPUState *cpu)
     uintptr_t next_tb;
     SyncClocks sc;
 
-    /* This must be volatile so it is not trashed by longjmp() */
-    volatile bool have_tb_lock = false;
-
     if (cpu->halted) {
         if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
@@ -468,8 +465,7 @@ int cpu_exec(CPUState *cpu)
                 cpu->exception_index = EXCP_INTERRUPT;
                 cpu_loop_exit(cpu);
             }
-            spin_lock(&tcg_ctx.tb_ctx.tb_lock);
-            have_tb_lock = true;
+            tb_lock();
             tb = tb_find_fast(cpu);
             /* Note: we do it here to avoid a gcc bug on Mac OS X when
                doing it in tb_find_slow */
@@ -491,8 +487,7 @@ int cpu_exec(CPUState *cpu)
                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                 next_tb & TB_EXIT_MASK, tb);
                 }
-                have_tb_lock = false;
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
+                tb_unlock();
                 if (likely(!cpu->exit_request)) {
                     trace_exec_tb(tb, tb->pc);
                     tc_ptr = tb->tc_ptr;
@@ -558,10 +553,7 @@ int cpu_exec(CPUState *cpu)
             x86_cpu = X86_CPU(cpu);
             env = &x86_cpu->env;
 #endif
-            if (have_tb_lock) {
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
-                have_tb_lock = false;
-            }
+            tb_lock_reset();
         }
     } /* for(;;) */
 
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
@@ -225,7 +225,7 @@ struct TranslationBlock {
     struct TranslationBlock *jmp_first;
 };
 
-#include "exec/spinlock.h"
+#include "qemu/thread.h"
 
 typedef struct TBContext TBContext;
 
@@ -235,7 +235,7 @@ struct TBContext {
     TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
     int nb_tbs;
     /* any access to the tbs or the page table must use this lock */
-    spinlock_t tb_lock;
+    QemuMutex tb_lock;
 
     /* statistics */
     int tb_flush_count;
diff --git a/linux-user/main.c b/linux-user/main.c
@@ -105,7 +105,7 @@ static int pending_cpus;
 /* Make sure everything is in a consistent state for calling fork(). */
 void fork_start(void)
 {
-    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
     pthread_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
@@ -127,11 +127,11 @@ void fork_end(int child)
         pthread_mutex_init(&cpu_list_mutex, NULL);
         pthread_cond_init(&exclusive_cond, NULL);
         pthread_cond_init(&exclusive_resume, NULL);
-        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
+        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
         pthread_mutex_unlock(&exclusive_lock);
-        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
 
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
@@ -1318,6 +1318,9 @@ static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
 void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
 void cpu_set_fpuc(CPUX86State *env, uint16_t val);
 
+/* mem_helper.c */
+void helper_lock_init(void);
+
 /* svm_helper.c */
 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                    uint64_t param);
diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
@@ -23,18 +23,37 @@
 
 /* broken thread support */
 
-static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+#if defined(CONFIG_USER_ONLY)
+QemuMutex global_cpu_lock;
 
 void helper_lock(void)
 {
-    spin_lock(&global_cpu_lock);
+    qemu_mutex_lock(&global_cpu_lock);
 }
 
 void helper_unlock(void)
 {
-    spin_unlock(&global_cpu_lock);
+    qemu_mutex_unlock(&global_cpu_lock);
 }
 
+void helper_lock_init(void)
+{
+    qemu_mutex_init(&global_cpu_lock);
+}
+#else
+void helper_lock(void)
+{
+}
+
+void helper_unlock(void)
+{
+}
+
+void helper_lock_init(void)
+{
+}
+#endif
+
 void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
 {
     uint64_t d;
diff --git a/target-i386/translate.c b/target-i386/translate.c
@@ -7899,6 +7899,8 @@ void optimize_flags_init(void)
                                          offsetof(CPUX86State, regs[i]),
                                          reg_names[i]);
     }
+
+    helper_lock_init();
 }
 
 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
diff --git a/tcg/tcg.h b/tcg/tcg.h
@@ -595,6 +595,10 @@ void *tcg_malloc_internal(TCGContext *s, int size);
 void tcg_pool_reset(TCGContext *s);
 void tcg_pool_delete(TCGContext *s);
 
+void tb_lock(void);
+void tb_unlock(void);
+void tb_lock_reset(void);
+
 static inline void *tcg_malloc(int size)
 {
     TCGContext *s = &tcg_ctx;
diff --git a/translate-all.c b/translate-all.c
@@ -128,6 +128,39 @@ static void *l1_map[V_L1_SIZE];
 /* code generation context */
 TCGContext tcg_ctx;
 
+/* translation block context */
+#ifdef CONFIG_USER_ONLY
+__thread int have_tb_lock;
+#endif
+
+void tb_lock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(!have_tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    have_tb_lock++;
+#endif
+}
+
+void tb_unlock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(have_tb_lock);
+    have_tb_lock--;
+    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+#endif
+}
+
+void tb_lock_reset(void)
+{
+#ifdef CONFIG_USER_ONLY
+    if (have_tb_lock) {
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        have_tb_lock = 0;
+    }
+#endif
+}
+
 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                          tb_page_addr_t phys_page2);
 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
@@ -675,6 +708,7 @@ static inline void code_gen_alloc(size_t tb_size)
             CODE_GEN_AVG_BLOCK_SIZE;
     tcg_ctx.tb_ctx.tbs =
             g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
+    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
 }
 
 /* Must be called before using the QEMU cpus. 'tb_size' is the size
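As a closing usage note, here is a hypothetical standalone model of the tb_lock() design added in translate-all.c above: a non-recursive mutex paired with a thread-local "held" flag, plus a reset helper for paths that longjmp() out of the critical section, as cpu_loop_exit() does in cpu-exec.c. It is not part of the patch; since QemuMutex is a pthread_mutex_t in user-mode, plain pthreads stand in for it, and all names are invented.

#include <assert.h>
#include <pthread.h>
#include <setjmp.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int have_lock;   /* models have_tb_lock */
static jmp_buf exit_buf;         /* models cpu_exec()'s sigsetjmp buffer */

static void lock_take(void)      /* models tb_lock(): not recursive */
{
    assert(!have_lock);
    pthread_mutex_lock(&lock);
    have_lock++;
}

static void lock_drop(void)      /* models tb_unlock() */
{
    assert(have_lock);
    have_lock--;
    pthread_mutex_unlock(&lock);
}

static void lock_reset(void)     /* models tb_lock_reset(): longjmp-safe */
{
    if (have_lock) {
        pthread_mutex_unlock(&lock);
        have_lock = 0;
    }
}

int main(void)
{
    lock_take();                 /* normal path: paired take/drop */
    lock_drop();

    if (setjmp(exit_buf)) {      /* the "exception" path */
        lock_reset();            /* the flag says whether the lock was held */
        printf("recovered, lock released\n");
        return 0;
    }
    lock_take();
    longjmp(exit_buf, 1);        /* jump out while holding the lock */
}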