tcg: signal-free qemu_cpu_kick

Signals are slow and do not exist on Win32.  The previous patches
have done most of the legwork to introduce memory barriers (some
of them were even there already for the sake of Windows!) and
we can now set the flags directly in the iothread.

qemu_cpu_kick_thread is no longer used for TCG, since the TCG thread never
runs outside user mode while the CPU is running (i.e. not halted), so no signal
is needed to interrupt it.  Instead, the content of the signal handler (now in
qemu_cpu_kick_no_halt) is run directly.
qemu_cpu_kick_no_halt is also used in qemu_mutex_lock_iothread to avoid
the overhead of qemu_cond_broadcast.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini 2015-08-26 00:19:19 +02:00
parent 9102dedaa1
commit e0c382113f
4 changed files with 28 additions and 73 deletions
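
To illustrate the mechanism, here is a small self-contained toy model of the
signal-free kick.  It is not QEMU code and every identifier in it is invented
for the example: an "iothread" stops a polling "CPU" thread purely by writing a
flag with sequentially consistent atomics, which play the role of the
atomic_mb_set/atomic_mb_read pairing used by the patch.  The real change
additionally calls cpu_exit() so a CPU in the middle of a translation block
notices the request.

/* Toy model of the signal-free kick: the "CPU" thread polls an exit flag
 * that the "iothread" sets; no signal is involved.  All identifiers here
 * are invented for illustration -- this is not QEMU code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool toy_exit_request;   /* plays the role of exit_request */

static void *toy_cpu_thread(void *arg)
{
    unsigned long executed = 0;

    (void)arg;
    /* Stand-in for the TCG execution loop: keep "executing" until the
     * iothread asks us to stop.  A seq_cst load ~ atomic_mb_read(). */
    while (!atomic_load(&toy_exit_request)) {
        executed++;
    }
    printf("cpu thread: stopped after %lu iterations\n", executed);
    return NULL;
}

static void toy_cpu_kick(void)
{
    /* A seq_cst store ~ atomic_mb_set(): whatever the iothread wrote before
     * the kick is visible to the CPU thread once it sees the flag. */
    atomic_store(&toy_exit_request, true);
}

int main(void)
{
    pthread_t cpu;

    pthread_create(&cpu, NULL, toy_cpu_thread, NULL);
    usleep(10 * 1000);                 /* let the "CPU" run for a while */
    toy_cpu_kick();                    /* kick without pthread_kill() */
    pthread_join(cpu, NULL);
    return 0;
}

Build with something like cc -std=c11 -pthread; the point is only that a plain
flag write plus the right barriers replaces SIG_IPI on POSIX and the
SuspendThread/ResumeThread dance on Win32.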

cpu-exec.c

@@ -341,7 +341,7 @@ static void cpu_handle_debug_exception(CPUState *cpu)
 
 /* main execution loop */
 
-volatile sig_atomic_t exit_request;
+bool exit_request;
 CPUState *tcg_current_cpu;
 
 int cpu_exec(CPUState *cpu)

cpus.c

@@ -661,19 +661,6 @@ static void cpu_handle_guest_debug(CPUState *cpu)
     cpu->stopped = true;
 }
 
-static void cpu_signal(int sig)
-{
-    CPUState *cpu;
-    /* Ensure whatever caused the exit has reached the CPU threads before
-     * writing exit_request.
-     */
-    atomic_mb_set(&exit_request, 1);
-    cpu = atomic_mb_read(&tcg_current_cpu);
-    if (cpu) {
-        cpu_exit(cpu);
-    }
-}
-
 #ifdef CONFIG_LINUX
 static void sigbus_reraise(void)
 {
@@ -786,29 +773,11 @@ static void qemu_kvm_init_cpu_signals(CPUState *cpu)
     }
 }
 
-static void qemu_tcg_init_cpu_signals(void)
-{
-    sigset_t set;
-    struct sigaction sigact;
-
-    memset(&sigact, 0, sizeof(sigact));
-    sigact.sa_handler = cpu_signal;
-    sigaction(SIG_IPI, &sigact, NULL);
-
-    sigemptyset(&set);
-    sigaddset(&set, SIG_IPI);
-    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-}
-
 #else /* _WIN32 */
 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
 {
     abort();
 }
-
-static void qemu_tcg_init_cpu_signals(void)
-{
-}
 #endif /* _WIN32 */
 
 static QemuMutex qemu_global_mutex;
@@ -1046,7 +1015,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     rcu_register_thread();
 
     qemu_mutex_lock_iothread();
-    qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
 
     CPU_FOREACH(cpu) {
@@ -1090,60 +1058,47 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
 #ifndef _WIN32
     int err;
 
-    if (!tcg_enabled()) {
-        if (cpu->thread_kicked) {
-            return;
-        }
-        cpu->thread_kicked = true;
+    if (cpu->thread_kicked) {
+        return;
     }
+    cpu->thread_kicked = true;
     err = pthread_kill(cpu->thread->thread, SIG_IPI);
     if (err) {
         fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
         exit(1);
     }
 #else /* _WIN32 */
-    if (!qemu_cpu_is_self(cpu)) {
-        CONTEXT tcgContext;
-
-        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
-            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
-                    GetLastError());
-            exit(1);
-        }
-
-        /* On multi-core systems, we are not sure that the thread is actually
-         * suspended until we can get the context.
-         */
-        tcgContext.ContextFlags = CONTEXT_CONTROL;
-        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
-            continue;
-        }
-
-        cpu_signal(0);
-
-        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
-            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
-                    GetLastError());
-            exit(1);
-        }
-    }
+    abort();
 #endif
 }
 
+static void qemu_cpu_kick_no_halt(void)
+{
+    CPUState *cpu;
+    /* Ensure whatever caused the exit has reached the CPU threads before
+     * writing exit_request.
+     */
+    atomic_mb_set(&exit_request, 1);
+    cpu = atomic_mb_read(&tcg_current_cpu);
+    if (cpu) {
+        cpu_exit(cpu);
+    }
+}
+
 void qemu_cpu_kick(CPUState *cpu)
 {
     qemu_cond_broadcast(cpu->halt_cond);
-    qemu_cpu_kick_thread(cpu);
+    if (tcg_enabled()) {
+        qemu_cpu_kick_no_halt();
+    } else {
+        qemu_cpu_kick_thread(cpu);
+    }
 }
 
 void qemu_cpu_kick_self(void)
 {
+#ifndef _WIN32
     assert(current_cpu);
     qemu_cpu_kick_thread(current_cpu);
+#else
+    abort();
+#endif
 }
 
 bool qemu_cpu_is_self(CPUState *cpu)
@@ -1175,7 +1130,7 @@ void qemu_mutex_lock_iothread(void)
         atomic_dec(&iothread_requesting_mutex);
     } else {
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
-            qemu_cpu_kick_thread(first_cpu);
+            qemu_cpu_kick_no_halt();
            qemu_mutex_lock(&qemu_global_mutex);
         }
         atomic_dec(&iothread_requesting_mutex);

include/exec/exec-all.h

@@ -386,9 +386,9 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
 /* vl.c */
 extern int singlestep;
 
-/* cpu-exec.c */
+/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
 extern CPUState *tcg_current_cpu;
-extern volatile sig_atomic_t exit_request;
+extern bool exit_request;
 
 #if !defined(CONFIG_USER_ONLY)
 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);

include/qom/cpu.h

@@ -268,7 +268,7 @@ struct CPUState {
     bool created;
     bool stop;
     bool stopped;
-    volatile sig_atomic_t exit_request;
+    bool exit_request;
     uint32_t interrupt_request;
     int singlestep_enabled;
     int64_t icount_extra;
@@ -319,7 +319,7 @@ struct CPUState {
        offset from AREG0.  Leave this field at the end so as to make the
        (absolute value) offset as small as possible.  This reduces code
        size, especially for hosts without large memory offsets.  */
-    volatile sig_atomic_t tcg_exit_req;
+    uint32_t tcg_exit_req;
 };
 
 QTAILQ_HEAD(CPUTailQ, CPUState);