tcg: add kick timer for single-threaded vCPU emulation
Currently we rely on the side effect of the main loop grabbing the
iothread_mutex to give any long running basic block chains a kick to
ensure the next vCPU is scheduled. As this code is being refactored
and rationalised we now do it explicitly here.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
parent 8d4e9146b3
commit 6546706d28

cpus.c | 61 insertions(+)
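For orientation before the diff: a minimal standalone sketch of the mechanism this commit adds, using plain POSIX threads instead of QEMU's timer and vCPU infrastructure (run_vcpus, kick_timer_fn, exit_request and KICK_PERIOD_NS are illustrative names, not QEMU's). A single thread round-robins over the vCPUs while a periodic 100ms kick raises an exit flag, so a long basic-block chain in one vCPU cannot starve the rest:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define KICK_PERIOD_NS (1000000000L / 10)   /* 100ms, mirroring TCG_KICK_PERIOD */
#define NR_VCPUS 2

static atomic_bool exit_request;            /* stands in for the cpu->exit flag */

/* Periodic kick: wake up every KICK_PERIOD_NS and force the emulation
 * loop out of whatever it is currently running. */
static void *kick_timer_fn(void *opaque)
{
    (void)opaque;
    struct timespec ts = { 0, KICK_PERIOD_NS };
    for (;;) {
        nanosleep(&ts, NULL);
        atomic_store(&exit_request, true);
    }
    return NULL;
}

/* Single-threaded round-robin: run one vCPU until kicked, then move on. */
static void run_vcpus(void)
{
    int cpu = 0;
    for (;;) {
        while (!atomic_load(&exit_request)) {
            /* ... execute one basic block for vCPU `cpu` ... */
        }
        atomic_store(&exit_request, false);
        cpu = (cpu + 1) % NR_VCPUS;
        printf("kick: scheduling vCPU %d\n", cpu);
    }
}

int main(void)
{
    pthread_t timer;
    pthread_create(&timer, NULL, kick_timer_fn, NULL);
    run_vcpus();
    return 0;
}

In the patch itself the kick runs off QEMU_CLOCK_VIRTUAL via timer_new_ns()/timer_mod() and is delivered by qemu_cpu_kick_no_halt(), but the control flow is the same.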
@@ -767,6 +767,53 @@ void configure_icount(QemuOpts *opts, Error **errp)
                                         NANOSECONDS_PER_SECOND / 10);
 }
 
+/***********************************************************/
+/* TCG vCPU kick timer
+ *
+ * The kick timer is responsible for moving single threaded vCPU
+ * emulation on to the next vCPU. If more than one vCPU is running a
+ * timer event will force a cpu->exit so the next vCPU can get
+ * scheduled.
+ *
+ * The timer is removed if all vCPUs are idle and restarted again once
+ * idleness is complete.
+ */
+
+static QEMUTimer *tcg_kick_vcpu_timer;
+
+static void qemu_cpu_kick_no_halt(void);
+
+#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
+
+static inline int64_t qemu_tcg_next_kick(void)
+{
+    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
+}
+
+static void kick_tcg_thread(void *opaque)
+{
+    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
+    qemu_cpu_kick_no_halt();
+}
+
+static void start_tcg_kick_timer(void)
+{
+    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
+        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                           kick_tcg_thread, NULL);
+        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
+    }
+}
+
+static void stop_tcg_kick_timer(void)
+{
+    if (tcg_kick_vcpu_timer) {
+        timer_del(tcg_kick_vcpu_timer);
+        tcg_kick_vcpu_timer = NULL;
+    }
+}
+
+
 /***********************************************************/
 void hw_error(const char *fmt, ...)
 {
@@ -1021,9 +1068,12 @@ static void qemu_wait_io_event_common(CPUState *cpu)
 static void qemu_tcg_wait_io_event(CPUState *cpu)
 {
     while (all_cpu_threads_idle()) {
+        stop_tcg_kick_timer();
         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
     }
 
+    start_tcg_kick_timer();
+
     while (iothread_requesting_mutex) {
         qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
     }
@@ -1223,6 +1273,15 @@ static void deal_with_unplugged_cpus(void)
     }
 }
 
+/* Single-threaded TCG
+ *
+ * In the single-threaded case each vCPU is simulated in turn. If
+ * there is more than a single vCPU we create a simple timer to kick
+ * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
+ * This is done explicitly rather than relying on side-effects
+ * elsewhere.
+ */
+
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
@@ -1249,6 +1308,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
         }
     }
 
+    start_tcg_kick_timer();
+
     /* process any pending work */
     atomic_mb_set(&exit_request, 1);
 
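A design note on kick_tcg_thread() above: QEMU timers are one-shot, so the callback re-arms itself with timer_mod(), which is what lets stop_tcg_kick_timer() simply delete the timer while all vCPUs are idle. For contrast, a standalone sketch of the same 10Hz kick built on a POSIX interval timer, which re-arms automatically via it_interval (kick_cb and exit_request are illustrative names, not part of the patch):

#include <signal.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static atomic_bool exit_request;

/* Fires every 100ms; no manual re-arm needed because it_interval
 * keeps the timer periodic. */
static void kick_cb(union sigval sv)
{
    (void)sv;
    atomic_store(&exit_request, true);
}

int main(void)
{
    timer_t t;
    struct sigevent sev = {
        .sigev_notify = SIGEV_THREAD,
        .sigev_notify_function = kick_cb,
    };
    struct itimerspec its = {
        .it_value    = { 0, 100000000 },  /* first kick after 100ms */
        .it_interval = { 0, 100000000 },  /* then every 100ms (10Hz) */
    };

    timer_create(CLOCK_MONOTONIC, &sev, &t);
    timer_settime(t, 0, &its, NULL);

    while (!atomic_load(&exit_request)) {
        /* ... emulation loop ... */
    }
    return 0;
}

The manual re-arm in the patch costs one extra timer_mod() per tick but buys a trivially correct stop/start lifecycle around the idle wait.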