qemu-timer: move icount to cpus.c
None of this is needed by tools, and most of it can even be made static inside cpus.c.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 946fb27c1d, parent dc2dfcf000

cpus.c (295 changed lines)
@@ -64,6 +64,281 @@
 
 static CPUState *next_cpu;
 
+/***********************************************************/
+/* guest cycle counter */
+
+/* Conversion factor from emulated instructions to virtual clock ticks.  */
+static int icount_time_shift;
+/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
+#define MAX_ICOUNT_SHIFT 10
+/* Compensate for varying guest execution speed.  */
+static int64_t qemu_icount_bias;
+static QEMUTimer *icount_rt_timer;
+static QEMUTimer *icount_vm_timer;
+static QEMUTimer *icount_warp_timer;
+static int64_t vm_clock_warp_start;
+static int64_t qemu_icount;
+
+typedef struct TimersState {
+    int64_t cpu_ticks_prev;
+    int64_t cpu_ticks_offset;
+    int64_t cpu_clock_offset;
+    int32_t cpu_ticks_enabled;
+    int64_t dummy;
+} TimersState;
+
+TimersState timers_state;
+
+/* Return the virtual CPU time, based on the instruction counter.  */
+int64_t cpu_get_icount(void)
+{
+    int64_t icount;
+    CPUState *env = cpu_single_env;
+
+    icount = qemu_icount;
+    if (env) {
+        if (!can_do_io(env)) {
+            fprintf(stderr, "Bad clock read\n");
+        }
+        icount -= (env->icount_decr.u16.low + env->icount_extra);
+    }
+    return qemu_icount_bias + (icount << icount_time_shift);
+}
+
+/* return the host CPU cycle counter and handle stop/restart */
+int64_t cpu_get_ticks(void)
+{
+    if (use_icount) {
+        return cpu_get_icount();
+    }
+    if (!timers_state.cpu_ticks_enabled) {
+        return timers_state.cpu_ticks_offset;
+    } else {
+        int64_t ticks;
+        ticks = cpu_get_real_ticks();
+        if (timers_state.cpu_ticks_prev > ticks) {
+            /* Note: non increasing ticks may happen if the host uses
+               software suspend */
+            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        }
+        timers_state.cpu_ticks_prev = ticks;
+        return ticks + timers_state.cpu_ticks_offset;
+    }
+}
+
+/* return the host CPU monotonic timer and handle stop/restart */
+int64_t cpu_get_clock(void)
+{
+    int64_t ti;
+    if (!timers_state.cpu_ticks_enabled) {
+        return timers_state.cpu_clock_offset;
+    } else {
+        ti = get_clock();
+        return ti + timers_state.cpu_clock_offset;
+    }
+}
+
+/* enable cpu_get_ticks() */
+void cpu_enable_ticks(void)
+{
+    if (!timers_state.cpu_ticks_enabled) {
+        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
+        timers_state.cpu_clock_offset -= get_clock();
+        timers_state.cpu_ticks_enabled = 1;
+    }
+}
+
+/* disable cpu_get_ticks() : the clock is stopped.  You must not call
+   cpu_get_ticks() after that.  */
+void cpu_disable_ticks(void)
+{
+    if (timers_state.cpu_ticks_enabled) {
+        timers_state.cpu_ticks_offset = cpu_get_ticks();
+        timers_state.cpu_clock_offset = cpu_get_clock();
+        timers_state.cpu_ticks_enabled = 0;
+    }
+}
+
+/* Correlation between real and virtual time is always going to be
+   fairly approximate, so ignore small variation.
+   When the guest is idle real and virtual time will be aligned in
+   the IO wait loop.  */
+#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
+
+static void icount_adjust(void)
+{
+    int64_t cur_time;
+    int64_t cur_icount;
+    int64_t delta;
+    static int64_t last_delta;
+    /* If the VM is not running, then do nothing.  */
+    if (!runstate_is_running()) {
+        return;
+    }
+    cur_time = cpu_get_clock();
+    cur_icount = qemu_get_clock_ns(vm_clock);
+    delta = cur_icount - cur_time;
+    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
+    if (delta > 0
+        && last_delta + ICOUNT_WOBBLE < delta * 2
+        && icount_time_shift > 0) {
+        /* The guest is getting too far ahead.  Slow time down.  */
+        icount_time_shift--;
+    }
+    if (delta < 0
+        && last_delta - ICOUNT_WOBBLE > delta * 2
+        && icount_time_shift < MAX_ICOUNT_SHIFT) {
+        /* The guest is getting too far behind.  Speed time up.  */
+        icount_time_shift++;
+    }
+    last_delta = delta;
+    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
+}
+
+static void icount_adjust_rt(void *opaque)
+{
+    qemu_mod_timer(icount_rt_timer,
+                   qemu_get_clock_ms(rt_clock) + 1000);
+    icount_adjust();
+}
+
+static void icount_adjust_vm(void *opaque)
+{
+    qemu_mod_timer(icount_vm_timer,
+                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
+    icount_adjust();
+}
+
+static int64_t qemu_icount_round(int64_t count)
+{
+    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
+}
+
+static void icount_warp_rt(void *opaque)
+{
+    if (vm_clock_warp_start == -1) {
+        return;
+    }
+
+    if (runstate_is_running()) {
+        int64_t clock = qemu_get_clock_ns(rt_clock);
+        int64_t warp_delta = clock - vm_clock_warp_start;
+        if (use_icount == 1) {
+            qemu_icount_bias += warp_delta;
+        } else {
+            /*
+             * In adaptive mode, do not let the vm_clock run too
+             * far ahead of real time.
+             */
+            int64_t cur_time = cpu_get_clock();
+            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
+            int64_t delta = cur_time - cur_icount;
+            qemu_icount_bias += MIN(warp_delta, delta);
+        }
+        if (qemu_clock_expired(vm_clock)) {
+            qemu_notify_event();
+        }
+    }
+    vm_clock_warp_start = -1;
+}
+
+void qemu_clock_warp(QEMUClock *clock)
+{
+    int64_t deadline;
+
+    /*
+     * There are too many global variables to make the "warp" behavior
+     * applicable to other clocks.  But a clock argument removes the
+     * need for if statements all over the place.
+     */
+    if (clock != vm_clock || !use_icount) {
+        return;
+    }
+
+    /*
+     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
+     * ensures that the deadline for the timer is computed correctly below.
+     * This also makes sure that the insn counter is synchronized before the
+     * CPU starts running, in case the CPU is woken by an event other than
+     * the earliest vm_clock timer.
+     */
+    icount_warp_rt(NULL);
+    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
+        qemu_del_timer(icount_warp_timer);
+        return;
+    }
+
+    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
+    deadline = qemu_clock_deadline(vm_clock);
+    if (deadline > 0) {
+        /*
+         * Ensure the vm_clock proceeds even when the virtual CPU goes to
+         * sleep.  Otherwise, the CPU might be waiting for a future timer
+         * interrupt to wake it up, but the interrupt never comes because
+         * the vCPU isn't running any insns and thus doesn't advance the
+         * vm_clock.
+         *
+         * An extreme solution for this problem would be to never let VCPUs
+         * sleep in icount mode if there is a pending vm_clock timer; rather
+         * time could just advance to the next vm_clock event.  Instead, we
+         * do stop VCPUs and only advance vm_clock after some "real" time
+         * (related to the time left until the next event) has passed.  The
+         * rt_clock timer will do this.  This avoids that the warps are too
+         * visible externally---for example, you will not be sending network
+         * packets continuously instead of every 100ms.
+         */
+        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
+    } else {
+        qemu_notify_event();
+    }
+}
+
+static const VMStateDescription vmstate_timers = {
+    .name = "timer",
+    .version_id = 2,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(cpu_ticks_offset, TimersState),
+        VMSTATE_INT64(dummy, TimersState),
+        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+void configure_icount(const char *option)
+{
+    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
+    if (!option) {
+        return;
+    }
+
+    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
+    if (strcmp(option, "auto") != 0) {
+        icount_time_shift = strtol(option, NULL, 0);
+        use_icount = 1;
+        return;
+    }
+
+    use_icount = 2;
+
+    /* 125MIPS seems a reasonable initial guess at the guest speed.
+       It will be corrected fairly quickly anyway.  */
+    icount_time_shift = 3;
+
+    /* Have both realtime and virtual time triggers for speed adjustment.
+       The realtime trigger catches emulated time passing too slowly,
+       the virtual time trigger catches emulated time passing too fast.
+       Realtime triggers occur even when idle, so use them less frequently
+       than VM triggers.  */
+    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
+    qemu_mod_timer(icount_rt_timer,
+                   qemu_get_clock_ms(rt_clock) + 1000);
+    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
+    qemu_mod_timer(icount_vm_timer,
+                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
+}
+
 /***********************************************************/
 void hw_error(const char *fmt, ...)
 {
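cpu_get_icount() above maps executed instructions to virtual nanoseconds with a simple affine transform: bias + (insns << icount_time_shift). A minimal standalone sketch of the same arithmetic (illustrative only — not QEMU code, all values made up):

/* With icount_time_shift = 3, each instruction accounts for 1 << 3 = 8 ns
 * of virtual time, i.e. the guest appears to run at 125 MIPS. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int shift = 3;             /* corresponds to icount_time_shift */
    int64_t bias = 0;          /* corresponds to qemu_icount_bias */
    int64_t insns = 250000000; /* instructions executed so far */

    int64_t virtual_ns = bias + (insns << shift);
    printf("%lld insns -> %lld ns of virtual time (%.0f MIPS)\n",
           (long long)insns, (long long)virtual_ns,
           1e9 / (1 << shift) / 1e6);
    return 0;
}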
@@ -686,7 +961,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     while (1) {
         cpu_exec_all();
-        if (use_icount && qemu_next_icount_deadline() <= 0) {
+        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
         }
         qemu_tcg_wait_io_event();
@@ -914,7 +1189,7 @@ static int tcg_cpu_exec(CPUState *env)
     qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
     env->icount_decr.u16.low = 0;
     env->icount_extra = 0;
-    count = qemu_icount_round(qemu_next_icount_deadline());
+    count = qemu_icount_round(qemu_clock_deadline(vm_clock));
     qemu_icount += count;
     decr = (count > 0xffff) ? 0xffff : count;
     count -= decr;
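Going the other way, tcg_cpu_exec() turns the time left until the next vm_clock deadline into an instruction budget: qemu_icount_round() is a round-up division by 2^icount_time_shift, and the budget is handed out in 16-bit chunks because icount_decr.u16.low is only 16 bits wide. A toy version of that computation (illustrative, not QEMU code):

#include <stdio.h>
#include <stdint.h>

static int icount_time_shift = 3;

/* Round ns up to whole instructions: ceil(count / 2^shift). */
static int64_t icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

int main(void)
{
    int64_t deadline_ns = 1000001;             /* ns to next vm_clock event */
    int64_t count = icount_round(deadline_ns); /* 125001 insns, rounded up */
    /* At most 0xffff instructions fit in icount_decr.u16.low; the
       remainder is tracked in icount_extra. */
    int64_t decr = (count > 0xffff) ? 0xffff : count;
    printf("budget=%lld decr=%lld extra=%lld\n",
           (long long)count, (long long)decr, (long long)(count - decr));
    return 0;
}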
@@ -1006,22 +1281,6 @@ void set_cpu_log_filename(const char *optarg)
     cpu_set_log_filename(optarg);
 }
 
-/* Return the virtual CPU time, based on the instruction counter.  */
-int64_t cpu_get_icount(void)
-{
-    int64_t icount;
-    CPUState *env = cpu_single_env;
-
-    icount = qemu_icount;
-    if (env) {
-        if (!can_do_io(env)) {
-            fprintf(stderr, "Bad clock read\n");
-        }
-        icount -= (env->icount_decr.u16.low + env->icount_extra);
-    }
-    return qemu_icount_bias + (icount << icount_time_shift);
-}
-
 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
 {
     /* XXX: implement xxx_cpu_list for targets that still miss it */
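The TimersState functions moved into cpus.c implement a stoppable clock on top of a free-running host clock using nothing but an offset: on resume the current host time is subtracted into the offset, and on stop the accumulated total is frozen there. A minimal sketch of the same pattern (illustrative only, not QEMU code; assumes POSIX clock_gettime):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int64_t offset;  /* like timers_state.cpu_clock_offset */
static int enabled;     /* like timers_state.cpu_ticks_enabled */

static int64_t host_ns(void)   /* free-running host clock */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static int64_t vm_ns(void)     /* stoppable guest-visible clock */
{
    return enabled ? host_ns() + offset : offset;
}

static void vm_resume(void) { if (!enabled) { offset -= host_ns(); enabled = 1; } }
static void vm_stop(void)   { if (enabled)  { offset = vm_ns();    enabled = 0; } }

int main(void)
{
    vm_resume();
    vm_stop();      /* vm_ns() is now frozen until the next vm_resume() */
    printf("frozen at %lld ns\n", (long long)vm_ns());
    return 0;
}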
exec-all.h (14 changed lines)
@@ -356,4 +356,18 @@ extern int singlestep;
 /* cpu-exec.c */
 extern volatile sig_atomic_t exit_request;
 
+/* Deterministic execution requires that IO only be performed on the last
+   instruction of a TB so that interrupts take effect immediately.  */
+static inline int can_do_io(CPUState *env)
+{
+    if (!use_icount) {
+        return 1;
+    }
+    /* If not executing code then assume we are ok.  */
+    if (!env->current_tb) {
+        return 1;
+    }
+    return env->can_do_io != 0;
+}
+
 #endif
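can_do_io() is what keeps icount deterministic: a device access in the middle of a translation block would land at a point the instruction counter cannot replay exactly, so I/O is only allowed on a TB's last instruction or outside translated code. A toy model of the predicate, with made-up stand-ins for CPUState (not QEMU code):

#include <stdio.h>

static int use_icount = 1;

struct toy_cpu {
    void *current_tb;  /* non-NULL while translated code is executing */
    int can_do_io;     /* set only on the last insn of a TB */
};

static int toy_can_do_io(struct toy_cpu *cpu)
{
    if (!use_icount) {
        return 1;                /* no determinism requirement */
    }
    if (!cpu->current_tb) {
        return 1;                /* not in translated code: always safe */
    }
    return cpu->can_do_io != 0;  /* mid-TB I/O would break determinism */
}

int main(void)
{
    struct toy_cpu cpu = { .current_tb = (void *)1, .can_do_io = 0 };
    printf("mid-TB I/O allowed? %d\n", toy_can_do_io(&cpu));    /* 0 */
    cpu.can_do_io = 1;
    printf("last-insn I/O allowed? %d\n", toy_can_do_io(&cpu)); /* 1 */
    return 0;
}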
exec.c (3 changed lines)
@@ -125,9 +125,6 @@ CPUState *cpu_single_env;
    1 = Precise instruction counting.
    2 = Adaptive rate instruction counting.  */
 int use_icount = 0;
-/* Current instruction counter.  While executing translated code this may
-   include some instructions that have not yet been executed.  */
-int64_t qemu_icount;
 
 typedef struct PageDesc {
     /* list of TBs intersecting this ram page */
qemu-common.h (4 added lines)

@@ -96,6 +96,10 @@ static inline char *realpath(const char *path, char *resolved_path)
 }
 #endif
 
+/* icount */
+void configure_icount(const char *option);
+extern int use_icount;
+
 /* FIXME: Remove NEED_CPU_H.  */
 #ifndef NEED_CPU_H
 
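configure_icount(), now declared here, receives the argument of the -icount command-line option: a number selects a fixed shift, while "auto" enables the adaptive mode seeded with shift 3. The shift fixes the emulated speed, which is what the 1 MIPS floor (MAX_ICOUNT_SHIFT) and the 125 MIPS initial guess refer to; a small table generator (illustrative, not QEMU code):

/* Each guest instruction advances vm_clock by 2^shift ns, so the emulated
 * speed is 1e9 / 2^shift insns/sec: shift 0 = 1000 MIPS, shift 3 = 125 MIPS
 * (the "auto" starting point), shift 10 ~ 1 MIPS (the MAX_ICOUNT_SHIFT
 * floor). */
#include <stdio.h>

int main(void)
{
    for (int shift = 0; shift <= 10; shift++) {
        printf("shift %2d: %4d ns/insn, %8.2f MIPS\n",
               shift, 1 << shift, 1e9 / (1 << shift) / 1e6);
    }
    return 0;
}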
qemu-timer.c (279 changed lines)
@@ -46,82 +46,6 @@
 
 #include "qemu-timer.h"
 
-/* Conversion factor from emulated instructions to virtual clock ticks.  */
-int icount_time_shift;
-/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
-#define MAX_ICOUNT_SHIFT 10
-/* Compensate for varying guest execution speed.  */
-int64_t qemu_icount_bias;
-static QEMUTimer *icount_rt_timer;
-static QEMUTimer *icount_vm_timer;
-
-/***********************************************************/
-/* guest cycle counter */
-
-typedef struct TimersState {
-    int64_t cpu_ticks_prev;
-    int64_t cpu_ticks_offset;
-    int64_t cpu_clock_offset;
-    int32_t cpu_ticks_enabled;
-    int64_t dummy;
-} TimersState;
-
-TimersState timers_state;
-
-/* return the host CPU cycle counter and handle stop/restart */
-int64_t cpu_get_ticks(void)
-{
-    if (use_icount) {
-        return cpu_get_icount();
-    }
-    if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_ticks_offset;
-    } else {
-        int64_t ticks;
-        ticks = cpu_get_real_ticks();
-        if (timers_state.cpu_ticks_prev > ticks) {
-            /* Note: non increasing ticks may happen if the host uses
-               software suspend */
-            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        }
-        timers_state.cpu_ticks_prev = ticks;
-        return ticks + timers_state.cpu_ticks_offset;
-    }
-}
-
-/* return the host CPU monotonic timer and handle stop/restart */
-static int64_t cpu_get_clock(void)
-{
-    int64_t ti;
-    if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_clock_offset;
-    } else {
-        ti = get_clock();
-        return ti + timers_state.cpu_clock_offset;
-    }
-}
-
-/* enable cpu_get_ticks() */
-void cpu_enable_ticks(void)
-{
-    if (!timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
-        timers_state.cpu_clock_offset -= get_clock();
-        timers_state.cpu_ticks_enabled = 1;
-    }
-}
-
-/* disable cpu_get_ticks() : the clock is stopped.  You must not call
-   cpu_get_ticks() after that.  */
-void cpu_disable_ticks(void)
-{
-    if (timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset = cpu_get_ticks();
-        timers_state.cpu_clock_offset = cpu_get_clock();
-        timers_state.cpu_ticks_enabled = 0;
-    }
-}
-
 /***********************************************************/
 /* timers */
 
@@ -133,7 +57,6 @@ struct QEMUClock {
     int type;
     int enabled;
 
-    QEMUTimer *warp_timer;
     QEMUTimer *active_timers;
 
     NotifierList reset_notifiers;
@@ -252,61 +175,6 @@ static void dynticks_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
 
 #endif /* _WIN32 */
 
-/* Correlation between real and virtual time is always going to be
-   fairly approximate, so ignore small variation.
-   When the guest is idle real and virtual time will be aligned in
-   the IO wait loop.  */
-#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
-
-static void icount_adjust(void)
-{
-    int64_t cur_time;
-    int64_t cur_icount;
-    int64_t delta;
-    static int64_t last_delta;
-    /* If the VM is not running, then do nothing.  */
-    if (!runstate_is_running())
-        return;
-
-    cur_time = cpu_get_clock();
-    cur_icount = qemu_get_clock_ns(vm_clock);
-    delta = cur_icount - cur_time;
-    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
-    if (delta > 0
-        && last_delta + ICOUNT_WOBBLE < delta * 2
-        && icount_time_shift > 0) {
-        /* The guest is getting too far ahead.  Slow time down.  */
-        icount_time_shift--;
-    }
-    if (delta < 0
-        && last_delta - ICOUNT_WOBBLE > delta * 2
-        && icount_time_shift < MAX_ICOUNT_SHIFT) {
-        /* The guest is getting too far behind.  Speed time up.  */
-        icount_time_shift++;
-    }
-    last_delta = delta;
-    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
-}
-
-static void icount_adjust_rt(void * opaque)
-{
-    qemu_mod_timer(icount_rt_timer,
-                   qemu_get_clock_ms(rt_clock) + 1000);
-    icount_adjust();
-}
-
-static void icount_adjust_vm(void * opaque)
-{
-    qemu_mod_timer(icount_vm_timer,
-                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
-    icount_adjust();
-}
-
-int64_t qemu_icount_round(int64_t count)
-{
-    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
-}
-
 static struct qemu_alarm_timer alarm_timers[] = {
 #ifndef _WIN32
 #ifdef __linux__
@@ -411,90 +279,6 @@ void qemu_clock_enable(QEMUClock *clock, int enabled)
     clock->enabled = enabled;
 }
 
-static int64_t vm_clock_warp_start;
-
-static void icount_warp_rt(void *opaque)
-{
-    if (vm_clock_warp_start == -1) {
-        return;
-    }
-
-    if (runstate_is_running()) {
-        int64_t clock = qemu_get_clock_ns(rt_clock);
-        int64_t warp_delta = clock - vm_clock_warp_start;
-        if (use_icount == 1) {
-            qemu_icount_bias += warp_delta;
-        } else {
-            /*
-             * In adaptive mode, do not let the vm_clock run too
-             * far ahead of real time.
-             */
-            int64_t cur_time = cpu_get_clock();
-            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
-            int64_t delta = cur_time - cur_icount;
-            qemu_icount_bias += MIN(warp_delta, delta);
-        }
-        if (qemu_timer_expired(vm_clock->active_timers,
-                               qemu_get_clock_ns(vm_clock))) {
-            qemu_notify_event();
-        }
-    }
-    vm_clock_warp_start = -1;
-}
-
-void qemu_clock_warp(QEMUClock *clock)
-{
-    int64_t deadline;
-
-    if (!clock->warp_timer) {
-        return;
-    }
-
-    /*
-     * There are too many global variables to make the "warp" behavior
-     * applicable to other clocks.  But a clock argument removes the
-     * need for if statements all over the place.
-     */
-    assert(clock == vm_clock);
-
-    /*
-     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
-     * ensures that the deadline for the timer is computed correctly below.
-     * This also makes sure that the insn counter is synchronized before the
-     * CPU starts running, in case the CPU is woken by an event other than
-     * the earliest vm_clock timer.
-     */
-    icount_warp_rt(NULL);
-    if (!all_cpu_threads_idle() || !clock->active_timers) {
-        qemu_del_timer(clock->warp_timer);
-        return;
-    }
-
-    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
-    deadline = qemu_next_icount_deadline();
-    if (deadline > 0) {
-        /*
-         * Ensure the vm_clock proceeds even when the virtual CPU goes to
-         * sleep.  Otherwise, the CPU might be waiting for a future timer
-         * interrupt to wake it up, but the interrupt never comes because
-         * the vCPU isn't running any insns and thus doesn't advance the
-         * vm_clock.
-         *
-         * An extreme solution for this problem would be to never let VCPUs
-         * sleep in icount mode if there is a pending vm_clock timer; rather
-         * time could just advance to the next vm_clock event.  Instead, we
-         * do stop VCPUs and only advance vm_clock after some "real" time
-         * (related to the time left until the next event) has passed.  The
-         * rt_clock timer will do this.  This avoids that the warps are too
-         * visible externally---for example, you will not be sending network
-         * packets continuously instead of every 100ms.
-         */
-        qemu_mod_timer(clock->warp_timer, vm_clock_warp_start + deadline);
-    } else {
-        qemu_notify_event();
-    }
-}
-
 int64_t qemu_clock_has_timers(QEMUClock *clock)
 {
     return !!clock->active_timers;
@@ -709,52 +493,6 @@ void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
     }
 }
 
-static const VMStateDescription vmstate_timers = {
-    .name = "timer",
-    .version_id = 2,
-    .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
-    .fields = (VMStateField []) {
-        VMSTATE_INT64(cpu_ticks_offset, TimersState),
-        VMSTATE_INT64(dummy, TimersState),
-        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
-        VMSTATE_END_OF_LIST()
-    }
-};
-
-void configure_icount(const char *option)
-{
-    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
-    if (!option)
-        return;
-
-    vm_clock->warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
-
-    if (strcmp(option, "auto") != 0) {
-        icount_time_shift = strtol(option, NULL, 0);
-        use_icount = 1;
-        return;
-    }
-
-    use_icount = 2;
-
-    /* 125MIPS seems a reasonable initial guess at the guest speed.
-       It will be corrected fairly quickly anyway.  */
-    icount_time_shift = 3;
-
-    /* Have both realtime and virtual time triggers for speed adjustment.
-       The realtime trigger catches emulated time passing too slowly,
-       the virtual time trigger catches emulated time passing too fast.
-       Realtime triggers occur even when idle, so use them less frequently
-       than VM triggers.  */
-    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
-    qemu_mod_timer(icount_rt_timer,
-                   qemu_get_clock_ms(rt_clock) + 1000);
-    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
-    qemu_mod_timer(icount_vm_timer,
-                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
-}
-
 void qemu_run_all_timers(void)
 {
     alarm_timer->pending = 0;
@@ -821,23 +559,6 @@ static void host_alarm_handler(int host_signum)
     }
 }
 
-int64_t qemu_next_icount_deadline(void)
-{
-    /* To avoid problems with overflow limit this to 2^32.  */
-    int64_t delta = INT32_MAX;
-
-    assert(use_icount);
-    if (vm_clock->active_timers) {
-        delta = vm_clock->active_timers->expire_time -
-                qemu_get_clock_ns(vm_clock);
-    }
-
-    if (delta < 0)
-        delta = 0;
-
-    return delta;
-}
-
 #if defined(__linux__)
 
 #include "compatfd.h"
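The icount_adjust() logic deleted from qemu-timer.c (and re-added to cpus.c above) is a small feedback loop: delta is virtual time minus real time, and icount_time_shift is lowered when the guest runs ahead of real time and raised when it falls behind, with ICOUNT_WOBBLE acting as a dead band. A toy run of the same rule (illustrative, not QEMU code; the drift values are invented):

#include <stdio.h>
#include <stdint.h>

#define MAX_SHIFT 10
#define WOBBLE 100000000LL  /* stand-in for get_ticks_per_sec() / 10 */

int main(void)
{
    int shift = 3;
    int64_t last_delta = 0;
    /* Pretend virtual time drifted ahead, then behind, by these amounts. */
    int64_t deltas[] = { 300000000, 250000000, -400000000, -350000000 };

    for (int i = 0; i < 4; i++) {
        int64_t delta = deltas[i];
        if (delta > 0 && last_delta + WOBBLE < delta * 2 && shift > 0) {
            shift--;            /* guest too far ahead: slow time down */
        }
        if (delta < 0 && last_delta - WOBBLE > delta * 2 && shift < MAX_SHIFT) {
            shift++;            /* guest too far behind: speed time up */
        }
        last_delta = delta;
        printf("delta=%+lld ms -> shift=%d\n",
               (long long)delta / 1000000, shift);
    }
    return 0;
}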
qemu-timer.h (24 changed lines)
@@ -58,9 +58,7 @@ int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
 
 void qemu_run_all_timers(void);
 int qemu_alarm_pending(void);
-int64_t qemu_next_icount_deadline(void);
 void configure_alarms(char const *opt);
-void configure_icount(const char *option);
 int qemu_calculate_timeout(void);
 void init_clocks(void);
 int init_timer_alarm(void);
@@ -153,12 +151,8 @@ void ptimer_run(ptimer_state *s, int oneshot);
 void ptimer_stop(ptimer_state *s);
 
 /* icount */
-int64_t qemu_icount_round(int64_t count);
-extern int64_t qemu_icount;
-extern int use_icount;
-extern int icount_time_shift;
-extern int64_t qemu_icount_bias;
 int64_t cpu_get_icount(void);
+int64_t cpu_get_clock(void);
 
 /*******************************************/
 /* host CPU ticks (if available) */
@@ -314,22 +308,6 @@ static inline int64_t cpu_get_real_ticks (void)
 }
 #endif
 
-#ifdef NEED_CPU_H
-/* Deterministic execution requires that IO only be performed on the last
-   instruction of a TB so that interrupts take effect immediately.  */
-static inline int can_do_io(CPUState *env)
-{
-    if (!use_icount)
-        return 1;
-
-    /* If not executing code then assume we are ok.  */
-    if (!env->current_tb)
-        return 1;
-
-    return env->can_do_io != 0;
-}
-#endif
-
 #ifdef CONFIG_PROFILER
 static inline int64_t profile_getclock(void)
 {