cpu-timers, icount: new modules
refactoring of cpus.c continues with cpu timer state extraction.

cpu-timers: responsible for the softmmu cpu timers state, including cpu clocks and ticks.

icount: counts the TCG instructions executed. As such it is specific to the TCG accelerator. Therefore, it is built only under CONFIG_TCG.

One complication is due to qtest, which uses an icount field to warp time as part of qtest (qtest_clock_warp).

In order to solve this problem, provide a separate counter for qtest.

This requires fixing assumptions scattered in the code that qtest_enabled() implies icount_enabled(), checking each specific case.

Signed-off-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
[remove redundant initialization with qemu_spice_init]
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
[fix lingering calls to icount_get]
Signed-off-by: Claudio Fontana <cfontana@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0ac0b47c44
commit 740b175973
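The check that call sites switch to is a macro that compiles to 0 when TCG is not built in; a condensed sketch of that gate, taken from the new include/sysemu/cpu-timers.h added in the diff below (surrounding comments shortened):

    /* icount enablement state: 0 = disabled, 1 = fixed "shift", 2 = adaptive */
    #ifdef CONFIG_TCG
    extern int use_icount;
    #define icount_enabled() (use_icount)
    #else
    #define icount_enabled() 0
    #endif

This is what lets callers test icount_enabled() directly instead of use_icount, without adding new CONFIG_TCG guards at each site.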
@@ -2321,6 +2321,8 @@ F: softmmu/vl.c
 F: softmmu/main.c
 F: softmmu/cpus.c
 F: softmmu/cpu-throttle.c
+F: softmmu/cpu-timers.c
+F: softmmu/icount.c
 F: qapi/run-state.json

 Read, Copy, Update (RCU)
@@ -19,14 +19,10 @@
 #include "sysemu/accel.h"
 #include "sysemu/qtest.h"
 #include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"

 static int qtest_init_accel(MachineState *ms)
 {
-    QemuOpts *opts = qemu_opts_create(qemu_find_opts("icount"), NULL, 0,
-                                      &error_abort);
-    qemu_opt_set(opts, "shift", "0", &error_abort);
-    configure_icount(opts, &error_abort);
-    qemu_opts_del(opts);
     return 0;
 }

@@ -19,6 +19,7 @@

 #include "qemu/osdep.h"
 #include "qemu-common.h"
+#include "qemu/qemu-print.h"
 #include "cpu.h"
 #include "trace.h"
 #include "disas/disas.h"
@@ -36,6 +37,8 @@
 #include "hw/i386/apic.h"
 #endif
 #include "sysemu/cpus.h"
+#include "exec/cpu-all.h"
+#include "sysemu/cpu-timers.h"
 #include "sysemu/replay.h"

 /* -icount align implementation. */
@@ -56,6 +59,9 @@ typedef struct SyncClocks {
 #define MAX_DELAY_PRINT_RATE 2000000000LL
 #define MAX_NB_PRINTS 100

+static int64_t max_delay;
+static int64_t max_advance;
+
 static void align_clocks(SyncClocks *sc, CPUState *cpu)
 {
     int64_t cpu_icount;
@@ -98,9 +104,9 @@ static void print_delay(const SyncClocks *sc)
             (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
             threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
-            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
+            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                    threshold_delay - 1,
                    threshold_delay);
             nb_prints++;
             last_realtime_clock = sc->realtime_clock;
         }
@@ -615,7 +621,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,

     /* Finally, check if we need to exit to the main loop. */
     if (unlikely(qatomic_read(&cpu->exit_request))
-        || (use_icount
+        || (icount_enabled()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
         qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
@@ -656,7 +662,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     }

     /* Instruction counter expired. */
-    assert(use_icount);
+    assert(icount_enabled());
 #ifndef CONFIG_USER_ONLY
     /* Ensure global icount has gone forward */
     cpu_update_icount(cpu);
@@ -759,3 +765,26 @@ int cpu_exec(CPUState *cpu)

     return ret;
 }
+
+#ifndef CONFIG_USER_ONLY
+
+void dump_drift_info(void)
+{
+    if (!icount_enabled()) {
+        return;
+    }
+
+    qemu_printf("Host - Guest clock %"PRIi64" ms\n",
+                (cpu_get_clock() - cpu_get_icount()) / SCALE_MS);
+    if (icount_align_option) {
+        qemu_printf("Max guest delay %"PRIi64" ms\n",
+                    -max_delay / SCALE_MS);
+        qemu_printf("Max guest advance %"PRIi64" ms\n",
+                    max_advance / SCALE_MS);
+    } else {
+        qemu_printf("Max guest delay NA\n");
+        qemu_printf("Max guest advance NA\n");
+    }
+}
+
+#endif /* !CONFIG_USER_ONLY */
@@ -29,6 +29,7 @@
 #include "qom/object.h"
 #include "cpu.h"
 #include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "qemu/main-loop.h"
 #include "tcg/tcg.h"
 #include "qapi/error.h"
@@ -66,7 +67,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
         qemu_cpu_kick(cpu);
     } else {
         qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
-        if (use_icount &&
+        if (icount_enabled() &&
             !cpu->can_do_io
             && (mask & ~old_mask) != 0) {
             cpu_abort(cpu, "Raised interrupt while not in I/O function");
@@ -105,7 +106,7 @@ static bool check_tcg_memory_orders_compatible(void)

 static bool default_mttcg_enabled(void)
 {
-    if (use_icount || TCG_OVERSIZED_GUEST) {
+    if (icount_enabled() || TCG_OVERSIZED_GUEST) {
         return false;
     } else {
 #ifdef TARGET_SUPPORTS_MTTCG
@@ -147,7 +148,7 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
     if (strcmp(value, "multi") == 0) {
         if (TCG_OVERSIZED_GUEST) {
             error_setg(errp, "No MTTCG when guest word size > hosts");
-        } else if (use_icount) {
+        } else if (icount_enabled()) {
             error_setg(errp, "No MTTCG when icount is enabled");
         } else {
 #ifndef TARGET_SUPPORTS_MTTCG
@@ -57,6 +57,7 @@
 #include "qemu/main-loop.h"
 #include "exec/log.h"
 #include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "sysemu/tcg.h"

 /* #define DEBUG_TB_INVALIDATE */
@@ -369,7 +370,7 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,

 found:
     if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
-        assert(use_icount);
+        assert(icount_enabled());
         /* Reset the cycle counter to the start of the block
            and shift if to the number of actually executed instructions */
         cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
@@ -13,7 +13,7 @@
 #include "trace/trace-root.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
-#include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "qemu/range.h"

 /* #define DEBUG_IOMMU */
@@ -151,7 +151,7 @@ static void dma_blk_cb(void *opaque, int ret)
      * from several sectors. This code splits all SGs into several
      * groups. SGs in every group do not overlap.
      */
-    if (mem && use_icount && dbs->dir == DMA_DIRECTION_FROM_DEVICE) {
+    if (mem && icount_enabled() && dbs->dir == DMA_DIRECTION_FROM_DEVICE) {
        int i;
        for (i = 0 ; i < dbs->iov.niov ; ++i) {
            if (ranges_overlap((intptr_t)dbs->iov.iov[i].iov_base,
exec.c (4 changes)
@@ -102,10 +102,6 @@ uintptr_t qemu_host_page_size;
 intptr_t qemu_host_page_mask;

 #if !defined(CONFIG_USER_ONLY)
-/* 0 = Do not count executed instructions.
-   1 = Precise instruction counting.
-   2 = Adaptive rate instruction counting. */
-int use_icount;

 typedef struct PhysPageEntry PhysPageEntry;

@@ -7,11 +7,11 @@
  */

 #include "qemu/osdep.h"
-#include "qemu/timer.h"
 #include "hw/ptimer.h"
 #include "migration/vmstate.h"
 #include "qemu/host-utils.h"
 #include "sysemu/replay.h"
+#include "sysemu/cpu-timers.h"
 #include "sysemu/qtest.h"
 #include "block/aio.h"
 #include "sysemu/cpus.h"
@@ -134,7 +134,8 @@ static void ptimer_reload(ptimer_state *s, int delta_adjust)
      * on the current generation of host machines.
      */

-    if (s->enabled == 1 && (delta * period < 10000) && !use_icount) {
+    if (s->enabled == 1 && (delta * period < 10000) &&
+        !icount_enabled() && !qtest_enabled()) {
         period = 10000 / delta;
         period_frac = 0;
     }
@@ -217,7 +218,8 @@ uint64_t ptimer_get_count(ptimer_state *s)
         uint32_t period_frac = s->period_frac;
         uint64_t period = s->period;

-        if (!oneshot && (s->delta * period < 10000) && !use_icount) {
+        if (!oneshot && (s->delta * period < 10000) &&
+            !icount_enabled() && !qtest_enabled()) {
             period = 10000 / s->delta;
             period_frac = 0;
         }
@@ -34,6 +34,7 @@
 #include "sysemu/numa.h"
 #include "sysemu/replay.h"
 #include "sysemu/sysemu.h"
+#include "sysemu/cpu-timers.h"
 #include "trace.h"

 #include "hw/i386/x86.h"
@@ -407,8 +407,12 @@ static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
     return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
 }

+#ifdef CONFIG_TCG
+void dump_drift_info(void);
 void dump_exec_info(void);
 void dump_opcount_info(void);
+#endif /* CONFIG_TCG */
+
 #endif /* !CONFIG_USER_ONLY */

 /* Returns: 0 on success, -1 on error */
@@ -25,7 +25,7 @@
 #ifdef CONFIG_TCG
 #include "exec/cpu_ldst.h"
 #endif
-#include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"

 /* allow to see translation results - the slowdown should be negligible, so we leave it */
 #define DEBUG_DISAS
@@ -497,7 +497,7 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
 static inline uint32_t curr_cflags(void)
 {
     return (parallel_cpus ? CF_PARALLEL : 0)
-           | (use_icount ? CF_USE_ICOUNT : 0);
+           | (icount_enabled() ? CF_USE_ICOUNT : 0);
 }

 /* TranslationBlock invalidate API */
@@ -166,8 +166,8 @@ bool qemu_clock_expired(QEMUClockType type);
  *
  * Determine whether a clock should be used for deadline
  * calculations. Some clocks, for instance vm_clock with
- * use_icount set, do not count in nanoseconds. Such clocks
- * are not used for deadline calculations, and are presumed
+ * icount_enabled() set, do not count in nanoseconds.
+ * Such clocks are not used for deadline calculations, and are presumed
  * to interrupt any poll using qemu_notify/aio_notify
  * etc.
  *
@@ -224,13 +224,6 @@ void qemu_clock_notify(QEMUClockType type);
  */
 void qemu_clock_enable(QEMUClockType type, bool enabled);

-/**
- * qemu_start_warp_timer:
- *
- * Starts a timer for virtual clock update
- */
-void qemu_start_warp_timer(void);
-
 /**
  * qemu_clock_run_timers:
  * @type: clock on which to operate
@@ -791,12 +784,6 @@ static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
  */
 void init_clocks(QEMUTimerListNotifyCB *notify_cb);

-int64_t cpu_get_ticks(void);
-/* Caller must hold BQL */
-void cpu_enable_ticks(void);
-/* Caller must hold BQL */
-void cpu_disable_ticks(void);
-
 static inline int64_t get_max_clock_jump(void)
 {
     /* This should be small enough to prevent excessive interrupts from being
@@ -850,13 +837,6 @@ static inline int64_t get_clock(void)
 }
 #endif

-/* icount */
-int64_t cpu_get_icount_raw(void);
-int64_t cpu_get_icount(void);
-int64_t cpu_get_clock(void);
-int64_t cpu_icount_to_ns(int64_t icount);
-void cpu_update_icount(CPUState *cpu);
-
 /*******************************************/
 /* host CPU ticks (if available) */

include/sysemu/cpu-timers.h (new file, 87 lines)
@@ -0,0 +1,87 @@
+/*
+ * CPU timers state API
+ *
+ * Copyright 2020 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef SYSEMU_CPU_TIMERS_H
+#define SYSEMU_CPU_TIMERS_H
+
+#include "qemu/timer.h"
+
+/* init the whole cpu timers API, including icount, ticks, and cpu_throttle */
+void cpu_timers_init(void);
+
+/* icount - Instruction Counter API */
+
+/*
+ * icount enablement state:
+ *
+ * 0 = Disabled - Do not count executed instructions.
+ * 1 = Enabled - Fixed conversion of insn to ns via "shift" option
+ * 2 = Enabled - Runtime adaptive algorithm to compute shift
+ */
+#ifdef CONFIG_TCG
+extern int use_icount;
+#define icount_enabled() (use_icount)
+#else
+#define icount_enabled() 0
+#endif
+
+/*
+ * Update the icount with the executed instructions. Called by
+ * cpus-tcg vCPU thread so the main-loop can see time has moved forward.
+ */
+void cpu_update_icount(CPUState *cpu);
+
+/* get raw icount value */
+int64_t cpu_get_icount_raw(void);
+
+/* return the virtual CPU time in ns, based on the instruction counter. */
+int64_t cpu_get_icount(void);
+/*
+ * convert an instruction counter value to ns, based on the icount shift.
+ * This shift is set as a fixed value with the icount "shift" option
+ * (precise mode), or it is constantly approximated and corrected at
+ * runtime in adaptive mode.
+ */
+int64_t cpu_icount_to_ns(int64_t icount);
+
+/* configure the icount options, including "shift" */
+void configure_icount(QemuOpts *opts, Error **errp);
+
+/* used by tcg vcpu thread to calc icount budget */
+int64_t qemu_icount_round(int64_t count);
+
+/* if the CPUs are idle, start accounting real time to virtual clock. */
+void qemu_start_warp_timer(void);
+void qemu_account_warp_timer(void);
+
+/*
+ * CPU Ticks and Clock
+ */
+
+/* Caller must hold BQL */
+void cpu_enable_ticks(void);
+/* Caller must hold BQL */
+void cpu_disable_ticks(void);
+
+/*
+ * return the time elapsed in VM between vm_start and vm_stop. Unless
+ * icount is active, cpu_get_ticks() uses units of the host CPU cycle
+ * counter.
+ */
+int64_t cpu_get_ticks(void);
+
+/*
+ * Returns the monotonic time elapsed in VM, i.e.,
+ * the time between vm_start and vm_stop
+ */
+int64_t cpu_get_clock(void);
+
+void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
+
+#endif /* SYSEMU_CPU_TIMERS_H */
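As a rough illustration of how the ticks/clock half of this header is meant to be consumed, a hypothetical caller might look like the snippet below. This is not part of the commit; the function name is invented, and the locking notes in the header comments (BQL for enable/disable) still apply.

    #include <inttypes.h>
    #include <stdio.h>
    #include "sysemu/cpu-timers.h"

    /* Hypothetical helper: report how much guest time has elapsed. */
    static void example_report_vm_time(void)
    {
        /* monotonic VM time in ns between vm_start and vm_stop */
        int64_t guest_ns = cpu_get_clock();
        /* host CPU cycle units, or icount-based time when icount is active */
        int64_t ticks = cpu_get_ticks();

        printf("guest clock: %" PRId64 " ns, ticks: %" PRId64 "\n",
               guest_ns, ticks);
    }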
@@ -4,33 +4,23 @@
 #include "qemu/timer.h"

 /* cpus.c */
+bool all_cpu_threads_idle(void);
 bool qemu_in_vcpu_thread(void);
 void qemu_init_cpu_loop(void);
 void resume_all_vcpus(void);
 void pause_all_vcpus(void);
 void cpu_stop_current(void);
-void cpu_ticks_init(void);

-void configure_icount(QemuOpts *opts, Error **errp);
-extern int use_icount;
 extern int icount_align_option;

-/* drift information for info jit command */
-extern int64_t max_delay;
-extern int64_t max_advance;
-void dump_drift_info(void);
-
 /* Unblock cpu */
 void qemu_cpu_kick_self(void);
-void qemu_timer_notify_cb(void *opaque, QEMUClockType type);

 void cpu_synchronize_all_states(void);
 void cpu_synchronize_all_post_reset(void);
 void cpu_synchronize_all_post_init(void);
 void cpu_synchronize_all_pre_loadvm(void);

-void qtest_clock_warp(int64_t dest);
-
 #ifndef CONFIG_USER_ONLY
 /* vl.c */
 /* *-user doesn't have configurable SMP topology */
@@ -30,4 +30,6 @@ void qtest_server_set_send_handler(void (*send)(void *, const char *),
                                    void *opaque);
 void qtest_server_inproc_recv(void *opaque, const char *buf);

+int64_t qtest_get_virtual_clock(void);
+
 #endif
@@ -11,10 +11,10 @@

 #include "qemu/osdep.h"
 #include "qapi/error.h"
+#include "sysemu/cpu-timers.h"
 #include "sysemu/replay.h"
 #include "sysemu/runstate.h"
 #include "replay-internal.h"
-#include "qemu/timer.h"
 #include "qemu/main-loop.h"
 #include "qemu/option.h"
 #include "sysemu/cpus.h"
@@ -345,7 +345,7 @@ void replay_start(void)
         error_reportf_err(replay_blockers->data, "Record/replay: ");
         exit(1);
     }
-    if (!use_icount) {
+    if (!icount_enabled()) {
         error_report("Please enable icount to use record/replay");
         exit(1);
     }
softmmu/cpu-timers.c (new file, 284 lines)
@@ -0,0 +1,284 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/cutils.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "exec/exec-all.h"
+#include "sysemu/cpus.h"
+#include "sysemu/qtest.h"
+#include "qemu/main-loop.h"
+#include "qemu/option.h"
+#include "qemu/seqlock.h"
+#include "sysemu/replay.h"
+#include "sysemu/runstate.h"
+#include "hw/core/cpu.h"
+#include "sysemu/cpu-timers.h"
+#include "sysemu/cpu-throttle.h"
+#include "timers-state.h"
+
+/* clock and ticks */
+
+static int64_t cpu_get_ticks_locked(void)
+{
+    int64_t ticks = timers_state.cpu_ticks_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += cpu_get_host_ticks();
+    }
+
+    if (timers_state.cpu_ticks_prev > ticks) {
+        /* Non increasing ticks may happen if the host uses software suspend. */
+        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        ticks = timers_state.cpu_ticks_prev;
+    }
+
+    timers_state.cpu_ticks_prev = ticks;
+    return ticks;
+}
+
+/*
+ * return the time elapsed in VM between vm_start and vm_stop. Unless
+ * icount is active, cpu_get_ticks() uses units of the host CPU cycle
+ * counter.
+ */
+int64_t cpu_get_ticks(void)
+{
+    int64_t ticks;
+
+    if (icount_enabled()) {
+        return cpu_get_icount();
+    }
+
+    qemu_spin_lock(&timers_state.vm_clock_lock);
+    ticks = cpu_get_ticks_locked();
+    qemu_spin_unlock(&timers_state.vm_clock_lock);
+    return ticks;
+}
+
+int64_t cpu_get_clock_locked(void)
+{
+    int64_t time;
+
+    time = timers_state.cpu_clock_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        time += get_clock();
+    }
+
+    return time;
+}
+
+/*
+ * Return the monotonic time elapsed in VM, i.e.,
+ * the time between vm_start and vm_stop
+ */
+int64_t cpu_get_clock(void)
+{
+    int64_t ti;
+    unsigned start;
+
+    do {
+        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+        ti = cpu_get_clock_locked();
+    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
+
+    return ti;
+}
+
+/*
+ * enable cpu_get_ticks()
+ * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
+ */
+void cpu_enable_ticks(void)
+{
+    seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
+    if (!timers_state.cpu_ticks_enabled) {
+        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
+        timers_state.cpu_clock_offset -= get_clock();
+        timers_state.cpu_ticks_enabled = 1;
+    }
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                         &timers_state.vm_clock_lock);
+}
+
+/*
+ * disable cpu_get_ticks() : the clock is stopped. You must not call
+ * cpu_get_ticks() after that.
+ * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
+ */
+void cpu_disable_ticks(void)
+{
+    seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
+    if (timers_state.cpu_ticks_enabled) {
+        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
+        timers_state.cpu_clock_offset = cpu_get_clock_locked();
+        timers_state.cpu_ticks_enabled = 0;
+    }
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                         &timers_state.vm_clock_lock);
+}
+
+static bool icount_state_needed(void *opaque)
+{
+    return icount_enabled();
+}
+
+static bool warp_timer_state_needed(void *opaque)
+{
+    TimersState *s = opaque;
+    return s->icount_warp_timer != NULL;
+}
+
+static bool adjust_timers_state_needed(void *opaque)
+{
+    TimersState *s = opaque;
+    return s->icount_rt_timer != NULL;
+}
+
+static bool shift_state_needed(void *opaque)
+{
+    return icount_enabled() == 2;
+}
+
+/*
+ * Subsection for warp timer migration is optional, because may not be created
+ */
+static const VMStateDescription icount_vmstate_warp_timer = {
+    .name = "timer/icount/warp_timer",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = warp_timer_state_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(vm_clock_warp_start, TimersState),
+        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription icount_vmstate_adjust_timers = {
+    .name = "timer/icount/timers",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = adjust_timers_state_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
+        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription icount_vmstate_shift = {
+    .name = "timer/icount/shift",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = shift_state_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT16(icount_time_shift, TimersState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/*
+ * This is a subsection for icount migration.
+ */
+static const VMStateDescription icount_vmstate_timers = {
+    .name = "timer/icount",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = icount_state_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(qemu_icount_bias, TimersState),
+        VMSTATE_INT64(qemu_icount, TimersState),
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription * []) {
+        &icount_vmstate_warp_timer,
+        &icount_vmstate_adjust_timers,
+        &icount_vmstate_shift,
+        NULL
+    }
+};
+
+static const VMStateDescription vmstate_timers = {
+    .name = "timer",
+    .version_id = 2,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(cpu_ticks_offset, TimersState),
+        VMSTATE_UNUSED(8),
+        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription * []) {
+        &icount_vmstate_timers,
+        NULL
+    }
+};
+
+static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
+{
+}
+
+void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
+{
+    if (!icount_enabled() || type != QEMU_CLOCK_VIRTUAL) {
+        qemu_notify_event();
+        return;
+    }
+
+    if (qemu_in_vcpu_thread()) {
+        /*
+         * A CPU is currently running; kick it back out to the
+         * tcg_cpu_exec() loop so it will recalculate its
+         * icount deadline immediately.
+         */
+        qemu_cpu_kick(current_cpu);
+    } else if (first_cpu) {
+        /*
+         * qemu_cpu_kick is not enough to kick a halted CPU out of
+         * qemu_tcg_wait_io_event. async_run_on_cpu, instead,
+         * causes cpu_thread_is_idle to return false. This way,
+         * handle_icount_deadline can run.
+         * If we have no CPUs at all for some reason, we don't
+         * need to do anything.
+         */
+        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
+    }
+}
+
+TimersState timers_state;
+
+/* initialize timers state and the cpu throttle for convenience */
+void cpu_timers_init(void)
+{
+    seqlock_init(&timers_state.vm_clock_seqlock);
+    qemu_spin_init(&timers_state.vm_clock_lock);
+    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
+
+    cpu_throttle_init();
+}
softmmu/cpus.c (744 changes)
@@ -58,11 +58,10 @@
 #include "hw/nmi.h"
 #include "sysemu/replay.h"
 #include "sysemu/runstate.h"
+#include "sysemu/cpu-timers.h"
 #include "hw/boards.h"
 #include "hw/hw.h"

-#include "sysemu/cpu-throttle.h"
-
 #ifdef CONFIG_LINUX

 #include <sys/prctl.h>
@@ -83,9 +82,6 @@

 static QemuMutex qemu_global_mutex;

-int64_t max_delay;
-int64_t max_advance;
-
 bool cpu_is_stopped(CPUState *cpu)
 {
     return cpu->stopped || !runstate_is_running();
@@ -116,7 +112,7 @@ static bool cpu_thread_is_idle(CPUState *cpu)
     return true;
 }

-static bool all_cpu_threads_idle(void)
+bool all_cpu_threads_idle(void)
 {
     CPUState *cpu;

@@ -128,688 +124,9 @@ static bool all_cpu_threads_idle(void)
     return true;
 }

-/***********************************************************/
-/* guest cycle counter */
-
-/* Protected by TimersState seqlock */
-
-static bool icount_sleep = true;
-/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
-#define MAX_ICOUNT_SHIFT 10
-
-typedef struct TimersState {
-    /* Protected by BQL. */
-    int64_t cpu_ticks_prev;
-    int64_t cpu_ticks_offset;
-
-    /* Protect fields that can be respectively read outside the
-     * BQL, and written from multiple threads.
-     */
-    QemuSeqLock vm_clock_seqlock;
-    QemuSpin vm_clock_lock;
-
-    int16_t cpu_ticks_enabled;
-
-    /* Conversion factor from emulated instructions to virtual clock ticks. */
-    int16_t icount_time_shift;
-
-    /* Compensate for varying guest execution speed. */
-    int64_t qemu_icount_bias;
-
-    int64_t vm_clock_warp_start;
-    int64_t cpu_clock_offset;
-
-    /* Only written by TCG thread */
-    int64_t qemu_icount;
-
-    /* for adjusting icount */
-    QEMUTimer *icount_rt_timer;
-    QEMUTimer *icount_vm_timer;
-    QEMUTimer *icount_warp_timer;
-} TimersState;
-
-static TimersState timers_state;
 bool mttcg_enabled;

-
-/* The current number of executed instructions is based on what we
- * originally budgeted minus the current state of the decrementing
- * icount counters in extra/u16.low.
- */
-static int64_t cpu_get_icount_executed(CPUState *cpu)
-{
-    return (cpu->icount_budget -
-            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
-}
-
-/*
- * Update the global shared timer_state.qemu_icount to take into
- * account executed instructions. This is done by the TCG vCPU
- * thread so the main-loop can see time has moved forward.
- */
-static void cpu_update_icount_locked(CPUState *cpu)
-{
-    int64_t executed = cpu_get_icount_executed(cpu);
-    cpu->icount_budget -= executed;
-
-    qatomic_set_i64(&timers_state.qemu_icount,
-                    timers_state.qemu_icount + executed);
-}
-
-/*
- * Update the global shared timer_state.qemu_icount to take into
- * account executed instructions. This is done by the TCG vCPU
- * thread so the main-loop can see time has moved forward.
- */
-void cpu_update_icount(CPUState *cpu)
-{
-    seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                       &timers_state.vm_clock_lock);
-    cpu_update_icount_locked(cpu);
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
-                         &timers_state.vm_clock_lock);
-}
-
-static int64_t cpu_get_icount_raw_locked(void)
-{
-    CPUState *cpu = current_cpu;
-
-    if (cpu && cpu->running) {
-        if (!cpu->can_do_io) {
-            error_report("Bad icount read");
-            exit(1);
-        }
-        /* Take into account what has run */
-        cpu_update_icount_locked(cpu);
-    }
-    /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
-    return qatomic_read_i64(&timers_state.qemu_icount);
-}
-
-static int64_t cpu_get_icount_locked(void)
-{
-    int64_t icount = cpu_get_icount_raw_locked();
-    return qatomic_read_i64(&timers_state.qemu_icount_bias) +
-        cpu_icount_to_ns(icount);
-}
-
-int64_t cpu_get_icount_raw(void)
-{
-    int64_t icount;
-    unsigned start;
-
-    do {
-        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
-        icount = cpu_get_icount_raw_locked();
-    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
-
-    return icount;
-}
-
-/* Return the virtual CPU time, based on the instruction counter. */
-int64_t cpu_get_icount(void)
-{
-    int64_t icount;
-    unsigned start;
-
-    do {
-        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
-        icount = cpu_get_icount_locked();
-    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
-
-    return icount;
-}
-
-int64_t cpu_icount_to_ns(int64_t icount)
-{
-    return icount << qatomic_read(&timers_state.icount_time_shift);
-}
-
-static int64_t cpu_get_ticks_locked(void)
-{
-    int64_t ticks = timers_state.cpu_ticks_offset;
-    if (timers_state.cpu_ticks_enabled) {
-        ticks += cpu_get_host_ticks();
-    }
-
-    if (timers_state.cpu_ticks_prev > ticks) {
-        /* Non increasing ticks may happen if the host uses software suspend. */
-        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        ticks = timers_state.cpu_ticks_prev;
-    }
-
-    timers_state.cpu_ticks_prev = ticks;
-    return ticks;
-}
-
-/* return the time elapsed in VM between vm_start and vm_stop. Unless
- * icount is active, cpu_get_ticks() uses units of the host CPU cycle
- * counter.
- */
-int64_t cpu_get_ticks(void)
-{
-    int64_t ticks;
-
-    if (use_icount) {
-        return cpu_get_icount();
-    }
-
-    qemu_spin_lock(&timers_state.vm_clock_lock);
-    ticks = cpu_get_ticks_locked();
-    qemu_spin_unlock(&timers_state.vm_clock_lock);
-    return ticks;
-}
-
-static int64_t cpu_get_clock_locked(void)
-{
-    int64_t time;
-
-    time = timers_state.cpu_clock_offset;
-    if (timers_state.cpu_ticks_enabled) {
-        time += get_clock();
-    }
-
-    return time;
-}
-
-/* Return the monotonic time elapsed in VM, i.e.,
- * the time between vm_start and vm_stop
- */
-int64_t cpu_get_clock(void)
-{
-    int64_t ti;
-    unsigned start;
-
-    do {
-        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
-        ti = cpu_get_clock_locked();
-    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
-
-    return ti;
-}
-
-/* enable cpu_get_ticks()
- * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
- */
-void cpu_enable_ticks(void)
-{
-    seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                       &timers_state.vm_clock_lock);
-    if (!timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
-        timers_state.cpu_clock_offset -= get_clock();
-        timers_state.cpu_ticks_enabled = 1;
-    }
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
-                         &timers_state.vm_clock_lock);
-}
-
-/* disable cpu_get_ticks() : the clock is stopped. You must not call
- * cpu_get_ticks() after that.
- * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
- */
-void cpu_disable_ticks(void)
-{
-    seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                       &timers_state.vm_clock_lock);
-    if (timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
-        timers_state.cpu_clock_offset = cpu_get_clock_locked();
-        timers_state.cpu_ticks_enabled = 0;
-    }
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
-                         &timers_state.vm_clock_lock);
-}
-
-/* Correlation between real and virtual time is always going to be
-   fairly approximate, so ignore small variation.
-   When the guest is idle real and virtual time will be aligned in
-   the IO wait loop. */
-#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
-
-static void icount_adjust(void)
-{
-    int64_t cur_time;
-    int64_t cur_icount;
-    int64_t delta;
-
-    /* Protected by TimersState mutex. */
-    static int64_t last_delta;
-
-    /* If the VM is not running, then do nothing. */
-    if (!runstate_is_running()) {
-        return;
-    }
-
-    seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                       &timers_state.vm_clock_lock);
-    cur_time = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
-                                   cpu_get_clock_locked());
-    cur_icount = cpu_get_icount_locked();
-
-    delta = cur_icount - cur_time;
-    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
-    if (delta > 0
-        && last_delta + ICOUNT_WOBBLE < delta * 2
-        && timers_state.icount_time_shift > 0) {
-        /* The guest is getting too far ahead. Slow time down. */
-        qatomic_set(&timers_state.icount_time_shift,
-                    timers_state.icount_time_shift - 1);
-    }
-    if (delta < 0
-        && last_delta - ICOUNT_WOBBLE > delta * 2
-        && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
-        /* The guest is getting too far behind. Speed time up. */
-        qatomic_set(&timers_state.icount_time_shift,
-                    timers_state.icount_time_shift + 1);
-    }
-    last_delta = delta;
-    qatomic_set_i64(&timers_state.qemu_icount_bias,
-                    cur_icount - (timers_state.qemu_icount
-                                  << timers_state.icount_time_shift));
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
-                         &timers_state.vm_clock_lock);
-}
-
-static void icount_adjust_rt(void *opaque)
-{
-    timer_mod(timers_state.icount_rt_timer,
-              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
-    icount_adjust();
-}
-
-static void icount_adjust_vm(void *opaque)
-{
-    timer_mod(timers_state.icount_vm_timer,
-              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-              NANOSECONDS_PER_SECOND / 10);
-    icount_adjust();
-}
-
-static int64_t qemu_icount_round(int64_t count)
-{
-    int shift = qatomic_read(&timers_state.icount_time_shift);
-    return (count + (1 << shift) - 1) >> shift;
-}
-
-static void icount_warp_rt(void)
-{
-    unsigned seq;
-    int64_t warp_start;
-
-    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
-     * changes from -1 to another value, so the race here is okay.
-     */
-    do {
-        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
-        warp_start = timers_state.vm_clock_warp_start;
-    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
-
-    if (warp_start == -1) {
-        return;
-    }
-
-    seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                       &timers_state.vm_clock_lock);
-    if (runstate_is_running()) {
-        int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
-                                            cpu_get_clock_locked());
-        int64_t warp_delta;
-
-        warp_delta = clock - timers_state.vm_clock_warp_start;
-        if (use_icount == 2) {
-            /*
-             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
-             * far ahead of real time.
-             */
-            int64_t cur_icount = cpu_get_icount_locked();
-            int64_t delta = clock - cur_icount;
-            warp_delta = MIN(warp_delta, delta);
-        }
-        qatomic_set_i64(&timers_state.qemu_icount_bias,
-                        timers_state.qemu_icount_bias + warp_delta);
-    }
-    timers_state.vm_clock_warp_start = -1;
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
-                         &timers_state.vm_clock_lock);
-
-    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
-        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
-    }
-}
-
-static void icount_timer_cb(void *opaque)
-{
-    /* No need for a checkpoint because the timer already synchronizes
-     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
-     */
-    icount_warp_rt();
-}
-
-void qtest_clock_warp(int64_t dest)
-{
-    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-    AioContext *aio_context;
-    assert(qtest_enabled());
-    aio_context = qemu_get_aio_context();
-    while (clock < dest) {
-        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
-                                                      QEMU_TIMER_ATTR_ALL);
-        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
-
-        seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                           &timers_state.vm_clock_lock);
-        qatomic_set_i64(&timers_state.qemu_icount_bias,
-                        timers_state.qemu_icount_bias + warp);
-        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
-                             &timers_state.vm_clock_lock);
-
-        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
-        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
-        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-    }
-    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
-}
-
-void qemu_start_warp_timer(void)
-{
-    int64_t clock;
-    int64_t deadline;
-
-    if (!use_icount) {
-        return;
-    }
-
-    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
-     * do not fire, so computing the deadline does not make sense.
-     */
-    if (!runstate_is_running()) {
-        return;
-    }
-
-    if (replay_mode != REPLAY_MODE_PLAY) {
-        if (!all_cpu_threads_idle()) {
-            return;
-        }
-
-        if (qtest_enabled()) {
-            /* When testing, qtest commands advance icount. */
-            return;
-        }
-
-        replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
-    } else {
-        /* warp clock deterministically in record/replay mode */
-        if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
-            /* vCPU is sleeping and warp can't be started.
-               It is probably a race condition: notification sent
-               to vCPU was processed in advance and vCPU went to sleep.
-               Therefore we have to wake it up for doing someting. */
-            if (replay_has_checkpoint()) {
-                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
-            }
-            return;
-        }
-    }
-
-    /* We want to use the earliest deadline from ALL vm_clocks */
-    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
-    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
-                                          ~QEMU_TIMER_ATTR_EXTERNAL);
-    if (deadline < 0) {
-        static bool notified;
-        if (!icount_sleep && !notified) {
-            warn_report("icount sleep disabled and no active timers");
-            notified = true;
-        }
-        return;
-    }
-
-    if (deadline > 0) {
-        /*
-         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
-         * sleep. Otherwise, the CPU might be waiting for a future timer
-         * interrupt to wake it up, but the interrupt never comes because
-         * the vCPU isn't running any insns and thus doesn't advance the
-         * QEMU_CLOCK_VIRTUAL.
-         */
-        if (!icount_sleep) {
-            /*
-             * We never let VCPUs sleep in no sleep icount mode.
-             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
-             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
-             * It is useful when we want a deterministic execution time,
-             * isolated from host latencies.
-             */
-            seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                               &timers_state.vm_clock_lock);
-            qatomic_set_i64(&timers_state.qemu_icount_bias,
-                            timers_state.qemu_icount_bias + deadline);
-            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
-                                 &timers_state.vm_clock_lock);
-            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
-        } else {
-            /*
-             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
-             * "real" time, (related to the time left until the next event) has
-             * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
-             * This avoids that the warps are visible externally; for example,
-             * you will not be sending network packets continuously instead of
-             * every 100ms.
-             */
-            seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                               &timers_state.vm_clock_lock);
-            if (timers_state.vm_clock_warp_start == -1
-                || timers_state.vm_clock_warp_start > clock) {
-                timers_state.vm_clock_warp_start = clock;
-            }
-            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
-                                 &timers_state.vm_clock_lock);
-            timer_mod_anticipate(timers_state.icount_warp_timer,
-                                 clock + deadline);
-        }
-    } else if (deadline == 0) {
-        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
-    }
-}
-
-static void qemu_account_warp_timer(void)
-{
-    if (!use_icount || !icount_sleep) {
-        return;
-    }
-
-    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
-     * do not fire, so computing the deadline does not make sense.
-     */
-    if (!runstate_is_running()) {
-        return;
-    }
-
-    /* warp clock deterministically in record/replay mode */
-    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
-        return;
-    }
-
-    timer_del(timers_state.icount_warp_timer);
-    icount_warp_rt();
-}
-
-static bool icount_state_needed(void *opaque)
-{
-    return use_icount;
-}
-
-static bool warp_timer_state_needed(void *opaque)
-{
-    TimersState *s = opaque;
-    return s->icount_warp_timer != NULL;
-}
-
-static bool adjust_timers_state_needed(void *opaque)
-{
-    TimersState *s = opaque;
-    return s->icount_rt_timer != NULL;
-}
-
-static bool shift_state_needed(void *opaque)
-{
-    return use_icount == 2;
-}
-
-/*
- * Subsection for warp timer migration is optional, because may not be created
- */
-static const VMStateDescription icount_vmstate_warp_timer = {
-    .name = "timer/icount/warp_timer",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = warp_timer_state_needed,
-    .fields = (VMStateField[]) {
-        VMSTATE_INT64(vm_clock_warp_start, TimersState),
-        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
-        VMSTATE_END_OF_LIST()
-    }
-};
-
-static const VMStateDescription icount_vmstate_adjust_timers = {
-    .name = "timer/icount/timers",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = adjust_timers_state_needed,
-    .fields = (VMStateField[]) {
-        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
-        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
-        VMSTATE_END_OF_LIST()
-    }
-};
-
-static const VMStateDescription icount_vmstate_shift = {
-    .name = "timer/icount/shift",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = shift_state_needed,
-    .fields = (VMStateField[]) {
-        VMSTATE_INT16(icount_time_shift, TimersState),
-        VMSTATE_END_OF_LIST()
-    }
-};
-
-/*
- * This is a subsection for icount migration.
- */
-static const VMStateDescription icount_vmstate_timers = {
-    .name = "timer/icount",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = icount_state_needed,
-    .fields = (VMStateField[]) {
-        VMSTATE_INT64(qemu_icount_bias, TimersState),
-        VMSTATE_INT64(qemu_icount, TimersState),
-        VMSTATE_END_OF_LIST()
-    },
-    .subsections = (const VMStateDescription*[]) {
-        &icount_vmstate_warp_timer,
-        &icount_vmstate_adjust_timers,
-        &icount_vmstate_shift,
-        NULL
-    }
-};
-
-static const VMStateDescription vmstate_timers = {
-    .name = "timer",
-    .version_id = 2,
-    .minimum_version_id = 1,
-    .fields = (VMStateField[]) {
-        VMSTATE_INT64(cpu_ticks_offset, TimersState),
-        VMSTATE_UNUSED(8),
-        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
-        VMSTATE_END_OF_LIST()
-    },
-    .subsections = (const VMStateDescription*[]) {
-        &icount_vmstate_timers,
-        NULL
-    }
-};
-
-void cpu_ticks_init(void)
-{
-    seqlock_init(&timers_state.vm_clock_seqlock);
-    qemu_spin_init(&timers_state.vm_clock_lock);
-    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
-    cpu_throttle_init();
-}
-
-void configure_icount(QemuOpts *opts, Error **errp)
-{
-    const char *option = qemu_opt_get(opts, "shift");
-    bool sleep = qemu_opt_get_bool(opts, "sleep", true);
-    bool align = qemu_opt_get_bool(opts, "align", false);
-    long time_shift = -1;
-
-    if (!option) {
-        if (qemu_opt_get(opts, "align") != NULL) {
-            error_setg(errp, "Please specify shift option when using align");
-        }
-        return;
-    }
-
-    if (align && !sleep) {
-        error_setg(errp, "align=on and sleep=off are incompatible");
-        return;
-    }
-
-    if (strcmp(option, "auto") != 0) {
-        if (qemu_strtol(option, NULL, 0, &time_shift) < 0
-            || time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
-            error_setg(errp, "icount: Invalid shift value");
-            return;
-        }
-    } else if (icount_align_option) {
-        error_setg(errp, "shift=auto and align=on are incompatible");
-        return;
-    } else if (!icount_sleep) {
-        error_setg(errp, "shift=auto and sleep=off are incompatible");
-        return;
-    }
-
-    icount_sleep = sleep;
-    if (icount_sleep) {
-        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
-                                                      icount_timer_cb, NULL);
-    }
-
-    icount_align_option = align;
-
-    if (time_shift >= 0) {
-        timers_state.icount_time_shift = time_shift;
-        use_icount = 1;
-        return;
-    }
-
-    use_icount = 2;
-
-    /* 125MIPS seems a reasonable initial guess at the guest speed.
-       It will be corrected fairly quickly anyway. */
-    timers_state.icount_time_shift = 3;
-
-    /* Have both realtime and virtual time triggers for speed adjustment.
|
|
||||||
The realtime trigger catches emulated time passing too slowly,
|
|
||||||
the virtual time trigger catches emulated time passing too fast.
|
|
||||||
Realtime triggers occur even when idle, so use them less frequently
|
|
||||||
than VM triggers. */
|
|
||||||
timers_state.vm_clock_warp_start = -1;
|
|
||||||
timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
|
|
||||||
icount_adjust_rt, NULL);
|
|
||||||
timer_mod(timers_state.icount_rt_timer,
|
|
||||||
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
|
|
||||||
timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
|
|
||||||
icount_adjust_vm, NULL);
|
|
||||||
timer_mod(timers_state.icount_vm_timer,
|
|
||||||
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
|
||||||
NANOSECONDS_PER_SECOND / 10);
|
|
||||||
}
|
|
||||||
|
|
||||||
 /***********************************************************/
 /* TCG vCPU kick timer
  *
@ -854,35 +171,6 @@ static void qemu_cpu_kick_rr_cpus(void)
 };
 }
 
-static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
-{
-}
-
-void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
-{
-    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
-        qemu_notify_event();
-        return;
-    }
-
-    if (qemu_in_vcpu_thread()) {
-        /* A CPU is currently running; kick it back out to the
-         * tcg_cpu_exec() loop so it will recalculate its
-         * icount deadline immediately.
-         */
-        qemu_cpu_kick(current_cpu);
-    } else if (first_cpu) {
-        /* qemu_cpu_kick is not enough to kick a halted CPU out of
-         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
-         * causes cpu_thread_is_idle to return false.  This way,
-         * handle_icount_deadline can run.
-         * If we have no CPUs at all for some reason, we don't
-         * need to do anything.
-         */
-        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
-    }
-}
-
 static void kick_tcg_thread(void *opaque)
 {
     timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());

@ -1288,7 +576,7 @@ static void notify_aio_contexts(void)
 static void handle_icount_deadline(void)
 {
     assert(qemu_in_vcpu_thread());
-    if (use_icount) {
+    if (icount_enabled()) {
         int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                       QEMU_TIMER_ATTR_ALL);

@ -1300,7 +588,7 @@ static void handle_icount_deadline(void)
 
 static void prepare_icount_for_run(CPUState *cpu)
 {
-    if (use_icount) {
+    if (icount_enabled()) {
         int insns_left;
 
         /* These should always be cleared by process_icount_data after

@ -1325,7 +613,7 @@ static void prepare_icount_for_run(CPUState *cpu)
 
 static void process_icount_data(CPUState *cpu)
 {
-    if (use_icount) {
+    if (icount_enabled()) {
         /* Account for executed instructions */
         cpu_update_icount(cpu);

@ -1486,7 +774,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
             qatomic_mb_set(&cpu->exit_request, 0);
         }
 
-        if (use_icount && all_cpu_threads_idle()) {
+        if (icount_enabled() && all_cpu_threads_idle()) {
             /*
              * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
              * in the main_loop, wake it up in order to start the warp timer.

@ -1639,7 +927,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     CPUState *cpu = arg;
 
     assert(tcg_enabled());
-    g_assert(!use_icount);
+    g_assert(!icount_enabled());
 
     rcu_register_thread();
     tcg_register_thread();

@ -2227,21 +1515,3 @@ void qmp_inject_nmi(Error **errp)
     nmi_monitor_handle(monitor_get_cpu_index(), errp);
 }
-
-void dump_drift_info(void)
-{
-    if (!use_icount) {
-        return;
-    }
-
-    qemu_printf("Host - Guest clock %"PRIi64" ms\n",
-                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
-    if (icount_align_option) {
-        qemu_printf("Max guest delay %"PRIi64" ms\n",
-                    -max_delay / SCALE_MS);
-        qemu_printf("Max guest advance %"PRIi64" ms\n",
-                    max_advance / SCALE_MS);
-    } else {
-        qemu_printf("Max guest delay NA\n");
-        qemu_printf("Max guest advance NA\n");
-    }
-}
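Note for readers of the hunks above: every use_icount test in cpus.c now goes through icount_enabled(). The accessor itself is declared in the new sysemu/cpu-timers.h header, which is outside this excerpt; the sketch below is only an assumption of its shape, based on the 0/1/2 encoding documented in icount.c (0 = disabled, 1 = precise, 2 = adaptive).

/* Hypothetical sketch of the accessor, not the verbatim header. */
extern int use_icount;                /* 0, 1 or 2, see softmmu/icount.c */
#define icount_enabled() (use_icount) /* callers compare against 1 or 2  */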
softmmu/icount.c (new file, 492 lines)
@ -0,0 +1,492 @@
|
/*
|
||||||
|
* QEMU System Emulator
|
||||||
|
*
|
||||||
|
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
* of this software and associated documentation files (the "Software"), to deal
|
||||||
|
* in the Software without restriction, including without limitation the rights
|
||||||
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
* copies of the Software, and to permit persons to whom the Software is
|
||||||
|
* furnished to do so, subject to the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice shall be included in
|
||||||
|
* all copies or substantial portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
* THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "qemu/osdep.h"
|
||||||
|
#include "qemu-common.h"
|
||||||
|
#include "qemu/cutils.h"
|
||||||
|
#include "migration/vmstate.h"
|
||||||
|
#include "qapi/error.h"
|
||||||
|
#include "qemu/error-report.h"
|
||||||
|
#include "exec/exec-all.h"
|
||||||
|
#include "sysemu/cpus.h"
|
||||||
|
#include "sysemu/qtest.h"
|
||||||
|
#include "qemu/main-loop.h"
|
||||||
|
#include "qemu/option.h"
|
||||||
|
#include "qemu/seqlock.h"
|
||||||
|
#include "sysemu/replay.h"
|
||||||
|
#include "sysemu/runstate.h"
|
||||||
|
#include "hw/core/cpu.h"
|
||||||
|
#include "sysemu/cpu-timers.h"
|
||||||
|
#include "sysemu/cpu-throttle.h"
|
||||||
|
#include "timers-state.h"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* ICOUNT: Instruction Counter
|
||||||
|
*
|
||||||
|
* this module is split off from cpu-timers because the icount part
|
||||||
|
* is TCG-specific, and does not need to be built for other accels.
|
||||||
|
*/
|
||||||
|
static bool icount_sleep = true;
|
||||||
|
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
|
||||||
|
#define MAX_ICOUNT_SHIFT 10
|
||||||
|
|
||||||
|
/*
|
||||||
|
* 0 = Do not count executed instructions.
|
||||||
|
* 1 = Fixed conversion of insn to ns via "shift" option
|
||||||
|
* 2 = Runtime adaptive algorithm to compute shift
|
||||||
|
*/
|
||||||
|
int use_icount;
|
||||||
|
|
||||||
|
static void icount_enable_precise(void)
|
||||||
|
{
|
||||||
|
use_icount = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void icount_enable_adaptive(void)
|
||||||
|
{
|
||||||
|
use_icount = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The current number of executed instructions is based on what we
|
||||||
|
* originally budgeted minus the current state of the decrementing
|
||||||
|
* icount counters in extra/u16.low.
|
||||||
|
*/
|
||||||
|
static int64_t cpu_get_icount_executed(CPUState *cpu)
|
||||||
|
{
|
||||||
|
return (cpu->icount_budget -
|
||||||
|
(cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Update the global shared timer_state.qemu_icount to take into
|
||||||
|
* account executed instructions. This is done by the TCG vCPU
|
||||||
|
* thread so the main-loop can see time has moved forward.
|
||||||
|
*/
|
||||||
|
static void cpu_update_icount_locked(CPUState *cpu)
|
||||||
|
{
|
||||||
|
int64_t executed = cpu_get_icount_executed(cpu);
|
||||||
|
cpu->icount_budget -= executed;
|
||||||
|
|
||||||
|
qatomic_set_i64(&timers_state.qemu_icount,
|
||||||
|
timers_state.qemu_icount + executed);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Update the global shared timer_state.qemu_icount to take into
|
||||||
|
* account executed instructions. This is done by the TCG vCPU
|
||||||
|
* thread so the main-loop can see time has moved forward.
|
||||||
|
*/
|
||||||
|
void cpu_update_icount(CPUState *cpu)
|
||||||
|
{
|
||||||
|
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
cpu_update_icount_locked(cpu);
|
||||||
|
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int64_t cpu_get_icount_raw_locked(void)
|
||||||
|
{
|
||||||
|
CPUState *cpu = current_cpu;
|
||||||
|
|
||||||
|
if (cpu && cpu->running) {
|
||||||
|
if (!cpu->can_do_io) {
|
||||||
|
error_report("Bad icount read");
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
/* Take into account what has run */
|
||||||
|
cpu_update_icount_locked(cpu);
|
||||||
|
}
|
||||||
|
/* The read is protected by the seqlock, but needs atomic64 to avoid UB */
|
||||||
|
return qatomic_read_i64(&timers_state.qemu_icount);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int64_t cpu_get_icount_locked(void)
|
||||||
|
{
|
||||||
|
int64_t icount = cpu_get_icount_raw_locked();
|
||||||
|
return qatomic_read_i64(&timers_state.qemu_icount_bias) +
|
||||||
|
cpu_icount_to_ns(icount);
|
||||||
|
}
|
||||||
|
|
||||||
|
int64_t cpu_get_icount_raw(void)
|
||||||
|
{
|
||||||
|
int64_t icount;
|
||||||
|
unsigned start;
|
||||||
|
|
||||||
|
do {
|
||||||
|
start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||||
|
icount = cpu_get_icount_raw_locked();
|
||||||
|
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
|
||||||
|
|
||||||
|
return icount;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return the virtual CPU time, based on the instruction counter. */
|
||||||
|
int64_t cpu_get_icount(void)
|
||||||
|
{
|
||||||
|
int64_t icount;
|
||||||
|
unsigned start;
|
||||||
|
|
||||||
|
do {
|
||||||
|
start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||||
|
icount = cpu_get_icount_locked();
|
||||||
|
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
|
||||||
|
|
||||||
|
return icount;
|
||||||
|
}
|
||||||
|
|
||||||
|
int64_t cpu_icount_to_ns(int64_t icount)
|
||||||
|
{
|
||||||
|
return icount << qatomic_read(&timers_state.icount_time_shift);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Correlation between real and virtual time is always going to be
|
||||||
|
* fairly approximate, so ignore small variation.
|
||||||
|
* When the guest is idle real and virtual time will be aligned in
|
||||||
|
* the IO wait loop.
|
||||||
|
*/
|
||||||
|
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
|
||||||
|
|
||||||
|
static void icount_adjust(void)
|
||||||
|
{
|
||||||
|
int64_t cur_time;
|
||||||
|
int64_t cur_icount;
|
||||||
|
int64_t delta;
|
||||||
|
|
||||||
|
/* Protected by TimersState mutex. */
|
||||||
|
static int64_t last_delta;
|
||||||
|
|
||||||
|
/* If the VM is not running, then do nothing. */
|
||||||
|
if (!runstate_is_running()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
cur_time = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
|
||||||
|
cpu_get_clock_locked());
|
||||||
|
cur_icount = cpu_get_icount_locked();
|
||||||
|
|
||||||
|
delta = cur_icount - cur_time;
|
||||||
|
/* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
|
||||||
|
if (delta > 0
|
||||||
|
&& last_delta + ICOUNT_WOBBLE < delta * 2
|
||||||
|
&& timers_state.icount_time_shift > 0) {
|
||||||
|
/* The guest is getting too far ahead. Slow time down. */
|
||||||
|
qatomic_set(&timers_state.icount_time_shift,
|
||||||
|
timers_state.icount_time_shift - 1);
|
||||||
|
}
|
||||||
|
if (delta < 0
|
||||||
|
&& last_delta - ICOUNT_WOBBLE > delta * 2
|
||||||
|
&& timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
|
||||||
|
/* The guest is getting too far behind. Speed time up. */
|
||||||
|
qatomic_set(&timers_state.icount_time_shift,
|
||||||
|
timers_state.icount_time_shift + 1);
|
||||||
|
}
|
||||||
|
last_delta = delta;
|
||||||
|
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||||
|
cur_icount - (timers_state.qemu_icount
|
||||||
|
<< timers_state.icount_time_shift));
|
||||||
|
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void icount_adjust_rt(void *opaque)
|
||||||
|
{
|
||||||
|
timer_mod(timers_state.icount_rt_timer,
|
||||||
|
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
|
||||||
|
icount_adjust();
|
||||||
|
}
|
||||||
|
|
||||||
|
static void icount_adjust_vm(void *opaque)
|
||||||
|
{
|
||||||
|
timer_mod(timers_state.icount_vm_timer,
|
||||||
|
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||||
|
NANOSECONDS_PER_SECOND / 10);
|
||||||
|
icount_adjust();
|
||||||
|
}
|
||||||
|
|
||||||
|
int64_t qemu_icount_round(int64_t count)
|
||||||
|
{
|
||||||
|
int shift = qatomic_read(&timers_state.icount_time_shift);
|
||||||
|
return (count + (1 << shift) - 1) >> shift;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void icount_warp_rt(void)
|
||||||
|
{
|
||||||
|
unsigned seq;
|
||||||
|
int64_t warp_start;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
|
||||||
|
* changes from -1 to another value, so the race here is okay.
|
||||||
|
*/
|
||||||
|
do {
|
||||||
|
seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||||
|
warp_start = timers_state.vm_clock_warp_start;
|
||||||
|
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
|
||||||
|
|
||||||
|
if (warp_start == -1) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
if (runstate_is_running()) {
|
||||||
|
int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
|
||||||
|
cpu_get_clock_locked());
|
||||||
|
int64_t warp_delta;
|
||||||
|
|
||||||
|
warp_delta = clock - timers_state.vm_clock_warp_start;
|
||||||
|
if (icount_enabled() == 2) {
|
||||||
|
/*
|
||||||
|
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
|
||||||
|
* far ahead of real time.
|
||||||
|
*/
|
||||||
|
int64_t cur_icount = cpu_get_icount_locked();
|
||||||
|
int64_t delta = clock - cur_icount;
|
||||||
|
warp_delta = MIN(warp_delta, delta);
|
||||||
|
}
|
||||||
|
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||||
|
timers_state.qemu_icount_bias + warp_delta);
|
||||||
|
}
|
||||||
|
timers_state.vm_clock_warp_start = -1;
|
||||||
|
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
|
||||||
|
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
|
||||||
|
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void icount_timer_cb(void *opaque)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* No need for a checkpoint because the timer already synchronizes
|
||||||
|
* with CHECKPOINT_CLOCK_VIRTUAL_RT.
|
||||||
|
*/
|
||||||
|
icount_warp_rt();
|
||||||
|
}
|
||||||
|
|
||||||
|
void qemu_start_warp_timer(void)
|
||||||
|
{
|
||||||
|
int64_t clock;
|
||||||
|
int64_t deadline;
|
||||||
|
|
||||||
|
assert(icount_enabled());
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
|
||||||
|
* do not fire, so computing the deadline does not make sense.
|
||||||
|
*/
|
||||||
|
if (!runstate_is_running()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (replay_mode != REPLAY_MODE_PLAY) {
|
||||||
|
if (!all_cpu_threads_idle()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (qtest_enabled()) {
|
||||||
|
/* When testing, qtest commands advance icount. */
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
|
||||||
|
} else {
|
||||||
|
/* warp clock deterministically in record/replay mode */
|
||||||
|
if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
|
||||||
|
/*
|
||||||
|
* vCPU is sleeping and warp can't be started.
|
||||||
|
* It is probably a race condition: notification sent
|
||||||
|
* to vCPU was processed in advance and vCPU went to sleep.
|
||||||
|
* Therefore we have to wake it up to do something.
|
||||||
|
*/
|
||||||
|
if (replay_has_checkpoint()) {
|
||||||
|
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* We want to use the earliest deadline from ALL vm_clocks */
|
||||||
|
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
|
||||||
|
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
|
||||||
|
~QEMU_TIMER_ATTR_EXTERNAL);
|
||||||
|
if (deadline < 0) {
|
||||||
|
static bool notified;
|
||||||
|
if (!icount_sleep && !notified) {
|
||||||
|
warn_report("icount sleep disabled and no active timers");
|
||||||
|
notified = true;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (deadline > 0) {
|
||||||
|
/*
|
||||||
|
* Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
|
||||||
|
* sleep. Otherwise, the CPU might be waiting for a future timer
|
||||||
|
* interrupt to wake it up, but the interrupt never comes because
|
||||||
|
* the vCPU isn't running any insns and thus doesn't advance the
|
||||||
|
* QEMU_CLOCK_VIRTUAL.
|
||||||
|
*/
|
||||||
|
if (!icount_sleep) {
|
||||||
|
/*
|
||||||
|
* We never let VCPUs sleep in no sleep icount mode.
|
||||||
|
* If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
|
||||||
|
* to the next QEMU_CLOCK_VIRTUAL event and notify it.
|
||||||
|
* It is useful when we want a deterministic execution time,
|
||||||
|
* isolated from host latencies.
|
||||||
|
*/
|
||||||
|
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||||
|
timers_state.qemu_icount_bias + deadline);
|
||||||
|
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||||
|
} else {
|
||||||
|
/*
|
||||||
|
* We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
|
||||||
|
* "real" time, (related to the time left until the next event) has
|
||||||
|
* passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
|
||||||
|
* This avoids that the warps are visible externally; for example,
|
||||||
|
* you will not be sending network packets continuously instead of
|
||||||
|
* every 100ms.
|
||||||
|
*/
|
||||||
|
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
if (timers_state.vm_clock_warp_start == -1
|
||||||
|
|| timers_state.vm_clock_warp_start > clock) {
|
||||||
|
timers_state.vm_clock_warp_start = clock;
|
||||||
|
}
|
||||||
|
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||||
|
&timers_state.vm_clock_lock);
|
||||||
|
timer_mod_anticipate(timers_state.icount_warp_timer,
|
||||||
|
clock + deadline);
|
||||||
|
}
|
||||||
|
} else if (deadline == 0) {
|
||||||
|
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void qemu_account_warp_timer(void)
|
||||||
|
{
|
||||||
|
if (!icount_enabled() || !icount_sleep) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
|
||||||
|
* do not fire, so computing the deadline does not make sense.
|
||||||
|
*/
|
||||||
|
if (!runstate_is_running()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* warp clock deterministically in record/replay mode */
|
||||||
|
if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
timer_del(timers_state.icount_warp_timer);
|
||||||
|
icount_warp_rt();
|
||||||
|
}
|
||||||
|
|
||||||
|
void configure_icount(QemuOpts *opts, Error **errp)
|
||||||
|
{
|
||||||
|
const char *option = qemu_opt_get(opts, "shift");
|
||||||
|
bool sleep = qemu_opt_get_bool(opts, "sleep", true);
|
||||||
|
bool align = qemu_opt_get_bool(opts, "align", false);
|
||||||
|
long time_shift = -1;
|
||||||
|
|
||||||
|
if (!option) {
|
||||||
|
if (qemu_opt_get(opts, "align") != NULL) {
|
||||||
|
error_setg(errp, "Please specify shift option when using align");
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (align && !sleep) {
|
||||||
|
error_setg(errp, "align=on and sleep=off are incompatible");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (strcmp(option, "auto") != 0) {
|
||||||
|
if (qemu_strtol(option, NULL, 0, &time_shift) < 0
|
||||||
|
|| time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
|
||||||
|
error_setg(errp, "icount: Invalid shift value");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} else if (icount_align_option) {
|
||||||
|
error_setg(errp, "shift=auto and align=on are incompatible");
|
||||||
|
return;
|
||||||
|
} else if (!icount_sleep) {
|
||||||
|
error_setg(errp, "shift=auto and sleep=off are incompatible");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
icount_sleep = sleep;
|
||||||
|
if (icount_sleep) {
|
||||||
|
timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
|
||||||
|
icount_timer_cb, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
icount_align_option = align;
|
||||||
|
|
||||||
|
if (time_shift >= 0) {
|
||||||
|
timers_state.icount_time_shift = time_shift;
|
||||||
|
icount_enable_precise();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
icount_enable_adaptive();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* 125MIPS seems a reasonable initial guess at the guest speed.
|
||||||
|
* It will be corrected fairly quickly anyway.
|
||||||
|
*/
|
||||||
|
timers_state.icount_time_shift = 3;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Have both realtime and virtual time triggers for speed adjustment.
|
||||||
|
* The realtime trigger catches emulated time passing too slowly,
|
||||||
|
* the virtual time trigger catches emulated time passing too fast.
|
||||||
|
* Realtime triggers occur even when idle, so use them less frequently
|
||||||
|
* than VM triggers.
|
||||||
|
*/
|
||||||
|
timers_state.vm_clock_warp_start = -1;
|
||||||
|
timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
|
||||||
|
icount_adjust_rt, NULL);
|
||||||
|
timer_mod(timers_state.icount_rt_timer,
|
||||||
|
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
|
||||||
|
timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
|
||||||
|
icount_adjust_vm, NULL);
|
||||||
|
timer_mod(timers_state.icount_vm_timer,
|
||||||
|
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||||
|
NANOSECONDS_PER_SECOND / 10);
|
||||||
|
}
|
@ -1,4 +1,4 @@
-specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: files(
+specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files(
   'arch_init.c',
   'balloon.c',
   'cpus.c',
@ -7,4 +7,10 @@ specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: files(
   'memory.c',
   'memory_mapping.c',
   'qtest.c',
-  'vl.c'))
+  'vl.c',
+  'cpu-timers.c',
+)])
+
+specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files(
+  'icount.c'
+)])
@ -21,7 +21,7 @@
 #include "exec/memory.h"
 #include "hw/irq.h"
 #include "sysemu/accel.h"
-#include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "qemu/config-file.h"
 #include "qemu/option.h"
 #include "qemu/error-report.h"
@ -273,6 +273,38 @@ static void qtest_irq_handler(void *opaque, int n, int level)
     }
 }
 
+static int64_t qtest_clock_counter;
+
+int64_t qtest_get_virtual_clock(void)
+{
+    return qatomic_read_i64(&qtest_clock_counter);
+}
+
+static void qtest_set_virtual_clock(int64_t count)
+{
+    qatomic_set_i64(&qtest_clock_counter, count);
+}
+
+static void qtest_clock_warp(int64_t dest)
+{
+    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    AioContext *aio_context;
+    assert(qtest_enabled());
+    aio_context = qemu_get_aio_context();
+    while (clock < dest) {
+        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
+                                                      QEMU_TIMER_ATTR_ALL);
+        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
+
+        qtest_set_virtual_clock(qtest_get_virtual_clock() + warp);
+
+        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
+        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
+        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    }
+    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+}
+
 static void qtest_process_command(CharBackend *chr, gchar **words)
 {
     const gchar *command;
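With this hunk qtest warps time through its own counter rather than through an icount field, so existing clock tests keep working unmodified. A hedged libqtest usage sketch follows (the machine option and the 1 ms step are illustrative assumptions, not taken from this patch):

#include "qemu/osdep.h"
#include "libqtest.h"

/* Sketch: a qtest client stepping QEMU_CLOCK_VIRTUAL, served by
 * qtest_clock_warp() above via the clock_step protocol command. */
static void clock_step_example(void)
{
    QTestState *s = qtest_init("-machine none");  /* assumed machine choice */

    qtest_clock_step(s, 1000000);  /* advance virtual time by 1 ms */
    qtest_quit(s);
}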
softmmu/timers-state.h (new file, 69 lines)
@ -0,0 +1,69 @@
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TIMERS_STATE_H
#define TIMERS_STATE_H

/* timers state, for sharing between icount and cpu-timers */

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /*
     * Protect fields that can be respectively read outside the
     * BQL, and written from multiple threads.
     */
    QemuSeqLock vm_clock_seqlock;
    QemuSpin vm_clock_lock;

    int16_t cpu_ticks_enabled;

    /* Conversion factor from emulated instructions to virtual clock ticks. */
    int16_t icount_time_shift;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;

    int64_t vm_clock_warp_start;
    int64_t cpu_clock_offset;

    /* Only written by TCG thread */
    int64_t qemu_icount;

    /* for adjusting icount */
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;

extern TimersState timers_state;

/*
 * icount needs this internal from cpu-timers when adjusting the icount shift.
 */
int64_t cpu_get_clock_locked(void);

#endif /* TIMERS_STATE_H */
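timers-state.h shares only the data; the locking rules stay with its two users. A plausible read-side sketch for code that consumes timers_state outside the BQL, mirroring the seqlock pattern used by icount.c (the helper name and the atomic64 include are assumptions):

#include "qemu/osdep.h"
#include "qemu/seqlock.h"
#include "qemu/atomic64.h"
#include "timers-state.h"

/* Sketch: lock-free read of a field that the TCG thread may be updating. */
static int64_t read_icount_bias(void)
{
    int64_t bias;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        bias = qatomic_read_i64(&timers_state.qemu_icount_bias);
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return bias;
}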
@ -74,6 +74,7 @@
 #include "hw/audio/soundhw.h"
 #include "audio/audio.h"
 #include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "migration/colo.h"
 #include "migration/postcopy-ram.h"
 #include "sysemu/kvm.h"
@ -2804,7 +2805,7 @@ static void configure_accelerators(const char *progname)
             error_report("falling back to %s", ac->name);
         }
 
-    if (use_icount && !(tcg_enabled() || qtest_enabled())) {
+    if (icount_enabled() && !tcg_enabled()) {
         error_report("-icount is not allowed with hardware virtualization");
         exit(1);
     }
@ -4254,7 +4255,8 @@ void qemu_init(int argc, char **argv, char **envp)
         semihosting_arg_fallback(kernel_filename, kernel_cmdline);
     }
 
-    cpu_ticks_init();
+    /* initialize cpu timers and VCPU throttle modules */
+    cpu_timers_init();
 
     if (default_net) {
         QemuOptsList *net = qemu_find_opts("net");
@ -1,7 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu/timer.h"
-
-void qemu_start_warp_timer(void)
-{
-}
-
@ -1,5 +1,6 @@
 #include "qemu/osdep.h"
-#include "qemu/timer.h"
+#include "sysemu/cpu-timers.h"
+#include "qemu/main-loop.h"
 
 int64_t cpu_get_clock(void)
 {
@ -1,16 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu/timer.h"
-#include "sysemu/cpus.h"
-#include "qemu/main-loop.h"
-
-int use_icount;
-
-int64_t cpu_get_icount(void)
-{
-    abort();
-}
-
-int64_t cpu_get_icount_raw(void)
-{
-    abort();
-}
stubs/icount.c (new file, 45 lines)
@ -0,0 +1,45 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/cpu-timers.h"

/* icount - Instruction Counter API */

int use_icount;

void cpu_update_icount(CPUState *cpu)
{
    abort();
}
void configure_icount(QemuOpts *opts, Error **errp)
{
    /* signal error */
    error_setg(errp, "cannot configure icount, TCG support not available");
}
int64_t cpu_get_icount_raw(void)
{
    abort();
    return 0;
}
int64_t cpu_get_icount(void)
{
    abort();
    return 0;
}
int64_t cpu_icount_to_ns(int64_t icount)
{
    abort();
    return 0;
}
int64_t qemu_icount_round(int64_t count)
{
    abort();
    return 0;
}
void qemu_start_warp_timer(void)
{
    abort();
}
void qemu_account_warp_timer(void)
{
    abort();
}
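These stubs deliberately abort(): they exist only so that non-TCG softmmu binaries link, and callers are expected to check icount_enabled() (or handle the configure_icount() error) before reaching them. A small hedged sketch of the error path a frontend might use (the helper name is invented for illustration):

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/cpu-timers.h"

/* Sketch: on a TCG-less build the stub reports a clean -icount failure. */
static void try_enable_icount(QemuOpts *icount_opts)
{
    Error *err = NULL;

    configure_icount(icount_opts, &err);
    if (err) {
        error_report_err(err);  /* "cannot configure icount, TCG support not available" */
        exit(1);
    }
}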
@ -3,10 +3,10 @@ stub_ss.add(files('bdrv-next-monitor-owned.c'))
 stub_ss.add(files('blk-commit-all.c'))
 stub_ss.add(files('blockdev-close-all-bdrv-states.c'))
 stub_ss.add(files('change-state-handler.c'))
-stub_ss.add(files('clock-warp.c'))
 stub_ss.add(files('cmos.c'))
 stub_ss.add(files('cpu-get-clock.c'))
-stub_ss.add(files('cpu-get-icount.c'))
+stub_ss.add(files('qemu-timer-notify-cb.c'))
+stub_ss.add(files('icount.c'))
 stub_ss.add(files('dump.c'))
 stub_ss.add(files('error-printf.c'))
 stub_ss.add(files('fdset.c'))
@ -1,5 +1,5 @@
 #include "qemu/osdep.h"
-#include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "qemu/main-loop.h"
 
 void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
@ -18,3 +18,8 @@ bool qtest_driver(void)
 {
     return false;
 }
+
+int64_t qtest_get_virtual_clock(void)
+{
+    return 0;
+}
@ -20,6 +20,7 @@
 #include "qemu/osdep.h"
 #include "cpu.h"
 #include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "disas/disas.h"
 #include "qemu/host-utils.h"
 #include "exec/exec-all.h"
@ -1329,7 +1330,7 @@ static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
     case 249: /* VMTIME */
         helper = gen_helper_get_vmtime;
     do_helper:
-        if (use_icount) {
+        if (icount_enabled()) {
             gen_io_start();
             helper(va);
             return DISAS_PC_STALE;
@ -24,6 +24,7 @@
 #include "hw/irq.h"
 #include "hw/semihosting/semihost.h"
 #include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "sysemu/kvm.h"
 #include "sysemu/tcg.h"
 #include "qemu/range.h"
@ -1206,7 +1207,7 @@ static int64_t cycles_ns_per(uint64_t cycles)
 
 static bool instructions_supported(CPUARMState *env)
 {
-    return use_icount == 1 /* Precise instruction counting */;
+    return icount_enabled() == 1; /* Precise instruction counting */
 }
 
 static uint64_t instructions_get_count(CPUARMState *env)
@ -299,7 +299,7 @@ static int write_vstart(CPURISCVState *env, int csrno, target_ulong val)
 static int read_instret(CPURISCVState *env, int csrno, target_ulong *val)
 {
 #if !defined(CONFIG_USER_ONLY)
-    if (use_icount) {
+    if (icount_enabled()) {
         *val = cpu_get_icount();
     } else {
         *val = cpu_get_host_ticks();
@ -314,7 +314,7 @@ static int read_instret(CPURISCVState *env, int csrno, target_ulong *val)
 static int read_instreth(CPURISCVState *env, int csrno, target_ulong *val)
 {
 #if !defined(CONFIG_USER_ONLY)
-    if (use_icount) {
+    if (icount_enabled()) {
         *val = cpu_get_icount() >> 32;
     } else {
         *val = cpu_get_host_ticks() >> 32;
@ -12,6 +12,7 @@
 #include "qemu/main-loop.h"
 #include "sysemu/replay.h"
 #include "migration/vmstate.h"
+#include "sysemu/cpu-timers.h"
 
 #include "ptimer-test.h"
 
@ -30,8 +31,8 @@ QEMUTimerListGroup main_loop_tlg;
 
 int64_t ptimer_test_time_ns;
 
-/* Do not artificially limit period - see hw/core/ptimer.c. */
-int use_icount = 1;
+/* under qtest_enabled(), will not artificially limit period - see hw/core/ptimer.c. */
+int use_icount;
 bool qtest_allowed;
 
 void timer_init_full(QEMUTimer *ts,
@ -11,7 +11,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "sysemu/cpu-timers.h"
 #include "qemu/timed-average.h"
 
 /* This is the clock for QEMU_CLOCK_VIRTUAL */
@ -27,7 +27,7 @@
 #include "qemu/cutils.h"
 #include "qemu/timer.h"
 #include "sysemu/qtest.h"
-#include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
 #include "sysemu/replay.h"
 #include "qemu/main-loop.h"
 #include "block/aio.h"
@ -521,9 +521,13 @@ void main_loop_wait(int nonblocking)
     mlpoll.state = ret < 0 ? MAIN_LOOP_POLL_ERR : MAIN_LOOP_POLL_OK;
     notifier_list_notify(&main_loop_poll_notifiers, &mlpoll);
 
-    /* CPU thread can infinitely wait for event after
-       missing the warp */
-    qemu_start_warp_timer();
+    if (icount_enabled()) {
+        /*
+         * CPU thread can infinitely wait for event after
+         * missing the warp
+         */
+        qemu_start_warp_timer();
+    }
     qemu_clock_run_all_timers();
 }
@ -26,8 +26,10 @@
 #include "qemu/main-loop.h"
 #include "qemu/timer.h"
 #include "qemu/lockable.h"
+#include "sysemu/cpu-timers.h"
 #include "sysemu/replay.h"
 #include "sysemu/cpus.h"
+#include "sysemu/qtest.h"
 
 #ifdef CONFIG_POSIX
 #include <pthread.h>
@ -134,7 +136,7 @@ static void qemu_clock_init(QEMUClockType type, QEMUTimerListNotifyCB *notify_cb
 
 bool qemu_clock_use_for_deadline(QEMUClockType type)
 {
-    return !(use_icount && (type == QEMU_CLOCK_VIRTUAL));
+    return !(icount_enabled() && (type == QEMU_CLOCK_VIRTUAL));
 }
 
 void qemu_clock_notify(QEMUClockType type)
@ -416,7 +418,7 @@ static bool timer_mod_ns_locked(QEMUTimerList *timer_list,
 static void timerlist_rearm(QEMUTimerList *timer_list)
 {
     /* Interrupt execution to force deadline recalculation. */
-    if (timer_list->clock->type == QEMU_CLOCK_VIRTUAL) {
+    if (icount_enabled() && timer_list->clock->type == QEMU_CLOCK_VIRTUAL) {
         qemu_start_warp_timer();
     }
     timerlist_notify(timer_list);
@ -633,8 +635,10 @@ int64_t qemu_clock_get_ns(QEMUClockType type)
         return get_clock();
     default:
     case QEMU_CLOCK_VIRTUAL:
-        if (use_icount) {
+        if (icount_enabled()) {
             return cpu_get_icount();
+        } else if (qtest_enabled()) { /* for qtest_clock_warp */
+            return qtest_get_virtual_clock();
         } else {
             return cpu_get_clock();
         }
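Taken together, the final hunk makes the virtual clock source explicit: icount when TCG drives time, the qtest counter under qtest, the host-derived clock otherwise. Condensed into a hedged standalone sketch (header names assumed):

#include "qemu/osdep.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/qtest.h"

/* Sketch of the selection order now implemented in qemu_clock_get_ns(). */
static int64_t virtual_time_ns(void)
{
    if (icount_enabled()) {
        return cpu_get_icount();          /* instruction-driven time (TCG) */
    } else if (qtest_enabled()) {
        return qtest_get_virtual_clock(); /* counter warped by qtest commands */
    }
    return cpu_get_clock();               /* wall-clock based virtual time */
}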