s/cpu_get_real_ticks/cpu_get_host_ticks/
This should help clarify the purpose of the function that returns the
host system's CPU cycle count.

Signed-off-by: Christopher Covington <cov@codeaurora.org>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
ppc portion:
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
commit 4a7428c5a7
parent ec5fd40264
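The rename is mechanical, but the distinction it makes explicit is worth spelling out: cpu_get_host_ticks() reads the host CPU's cycle counter, while the guest-visible tick count (see the cpus.c hunks below) is maintained as an offset on top of it, so it can be frozen while the VM is stopped. Here is a minimal standalone sketch of that pattern, assuming a GCC/Clang toolchain on an x86-64 host; the names outside cpu_get_host_ticks() (guest_ticks, ticks_offset, ticks_enabled) are illustrative, not QEMU's:

/* Illustrative sketch only -- not QEMU source. */
#include <stdint.h>
#include <stdio.h>

static inline int64_t cpu_get_host_ticks(void)
{
#if defined(__x86_64__)
    /* Read the host TSC, as the x86-64 branch in timer.h does. */
    uint32_t low, high;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    return ((int64_t)high << 32) | low;
#else
    /* Fallback: a monotonically increasing dummy value. */
    static int64_t ticks;
    return ticks++;
#endif
}

static int64_t ticks_offset;   /* guest ticks accumulated while stopped */
static int ticks_enabled;      /* are guest ticks currently running?   */

/* Guest view, mirroring cpu_get_ticks() in the cpus.c hunks below:
   while enabled, host ticks flow through; while disabled, the guest
   counter appears frozen at ticks_offset. */
static int64_t guest_ticks(void)
{
    int64_t t = ticks_offset;
    if (ticks_enabled) {
        t += cpu_get_host_ticks();
    }
    return t;
}

int main(void)
{
    ticks_enabled = 1;
    printf("guest ticks: %lld\n", (long long)guest_ticks());
    return 0;
}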
@@ -108,7 +108,7 @@ void cpu_list_unlock(void)
 
 uint64_t cpu_get_tsc(CPUX86State *env)
 {
-    return cpu_get_real_ticks();
+    return cpu_get_host_ticks();
 }
 
 static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
cpus.c (6 changed lines)
@@ -199,7 +199,7 @@ int64_t cpu_get_ticks(void)
 
     ticks = timers_state.cpu_ticks_offset;
     if (timers_state.cpu_ticks_enabled) {
-        ticks += cpu_get_real_ticks();
+        ticks += cpu_get_host_ticks();
     }
 
     if (timers_state.cpu_ticks_prev > ticks) {
@@ -247,7 +247,7 @@ void cpu_enable_ticks(void)
     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (!timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
+        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
         timers_state.cpu_clock_offset -= get_clock();
         timers_state.cpu_ticks_enabled = 1;
     }
@@ -263,7 +263,7 @@ void cpu_disable_ticks(void)
     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
+        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock_locked();
         timers_state.cpu_ticks_enabled = 0;
     }
@@ -848,7 +848,7 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     uint32_t xirr = icp_accept(ss);
 
     args[0] = xirr;
-    args[1] = cpu_get_real_ticks();
+    args[1] = cpu_get_host_ticks();
     return H_SUCCESS;
 }
 
@@ -834,7 +834,7 @@ static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
 static void timebase_pre_save(void *opaque)
 {
     PPCTimebase *tb = opaque;
-    uint64_t ticks = cpu_get_real_ticks();
+    uint64_t ticks = cpu_get_host_ticks();
     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
 
     if (!first_ppc_cpu->env.tb_env) {
@@ -878,7 +878,7 @@ static int timebase_post_load(void *opaque, int version_id)
                                      NANOSECONDS_PER_SECOND);
     guest_tb = tb_remote->guest_timebase + MIN(0, migration_duration_tb);
 
-    tb_off_adj = guest_tb - cpu_get_real_ticks();
+    tb_off_adj = guest_tb - cpu_get_host_ticks();
 
     tb_off = first_ppc_cpu->env.tb_env->tb_offset;
     trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
@@ -857,7 +857,7 @@ int64_t cpu_icount_to_ns(int64_t icount);
 
 #if defined(_ARCH_PPC)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int64_t retval;
 #ifdef _ARCH_PPC64
@@ -883,7 +883,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__i386__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int64_t val;
     asm volatile ("rdtsc" : "=A" (val));
@@ -892,7 +892,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__x86_64__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     uint32_t low,high;
     int64_t val;
@@ -905,7 +905,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__hppa__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int val;
     asm volatile ("mfctl %%cr16, %0" : "=r"(val));
@@ -914,7 +914,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__ia64)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int64_t val;
     asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
@@ -923,7 +923,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__s390__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int64_t val;
     asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
@@ -932,7 +932,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__sparc__)
 
-static inline int64_t cpu_get_real_ticks (void)
+static inline int64_t cpu_get_host_ticks (void)
 {
 #if defined(_LP64)
     uint64_t rval;
@@ -970,7 +970,7 @@ static inline int64_t cpu_get_real_ticks (void)
                               : "=r" (value));          \
     }
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
     uint32_t count;
@@ -986,7 +986,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__alpha__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     uint64_t cc;
     uint32_t cur, ofs;
@@ -1001,7 +1001,7 @@ static inline int64_t cpu_get_real_ticks(void)
 /* The host CPU doesn't have an easily accessible cycle counter.
    Just return a monotonically increasing value.  This will be
    totally wrong, but hopefully better than nothing. */
-static inline int64_t cpu_get_real_ticks (void)
+static inline int64_t cpu_get_host_ticks (void)
 {
     static int64_t ticks = 0;
     return ticks++;
@@ -215,7 +215,7 @@ void cpu_list_unlock(void)
 
 uint64_t cpu_get_tsc(CPUX86State *env)
 {
-    return cpu_get_real_ticks();
+    return cpu_get_host_ticks();
 }
 
 static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
@@ -1425,7 +1425,7 @@ void cpu_loop (CPUSPARCState *env)
 #ifdef TARGET_PPC
 static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
 {
-    return cpu_get_real_ticks();
+    return cpu_get_host_ticks();
 }
 
 uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
@@ -34,7 +34,7 @@ uint64_t helper_load_pcc(CPUAlphaState *env)
 #else
     /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.  Just pass through the host cpu
        clock ticks.  Also, don't bother taking PCC_OFS into account.  */
-    return (uint32_t)cpu_get_real_ticks();
+    return (uint32_t)cpu_get_host_ticks();
 #endif
 }
 