c29b070418
The PMU raises a performance monitor exception (causing an interrupt
when MSR[EE]=1) when MMCR0[PMAO] is set, and lowers it when clear.
Wire this up and implement the interrupt delivery for books. Linux
perf record can now collect PMI-driven samples.

fire_PMC_interrupt is renamed to perfm_alert, which matches the new
terminology used in the ISA more closely and distinguishes the alert
condition (e.g., counter overflow) from the PERFM (or EBB) interrupts.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20230530134313.387252-2-npiggin@gmail.com>
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
/*
 * PMU emulation helpers for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/ppc/ppc.h"
#include "power8-pmu.h"

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
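
/*
 * PMC1 has its own overflow condition enable bit, MMCR0[PMC1CE];
 * PMC2-PMC6 share MMCR0[PMCjCE].
 */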
static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
    if (sprn == SPR_POWER_PMC1) {
        return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
    }

    return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}

/*
 * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt.
 * hflags must subsequently be updated.
 */
static void pmu_update_summaries(CPUPPCState *env)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
    int ins_cnt = 0;
    int cyc_cnt = 0;

    if (mmcr0 & MMCR0_FC) {
        goto out;
    }

    if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
        target_ulong sel;

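        /*
         * Event selectors: 0x02 counts completed instructions and
         * 0x1e counts cycles on any PMC; 0xf0 (cycles) and 0xfe
         * (instructions) are the PowerISA v3.1 architected PMC1-only
         * encodings, and 0xfa on PMC4 counts instructions completed
         * with the run latch set.
         */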
        sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
        switch (sel) {
        case 0x02:
        case 0xfe:
            ins_cnt |= 1 << 1;
            break;
        case 0x1e:
        case 0xf0:
            cyc_cnt |= 1 << 1;
            break;
        }

        sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 2;
        cyc_cnt |= (sel == 0x1e) << 2;

        sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 3;
        cyc_cnt |= (sel == 0x1e) << 3;

        sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
        cyc_cnt |= (sel == 0x1e) << 4;
    }

    ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
    cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;

 out:
    env->pmc_ins_cnt = ins_cnt;
    env->pmc_cyc_cnt = cyc_cnt;
}

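/*
 * Called after any store that changes MMCR0 or MMCR1. Recomputes the
 * counting summaries and hflags, and mirrors MMCR0[PMAO] onto the
 * PERFM interrupt line, so a pending alert raises a performance
 * monitor interrupt once MSR[EE] is set.
 */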
void pmu_mmcr01_updated(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    pmu_update_summaries(env);
    hreg_update_pmu_hflags(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAO) {
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    } else {
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 0);
    }

    /*
     * Should this update overflow timers (if mmcr0 is updated) so they
     * get set in cpu_post_load?
     */
}

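/*
 * Add num_insns to every PMC that is counting instructions, per
 * env->pmc_ins_cnt. A PMC becomes "negative" when it reaches
 * PMC_COUNTER_NEGATIVE_VAL (i.e. bit 31 set); if the matching
 * overflow condition enable bit is set in MMCR0, the counter is
 * clamped at that value and the caller is told to raise a
 * performance monitor alert.
 */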
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    unsigned ins_cnt = env->pmc_ins_cnt;
    bool overflow_triggered = false;
    target_ulong tmp;

    if (ins_cnt & (1 << 1)) {
        tmp = env->spr[SPR_POWER_PMC1];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC1] = tmp;
    }

    if (ins_cnt & (1 << 2)) {
        tmp = env->spr[SPR_POWER_PMC2];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC2] = tmp;
    }

    if (ins_cnt & (1 << 3)) {
        tmp = env->spr[SPR_POWER_PMC3];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC3] = tmp;
    }

    if (ins_cnt & (1 << 4)) {
        target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
        int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
            tmp = env->spr[SPR_POWER_PMC4];
            tmp += num_insns;
            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
                tmp = PMC_COUNTER_NEGATIVE_VAL;
                overflow_triggered = true;
            }
            env->spr[SPR_POWER_PMC4] = tmp;
        }
    }

    if (ins_cnt & (1 << 5)) {
        tmp = env->spr[SPR_POWER_PMC5];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC5] = tmp;
    }

    return overflow_triggered;
}

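/*
 * Account elapsed virtual time to every PMC that is counting cycles,
 * per env->pmc_cyc_cnt, then reset the accounting base time.
 */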
static void pmu_update_cycles(CPUPPCState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t time_delta = now - env->pmu_base_time;
    int sprn, cyc_cnt = env->pmc_cyc_cnt;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
            /*
             * The pseries and powernv clock runs at 1 GHz, meaning
             * that 1 nanosecond equals 1 cycle.
             */
            env->spr[sprn] += time_delta;
        }
    }

    /* Update base_time for future calculations */
    env->pmu_base_time = now;
}

/*
 * Helper function to retrieve the cycle overflow timer of the
 * 'sprn' counter.
 */
static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
{
    return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
}

static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
    int64_t timeout;

    /*
     * PMC5 does not have an overflow timer and this pointer
     * will be NULL.
     */
    if (!pmc_overflow_timer) {
        return;
    }

    if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
        !pmc_has_overflow_enabled(env, sprn)) {
        /* Overflow timer is not needed for this counter */
        timer_del(pmc_overflow_timer);
        return;
    }

    if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
        timeout = 0;
    } else {
        timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
    }

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
}

static void pmu_update_overflow_timers(CPUPPCState *env)
{
    int sprn;

    /*
     * Scroll through all PMCs and start counter overflow timers for
     * PM_CYC events, if needed.
     */
    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_update_overflow_timer(env, sprn);
    }
}

static void pmu_delete_timers(CPUPPCState *env)
{
    QEMUTimer *pmc_overflow_timer;
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);

        if (pmc_overflow_timer) {
            timer_del(pmc_overflow_timer);
        }
    }
}

void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR0] = value;

    pmu_mmcr01_updated(env);

    /* Update cycle overflow timers with the current MMCR0 state */
    pmu_update_overflow_timers(env);
}

void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR1] = value;

    pmu_mmcr01_updated(env);
}

target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
{
    pmu_update_cycles(env);

    return env->spr[sprn];
}

void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[sprn] = (uint32_t)value;

    pmc_update_overflow_timer(env, sprn);
}

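/*
 * Handle a performance monitor alert (a counter has gone, or been
 * driven, negative). Freeze the counters if MMCR0[FCECE] is set,
 * convert an enabled alert (MMCR0[PMAE]) into the alert-occurred
 * condition (MMCR0[PMAO]) and raise the PERFM interrupt, then give
 * event-based branch delivery a chance to fire.
 */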
static void perfm_alert(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    pmu_update_cycles(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
        env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;

        /* Changing MMCR0_FC requires summaries and hflags update */
        pmu_mmcr01_updated(env);

        /*
         * Delete all pending timers if we need to freeze
         * the PMC. We'll restart them when the PMC starts
         * running again.
         */
        pmu_delete_timers(env);
    }

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
        /* These MMCR0 bits do not require summaries or hflags update. */
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    }

    raise_ebb_perfm_exception(env);
}

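/*
 * Called from TCG-generated code when PMC5, the instruction counter,
 * crosses into the negative range: clamp it and raise an alert.
 */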
void helper_handle_pmc5_overflow(CPUPPCState *env)
{
    env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
    perfm_alert(env_archcpu(env));
}

/* This helper assumes that the PMC is running. */
void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
{
    bool overflow_triggered;

    overflow_triggered = pmu_increment_insns(env, num_insns);
    if (overflow_triggered) {
        perfm_alert(env_archcpu(env));
    }
}

static void cpu_ppc_pmu_timer_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    perfm_alert(cpu);
}

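/*
 * Create the cycle overflow timers. PMC5 counts instructions, not
 * cycles, so it gets no timer and get_cyc_overflow_timer() returns
 * NULL for it.
 */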
void cpu_ppc_pmu_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int i, sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (sprn == SPR_POWER_PMC5) {
            continue;
        }

        i = sprn - SPR_POWER_PMC1;

        env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                       &cpu_ppc_pmu_timer_cb,
                                                       cpu);
    }
}

#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */