/*
 * PMU emulation helpers for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/ppc/ppc.h"
#include "power8-pmu.h"

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)

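/*
 * PMC1 has its own overflow condition enable bit (MMCR0_PMC1CE);
 * PMC2-6 share MMCR0_PMCjCE, following the PowerISA split between
 * PMC1 and the remaining counters.
 */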
static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
    if (sprn == SPR_POWER_PMC1) {
        return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
    }

    return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}

/*
 * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt.
 * hflags must subsequently be updated.
 */
static void pmu_update_summaries(CPUPPCState *env)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
    int ins_cnt = 0;
    int cyc_cnt = 0;

    if (mmcr0 & MMCR0_FC) {
        goto out;
    }

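    /*
     * Event 0x02 is the implementation-dependent "instructions
     * completed" event and 0x1E the implementation-dependent "cycles"
     * event used by the Linux perf driver on IBM POWER chips. For
     * PMC1, 0xF0 is the architected PowerISA cycles event and 0xFE
     * the architected instructions event.
     */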
    if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
        target_ulong sel;

        sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
        switch (sel) {
        case 0x02:
        case 0xfe:
            ins_cnt |= 1 << 1;
            break;
        case 0x1e:
        case 0xf0:
            cyc_cnt |= 1 << 1;
            break;
        }

        sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 2;
        cyc_cnt |= (sel == 0x1e) << 2;

        sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 3;
        cyc_cnt |= (sel == 0x1e) << 3;

        sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= ((sel == 0xfa) || (sel == 0x02)) << 4;
        cyc_cnt |= (sel == 0x1e) << 4;
    }

    ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
    cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;

 out:
    env->pmc_ins_cnt = ins_cnt;
    env->pmc_cyc_cnt = cyc_cnt;
}

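/*
 * Derive the BHRB filter from MMCRA[IFM]. Recording is disabled
 * entirely while MMCR0_PMAE is clear.
 */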
static void hreg_bhrb_filter_update(CPUPPCState *env)
{
    target_long ifm;

    if (!(env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE)) {
        /* disable recording to BHRB */
        env->bhrb_filter = BHRB_TYPE_NORECORD;
        return;
    }

    ifm = (env->spr[SPR_POWER_MMCRA] & MMCRA_IFM_MASK) >> MMCRA_IFM_SHIFT;
    switch (ifm) {
    case 0:
        /* record all branches */
        env->bhrb_filter = -1;
        break;
    case 1:
        /* only record calls (LK = 1) */
        env->bhrb_filter = BHRB_TYPE_CALL;
        break;
    case 2:
        /* only record indirect branches */
        env->bhrb_filter = BHRB_TYPE_INDIRECT;
        break;
    case 3:
        /* only record conditional branches */
        env->bhrb_filter = BHRB_TYPE_COND;
        break;
    }
}

void pmu_mmcr01a_updated(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    pmu_update_summaries(env);
    hreg_update_pmu_hflags(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAO) {
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    } else {
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 0);
    }

    hreg_bhrb_filter_update(env);

    /*
     * Should this update overflow timers (if mmcr0 is updated) so they
     * get set in cpu_post_load?
     */
}

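/*
 * Add num_insns to every PMC that is currently counting instructions,
 * as summarized in pmc_ins_cnt. A counter that reaches
 * PMC_COUNTER_NEGATIVE_VAL with its overflow condition enabled is
 * saturated at that value. Returns true if any counter overflowed.
 */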
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    unsigned ins_cnt = env->pmc_ins_cnt;
    bool overflow_triggered = false;
    target_ulong tmp;

    if (ins_cnt & (1 << 1)) {
        tmp = env->spr[SPR_POWER_PMC1];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC1] = tmp;
    }

    if (ins_cnt & (1 << 2)) {
        tmp = env->spr[SPR_POWER_PMC2];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC2] = tmp;
    }

    if (ins_cnt & (1 << 3)) {
        tmp = env->spr[SPR_POWER_PMC3];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC3] = tmp;
    }

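    /*
     * PMC4 counts all instructions when programmed with event 0x02.
     * With the run-latch qualified event (0xfa in pmu_update_summaries)
     * it only counts while CTRL[RUN] is set.
     */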
    if (ins_cnt & (1 << 4)) {
        target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
        int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
            tmp = env->spr[SPR_POWER_PMC4];
            tmp += num_insns;
            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
                tmp = PMC_COUNTER_NEGATIVE_VAL;
                overflow_triggered = true;
            }
            env->spr[SPR_POWER_PMC4] = tmp;
        }
    }

    if (ins_cnt & (1 << 5)) {
        tmp = env->spr[SPR_POWER_PMC5];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC5] = tmp;
    }

    return overflow_triggered;
}

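/*
 * Bring every cycle-counting PMC up to date. Called before any MMCR0,
 * MMCR1 or PMC read/write so that counter values are current, per the
 * summary bits in pmc_cyc_cnt.
 */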
static void pmu_update_cycles(CPUPPCState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t time_delta = now - env->pmu_base_time;
    int sprn, cyc_cnt = env->pmc_cyc_cnt;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
            /*
             * The pseries and powernv clock runs at 1GHz, meaning
             * that 1 nanosecond equals 1 cycle.
             */
            env->spr[sprn] += time_delta;
        }
    }

    /* Update base_time for future calculations */
    env->pmu_base_time = now;
}

/*
 * Helper function to retrieve the cycle overflow timer of the
 * 'sprn' counter.
 */
static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
{
    return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
}

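/*
 * Arm or disarm the cycle overflow timer for 'sprn'. The timeout is
 * the number of cycles (i.e. nanoseconds) left until the counter sets
 * its most significant bit and becomes "negative".
 */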
static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
    int64_t timeout;

    /*
     * PMC5 does not have an overflow timer and this pointer
     * will be NULL.
     */
    if (!pmc_overflow_timer) {
        return;
    }

    if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
        !pmc_has_overflow_enabled(env, sprn)) {
        /* Overflow timer is not needed for this counter */
        timer_del(pmc_overflow_timer);
        return;
    }

    if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
        timeout = 0;
    } else {
        timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
    }

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
}

static void pmu_update_overflow_timers(CPUPPCState *env)
{
    int sprn;

    /*
     * Walk through all PMCs and start counter overflow timers for
     * PM_CYC events, if needed.
     */
    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_update_overflow_timer(env, sprn);
    }
}

static void pmu_delete_timers(CPUPPCState *env)
{
    QEMUTimer *pmc_overflow_timer;
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);

        if (pmc_overflow_timer) {
            timer_del(pmc_overflow_timer);
        }
    }
}

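/*
 * An MMCR0 write first snapshots cycles under the old configuration,
 * then installs the new value, refreshes summaries/hflags/interrupt
 * state, and finally re-arms the cycle overflow timers.
 */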
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR0] = value;

    pmu_mmcr01a_updated(env);

    /* Update cycle overflow timers with the current MMCR0 state */
    pmu_update_overflow_timers(env);
}

void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR1] = value;

    pmu_mmcr01a_updated(env);
}

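/*
 * In this model, MMCRA does not affect what the PMCs count, so no
 * cycle snapshot is needed here; the write only needs to refresh the
 * BHRB filter via pmu_mmcr01a_updated().
 */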
void helper_store_mmcrA(CPUPPCState *env, uint64_t value)
{
    env->spr[SPR_POWER_MMCRA] = value;

    pmu_mmcr01a_updated(env);
}

target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
{
    pmu_update_cycles(env);

    return env->spr[sprn];
}

void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
{
    pmu_update_cycles(env);

    /* PMCs are 32-bit SPRs: the upper 32 bits of the value are discarded */
    env->spr[sprn] = (uint32_t)value;

    pmc_update_overflow_timer(env, sprn);
}

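/*
 * Raise a Performance Monitor alert: freeze the counters if FCECE is
 * set, convert a pending PMAE into PMAO (raising the PERFM interrupt),
 * and fire an EBB if the guest enabled one.
 */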
static void perfm_alert(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    pmu_update_cycles(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
        env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;

        /* Changing MMCR0_FC requires summaries and hflags update */
        pmu_mmcr01a_updated(env);

        /*
         * Delete all pending timers if we need to freeze
         * the PMC. We'll restart them when the PMC starts
         * running again.
         */
        pmu_delete_timers(env);
    }

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
        /* These MMCR0 bits do not require summaries or hflags update. */
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    }

    raise_ebb_perfm_exception(env);
}

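/*
 * PMC5 (instructions completed) is incremented inline by translated
 * code; TCG calls this helper when that inline increment overflows.
 */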
void helper_handle_pmc5_overflow(CPUPPCState *env)
{
    env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
    perfm_alert(env_archcpu(env));
}

/* This helper assumes that the PMC is running. */
void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
{
    bool overflow_triggered;

    overflow_triggered = pmu_increment_insns(env, num_insns);
    if (overflow_triggered) {
        perfm_alert(env_archcpu(env));
    }
}

static void cpu_ppc_pmu_timer_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    perfm_alert(cpu);
}

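/*
 * Create one cycle overflow timer per PMC, except for PMC5, which only
 * counts instructions and therefore never needs one.
 */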
void cpu_ppc_pmu_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int i, sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (sprn == SPR_POWER_PMC5) {
            continue;
        }

        i = sprn - SPR_POWER_PMC1;

        env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                       &cpu_ppc_pmu_timer_cb,
                                                       cpu);
    }
}

#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */