/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"

#include "trace.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif

/* #define DEBUG_SOFTWARE_TLB */

/*****************************************************************************/
/* Exception processing */
#if !defined(CONFIG_USER_ONLY)

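/* Return a human-readable name for an exception number, for logging. */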
static const char *powerpc_excp_name(int excp)
{
    switch (excp) {
    case POWERPC_EXCP_CRITICAL: return "CRITICAL";
    case POWERPC_EXCP_MCHECK: return "MCHECK";
    case POWERPC_EXCP_DSI: return "DSI";
    case POWERPC_EXCP_ISI: return "ISI";
    case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
    case POWERPC_EXCP_ALIGN: return "ALIGN";
    case POWERPC_EXCP_PROGRAM: return "PROGRAM";
    case POWERPC_EXCP_FPU: return "FPU";
    case POWERPC_EXCP_SYSCALL: return "SYSCALL";
    case POWERPC_EXCP_APU: return "APU";
    case POWERPC_EXCP_DECR: return "DECR";
    case POWERPC_EXCP_FIT: return "FIT";
    case POWERPC_EXCP_WDT: return "WDT";
    case POWERPC_EXCP_DTLB: return "DTLB";
    case POWERPC_EXCP_ITLB: return "ITLB";
    case POWERPC_EXCP_DEBUG: return "DEBUG";
    case POWERPC_EXCP_SPEU: return "SPEU";
    case POWERPC_EXCP_EFPDI: return "EFPDI";
    case POWERPC_EXCP_EFPRI: return "EFPRI";
    case POWERPC_EXCP_EPERFM: return "EPERFM";
    case POWERPC_EXCP_DOORI: return "DOORI";
    case POWERPC_EXCP_DOORCI: return "DOORCI";
    case POWERPC_EXCP_GDOORI: return "GDOORI";
    case POWERPC_EXCP_GDOORCI: return "GDOORCI";
    case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
    case POWERPC_EXCP_RESET: return "RESET";
    case POWERPC_EXCP_DSEG: return "DSEG";
    case POWERPC_EXCP_ISEG: return "ISEG";
    case POWERPC_EXCP_HDECR: return "HDECR";
    case POWERPC_EXCP_TRACE: return "TRACE";
    case POWERPC_EXCP_HDSI: return "HDSI";
    case POWERPC_EXCP_HISI: return "HISI";
    case POWERPC_EXCP_HDSEG: return "HDSEG";
    case POWERPC_EXCP_HISEG: return "HISEG";
    case POWERPC_EXCP_VPU: return "VPU";
    case POWERPC_EXCP_PIT: return "PIT";
    case POWERPC_EXCP_IO: return "IO";
    case POWERPC_EXCP_RUNM: return "RUNM";
    case POWERPC_EXCP_EMUL: return "EMUL";
    case POWERPC_EXCP_IFTLB: return "IFTLB";
    case POWERPC_EXCP_DLTLB: return "DLTLB";
    case POWERPC_EXCP_DSTLB: return "DSTLB";
    case POWERPC_EXCP_FPA: return "FPA";
    case POWERPC_EXCP_DABR: return "DABR";
    case POWERPC_EXCP_IABR: return "IABR";
    case POWERPC_EXCP_SMI: return "SMI";
    case POWERPC_EXCP_PERFM: return "PERFM";
    case POWERPC_EXCP_THERM: return "THERM";
    case POWERPC_EXCP_VPUA: return "VPUA";
    case POWERPC_EXCP_SOFTP: return "SOFTP";
    case POWERPC_EXCP_MAINT: return "MAINT";
    case POWERPC_EXCP_MEXTBR: return "MEXTBR";
    case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
    case POWERPC_EXCP_ITLBE: return "ITLBE";
    case POWERPC_EXCP_DTLBE: return "DTLBE";
    case POWERPC_EXCP_VSXU: return "VSXU";
    case POWERPC_EXCP_FU: return "FU";
    case POWERPC_EXCP_HV_EMU: return "HV_EMU";
    case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
    case POWERPC_EXCP_HV_FU: return "HV_FU";
    case POWERPC_EXCP_SDOOR: return "SDOOR";
    case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
    case POWERPC_EXCP_HVIRT: return "HVIRT";
    case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
    default:
        g_assert_not_reached();
    }
}

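/*
 * Log the registers that carry the system call (or, below, hypercall)
 * number and arguments when CPU_LOG_INT logging is enabled.
 */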
static void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

static void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}

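/*
 * When the CPU comes out of a power-saving state, the wakeup event is
 * replayed as a system reset (0x100): the wakeup reason is encoded into
 * the SRR1 bits returned through *msr. Machine checks are the exception
 * and are delivered normally.
 */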
static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= SRR1_WS_NOLOSS;

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= SRR1_WAKERESET;
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= SRR1_WAKEEE;
        break;
    case POWERPC_EXCP_DECR:
        *msr |= SRR1_WAKEDEC;
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= SRR1_WAKEDBELL;
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= SRR1_WAKEHDBELL;
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= SRR1_WAKEHMI;
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= SRR1_WAKEHVI;
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

/*
 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
 * taken with the MMU on, and which uses an alternate location (e.g., so the
 * kernel/hv can map the vectors there with an effective address).
 *
 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they
 * are delivered in this way. AIL requires the LPCR to be set to enable this
 * mode, and then a number of conditions have to be true for AIL to apply.
 *
 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
 * they specifically want to be in real mode (e.g., the MCE might be signaling
 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
 *
 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
 * radix mode (LPCR[HR]).
 *
 * POWER8, POWER9 with LPCR[HR]=0
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | 0       | 1           | 0   |
 * | a         | 11          | 1       | 1           | a   |
 * | a         | 11          | 0       | 0           | a   |
 * +-------------------------------------------------------+
 *
 * POWER9 with LPCR[HR]=1
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | x       | x           | a   |
 * +-------------------------------------------------------+
 *
 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
 * the hypervisor in AIL mode if the guest is radix. This is good for
 * performance but allows the guest to influence the AIL of hypervisor
 * interrupts using its MSR, and also the hypervisor must disallow guest
 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
 * use AIL for its MSR[HV] 0->1 interrupts.
 *
 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
 * MSR[HV] 1->1).
 *
 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
 *
 * POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * | a         | h          | 00/01/10    | 0       | 0           | 0   |
 * | a         | h          | 11          | 0       | 0           | a   |
 * | a         | h          | x           | 0       | 1           | h   |
 * | a         | h          | 00/01/10    | 1       | 1           | 0   |
 * | a         | h          | 11          | 1       | 1           | h   |
 * +--------------------------------------------------------------------+
 */
static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
                               target_ulong msr,
                               target_ulong *new_msr,
                               target_ulong *vector)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }

    } else if (excp_model == POWERPC_EXCP_POWER10) {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    } else {
        /* Other processors do not support AIL */
        return;
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
        }
    }
#endif
}

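/* Install the new program counter and MSR, and reset the exception state. */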
static void powerpc_set_excp_state(PowerPCCPU *cpu,
                                   target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as already have treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static void powerpc_excp(PowerPCCPU *cpu, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int excp_model = env->excp_model;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, lev = -1;

    if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
    }

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
                  excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overriden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 server or later. We also don't want to generate it if
     * we don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

    ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

#ifdef TARGET_PPC64
    /*
     * SPEU and VPU share the same IVOR but they exist in different
     * processors. SPEU is e500v1/2 only and VPU is e6500 only.
     */
    if (excp_model == POWERPC_EXCP_BOOKE && excp == POWERPC_EXCP_VPU) {
        excp = POWERPC_EXCP_SPEU;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

    switch (excp) {
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;

            env->spr[SPR_BOOKE_CSRR0] = env->nip;
            env->spr[SPR_BOOKE_CSRR1] = msr;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:    /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:    /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:    /* External input */
    {
        bool lpes0;

        cs = CPU(cpu);

        /*
         * Exception targeting modifiers
         *
         * LPES0 is supported on POWER7/8/9
         * LPES1 is not supported (old iSeries mode)
         *
         * On anything else, we behave as if LPES0 is 1
         * (externals don't alter MSR:HV)
         */
#if defined(TARGET_PPC64)
        if (excp_model == POWERPC_EXCP_POWER7 ||
            excp_model == POWERPC_EXCP_POWER8 ||
            excp_model == POWERPC_EXCP_POWER9 ||
            excp_model == POWERPC_EXCP_POWER10) {
            lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        } else
#endif /* defined(TARGET_PPC64) */
        {
            lpes0 = true;
        }

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    }
    case POWERPC_EXCP_ALIGN:    /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:    /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                trace_ppc_excp_fp_ignore();
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:    /* System call exception */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED:    /* scv exception */
        lev = env->error_code;
        dump_syscall(env);
        env->nip += 4;
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);

        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
        break;
    case POWERPC_EXCP_FPU:    /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:    /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:    /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:    /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:    /* Watchdog timer interrupt */
        trace_ppc_excp_print("WDT");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:    /* Data TLB error */
    case POWERPC_EXCP_ITLB:    /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:    /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;

            env->spr[SPR_BOOKE_CSRR0] = env->nip;
            env->spr[SPR_BOOKE_CSRR1] = msr;

            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:    /* SPE/embedded floating-point unavailable/VPU */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:    /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:    /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:    /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:    /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_DSEG:    /* Data segment exception */
    case POWERPC_EXCP_ISEG:    /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:    /* Trace exception */
        break;
    case POWERPC_EXCP_HISI:    /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:    /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:    /* Hypervisor data storage exception */
    case POWERPC_EXCP_HDSEG:    /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:    /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:    /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:    /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:    /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:    /* VSX unavailable exception */
    case POWERPC_EXCP_FU:    /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:    /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:    /* Programmable interval timer interrupt */
        trace_ppc_excp_print("PIT");
        break;
    case POWERPC_EXCP_IO:    /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:    /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:    /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:    /* Instruction fetch TLB error */
    case POWERPC_EXCP_DLTLB:    /* Data load TLB miss */
    case POWERPC_EXCP_DSTLB:    /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_G2:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            /* fall through */
        case POWERPC_EXCP_7x5:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        default:
            cpu_abort(cs, "Invalid TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:    /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:    /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:    /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:    /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:    /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:    /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:    /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:    /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:    /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9 ||
               excp_model == POWERPC_EXCP_POWER10) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* Save PC */
        env->spr[srr0] = env->nip;

        /* Save MSR */
        env->spr[srr1] = msr;
    }

    /* This can update new_msr and vector if AIL applies */
    ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);

    powerpc_set_excp_state(cpu, vector, new_msr);
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    powerpc_excp(cpu, cs->exception_index);
}

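/*
 * Check the pending hardware interrupts in priority order and deliver
 * the first one that the current machine state allows.
 */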
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}

void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    powerpc_excp(cpu, POWERPC_EXCP_RESET);
}

void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
|
|
|
|
{
|
|
|
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
|
|
|
CPUPPCState *env = &cpu->env;
|
|
|
|
target_ulong msr = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
|
|
|
|
* been set by KVM.
|
|
|
|
*/
|
|
|
|
msr = (1ULL << MSR_ME);
|
|
|
|
msr |= env->msr & (1ULL << MSR_SF);
|
2021-06-22 17:09:25 +03:00
|
|
|
if (ppc_interrupts_little_endian(cpu)) {
|
2020-03-16 17:26:09 +03:00
|
|
|
msr |= (1ULL << MSR_LE);
|
|
|
|
}
|
|
|
|
|
|
|
|
powerpc_set_excp_state(cpu, vector, msr);
|
|
|
|
}
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

/*****************************************************************************/
/* Exceptions processing helpers */
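
/*
 * Raise an exception from helper code: record the exception number and
 * error code, then longjmp back to the main loop, unwinding guest state
 * from the host return address when raddr is non-zero.
 */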
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

#ifdef CONFIG_TCG
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
#endif

#if !defined(CONFIG_USER_ONLY)
#ifdef CONFIG_TCG
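/*
 * mtmsr helper: apply the new MSR value; if hreg_store_msr() requests an
 * exception as a result, leave the current TB and raise it.
 */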
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
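/*
 * scv (System Call Vectored): only permitted when FSCR[SCV] is set,
 * otherwise a Facility Unavailable interrupt is raised instead.
 */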
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}
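
/*
 * Power management instruction helper: halt the CPU and record whether
 * the eventual wakeup must be taken as a System Reset (0x100) interrupt,
 * which for stop depends on PSSCR[EC].
 */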
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
#endif /* CONFIG_TCG */
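
/*
 * Common return-from-interrupt tail: install the new NIP and MSR, leave
 * the current TB, drop any outstanding reservation and flush the TCG TLB
 * if a flush was deferred.
 */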
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit? Crop the nip. */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

#ifdef CONFIG_TCG
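/*
 * rfi: return from interrupt using SRR0/SRR1; on this path the upper
 * 32 bits of SRR1 are masked off before being written back to the MSR.
 */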
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change, but in practice we handle this in hreg_store_msr(),
     * which will be called by do_rfi(), so there is no need to filter
     * the value here.
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
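/*
 * rfebb: return from an event-based branch. NIP is restored from EBBRR
 * and BESCR[GE] is set or cleared according to the S operand.
 */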
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
    target_ulong msr = env->msr;

    /*
     * Handling of BESCR bits 32:33 according to PowerISA v3.1:
     *
     * "If BESCR 32:33 != 0b00 the instruction is treated as if
     *  the instruction form were invalid."
     */
    if (env->spr[SPR_BESCR] & BESCR_INVALID) {
        raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }

    env->nip = env->spr[SPR_EBBRR];

    /* Switching to 32-bit? Crop the nip. */
    if (!msr_is_64bit(env, msr)) {
        env->nip = (uint32_t)env->spr[SPR_EBBRR];
    }

    if (s) {
        env->spr[SPR_BESCR] |= BESCR_GE;
    } else {
        env->spr[SPR_BESCR] &= ~BESCR_GE;
    }
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
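/*
 * Return-from-interrupt variants for 40x and BookE: critical, debug and
 * machine-check returns each restore NIP/MSR from their own register pair.
 */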
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif /* CONFIG_TCG */
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef CONFIG_TCG
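/*
 * tw/twi trap helper: 'flags' carries the TO field of the instruction.
 * Bits 0x10/0x08/0x04 select signed less-than/greater-than/equal and
 * 0x02/0x01 select unsigned less-than/greater-than; trap if any selected
 * condition holds. For example, TO = 0x04 traps when the two operands are
 * equal (the 'tweq' extended mnemonic).
 */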
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif
#endif

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

#ifdef CONFIG_TCG
void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
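/*
 * Map an embedded doorbell message type (encoded in rB) to the interrupt
 * bit to set, or -1 if the message type is not handled here.
 */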
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}
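
/*
 * msgsnd: raise the decoded doorbell interrupt on every CPU whose
 * SPR_BOOKE_PIR matches the tag in rB, or on all CPUs for a broadcast.
 */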
void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}
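
/*
 * Set the given doorbell interrupt on every CPU whose PIR matches 'pir'
 * and kick it with CPU_INTERRUPT_HARD.
 */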
static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}

#if defined(TARGET_PPC64)
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * Send a message to other threads that are on the same
 * multi-threaded processor.
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */
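
/*
 * Alignment interrupt: record the faulting address in the DAR/DEAR
 * register appropriate to the MMU model, keep the instruction bits
 * needed for DSISR in error_code, and raise POWERPC_EXCP_ALIGN.
 */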
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}
#endif /* CONFIG_TCG */

#endif /* !CONFIG_USER_ONLY */