/*
 * Miscellaneous PowerPC emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "mmu-book3s-v3.h"
#include "hw/ppc/ppc.h"

#include "helper_regs.h"

/*****************************************************************************/
/* SPR accesses */

void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

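/*
 * Write a core-shared SPR: with a single hardware thread the store is
 * purely local, otherwise the value is copied into the register image
 * of every sibling thread of the core.
 */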
void helper_spr_core_write_generic(CPUPPCState *env, uint32_t sprn,
                                   target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    uint32_t nr_threads = cs->nr_threads;
    uint32_t core_id = env->spr[SPR_PIR] & ~(nr_threads - 1);

    assert(core_id == env->spr[SPR_PIR] - env->spr[SPR_TIR]);

    if (nr_threads == 1) {
        env->spr[sprn] = val;
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cenv->spr[sprn] = val;
    }
}

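/*
 * CTRL writes: the RUN bit (bit 0) is updated locally, while this
 * thread's state bit at position (8 + TIR) is mirrored into the CTRL
 * image of every thread of the core.
 */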
void helper_spr_write_CTRL(CPUPPCState *env, uint32_t sprn,
                           target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    uint32_t run = val & 1;
    uint32_t ts, ts_mask;

    assert(sprn == SPR_CTRL);

    env->spr[sprn] &= ~1U;
    env->spr[sprn] |= run;

    ts_mask = ~(1U << (8 + env->spr[SPR_TIR]));
    ts = run << (8 + env->spr[SPR_TIR]);

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;

        cenv->spr[sprn] &= ts_mask;
        cenv->spr[sprn] |= ts;
    }
}

#ifdef TARGET_PPC64
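/*
 * Raise a facility unavailable interrupt at the faulting instruction
 * (raddr).  The FSCR variant records the interruption cause in FSCR[IC]
 * here; the HFSCR variant clears HFSCR[IC] and hands the cause to the
 * exception code as the error code.
 */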
static void raise_hv_fu_exception(CPUPPCState *env, uint32_t bit,
                                  const char *caller, uint32_t cause,
                                  uintptr_t raddr)
{
    qemu_log_mask(CPU_LOG_INT, "HV Facility %d is unavailable (%s)\n",
                  bit, caller);

    env->spr[SPR_HFSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);

    raise_exception_err_ra(env, POWERPC_EXCP_HV_FU, cause, raddr);
}

static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
                               uint32_t sprn, uint32_t cause,
                               uintptr_t raddr)
{
    qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit);

    env->spr[SPR_FSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
    cause &= FSCR_IC_MASK;
    env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS;

    raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr);
}
#endif

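/*
 * Check a facility bit in HFSCR: on a hypervisor-capable CPU running
 * below hypervisor privilege, a clear bit raises a Hypervisor Facility
 * Unavailable interrupt.
 */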
void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
                                 const char *caller, uint32_t cause)
{
#ifdef TARGET_PPC64
    if ((env->msr_mask & MSR_HVB) && !FIELD_EX64(env->msr, MSR, HV) &&
        !(env->spr[SPR_HFSCR] & (1UL << bit))) {
        raise_hv_fu_exception(env, bit, caller, cause, GETPC());
    }
#endif
}

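/*
 * The FSCR and MSR checks below raise a Facility Unavailable interrupt
 * when the controlling bit is clear.
 */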
void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit,
                                uint32_t sprn, uint32_t cause)
{
#ifdef TARGET_PPC64
    if (env->spr[SPR_FSCR] & (1ULL << bit)) {
        /* Facility is enabled, continue */
        return;
    }
    raise_fu_exception(env, bit, sprn, cause, GETPC());
#endif
}

void helper_msr_facility_check(CPUPPCState *env, uint32_t bit,
                               uint32_t sprn, uint32_t cause)
{
#ifdef TARGET_PPC64
    if (env->msr & (1ULL << bit)) {
        /* Facility is enabled, continue */
        return;
    }
    raise_fu_exception(env, bit, sprn, cause, GETPC());
#endif
}

#if !defined(CONFIG_USER_ONLY)

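/*
 * SDR1 locates the hashed page table; any change invalidates cached
 * translations, so the TLB is flushed.
 */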
void helper_store_sdr1(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_SDR1] != val) {
        ppc_store_sdr1(env, val);
        tlb_flush(env_cpu(env));
    }
}

#if defined(TARGET_PPC64)
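/*
 * PTCR (ISA v3.00) holds the partition table base (PATB) and size
 * (PATS).  Reserved bits are cleared, a size above 24 is rejected, and
 * the TLB is flushed on any change.
 */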
void helper_store_ptcr(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_PTCR] != val) {
        PowerPCCPU *cpu = env_archcpu(env);
        target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
        target_ulong patbsize = val & PTCR_PATS;

        qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, val);

        assert(!cpu->vhyp);
        assert(env->mmu_model & POWERPC_MMU_3_00);

        if (val & ~ptcr_mask) {
            error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
                         val & ~ptcr_mask);
            val &= ptcr_mask;
        }

        if (patbsize > 24) {
            error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
                         " stored in PTCR", patbsize);
            return;
        }

        env->spr[SPR_PTCR] = val;
        tlb_flush(env_cpu(env));
    }
}

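/* Only the PCR bits supported by this CPU class (pcr_mask) are kept. */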
void helper_store_pcr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    env->spr[SPR_PCR] = value & pcc->pcr_mask;
}

/*
 * DPDES register is shared. Each bit reflects the state of the
 * doorbell interrupt of a thread of the same core.
 */
target_ulong helper_load_dpdes(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    uint32_t nr_threads = cs->nr_threads;
    target_ulong dpdes = 0;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP);

    if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
        nr_threads = 1; /* DPDES behaves as 1-thread in LPAR-per-thread mode */
    }

    if (nr_threads == 1) {
        if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
            dpdes = 1;
        }
        return dpdes;
    }

    qemu_mutex_lock_iothread();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        CPUPPCState *cenv = &ccpu->env;
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        if (cenv->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
            dpdes |= (0x1 << thread_id);
        }
    }
    qemu_mutex_unlock_iothread();

    return dpdes;
}

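/*
 * A DPDES store raises or clears the doorbell of each sibling thread
 * according to the bit indexed by that thread's TIR.
 */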
void helper_store_dpdes(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    uint32_t nr_threads = cs->nr_threads;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP);

    if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
        nr_threads = 1; /* DPDES behaves as 1-thread in LPAR-per-thread mode */
    }

    if (val & ~(nr_threads - 1)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid DPDES register value "
                      TARGET_FMT_lx"\n", val);
        val &= (nr_threads - 1); /* Ignore the invalid bits */
    }

    if (nr_threads == 1) {
        ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & 0x1);
        return;
    }

    /* Does iothread need to be locked for walking CPU list? */
    qemu_mutex_lock_iothread();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        /* Update each sibling's doorbell from its bit of the new value */
        ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
    }
    qemu_mutex_unlock_iothread();
}
#endif /* defined(TARGET_PPC64) */
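
/*
 * PID is a 32-bit SPR.  The TCG TLB does not distinguish translation
 * contexts by PID, so it is flushed on every change.
 */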
void helper_store_pidr(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKS_PID] = (uint32_t)val;
    tlb_flush(env_cpu(env));
}

void helper_store_lpidr(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_LPIDR] = (uint32_t)val;

    /*
     * We need to flush the TLB on LPID changes as we only tag HV vs
     * guest in the TCG TLB. Also, the quadrants mean the HV will
     * potentially access and cache entries for the current LPID as
     * well.
     */
    tlb_flush(env_cpu(env));
}

void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val)
{
    /* Bits 26 & 27 affect single-stepping. */
    hreg_compute_hflags(env);
    /* Bits 28 & 29 affect reset or shutdown. */
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler(CPUPPCState *env, target_ulong val)
{
    store_40x_sler(env, val);
}
#endif

/*****************************************************************************/
/* Special registers manipulation */

/*
 * This code is lifted from MacOnLinux. It is called whenever THRM1, 2
 * or 3 is read and fixes up the values in such a way that MacOS will
 * not hang. These registers exist on some 75x and 74xx processors.
 */
void helper_fixup_thrm(CPUPPCState *env)
{
    target_ulong v, t;
    int i;

#define THRM1_TIN       (1 << 31)
#define THRM1_TIV       (1 << 30)
#define THRM1_THRES(x)  (((x) & 0x7f) << 23)
#define THRM1_TID       (1 << 2)
#define THRM1_TIE       (1 << 1)
#define THRM1_V         (1 << 0)
#define THRM3_E         (1 << 0)

    if (!(env->spr[SPR_THRM3] & THRM3_E)) {
        return;
    }

    /* Note: Thermal interrupts are unimplemented */
    for (i = SPR_THRM1; i <= SPR_THRM2; i++) {
        v = env->spr[i];
        if (!(v & THRM1_V)) {
            continue;
        }
        v |= THRM1_TIV;
        v &= ~THRM1_TIN;
        t = v & THRM1_THRES(127);
        if ((v & THRM1_TID) && t < THRM1_THRES(24)) {
            v |= THRM1_TIN;
        }
        if (!(v & THRM1_TID) && t > THRM1_THRES(24)) {
            v |= THRM1_TIN;
        }
        env->spr[i] = v;
    }
}
|