/*
 * PowerPC emulation special registers manipulation helpers for qemu.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HELPER_REGS_H
#define HELPER_REGS_H

/* Swap temporary saved registers with GPRs */
static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
{
    target_ulong tmp;

    tmp = env->gpr[0];
    env->gpr[0] = env->tgpr[0];
    env->tgpr[0] = tmp;
    tmp = env->gpr[1];
    env->gpr[1] = env->tgpr[1];
    env->tgpr[1] = tmp;
    tmp = env->gpr[2];
    env->gpr[2] = env->tgpr[2];
    env->tgpr[2] = tmp;
    tmp = env->gpr[3];
    env->gpr[3] = env->tgpr[3];
    env->tgpr[3] = tmp;
}
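
/*
 * Usage sketch (illustrative, not part of the original header): the swap
 * only matters on CPUs with the POWERPC_FLAG_TGPR feature, where flipping
 * MSR[TGPR] exchanges GPR0-GPR3 with the temporary set, exactly as
 * hreg_store_msr() below does:
 *
 *     if ((env->flags & POWERPC_FLAG_TGPR) &&
 *         ((new_msr ^ env->msr) & (1 << MSR_TGPR))) {
 *         hreg_swap_gpr_tgpr(env);
 *     }
 */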

static inline void hreg_compute_mem_idx(CPUPPCState *env)
{
    /* This is our encoding for server processors. The architecture
     * specifies that there is no such thing as userspace with
     * translation off, however it appears that MacOS does it and
     * some 32-bit CPUs support it. Weird...
     *
     * 0 = Guest User space virtual mode
     * 1 = Guest Kernel space virtual mode
     * 2 = Guest User space real mode
     * 3 = Guest Kernel space real mode
     * 4 = HV User space virtual mode
     * 5 = HV Kernel space virtual mode
     * 6 = HV User space real mode
     * 7 = HV Kernel space real mode
     *
     * For BookE, we need 8 MMU modes as follows:
     *
     * 0 = AS 0 HV User space
     * 1 = AS 0 HV Kernel space
     * 2 = AS 1 HV User space
     * 3 = AS 1 HV Kernel space
     * 4 = AS 0 Guest User space
     * 5 = AS 0 Guest Kernel space
     * 6 = AS 1 Guest User space
     * 7 = AS 1 Guest Kernel space
     */
    if (env->mmu_model & POWERPC_MMU_BOOKE) {
        env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
        env->immu_idx += msr_is ? 2 : 0;
        env->dmmu_idx += msr_ds ? 2 : 0;
        env->immu_idx += msr_gs ? 4 : 0;
        env->dmmu_idx += msr_gs ? 4 : 0;
    } else {
        env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
        env->immu_idx += msr_ir ? 0 : 2;
        env->dmmu_idx += msr_dr ? 0 : 2;
        env->immu_idx += msr_hv ? 4 : 0;
        env->dmmu_idx += msr_hv ? 4 : 0;
    }
}
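
/*
 * A minimal consumption sketch (assumption: this mirrors the era's
 * cpu_mmu_index() in the target's cpu.h; shown here for context only):
 *
 *     static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch)
 *     {
 *         return ifetch ? env->immu_idx : env->dmmu_idx;
 *     }
 */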

static inline void hreg_compute_hflags(CPUPPCState *env)
{
    target_ulong hflags_mask;

    /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */
    hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) |
        (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) |
        (1 << MSR_LE) | (1 << MSR_VSX) | (1 << MSR_IR) | (1 << MSR_DR);
    hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB;
    hreg_compute_mem_idx(env);
    env->hflags = env->msr & hflags_mask;
    /* Merge with hflags coming from other registers */
    env->hflags |= env->hflags_nmsr;
}
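
/*
 * Sketch of where the hflags end up (assumption: cpu_get_tb_cpu_state()
 * for this target feeds them straight into the translation-block flags;
 * illustrative only, not a definitive copy):
 *
 *     static inline void cpu_get_tb_cpu_state(CPUPPCState *env,
 *                                             target_ulong *pc,
 *                                             target_ulong *cs_base,
 *                                             uint32_t *flags)
 *     {
 *         *pc = env->nip;
 *         *cs_base = 0;
 *         *flags = env->hflags;
 *     }
 */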

static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
                                 int alter_hv)
{
    int excp;
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
#endif

    excp = 0;
    value &= env->msr_mask;
#if !defined(CONFIG_USER_ONLY)
    /* Neither mtmsr nor guest state can alter HV */
    if (!alter_hv || !(env->msr & MSR_HVB)) {
        value &= ~MSR_HVB;
        value |= env->msr & MSR_HVB;
    }
    if (((value >> MSR_IR) & 1) != msr_ir ||
        ((value >> MSR_DR) & 1) != msr_dr) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
    if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
        ((value >> MSR_GS) & 1) != msr_gs) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
    if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
                 ((value ^ env->msr) & (1 << MSR_TGPR)))) {
        /* Swap temporary saved registers with GPRs */
        hreg_swap_gpr_tgpr(env);
    }
    if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
        /* Change the exception prefix on PowerPC 601 */
        env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
    }
    /* If PR=1 then EE, IR and DR must be 1
     *
     * Note: We only enforce this on 64-bit processors. It appears that
     * 32-bit implementations support PR=1 with EE/DR/IR=0, and MacOS
     * exploits it.
     */
    if ((env->insns_flags & PPC_64B) && ((value >> MSR_PR) & 1)) {
        value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
    }
#endif
    env->msr = value;
    hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
    if (unlikely(msr_pow == 1)) {
        if (!env->pending_interrupts && (*env->check_pow)(env)) {
            cs->halted = 1;
            excp = EXCP_HALTED;
        }
    }
#endif

    return excp;
}
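
/*
 * Typical caller sketch (assumption: modeled on the mtmsr helper in
 * excp_helper.c of this era; shown for illustration, not a definitive
 * copy):
 *
 *     void helper_store_msr(CPUPPCState *env, target_ulong val)
 *     {
 *         uint32_t excp = hreg_store_msr(env, val, 0);
 *
 *         if (excp != 0) {
 *             CPUState *cs = CPU(ppc_env_get_cpu(env));
 *             cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
 *             raise_exception(env, excp);
 *         }
 *     }
 */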

#if !defined(CONFIG_USER_ONLY)
/* TLB flushes are batched: invalidation helpers (slbie, tlbie, ...) only
 * set env->tlb_need_flush, and the flush itself is deferred to the next
 * synchronizing event (interrupt, rfi, isync, tlbsync), where this helper
 * is called.
 */
static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
        tlb_flush(cs, 1);
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
    }

    /* Propagate TLB invalidations to other CPUs when the guest uses broadcast
     * TLB invalidation instructions.
     */
    if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
        CPUState *other_cs;
        CPU_FOREACH(other_cs) {
            if (other_cs != cs) {
                PowerPCCPU *cpu = POWERPC_CPU(other_cs);
                CPUPPCState *other_env = &cpu->env;

                other_env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
                tlb_flush(other_cs, 1);
            }
        }
        env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
    }
}
#else
static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
#endif
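
/*
 * Hedged sketch of the batching pattern (helper names are illustrative
 * placeholders, not real QEMU entry points): an invalidation helper only
 * marks the TLB as dirty, and a later synchronizing instruction performs
 * the deferred flush:
 *
 *     void helper_tlbie_sketch(CPUPPCState *env, target_ulong addr)
 *     {
 *         // ... drop the translation for 'addr' from the software TLB ...
 *         env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH | TLB_NEED_GLOBAL_FLUSH;
 *     }
 *
 *     void helper_tlbsync_sketch(CPUPPCState *env)
 *     {
 *         check_tlb_flush(env, true);
 *     }
 */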

#endif /* HELPER_REGS_H */