2013-03-12 04:31:06 +04:00
|
|
|
/*
|
|
|
|
* PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2003-2007 Jocelyn Mayer
|
|
|
|
* Copyright (c) 2013 David Gibson, IBM Corporation
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
2016-01-26 21:16:58 +03:00
|
|
|
#include "qemu/osdep.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 11:01:28 +03:00
|
|
|
#include "qapi/error.h"
|
2013-03-12 04:31:06 +04:00
|
|
|
#include "cpu.h"
|
2016-03-15 15:18:37 +03:00
|
|
|
#include "exec/exec-all.h"
|
2014-04-08 09:31:41 +04:00
|
|
|
#include "exec/helper-proto.h"
|
2016-01-27 03:52:57 +03:00
|
|
|
#include "qemu/error-report.h"
|
2013-03-12 04:31:06 +04:00
|
|
|
#include "sysemu/kvm.h"
|
|
|
|
#include "kvm_ppc.h"
|
|
|
|
#include "mmu-hash64.h"
|
2016-01-07 16:55:28 +03:00
|
|
|
#include "exec/log.h"
|
2013-03-12 04:31:06 +04:00
|
|
|
|
|
|
|
//#define DEBUG_SLB
|
|
|
|
|
|
|
|
#ifdef DEBUG_SLB
|
2015-11-13 15:34:23 +03:00
|
|
|
# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
|
2013-03-12 04:31:06 +04:00
|
|
|
#else
|
|
|
|
# define LOG_SLB(...) do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
2014-02-20 21:52:24 +04:00
|
|
|
/*
|
2016-03-08 03:35:15 +03:00
|
|
|
* Used to indicate that a CPU has its hash page table (HPT) managed
|
|
|
|
* within the host kernel
|
2014-02-20 21:52:24 +04:00
|
|
|
*/
|
2016-03-08 03:35:15 +03:00
|
|
|
#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1)
|
|
|
|
|
2013-03-12 04:31:06 +04:00
|
|
|
/*
|
|
|
|
* SLB handling
|
|
|
|
*/
|
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * Search the SLB for a valid entry covering effective address @eaddr.
 * Both 256MiB and 1TiB segment matches are attempted for every slot.
 * Returns the matching entry, or NULL if no valid entry covers @eaddr.
 */
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t match_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    uint64_t match_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
    int i;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    for (i = 0; i < env->slb_nr; i++) {
        ppc_slb_t *entry = &env->slb[i];
        uint64_t seg_size = entry->vsid & SLB_VSID_B;

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, i, entry->esid, entry->vsid);

        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (entry->esid == match_256M && seg_size == SLB_VSID_B_256M) {
            return entry;
        }
        if (entry->esid == match_1T && seg_size == SLB_VSID_B_1T) {
            return entry;
        }
    }

    return NULL;
}
|
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * Dump all in-use SLB entries (ESID/VSID pairs) to @f for debugging.
 * All-zero slots are treated as empty and skipped.
 */
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int slot;

    /* Make sure env reflects the current (e.g. KVM) register state */
    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (slot = 0; slot < env->slb_nr; slot++) {
        uint64_t slbe = env->slb[slot].esid;
        uint64_t slbv = env->slb[slot].vsid;

        if (slbe != 0 || slbv != 0) {
            cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                        slot, slbe, slbv);
        }
    }
}
|
|
|
|
|
|
|
|
/*
 * slbia instruction: invalidate all SLB entries except slot 0, and
 * mark the TCG TLB as needing a (deferred, batched) local flush for
 * any entry that was actually valid.
 */
void helper_slbia(CPUPPCState *env)
{
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        }
    }
}
|
|
|
|
|
|
|
|
/*
 * slbie instruction: invalidate the single SLB entry (if any) that
 * translates effective address @addr, and request a deferred local
 * TLB flush if it was valid.
 */
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        /* No matching entry: nothing to invalidate */
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    }
}
|
|
|
|
|
2016-01-27 03:07:29 +03:00
|
|
|
/*
 * Validate and store an SLB entry (slbmte path and sregs restore).
 *
 * @slot: SLB slot index; @esid/@vsid: raw register images to store.
 * Returns 0 on success, -1 on any validation failure (bad slot,
 * reserved ESID bits, bad segment size, unsupported 1T segment, or a
 * page size encoding not present in env->sps).
 */
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    /* Validate the slot number before forming a pointer into
     * env->slb[]: computing &env->slb[slot] for an out-of-range slot
     * is undefined behaviour even if never dereferenced. */
    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    slb = &env->slb[slot];

    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Find the base page size implied by the LLP bits of the VSID;
     * env->sps is terminated by a zero page_shift entry */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}
|
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * slbmfee helper: read back the ESID half of the SLB entry selected
 * by the low 12 bits of @rb into *@rt.  Returns 0 on success, -1 if
 * the slot number is out of range (*@rt untouched in that case).
 */
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb;

    /* Bounds-check before forming the pointer: slot can be up to
     * 0xfff, and &env->slb[slot] out of range is undefined behaviour
     * even without a dereference. */
    if (slot >= env->slb_nr) {
        return -1;
    }
    slb = &env->slb[slot];

    *rt = slb->esid;
    return 0;
}
|
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * slbmfev helper: read back the VSID half of the SLB entry selected
 * by the low 12 bits of @rb into *@rt.  Returns 0 on success, -1 if
 * the slot number is out of range (*@rt untouched in that case).
 */
static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb;

    /* Bounds-check before forming the pointer: slot can be up to
     * 0xfff, and &env->slb[slot] out of range is undefined behaviour
     * even without a dereference. */
    if (slot >= env->slb_nr) {
        return -1;
    }
    slb = &env->slb[slot];

    *rt = slb->vsid;
    return 0;
}
|
|
|
|
|
2016-06-07 05:50:27 +03:00
|
|
|
/*
 * slbfee helper: look up the SLB entry translating effective address
 * @rb and return its VSID in *@rt, or all-ones if there is no match.
 * Always succeeds (returns 0).
 */
static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    target_ulong ea = rb;
    ppc_slb_t *slb;

    /* In 32-bit mode only the low word takes part in the lookup */
    if (!msr_is_64bit(env, env->msr)) {
        ea &= 0xffffffff;
    }
    slb = slb_lookup(cpu, ea);
    *rt = slb ? slb->vsid : (target_ulong)-1ul;
    return 0;
}
|
|
|
|
|
2013-03-12 04:31:06 +04:00
|
|
|
/*
 * slbmte instruction: store the (ESID, VSID) pair from RB/RS into the
 * SLB slot encoded in the low 12 bits of RB.  Any validation failure
 * raises a program interrupt (invalid instruction).
 */
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}
|
|
|
|
|
|
|
|
/*
 * slbmfee instruction: return the ESID of the SLB slot selected by RB,
 * raising a program interrupt if the slot number is invalid.
 */
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
|
|
|
|
|
2016-06-07 05:50:27 +03:00
|
|
|
/*
 * slbfee instruction: return the VSID of the SLB entry translating the
 * effective address in RB (or -1 if none).  The underlying lookup
 * cannot currently fail, but a failure would raise a program interrupt.
 */
target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
|
|
|
|
|
2013-03-12 04:31:06 +04:00
|
|
|
/*
 * slbmfev instruction: return the VSID of the SLB slot selected by RB,
 * raising a program interrupt if the slot number is invalid.
 */
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
|
2013-03-12 04:31:07 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* 64-bit hash table MMU handling
|
|
|
|
*/
|
2016-03-08 03:33:46 +03:00
|
|
|
/*
 * Update SDR1 (hash table origin/size register) and derive the cached
 * htab_base/htab_mask values from it.
 *
 * An out-of-range HTABSIZE (> 28) reports an error via @errp and is
 * clamped to 28 so the derived mask stays sane; the raw SPR value is
 * stored unmodified either way.
 */
void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
                         Error **errp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong htabsize = value & SDR_64_HTABSIZE;

    env->spr[SPR_SDR1] = value;
    if (htabsize > 28) {
        error_setg(errp,
                   "Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
                   htabsize);
        htabsize = 28;
    }
    /* Mask of PTEG indices: table is 2^(htabsize+18) bytes, a PTEG is
     * 128 (2^7) bytes */
    env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
    env->htab_base = value & SDR_64_HTABORG;
}
|
|
|
|
|
|
|
|
/*
 * Point the CPU at an externally managed hash page table.
 *
 * @hpt: host pointer to the table, or NULL meaning the HPT lives in
 *       the host kernel (KVM-managed sentinel).
 * @shift: log2 of the table size in bytes (encoded into SDR1 as
 *         shift - 18).
 * Reports failures (bad size, KVM sregs update) through @errp.
 */
void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
                                 Error **errp)
{
    CPUPPCState *env = &cpu->env;
    Error *local_err = NULL;

    if (hpt) {
        env->external_htab = hpt;
    } else {
        env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
    }
    ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18),
                        &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Not strictly necessary, but makes it clearer that an external
     * htab is in use when debugging */
    env->htab_base = -1;

    if (kvm_enabled()) {
        if (kvmppc_put_books_sregs(cpu) < 0) {
            error_setg(errp, "Unable to update SDR1 in KVM");
        }
    }
}
|
2013-03-12 04:31:07 +04:00
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
|
2013-03-12 04:31:40 +04:00
|
|
|
ppc_slb_t *slb, ppc_hash_pte64_t pte)
|
2013-03-12 04:31:14 +04:00
|
|
|
{
|
2016-01-14 07:33:27 +03:00
|
|
|
CPUPPCState *env = &cpu->env;
|
2013-03-12 04:31:40 +04:00
|
|
|
unsigned pp, key;
|
|
|
|
/* Some pp bit combinations have undefined behaviour, so default
|
|
|
|
* to no access in those cases */
|
|
|
|
int prot = 0;
|
|
|
|
|
|
|
|
key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
|
|
|
|
: (slb->vsid & SLB_VSID_KS));
|
|
|
|
pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
|
2013-03-12 04:31:14 +04:00
|
|
|
|
|
|
|
if (key == 0) {
|
|
|
|
switch (pp) {
|
|
|
|
case 0x0:
|
|
|
|
case 0x1:
|
|
|
|
case 0x2:
|
2013-03-12 04:31:40 +04:00
|
|
|
prot = PAGE_READ | PAGE_WRITE;
|
|
|
|
break;
|
|
|
|
|
2013-03-12 04:31:14 +04:00
|
|
|
case 0x3:
|
|
|
|
case 0x6:
|
2013-03-12 04:31:40 +04:00
|
|
|
prot = PAGE_READ;
|
2013-03-12 04:31:14 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
switch (pp) {
|
|
|
|
case 0x0:
|
|
|
|
case 0x6:
|
2013-03-12 04:31:40 +04:00
|
|
|
prot = 0;
|
2013-03-12 04:31:14 +04:00
|
|
|
break;
|
2013-03-12 04:31:40 +04:00
|
|
|
|
2013-03-12 04:31:14 +04:00
|
|
|
case 0x1:
|
|
|
|
case 0x3:
|
2013-03-12 04:31:40 +04:00
|
|
|
prot = PAGE_READ;
|
2013-03-12 04:31:14 +04:00
|
|
|
break;
|
2013-03-12 04:31:40 +04:00
|
|
|
|
2013-03-12 04:31:14 +04:00
|
|
|
case 0x2:
|
2013-03-12 04:31:40 +04:00
|
|
|
prot = PAGE_READ | PAGE_WRITE;
|
2013-03-12 04:31:14 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-12 04:31:40 +04:00
|
|
|
/* No execute if either noexec or guarded bits set */
|
2013-03-12 04:31:41 +04:00
|
|
|
if (!(pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G)
|
|
|
|
|| (slb->vsid & SLB_VSID_N)) {
|
2013-03-12 04:31:40 +04:00
|
|
|
prot |= PAGE_EXEC;
|
2013-03-12 04:31:14 +04:00
|
|
|
}
|
|
|
|
|
2013-03-12 04:31:40 +04:00
|
|
|
return prot;
|
2013-03-12 04:31:14 +04:00
|
|
|
}
|
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * Apply Virtual Page Class Key Protection (AMR) to a full-access
 * permission set: the two AMR bits selected by the PTE's key field
 * can independently deny stores (bit 0x2) and loads (bit 0x1).
 */
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int key, amrbits;
    int deny = 0;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;

    /* A store is permitted if the AMR bit is 0; deny write otherwise */
    if (amrbits & 0x2) {
        deny |= PAGE_WRITE;
    }
    /* A load is permitted if the AMR bit is 0; deny read otherwise */
    if (amrbits & 0x1) {
        deny |= PAGE_READ;
    }

    return prot & ~deny;
}
|
|
|
|
|
2014-02-20 21:52:24 +04:00
|
|
|
/*
 * Begin access to the PTEG containing @pte_index and return an opaque
 * token for the load_hpte0/1 accessors.  The token is either a buffer
 * fetched from KVM, a pointer into a QEMU-managed external HPT, or a
 * guest physical address into the guest-memory HPT.  Returns 0 if no
 * HPT is configured.  Must be paired with ppc_hash64_stop_access().
 */
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
    } else if (cpu->env.external_htab) {
        /*
         * HTAB is controlled by QEMU. Just point to the internally
         * accessible PTEG.
         */
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        /* HTAB lives in guest memory: token is a guest physical address */
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}
|
|
|
|
|
2016-03-08 03:35:15 +03:00
|
|
|
/*
 * Release a PTEG access token obtained from ppc_hash64_start_access().
 * Only the KVM-managed case allocates anything that needs freeing.
 */
void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
{
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_free_pteg(token);
    }
}
|
|
|
|
|
2016-07-05 05:17:56 +03:00
|
|
|
/*
 * Determine the actual page size of a hash PTE, given the segment's
 * page size description @sps.  Returns the page shift (log2 of the
 * page size), or 0 if the PTE's encoding does not match any page size
 * valid for this segment.
 */
static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    /* Large page: match the encoding stored in the low RPN bits of
     * pte1 against each large-page encoding the segment supports */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_page_size *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}
|
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * Scan one PTE group for a PTE matching @ptem (V/B/H/AVPN) with a
 * page size valid for segment @sps.  On success, fills *@pte and
 * *@pshift and returns the byte offset of the PTE within the hash
 * table; returns -1 if no matching PTE is found (or the HPT is
 * inaccessible).
 */
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const struct ppc_one_seg_page_size *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /* We don't do anything with pshift yet as qemu TLB only deals
             * with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(cpu, token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(cpu, token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}
|
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * Full hash table lookup for effective address @eaddr translated by
 * SLB entry @slb: compute the primary hash from the VSID and page
 * offset, search the primary PTEG, then fall back to the secondary
 * PTEG (inverted hash, H bit set).  Fills *@pte/*@pshift and returns
 * the PTE offset on success, -1 on failure.
 */
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnmask, epn, ptem;
    const struct ppc_one_seg_page_size *sps = slb->sps;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &env->sps.sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return pte_offset;
}
|
2013-03-12 04:31:09 +04:00
|
|
|
|
2016-01-27 04:01:20 +03:00
|
|
|
/*
 * Determine a hash PTE's page shift without an SLB entry to narrow
 * the segment page sizes: try every segment size the CPU supports.
 * Returns 12 for a non-large PTE, the matched shift for a large PTE,
 * or 0 if no supported encoding matches.
 */
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    CPUPPCState *env = &cpu->env;
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}
|
|
|
|
|
2016-06-22 00:48:50 +03:00
|
|
|
static void ppc_hash64_set_isi(CPUState *cs, CPUPPCState *env,
|
|
|
|
uint64_t error_code)
|
|
|
|
{
|
|
|
|
bool vpm;
|
|
|
|
|
|
|
|
if (msr_ir) {
|
|
|
|
vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
|
|
|
|
} else {
|
|
|
|
vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
|
|
|
|
}
|
|
|
|
if (vpm && !msr_hv) {
|
|
|
|
cs->exception_index = POWERPC_EXCP_HISI;
|
|
|
|
} else {
|
|
|
|
cs->exception_index = POWERPC_EXCP_ISI;
|
|
|
|
}
|
|
|
|
env->error_code = error_code;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Raise a data storage interrupt for the current access, routing it
 * either to the hypervisor (HDSI, with HDAR/HDSISR) or the OS (DSI,
 * with DAR/DSISR) depending on the LPCR VPM bits and MSR[HV].
 */
static void ppc_hash64_set_dsi(CPUState *cs, CPUPPCState *env, uint64_t dar,
                               uint64_t dsisr)
{
    /* VPM1 governs translation-on faults, VPM0 translation-off ones */
    bool vpm = msr_dr ? !!(env->spr[SPR_LPCR] & LPCR_VPM1)
                      : !!(env->spr[SPR_LPCR] & LPCR_VPM0);

    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}
|
|
|
|
|
|
|
|
|
2016-03-15 17:12:16 +03:00
|
|
|
/*
 * Handle a TLB miss / MMU fault for the 64-bit hash MMU.
 *
 * @eaddr:   faulting effective address
 * @rwx:     access type: 0 = data read, 1 = data write, 2 = instruction
 *           fetch (see need_prot[] below, which maps these to
 *           PAGE_READ/PAGE_WRITE/PAGE_EXEC)
 * @mmu_idx: softmmu index to install the translation under
 *
 * Returns 0 when a translation was installed with tlb_set_page(), or
 * 1 when a guest interrupt (ISI/DSI/segment/machine-check, possibly
 * the HV variants) has been set up instead.
 */
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1, dsisr;
    /* Protection needed for each rwx value (read, write, execute) */
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* Note on LPCR usage: 970 uses HID4, but our special variant
     * of store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarily we filter unimplemented bits when storing into
     * LPCR depending on the MMU version. This code can thus just
     * use the LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is supposedly "off" */
        /* In real mode the top 4 effective address bits are (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else {
            /* Otherwise, check VPM for RMA vs VRMA */
            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
                /* VRMA: translate through the synthetic SLB entry set
                 * up by ppc_hash64_update_vrma(), if it is valid */
                slb = &env->vrma_slb;
                if (slb->sps) {
                    goto skip_slb_search;
                }
                /* Not much else to do here */
                cs->exception_index = POWERPC_EXCP_MCHECK;
                env->error_code = 0;
                return 1;
            } else if (raddr < env->rmls) {
                /* RMA. Check bounds in RMLS */
                raddr |= env->spr[SPR_RMOR];
            } else {
                /* The access failed, generate the approriate interrupt */
                if (rwx == 2) {
                    ppc_hash64_set_isi(cs, env, 0x08000000);
                } else {
                    dsisr = 0x08000000;
                    if (rwx == 1) {
                        /* Mark the faulting access as a store */
                        dsisr |= 0x02000000;
                    }
                    ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
                }
                return 1;
            }
        }
        /* Real mode succeeded: map a single page with full permissions */
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No SLB entry covers this EA: segment interrupt */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

/* Entered directly from the VRMA real-mode path above with
 * slb = &env->vrma_slb */
skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        ppc_hash64_set_isi(cs, env, 0x10000000);
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (pte_offset == -1) {
        /* No matching PTE: page fault */
        dsisr = 0x40000000;
        if (rwx == 2) {
            ppc_hash64_set_isi(cs, env, dsisr);
        } else {
            if (rwx == 1) {
                /* Mark the faulting access as a store */
                dsisr |= 0x02000000;
            }
            ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 5. Check access permissions */

    /* Combine PP-bit protection with Authority Mask Register protection */
    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            ppc_hash64_set_isi(cs, env, 0x08000000);
        } else {
            dsisr = 0;
            if (need_prot[rwx] & ~pp_prot) {
                /* Denied by the PTE protection bits */
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                /* Mark the faulting access as a store */
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                /* Denied by the Authority Mask Register */
                dsisr |= 0x00200000;
            }
            ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    /* Low apshift bits come from the EA, the rest from the PTE's RPN */
    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}
|
2013-03-12 04:31:11 +04:00
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * Debug (e.g. gdbstub) address translation: convert an effective
 * address to a real address without raising any guest-visible
 * interrupts or touching R/C bits. Returns -1 when no translation
 * exists.
 */
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_hash_pte64_t pte;
    ppc_slb_t *slb;
    hwaddr pte_offset, raddr;
    unsigned apshift;

    if (msr_dr != 0) {
        /* Translation on: a covering SLB entry is required */
        slb = slb_lookup(cpu, addr);
        if (slb == NULL) {
            return -1;
        }
    } else {
        /* Real mode: the top 4 effective address bits are ignored */
        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;

        /* With HV permissions (or a CPU without an HV mode), apply
         * HRMOR when the top EA bit is clear */
        if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
            return raddr | env->spr[SPR_HRMOR];
        }

        if (env->spr[SPR_LPCR] & LPCR_VPM0) {
            /* VRMA: fall through to a hash table walk using the
             * synthetic VRMA SLB entry, if one is configured */
            slb = &env->vrma_slb;
            if (slb->sps == NULL) {
                return -1;
            }
        } else if (raddr < env->rmls) {
            /* RMA: inside the RMLS bound, offset by RMOR */
            return raddr | env->spr[SPR_RMOR];
        } else {
            return -1;
        }
    }

    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
    if (pte_offset == -1) {
        return -1;
    }

    /* Combine the PTE's RPN with the low apshift bits of the EA */
    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}
|
2014-02-20 21:52:38 +04:00
|
|
|
|
2016-01-14 07:33:27 +03:00
|
|
|
/*
 * Write one hash PTE (pte0/pte1 doubleword pair) at index @pte_index
 * into whichever backing store currently holds the hash table: a
 * KVM-managed HPT, a QEMU-local external table, or guest physical
 * memory at htab_base.
 */
void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;
    target_ulong offset;

    /* A KVM-managed hash table must be updated through the kernel */
    if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    offset = pte_index * HASH_PTE_SIZE_64;
    if (env->external_htab) {
        /* Hash table kept in QEMU-local memory */
        stq_p(env->external_htab + offset, pte0);
        stq_p(env->external_htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        /* Hash table kept in guest physical memory */
        stq_phys(CPU(cpu)->as, env->htab_base + offset, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + offset + HASH_PTE_SIZE_64 / 2, pte1);
    }
}
|
2016-01-15 08:12:09 +03:00
|
|
|
|
|
|
|
/*
 * Invalidate cached translations after a hash PTE has been modified.
 * The pte_index/pte0/pte1 arguments identify the changed entry but
 * are currently unused — see the comment below.
 */
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                               target_ulong pte_index,
                               target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;

    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    env->tlb_need_flush = TLB_NEED_LOCAL_FLUSH | TLB_NEED_GLOBAL_FLUSH;
}
|
2016-06-27 09:55:16 +03:00
|
|
|
|
2016-07-05 00:37:08 +03:00
|
|
|
/*
 * Recompute env->rmls (the real mode area limit in bytes) from the
 * RMLS field of LPCR. Called whenever LPCR changes.
 */
void ppc_hash64_update_rmls(CPUPPCState *env)
{
    uint64_t rmls_field = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    /*
     * This is the full 4 bit encoding of POWER8. Earlier CPUs only
     * support a subset, but the filtering is done when LPCR is
     * written, so all encodings reaching here are valid for the CPU.
     */
    switch (rmls_field) {
    case 0x1: /* 16GB */
        env->rmls = 0x400000000ull;
        break;
    case 0x2: /* 1GB */
        env->rmls = 0x40000000ull;
        break;
    case 0x3: /* 64MB */
        env->rmls = 0x4000000ull;
        break;
    case 0x4: /* 256MB */
        env->rmls = 0x10000000ull;
        break;
    case 0x7: /* 128MB */
        env->rmls = 0x8000000ull;
        break;
    case 0x8: /* 32MB */
        env->rmls = 0x2000000ull;
        break;
    default:
        /* Reserved encoding: no usable real mode area */
        env->rmls = 0;
    }
}
|
|
|
|
|
|
|
|
/*
 * Rebuild the synthetic SLB entry (env->vrma_slb) used for VRMA
 * translation in real mode. The entry is cleared, then repopulated
 * from LPCR[VRMASD] if LPCR[VPM0] enables the VRMA. Called whenever
 * LPCR changes.
 */
void ppc_hash64_update_vrma(CPUPPCState *env)
{
    const struct ppc_one_seg_page_size *sps = NULL;
    target_ulong esid, vsid, lpcr;
    ppc_slb_t *slb = &env->vrma_slb;
    uint32_t vrmasd;
    int i;

    /* Invalidate any previous VRMA entry first */
    slb->esid = slb->vsid = 0;
    slb->sps = NULL;

    /* Nothing more to do unless the VRMA is enabled */
    lpcr = env->spr[SPR_LPCR];
    if (!(lpcr & LPCR_VPM0)) {
        return;
    }

    /*
     * Synthesize an SLB entry. The ESID is mostly irrelevant for
     * translation, only its valid bit matters.
     */
    vsid = SLB_VSID_VRMA;
    vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
    esid = SLB_ESID_V;

    /* Find the segment page size matching the L/LP encoding we built */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *cur = &env->sps.sps[i];

        if (!cur->page_shift) {
            /* End of the populated part of the table */
            break;
        }
        if ((vsid & SLB_VSID_LLP_MASK) == cur->slb_enc) {
            sps = cur;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
                     " vsid 0x"TARGET_FMT_lx, esid, vsid);
        return;
    }

    slb->vsid = vsid;
    slb->esid = esid;
    slb->sps = sps;
}
|
|
|
|
|
2016-06-27 09:55:16 +03:00
|
|
|
/*
 * Store a value to LPCR, masking out bits not implemented on the
 * current MMU model, then refresh the derived RMLS and VRMA state.
 *
 * For the 970 (POWERPC_MMU_64B) the incoming value is the HID4-style
 * image (see the LPCR note in ppc_hash64_handle_mmu_fault); the
 * relevant bits are translated into LPCR equivalents below.
 */
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    uint64_t lpcr = 0;

    /* Filter out bits */
    switch (env->mmu_model) {
    case POWERPC_MMU_64B: /* 970 */
        if (val & 0x40) {
            lpcr |= LPCR_LPES0;
        }
        if (val & 0x8000000000000000ull) {
            lpcr |= LPCR_LPES1;
        }
        /* These three source bits combine into the RMLS encoding */
        if (val & 0x20) {
            lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x4000000000000000ull) {
            lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x2000000000000000ull) {
            lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
        }
        /* NOTE(review): this shifts the *synthesized* lpcr, which at
         * this point only ever holds LPES/RMLS bits set above; it
         * looks like the RMOR field was meant to be extracted from
         * the raw input ('val' rather than 'lpcr') — TODO confirm
         * against the 970 HID4 layout. */
        env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;

        /* XXX We could also write LPID from HID4 here
         * but since we don't tag any translation on it
         * it doesn't actually matter
         */
        /* XXX For proper emulation of 970 we also need
         * to dig HRMOR out of HID5
         */
        break;
    case POWERPC_MMU_2_03: /* P5p */
        lpcr = val & (LPCR_RMLS | LPCR_ILE |
                      LPCR_LPES0 | LPCR_LPES1 |
                      LPCR_RMI | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_06: /* P7 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
                      LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
                      LPCR_MER | LPCR_TC |
                      LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_07: /* P8 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
                      LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
                      LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
                      LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
        break;
    default:
        /* Other MMU models: LPCR reads back as zero */
        ;
    }
    env->spr[SPR_LPCR] = lpcr;
    /* Keep the derived real-mode state in sync with the new LPCR */
    ppc_hash64_update_rmls(env);
    ppc_hash64_update_vrma(env);
}
|