diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h
index a1326df969..c89d0bccfd 100644
--- a/target/ppc/mmu-book3s-v3.h
+++ b/target/ppc/mmu-book3s-v3.h
@@ -47,6 +47,19 @@ struct prtb_entry {
     uint64_t prtbe0, prtbe1;
 };
 
+/*
+ * These correspond to the mmu_idx values computed in
+ * hreg_compute_hflags_value. See the tables therein.
+ *
+ * They are here because some bits are inverted for BookE MMUs,
+ * not necessarily because they only work for BookS. However,
+ * since we only needed to change the BookS MMUs, the functions
+ * are kept here to avoid introducing bugs in untested MMUs.
+ */
+static inline bool mmuidx_pr(int idx) { return !(idx & 1); }
+static inline bool mmuidx_real(int idx) { return idx & 2; }
+static inline bool mmuidx_hv(int idx) { return idx & 4; }
+
 #ifdef TARGET_PPC64
 
 static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu)
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index cbd404bfa4..5b0e62e676 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -155,7 +155,7 @@ static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
 
 static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                    uint64_t pte, int *fault_cause, int *prot,
-                                   bool partition_scoped)
+                                   int mmu_idx, bool partition_scoped)
 {
     CPUPPCState *env = &cpu->env;
     int need_prot;
@@ -173,7 +173,8 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
     /* Determine permissions allowed by Encoded Access Authority */
     if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
         *prot = 0;
-    } else if (msr_pr || (pte & R_PTE_EAA_PRIV) || partition_scoped) {
+    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
+               partition_scoped) {
         *prot = ppc_radix64_get_prot_eaa(pte);
     } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
         *prot = ppc_radix64_get_prot_eaa(pte);
@@ -299,7 +300,7 @@ static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                               ppc_v3_pate_t pate,
                                               hwaddr *h_raddr, int *h_prot,
                                               int *h_page_size, bool pde_addr,
-                                              bool guest_visible)
+                                              int mmu_idx, bool guest_visible)
 {
     int fault_cause = 0;
     hwaddr pte_addr;
@@ -310,7 +311,8 @@ static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
     if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                               pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                               &pte, &fault_cause, &pte_addr) ||
-        ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause, h_prot, true)) {
+        ppc_radix64_check_prot(cpu, access_type, pte,
+                               &fault_cause, h_prot, mmu_idx, true)) {
         if (pde_addr) { /* address being translated was that of a guest pde */
             fault_cause |= DSISR_PRTABLE_FAULT;
         }
@@ -332,7 +334,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                             vaddr eaddr, uint64_t pid,
                                             ppc_v3_pate_t pate, hwaddr *g_raddr,
                                             int *g_prot, int *g_page_size,
-                                            bool guest_visible)
+                                            int mmu_idx, bool guest_visible)
 {
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
@@ -367,7 +369,8 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
         ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
                                                  pate, &h_raddr, &h_prot,
                                                  &h_page_size, true,
-                                                 guest_visible);
+            /* mmu_idx is 5 because we're translating from hypervisor scope */
+                                                 5, guest_visible);
         if (ret) {
             return ret;
         }
@@ -407,7 +410,8 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
             ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
                                                      pate, &h_raddr, &h_prot,
                                                      &h_page_size, true,
-                                                     guest_visible);
+            /* mmu_idx is 5 because we're translating from hypervisor scope */
+                                                     5, guest_visible);
             if (ret) {
                 return ret;
             }
@@ -431,7 +435,8 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
         *g_raddr = (rpn & ~mask) | (eaddr & mask);
     }
 
-    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause, g_prot, false)) {
+    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
+                               g_prot, mmu_idx, false)) {
         /* Access denied due to protection */
         if (guest_visible) {
             ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
@@ -464,7 +469,7 @@
  * +-------------+----------------+---------------+
  */
 bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
-                       hwaddr *raddr, int *psizep, int *protp,
+                       hwaddr *raddr, int *psizep, int *protp, int mmu_idx,
                        bool guest_visible)
 {
     CPUPPCState *env = &cpu->env;
@@ -474,17 +479,17 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
     hwaddr g_raddr;
     bool relocation;
 
-    assert(!(msr_hv && cpu->vhyp));
+    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));
 
-    relocation = (access_type == MMU_INST_FETCH ? msr_ir : msr_dr);
+    relocation = !mmuidx_real(mmu_idx);
 
     /* HV or virtual hypervisor Real Mode Access */
-    if (!relocation && (msr_hv || cpu->vhyp)) {
+    if (!relocation && (mmuidx_hv(mmu_idx) || cpu->vhyp)) {
         /* In real mode top 4 effective addr bits (mostly) ignored */
         *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
 
         /* In HV mode, add HRMOR if top EA bit is clear */
-        if (msr_hv || !env->has_hv_mode) {
+        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
             if (!(eaddr >> 63)) {
                 *raddr |= env->spr[SPR_HRMOR];
             }
@@ -546,7 +551,7 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
     if (relocation) {
         int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                    pate, &g_raddr, &prot,
-                                                   &psize, guest_visible);
+                                                   &psize, mmu_idx, guest_visible);
         if (ret) {
             return false;
         }
@@ -564,13 +569,13 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
          * quadrants 1 or 2. Translates a guest real address to a host
          * real address.
          */
-        if (lpid || !msr_hv) {
+        if (lpid || !mmuidx_hv(mmu_idx)) {
             int ret;
 
             ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                      g_raddr, pate, raddr,
                                                      &prot, &psize, false,
-                                                     guest_visible);
+                                                     mmu_idx, guest_visible);
             if (ret) {
                 return false;
             }
diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h
index 6b13b89b64..b70357cf34 100644
--- a/target/ppc/mmu-radix64.h
+++ b/target/ppc/mmu-radix64.h
@@ -45,7 +45,7 @@
 #ifdef TARGET_PPC64
 
 bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
-                       hwaddr *raddr, int *psizep, int *protp,
+                       hwaddr *raddr, int *psizep, int *protp, int mmu_idx,
                        bool guest_visible);
 
 static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 945ac41d42..46b4afce54 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -2900,7 +2900,7 @@ static bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
     case POWERPC_MMU_3_00:
         if (ppc64_v3_radix(cpu)) {
             return ppc_radix64_xlate(cpu, eaddr, access_type,
-                                     raddrp, psizep, protp, guest_visible);
+                                     raddrp, psizep, protp, mmu_idx, guest_visible);
         }
         /* fall through */
     case POWERPC_MMU_64B:
@@ -2933,8 +2933,10 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
      * try an MMU_DATA_LOAD, we may not be able to read instructions
      * mapped by code TLBs, so we also try a MMU_INST_FETCH.
      */
-    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p, 0, false) ||
-        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p, 0, false)) {
+    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
+                  cpu_mmu_index(&cpu->env, false), false) ||
+        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
+                  cpu_mmu_index(&cpu->env, true), false)) {
         return raddr & TARGET_PAGE_MASK;
     }
     return -1;
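
A quick way to sanity-check the new helpers and the hardcoded mmu_idx of 5 is the standalone sketch below. It is not part of the patch: it merely replicates the three one-liner helpers and prints the decode of every index, assuming the bit layout from hreg_compute_hflags_value that the new mmu-book3s-v3.h comment describes (bit 0 = inverted PR, bit 1 = real mode, bit 2 = HV).

/* mmuidx_decode.c -- standalone sketch, not part of the patch */
#include <stdbool.h>
#include <stdio.h>

static inline bool mmuidx_pr(int idx)   { return !(idx & 1); }
static inline bool mmuidx_real(int idx) { return idx & 2; }
static inline bool mmuidx_hv(int idx)   { return idx & 4; }

int main(void)
{
    for (int idx = 0; idx < 8; idx++) {
        printf("mmu_idx %d: PR=%d real=%d HV=%d\n",
               idx, mmuidx_pr(idx), mmuidx_real(idx), mmuidx_hv(idx));
    }
    return 0;
}

For index 5 (0b101) this prints PR=0 real=0 HV=1, i.e. a privileged, translation-on, hypervisor access, which matches the "translating from hypervisor scope" comments in ppc_radix64_process_scoped_xlate above.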