target/arm: Reorganize ARMMMUIdx
Prepare for, but do not yet implement, the EL2&0 regime.  This involves
adding the new MMUIdx enumerators and adjusting some of the MMUIdx
related predicates to match.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-20-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit b9f6033c1a
parent 25568316b2
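A note on the encoding before the diff: after this patch an ARMMMUIdx value is a type tag OR-ed with a small core index, and the core-index field grows from three bits to four. A comment-only sketch, restating constants that appear in the patch below:

/*
 * Orientation (restating the patch, not new code): an ARMMMUIdx value
 * is a type tag OR-ed with a small core index.
 *
 *   ARM_MMU_IDX_A     = 0x10   A-profile, backed by a TLB
 *   ARM_MMU_IDX_NOTLB = 0x20   no TLB; AT insns / stage 1 walks only
 *   ARM_MMU_IDX_M     = 0x40   M-profile
 *
 * The core-index mask widens from 0x7 to 0xf so that nine A-profile
 * TLBs fit, e.g.:
 *
 *   ARMMMUIdx_Stage2 = 8 | ARM_MMU_IDX_A = 0x18
 *   core index = 0x18 & 0xf = 8   (the ninth TLB, indexes 0..8)
 */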
target/arm/cpu-param.h

@@ -29,6 +29,6 @@
 # define TARGET_PAGE_BITS_MIN 10
 #endif
 
-#define NB_MMU_MODES 8
+#define NB_MMU_MODES 9
 
 #endif
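The bump covers the new highest TLB-backed core index (ARMMMUIdx_Stage2 = 8, see the enum below). A hypothetical compile-time guard, not part of the patch, would pin this invariant down:

/* Hypothetical guard (not in the patch): the largest TLB-backed
 * core index must stay below NB_MMU_MODES.
 */
QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & ARM_MMU_IDX_COREIDX_MASK)
                  >= NB_MMU_MODES);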
target/arm/cpu.h (134 changed lines)
@@ -2819,18 +2819,21 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  *  + NonSecure EL1 & 0 stage 1
  *  + NonSecure EL1 & 0 stage 2
  *  + NonSecure EL2
- *  + Secure EL1 & EL0
+ *  + NonSecure EL2 & 0 (ARMv8.1-VHE)
+ *  + Secure EL1 & 0
  *  + Secure EL3
  * If EL3 is 32-bit:
  *  + NonSecure PL1 & 0 stage 1
  *  + NonSecure PL1 & 0 stage 2
  *  + NonSecure PL2
- *  + Secure PL0 & PL1
+ *  + Secure PL0
+ *  + Secure PL1
  * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
  *
  * For QEMU, an mmu_idx is not quite the same as a translation regime because:
- * 1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
- *    may differ in access permissions even if the VA->PA map is the same
+ * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
+ *    because they may differ in access permissions even if the VA->PA map is
+ *    the same
  * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
  *    translation, which means that we have one mmu_idx that deals with two
  *    concatenated translation regimes [this sort of combined s1+2 TLB is
@@ -2842,19 +2845,23 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
  *    translation regimes, because they map reasonably well to each other
  *    and they can't both be active at the same time.
- * This gives us the following list of mmu_idx values:
+ * 5. we want to be able to use the TLB for accesses done as part of a
+ *    stage1 page table walk, rather than having to walk the stage2 page
+ *    table over and over.
  *
- * NS EL0 (aka NS PL0) stage 1+2
- * NS EL1 (aka NS PL1) stage 1+2
+ * This gives us the following list of cases:
+ *
+ * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * NS EL0 EL2&0
+ * NS EL2 EL2&0
  * NS EL2 (aka NS PL2)
+ * S EL0 EL1&0 (aka S PL0)
+ * S EL1 EL1&0 (not used if EL3 is 32 bit)
  * S EL3 (aka S PL1)
- * S EL0 (aka S PL0)
- * S EL1 (not used if EL3 is 32 bit)
- * NS EL0+1 stage 2
+ * NS EL1&0 stage 2
  *
- * (The last of these is an mmu_idx because we want to be able to use the TLB
- * for the accesses done as part of a stage 1 page table walk, rather than
- * having to walk the stage 2 page table over and over.)
+ * for a total of 9 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
@@ -2892,26 +2899,47 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  * For M profile we arrange them to have a bit for priv, a bit for negpri
  * and a bit for secure.
  */
 #define ARM_MMU_IDX_A 0x10 /* A profile */
 #define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
 #define ARM_MMU_IDX_M 0x40 /* M profile */
 
-/* meanings of the bits for M profile mmu idx values */
+/* Meanings of the bits for M profile mmu idx values */
 #define ARM_MMU_IDX_M_PRIV 0x1
 #define ARM_MMU_IDX_M_NEGPRI 0x2
-#define ARM_MMU_IDX_M_S 0x4
+#define ARM_MMU_IDX_M_S 0x4 /* Secure */
 
-#define ARM_MMU_IDX_TYPE_MASK (~0x7)
-#define ARM_MMU_IDX_COREIDX_MASK 0x7
+#define ARM_MMU_IDX_TYPE_MASK \
+    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
+#define ARM_MMU_IDX_COREIDX_MASK 0xf
 
 typedef enum ARMMMUIdx {
-    ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
-    ARMMMUIdx_E10_1 = 1 | ARM_MMU_IDX_A,
-    ARMMMUIdx_E2 = 2 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE3 = 3 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE10_0 = 4 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE10_1 = 5 | ARM_MMU_IDX_A,
-    ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_A,
+    /*
+     * A-profile.
+     */
+    ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_E2 = 3 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_2 = 4 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_SE10_0 = 5 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE10_1 = 6 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_Stage2 = 8 | ARM_MMU_IDX_A,
+
+    /*
+     * These are not allocated TLBs and are used only for AT system
+     * instructions or for the first stage of an S12 page table walk.
+     */
+    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+
+    /*
+     * M-profile.
+     */
     ARMMMUIdx_MUser = ARM_MMU_IDX_M,
     ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
     ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
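To make the renumbering concrete, here is an illustrative spot-check of the core indexes, assuming cpu.h and internals.h are in scope; the asserted values simply restate the enum above:

#include <assert.h>

/* Illustrative only: spot-check the new numbering. */
static void check_new_numbering(void)
{
    assert(arm_to_core_mmu_idx(ARMMMUIdx_E20_0) == 1);
    assert(arm_to_core_mmu_idx(ARMMMUIdx_SE3) == 7);
    assert(arm_to_core_mmu_idx(ARMMMUIdx_Stage2) == 8);
    /* NOTLB indexes reuse low core numbers under a different type bit. */
    assert((ARMMMUIdx_Stage1_E0 & ARM_MMU_IDX_TYPE_MASK) == ARM_MMU_IDX_NOTLB);
}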
@@ -2920,11 +2948,6 @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
     ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
     ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
-    /* Indexes below here don't have TLBs and are used only for AT system
-     * instructions or for the first stage of an S12 page table walk.
-     */
-    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
 } ARMMMUIdx;
 
 /*
@@ -2936,8 +2959,10 @@ typedef enum ARMMMUIdx {
 
 typedef enum ARMMMUIdxBit {
     TO_CORE_BIT(E10_0),
+    TO_CORE_BIT(E20_0),
     TO_CORE_BIT(E10_1),
     TO_CORE_BIT(E2),
+    TO_CORE_BIT(E20_2),
     TO_CORE_BIT(SE10_0),
     TO_CORE_BIT(SE10_1),
     TO_CORE_BIT(SE3),
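Each ARMMMUIdxBit value is 1 shifted left by the corresponding core index, so the bits compose into the idxmap argument of QEMU's generic TLB-maintenance API. A usage sketch, not taken from this patch, assuming a CPUARMState *env is in scope:

/* Sketch only: flush both halves of the new EL2&0 regime at once. */
tlb_flush_by_mmuidx(env_cpu(env),
                    ARMMMUIdxBit_E20_0 | ARMMMUIdxBit_E20_2);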
@@ -2957,49 +2982,6 @@ typedef enum ARMMMUIdxBit {
 
 #define MMU_USER_IDX 0
 
-static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
-{
-    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
-}
-
-static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
-{
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        return mmu_idx | ARM_MMU_IDX_M;
-    } else {
-        return mmu_idx | ARM_MMU_IDX_A;
-    }
-}
-
-/* Return the exception level we're running at if this is our mmu_idx */
-static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
-{
-    switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
-    case ARM_MMU_IDX_A:
-        return mmu_idx & 3;
-    case ARM_MMU_IDX_M:
-        return mmu_idx & ARM_MMU_IDX_M_PRIV;
-    default:
-        g_assert_not_reached();
-    }
-}
-
-/*
- * Return the MMU index for a v7M CPU with all relevant information
- * manually specified.
- */
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
-                              bool secstate, bool priv, bool negpri);
-
-/* Return the MMU index for a v7M CPU in the specified security and
- * privilege state.
- */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
-                                                bool secstate, bool priv);
-
-/* Return the MMU index for a v7M CPU in the specified security state */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
-
 /**
  * cpu_mmu_index:
  * @env: The cpu environment
target/arm/helper.c

@@ -8707,9 +8707,11 @@ void arm_cpu_do_interrupt(CPUState *cs)
 #endif /* !CONFIG_USER_ONLY */
 
 /* Return the exception level which controls this address translation regime */
-static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
+static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
+    case ARMMMUIdx_E20_0:
+    case ARMMMUIdx_E20_2:
     case ARMMMUIdx_Stage2:
     case ARMMMUIdx_E2:
         return 2;
@@ -8720,6 +8722,8 @@ static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
     case ARMMMUIdx_SE10_1:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_E10_1:
     case ARMMMUIdx_MPrivNegPri:
     case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
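For context, the consumer named in the next hunk's header uses regime_el()'s result to pick the banked translation control register; in the helper.c of this era it reads roughly as follows (paraphrased, not part of this patch):

static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    /* The EL returned by regime_el() indexes the banked TCR. */
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}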
@@ -8821,10 +8825,14 @@ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
  */
 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
 {
-    if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
-        mmu_idx += (ARMMMUIdx_Stage1_E0 - ARMMMUIdx_E10_0);
+    switch (mmu_idx) {
+    case ARMMMUIdx_E10_0:
+        return ARMMMUIdx_Stage1_E0;
+    case ARMMMUIdx_E10_1:
+        return ARMMMUIdx_Stage1_E1;
+    default:
+        return mmu_idx;
     }
-    return mmu_idx;
 }
 
 /* Return true if the translation regime is using LPAE format page tables */
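The old arithmetic depended on ARMMMUIdx_E10_0 and ARMMMUIdx_E10_1 being adjacent; with the renumbered enum (E10_1 is now core index 2, not 1) a single offset no longer works, hence the explicit switch. Illustrative behavior, restating the code above:

/* Illustrative only: restates the switch above. */
assert(stage_1_mmu_idx(ARMMMUIdx_E10_0) == ARMMMUIdx_Stage1_E0);
assert(stage_1_mmu_idx(ARMMMUIdx_E10_1) == ARMMMUIdx_Stage1_E1);
assert(stage_1_mmu_idx(ARMMMUIdx_E2) == ARMMMUIdx_E2);  /* pass-through */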
@@ -8857,6 +8865,7 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
     case ARMMMUIdx_SE10_0:
+    case ARMMMUIdx_E20_0:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_MUser:
     case ARMMMUIdx_MSUser:
@@ -11282,6 +11291,31 @@ int fp_exception_el(CPUARMState *env, int cur_el)
     return 0;
 }
 
+/* Return the exception level we're running at if this is our mmu_idx */
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
+{
+    if (mmu_idx & ARM_MMU_IDX_M) {
+        return mmu_idx & ARM_MMU_IDX_M_PRIV;
+    }
+
+    switch (mmu_idx) {
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_E20_0:
+    case ARMMMUIdx_SE10_0:
+        return 0;
+    case ARMMMUIdx_E10_1:
+    case ARMMMUIdx_SE10_1:
+        return 1;
+    case ARMMMUIdx_E2:
+    case ARMMMUIdx_E20_2:
+        return 2;
+    case ARMMMUIdx_SE3:
+        return 3;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 #ifndef CONFIG_TCG
 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
 {
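The inline header version this replaces computed mmu_idx & 3 for A-profile, which the new numbering breaks: ARMMMUIdx_E20_0 has core index 1 but runs at EL0, and ARMMMUIdx_E10_1 has core index 2 but runs at EL1. Hence the explicit table. Illustrative checks, restating the switch above:

assert(arm_mmu_idx_to_el(ARMMMUIdx_E20_0) == 0);  /* '& 3' would say 1 */
assert(arm_mmu_idx_to_el(ARMMMUIdx_E10_1) == 1);  /* '& 3' would say 2 */
assert(arm_mmu_idx_to_el(ARMMMUIdx_SE3) == 3);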
@@ -11295,10 +11329,26 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
     }
 
-    if (el < 2 && arm_is_secure_below_el3(env)) {
-        return ARMMMUIdx_SE10_0 + el;
-    } else {
-        return ARMMMUIdx_E10_0 + el;
+    switch (el) {
+    case 0:
+        /* TODO: ARMv8.1-VHE */
+        if (arm_is_secure_below_el3(env)) {
+            return ARMMMUIdx_SE10_0;
+        }
+        return ARMMMUIdx_E10_0;
+    case 1:
+        if (arm_is_secure_below_el3(env)) {
+            return ARMMMUIdx_SE10_1;
+        }
+        return ARMMMUIdx_E10_1;
+    case 2:
+        /* TODO: ARMv8.1-VHE */
+        /* TODO: ARMv8.4-SecEL2 */
+        return ARMMMUIdx_E2;
+    case 3:
+        return ARMMMUIdx_SE3;
+    default:
+        g_assert_not_reached();
     }
 }
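The TODO markers show where the EL2&0 regime will hook in. Purely as a speculative sketch of the follow-up work, not part of this patch (arm_hcr_el2_eff(), HCR_E2H and HCR_TGE already exist in target/arm), the EL0 selection might eventually look like:

/* Speculative sketch only, an assumption about the follow-up VHE work. */
static ARMMMUIdx mmu_idx_for_el0_sketch(CPUARMState *env)
{
    uint64_t hcr = arm_hcr_el2_eff(env);

    /* With HCR_EL2.{E2H,TGE} == {1,1}, EL0 runs in the EL2&0 regime. */
    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        return ARMMMUIdx_E20_0;
    }
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_SE10_0;
    }
    return ARMMMUIdx_E10_0;
}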
|
@ -769,6 +769,39 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|||||||
MMUAccessType access_type, int mmu_idx,
|
MMUAccessType access_type, int mmu_idx,
|
||||||
bool probe, uintptr_t retaddr);
|
bool probe, uintptr_t retaddr);
|
||||||
|
|
||||||
|
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
|
||||||
|
{
|
||||||
|
return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
|
||||||
|
{
|
||||||
|
if (arm_feature(env, ARM_FEATURE_M)) {
|
||||||
|
return mmu_idx | ARM_MMU_IDX_M;
|
||||||
|
} else {
|
||||||
|
return mmu_idx | ARM_MMU_IDX_A;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Return the MMU index for a v7M CPU with all relevant information
|
||||||
|
* manually specified.
|
||||||
|
*/
|
||||||
|
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
|
||||||
|
bool secstate, bool priv, bool negpri);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Return the MMU index for a v7M CPU in the specified security and
|
||||||
|
* privilege state.
|
||||||
|
*/
|
||||||
|
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
|
||||||
|
bool secstate, bool priv);
|
||||||
|
|
||||||
|
/* Return the MMU index for a v7M CPU in the specified security state */
|
||||||
|
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
|
||||||
|
|
||||||
/* Return true if the stage 1 translation regime is using LPAE format page
|
/* Return true if the stage 1 translation regime is using LPAE format page
|
||||||
* tables */
|
* tables */
|
||||||
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
|
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
|
||||||
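An illustrative round-trip through the two helpers just moved here, assuming env points at an A-profile CPUARMState and assert.h is included: arm_to_core_mmu_idx() strips the type bits, and core_to_arm_mmu_idx() restores them from the CPU family:

/* Illustrative only, under the assumptions stated above. */
static void check_round_trip(CPUARMState *env)
{
    int core = arm_to_core_mmu_idx(ARMMMUIdx_E20_2);          /* == 4 */
    assert(core_to_arm_mmu_idx(env, core) == ARMMMUIdx_E20_2);
}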
@@ -810,6 +843,8 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
     switch (mmu_idx) {
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E10_1:
+    case ARMMMUIdx_E20_0:
+    case ARMMMUIdx_E20_2:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_E2:
target/arm/translate.c

@@ -172,7 +172,6 @@ static inline int get_a32_user_mem_index(DisasContext *s)
     case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPrivNegPri:
         return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
-    case ARMMMUIdx_Stage2:
     default:
         g_assert_not_reached();
     }