Merge tag 'pull-target-arm-20221010' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * Retry KVM_CREATE_VM call if it fails EINTR
 * allow setting SCR_EL3.EnTP2 when FEAT_SME is implemented
 * docs/nuvoton: Update URL for images
 * refactoring of page table walk code
 * hw/arm/boot: set CPTR_EL3.ESM and SCR_EL3.EnTP2 when booting Linux with EL3
 * Don't allow guest to use unimplemented granule sizes
 * Report FEAT_GTG support

# gpg: Signature made Mon 10 Oct 2022 10:26:38 EDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20221010' of https://git.linaro.org/people/pmaydell/qemu-arm: (28 commits)
  docs/system/arm/emulation.rst: Report FEAT_GTG support
  target/arm: Use ARMGranuleSize in ARMVAParameters
  target/arm: Don't allow guest to use unimplemented granule sizes
  hw/arm/boot: set CPTR_EL3.ESM and SCR_EL3.EnTP2 when booting Linux with EL3
  target/arm: Use tlb_set_page_full
  target/arm: Fix cacheattr in get_phys_addr_disabled
  target/arm: Split out get_phys_addr_disabled
  target/arm: Fix ATS12NSO* from S PL1
  target/arm: Pass HCR to attribute subroutines.
  target/arm: Remove env argument from combined_attrs_fwb
  target/arm: Hoist read of *is_secure in S1_ptw_translate
  target/arm: Introduce arm_hcr_el2_eff_secstate
  target/arm: Drop secure check for HCR.TGE vs SCTLR_EL1.M
  target/arm: Reorg regime_translation_disabled
  target/arm: Fold secure and non-secure a-profile mmu indexes
  target/arm: Add is_secure parameter to do_ats_write
  target/arm: Merge regime_is_secure into get_phys_addr
  target/arm: Add TBFLAG_M32.SECURE
  target/arm: Add is_secure parameter to v7m_read_half_insn
  target/arm: Split out get_phys_addr_with_secure
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

commit cdda364e1d
docs/system/arm/emulation.rst

@@ -31,6 +31,7 @@ the following architecture extensions:
 - FEAT_FRINTTS (Floating-point to integer instructions)
 - FEAT_FlagM (Flag manipulation instructions v2)
 - FEAT_FlagM2 (Enhancements to flag manipulation instructions)
+- FEAT_GTG (Guest translation granule size)
 - FEAT_HCX (Support for the HCRX_EL2 register)
 - FEAT_HPDS (Hierarchical permission disables)
 - FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
docs/system/arm/nuvoton.rst

@@ -82,9 +82,9 @@ Boot options

 The Nuvoton machines can boot from an OpenBMC firmware image, or directly into
 a kernel using the ``-kernel`` option. OpenBMC images for ``quanta-gsj`` and
-possibly others can be downloaded from the OpenPOWER jenkins :
+possibly others can be downloaded from the OpenBMC jenkins :

-   https://openpower.xyz/
+   https://jenkins.openbmc.org/

 The firmware image should be attached as an MTD drive. Example :
hw/arm/boot.c

@@ -763,6 +763,10 @@ static void do_cpu_reset(void *opaque)
                 if (cpu_isar_feature(aa64_sve, cpu)) {
                     env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
                 }
+                if (cpu_isar_feature(aa64_sme, cpu)) {
+                    env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
+                    env->cp15.scr_el3 |= SCR_ENTP2;
+                }
                 /* AArch64 kernels never boot in secure mode */
                 assert(!info->secure_boot);
                 /* This hook is only supported for AArch32 currently:
target/arm/cpu-param.h

@@ -32,6 +32,6 @@
 # define TARGET_PAGE_BITS_MIN 10
 #endif

-#define NB_MMU_MODES 15
+#define NB_MMU_MODES 8

 #endif
target/arm/cpu.h (181 changes)

@@ -1664,33 +1664,33 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)

 #define HPFAR_NS (1ULL << 63)

-#define SCR_NS (1U << 0)
-#define SCR_IRQ (1U << 1)
-#define SCR_FIQ (1U << 2)
-#define SCR_EA (1U << 3)
-#define SCR_FW (1U << 4)
-#define SCR_AW (1U << 5)
-#define SCR_NET (1U << 6)
-#define SCR_SMD (1U << 7)
-#define SCR_HCE (1U << 8)
-#define SCR_SIF (1U << 9)
-#define SCR_RW (1U << 10)
-#define SCR_ST (1U << 11)
-#define SCR_TWI (1U << 12)
-#define SCR_TWE (1U << 13)
-#define SCR_TLOR (1U << 14)
-#define SCR_TERR (1U << 15)
-#define SCR_APK (1U << 16)
-#define SCR_API (1U << 17)
-#define SCR_EEL2 (1U << 18)
-#define SCR_EASE (1U << 19)
-#define SCR_NMEA (1U << 20)
-#define SCR_FIEN (1U << 21)
-#define SCR_ENSCXT (1U << 25)
-#define SCR_ATA (1U << 26)
-#define SCR_FGTEN (1U << 27)
-#define SCR_ECVEN (1U << 28)
-#define SCR_TWEDEN (1U << 29)
+#define SCR_NS (1ULL << 0)
+#define SCR_IRQ (1ULL << 1)
+#define SCR_FIQ (1ULL << 2)
+#define SCR_EA (1ULL << 3)
+#define SCR_FW (1ULL << 4)
+#define SCR_AW (1ULL << 5)
+#define SCR_NET (1ULL << 6)
+#define SCR_SMD (1ULL << 7)
+#define SCR_HCE (1ULL << 8)
+#define SCR_SIF (1ULL << 9)
+#define SCR_RW (1ULL << 10)
+#define SCR_ST (1ULL << 11)
+#define SCR_TWI (1ULL << 12)
+#define SCR_TWE (1ULL << 13)
+#define SCR_TLOR (1ULL << 14)
+#define SCR_TERR (1ULL << 15)
+#define SCR_APK (1ULL << 16)
+#define SCR_API (1ULL << 17)
+#define SCR_EEL2 (1ULL << 18)
+#define SCR_EASE (1ULL << 19)
+#define SCR_NMEA (1ULL << 20)
+#define SCR_FIEN (1ULL << 21)
+#define SCR_ENSCXT (1ULL << 25)
+#define SCR_ATA (1ULL << 26)
+#define SCR_FGTEN (1ULL << 27)
+#define SCR_ECVEN (1ULL << 28)
+#define SCR_TWEDEN (1ULL << 29)
 #define SCR_TWEDEL MAKE_64BIT_MASK(30, 4)
 #define SCR_TME (1ULL << 34)
 #define SCR_AMVOFFEN (1ULL << 35)
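Why these constants widen from 1U to 1ULL: SCR_EL3 has defined bits at position 32 and above (SCR_TWEDEL occupies bits 30-33 and SCR_TME is bit 34), and in C a left shift of a 32-bit unsigned int by 32 or more is undefined behaviour, so every mask in the register is made 64-bit for consistency. A minimal standalone sketch (illustrative names, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    /* (1U << 34) would be undefined behaviour; 1ULL keeps the shift valid. */
    #define SCR_TME_MASK (1ULL << 34)

    int main(void)
    {
        uint64_t scr_el3 = SCR_TME_MASK;
        assert((scr_el3 & SCR_TME_MASK) != 0); /* bit 34 is representable */
        return 0;
    }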
@@ -2412,15 +2412,15 @@ static inline bool arm_is_secure(CPUARMState *env)
  * Return true if the current security state has AArch64 EL2 or AArch32 Hyp.
  * This corresponds to the pseudocode EL2Enabled()
  */
+static inline bool arm_is_el2_enabled_secstate(CPUARMState *env, bool secure)
+{
+    return arm_feature(env, ARM_FEATURE_EL2)
+        && (!secure || (env->cp15.scr_el3 & SCR_EEL2));
+}
+
 static inline bool arm_is_el2_enabled(CPUARMState *env)
 {
-    if (arm_feature(env, ARM_FEATURE_EL2)) {
-        if (arm_is_secure_below_el3(env)) {
-            return (env->cp15.scr_el3 & SCR_EEL2) != 0;
-        }
-        return true;
-    }
-    return false;
+    return arm_is_el2_enabled_secstate(env, arm_is_secure_below_el3(env));
 }

@@ -2434,6 +2434,11 @@ static inline bool arm_is_secure(CPUARMState *env)
     return false;
 }

+static inline bool arm_is_el2_enabled_secstate(CPUARMState *env, bool secure)
+{
+    return false;
+}
+
 static inline bool arm_is_el2_enabled(CPUARMState *env)
 {
     return false;

@@ -2446,6 +2451,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
  * "for all purposes other than a direct read or write access of HCR_EL2."
  * Not included here is HCR_RW.
  */
+uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure);
 uint64_t arm_hcr_el2_eff(CPUARMState *env);
 uint64_t arm_hcrx_el2_eff(CPUARMState *env);

@@ -2884,26 +2890,27 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  *     table over and over.
  *  6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
  *     Never (PAN) bit within PSTATE.
+ *  7. we fold together the secure and non-secure regimes for A-profile,
+ *     because there are no banked system registers for aarch64, so the
+ *     process of switching between secure and non-secure is
+ *     already heavyweight.
  *
  * This gives us the following list of cases:
  *
- * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
- * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
- * NS EL1 EL1&0 stage 1+2 +PAN
- * NS EL0 EL2&0
- * NS EL2 EL2&0
- * NS EL2 EL2&0 +PAN
- * NS EL2 (aka NS PL2)
- * S EL0 EL1&0 (aka S PL0)
- * S EL1 EL1&0 (not used if EL3 is 32 bit)
- * S EL1 EL1&0 +PAN
- * S EL3 (aka S PL1)
+ * EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * EL1 EL1&0 stage 1+2 +PAN
+ * EL0 EL2&0
+ * EL2 EL2&0
+ * EL2 EL2&0 +PAN
+ * EL2 (aka NS PL2)
+ * EL3 (aka S PL1)
  *
- * for a total of 11 different mmu_idx.
+ * for a total of 8 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
- * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
- * NS EL2 if we ever model a Cortex-R52).
+ * as A profile. They only need to distinguish EL0 and EL1 (and
+ * EL2 if we ever model a Cortex-R52).
  *
  * M profile CPUs are rather different as they do not have a true MMU.
  * They have the following different MMU indexes:

@@ -2942,9 +2949,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
 #define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
 #define ARM_MMU_IDX_M 0x40 /* M profile */

-/* Meanings of the bits for A profile mmu idx values */
-#define ARM_MMU_IDX_A_NS 0x8
-
 /* Meanings of the bits for M profile mmu idx values */
 #define ARM_MMU_IDX_M_PRIV 0x1
 #define ARM_MMU_IDX_M_NEGPRI 0x2

@@ -2958,22 +2962,14 @@ typedef enum ARMMMUIdx {
     /*
      * A-profile.
      */
-    ARMMMUIdx_SE10_0 = 0 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE20_0 = 1 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE10_1 = 2 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE20_2 = 3 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE10_1_PAN = 4 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE20_2_PAN = 5 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE2 = 6 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
-
-    ARMMMUIdx_E10_0 = ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS,
-    ARMMMUIdx_E20_0 = ARMMMUIdx_SE20_0 | ARM_MMU_IDX_A_NS,
-    ARMMMUIdx_E10_1 = ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS,
-    ARMMMUIdx_E20_2 = ARMMMUIdx_SE20_2 | ARM_MMU_IDX_A_NS,
-    ARMMMUIdx_E10_1_PAN = ARMMMUIdx_SE10_1_PAN | ARM_MMU_IDX_A_NS,
-    ARMMMUIdx_E20_2_PAN = ARMMMUIdx_SE20_2_PAN | ARM_MMU_IDX_A_NS,
-    ARMMMUIdx_E2 = ARMMMUIdx_SE2 | ARM_MMU_IDX_A_NS,
+    ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
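The comment above and this enum line up: folding the secure ("SE") variants away leaves exactly eight A-profile cases, one per enum value. A sketch of the correspondence (derived from the list and values in these hunks):

    /* EL1&0: EL0, EL1, EL1+PAN -> E10_0 (0), E10_1 (2), E10_1_PAN (4) */
    /* EL2&0: EL0, EL2, EL2+PAN -> E20_0 (1), E20_2 (3), E20_2_PAN (5) */
    /* EL2 alone                -> E2 (6)                              */
    /* EL3 alone                -> E3 (7)                              */
    /* 3 + 3 + 1 + 1 = 8 mmu_idx values, down from 11.                 */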
@@ -2982,9 +2978,6 @@ typedef enum ARMMMUIdx {
     /*
      * These are not allocated TLBs and are used only for AT system
      * instructions or for the first stage of an S12 page table walk.
      */
     ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage1_SE0 = 3 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage1_SE1 = 4 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage1_SE1_PAN = 5 | ARM_MMU_IDX_NOTLB,
     /*
      * Not allocated a TLB: used only for second stage of an S12 page
      * table walk, or for descriptor loads during first stage of an S1

@@ -2992,8 +2985,8 @@ typedef enum ARMMMUIdx {
      * then various TLB flush insns which currently are no-ops or flush
      * only stage 1 MMU indexes will need to change to flush stage 2.
      */
-    ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage2_S = 7 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage2_S = 4 | ARM_MMU_IDX_NOTLB,

     /*
      * M-profile.

@@ -3023,14 +3016,7 @@ typedef enum ARMMMUIdxBit {
     TO_CORE_BIT(E2),
     TO_CORE_BIT(E20_2),
     TO_CORE_BIT(E20_2_PAN),
-    TO_CORE_BIT(SE10_0),
-    TO_CORE_BIT(SE20_0),
-    TO_CORE_BIT(SE10_1),
-    TO_CORE_BIT(SE20_2),
-    TO_CORE_BIT(SE10_1_PAN),
-    TO_CORE_BIT(SE20_2_PAN),
-    TO_CORE_BIT(SE2),
-    TO_CORE_BIT(SE3),
+    TO_CORE_BIT(E3),

     TO_CORE_BIT(MUser),
     TO_CORE_BIT(MPriv),

@@ -3203,6 +3189,8 @@ FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */
 FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */
 /* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */
 FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1) /* Not cached. */
+/* Set if in secure mode */
+FIELD(TBFLAG_M32, SECURE, 6, 1)

 /*
  * Bit usage when in AArch64 state

@@ -4109,6 +4097,39 @@ static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id)
     return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id));
 }

+static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id)
+{
+    return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0;
+}
+
+static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1;
+}
+
+static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id)
+{
+    return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0;
+}
+
+static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id));
+}
+
+static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id));
+}
+
+static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id));
+}
+
 static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
target/arm/helper.c

@@ -1752,8 +1752,9 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 {
     /* Begin with base v8.0 state.  */
-    uint32_t valid_mask = 0x3fff;
+    uint64_t valid_mask = 0x3fff;
     ARMCPU *cpu = env_archcpu(env);
+    uint64_t changed;

     /*
      * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always

@@ -1789,6 +1790,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         if (cpu_isar_feature(aa64_doublefault, cpu)) {
             valid_mask |= SCR_EASE | SCR_NMEA;
         }
+        if (cpu_isar_feature(aa64_sme, cpu)) {
+            valid_mask |= SCR_ENTP2;
+        }
     } else {
         valid_mask &= ~(SCR_RW | SCR_ST);
         if (cpu_isar_feature(aa32_ras, cpu)) {

@@ -1813,7 +1817,22 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)

     /* Clear all-context RES0 bits.  */
     value &= valid_mask;
-    raw_write(env, ri, value);
+    changed = env->cp15.scr_el3 ^ value;
+    env->cp15.scr_el3 = value;
+
+    /*
+     * If SCR_EL3.NS changes, i.e. arm_is_secure_below_el3, then
+     * we must invalidate all TLBs below EL3.
+     */
+    if (changed & SCR_NS) {
+        tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
+                                           ARMMMUIdxBit_E20_0 |
+                                           ARMMMUIdxBit_E10_1 |
+                                           ARMMMUIdxBit_E20_2 |
+                                           ARMMMUIdxBit_E10_1_PAN |
+                                           ARMMMUIdxBit_E20_2_PAN |
+                                           ARMMMUIdxBit_E2));
+    }
 }

 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
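The XOR in the new scr_write() body is the usual changed-bits idiom: old ^ new has a 1 in every bit position that flipped, so (changed & SCR_NS) fires only on a security-state transition, which is what now forces a TLB flush, because the flushed mmu_idx values are shared by both security states after the fold.

    /* Illustrative only:                                        */
    /*   changed = old_value ^ new_value;  bits that flipped     */
    /*   changed & SCR_NS                  did NS change?        */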
@@ -2644,9 +2663,6 @@ static int gt_phys_redir_timeridx(CPUARMState *env)
     case ARMMMUIdx_E20_0:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         return GTIMER_HYP;
     default:
         return GTIMER_PHYS;

@@ -2659,9 +2675,6 @@ static int gt_virt_redir_timeridx(CPUARMState *env)
     case ARMMMUIdx_E20_0:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         return GTIMER_HYPVIRT;
     default:
         return GTIMER_VIRT;

@@ -3188,7 +3201,8 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,

 #ifdef CONFIG_TCG
 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
+                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                             bool is_secure)
 {
     bool ret;
     uint64_t par64;

@@ -3196,7 +3210,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
     ARMMMUFaultInfo fi = {};
     GetPhysAddrResult res = {};

-    ret = get_phys_addr(env, value, access_type, mmu_idx, &res, &fi);
+    ret = get_phys_addr_with_secure(env, value, access_type, mmu_idx,
+                                    is_secure, &res, &fi);

     /*
      * ATS operations only do S1 or S1+S2 translations, so we never

@@ -3308,8 +3323,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
         /* Create a 64-bit PAR */
         par64 = (1 << 11); /* LPAE bit always set */
         if (!ret) {
-            par64 |= res.phys & ~0xfffULL;
-            if (!res.attrs.secure) {
+            par64 |= res.f.phys_addr & ~0xfffULL;
+            if (!res.f.attrs.secure) {
                 par64 |= (1 << 9); /* NS */
             }
             par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */

@@ -3333,13 +3348,13 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
-            if (res.page_size == (1 << 24)
+            if (res.f.lg_page_size == 24
                && arm_feature(env, ARM_FEATURE_V7)) {
-                par64 = (res.phys & 0xff000000) | (1 << 1);
+                par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
            } else {
-                par64 = res.phys & 0xfffff000;
+                par64 = res.f.phys_addr & 0xfffff000;
            }
-            if (!res.attrs.secure) {
+            if (!res.f.attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {

@@ -3367,17 +3382,17 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
         switch (el) {
         case 3:
-            mmu_idx = ARMMMUIdx_SE3;
+            mmu_idx = ARMMMUIdx_E3;
+            secure = true;
             break;
         case 2:
             g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
             /* fall through */
         case 1:
             if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
-                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
-                           : ARMMMUIdx_Stage1_E1_PAN);
+                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
             } else {
-                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+                mmu_idx = ARMMMUIdx_Stage1_E1;
             }
             break;
         default:

@@ -3388,14 +3403,15 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
         switch (el) {
         case 3:
-            mmu_idx = ARMMMUIdx_SE10_0;
+            mmu_idx = ARMMMUIdx_E10_0;
+            secure = true;
             break;
         case 2:
             g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
             mmu_idx = ARMMMUIdx_Stage1_E0;
             break;
         case 1:
-            mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+            mmu_idx = ARMMMUIdx_Stage1_E0;
             break;
         default:
             g_assert_not_reached();

@@ -3404,16 +3420,18 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     case 4:
         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
         mmu_idx = ARMMMUIdx_E10_1;
+        secure = false;
         break;
     case 6:
         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
         mmu_idx = ARMMMUIdx_E10_0;
+        secure = false;
         break;
     default:
         g_assert_not_reached();
     }

-    par64 = do_ats_write(env, value, access_type, mmu_idx);
+    par64 = do_ats_write(env, value, access_type, mmu_idx, secure);

     A32_BANKED_CURRENT_REG_SET(env, par, par64);
 #else

@@ -3429,7 +3447,8 @@ static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
     uint64_t par64;

-    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
+    /* There is no SecureEL2 for AArch32. */
+    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, false);

     A32_BANKED_CURRENT_REG_SET(env, par, par64);
 #else

@@ -3461,36 +3480,37 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
         switch (ri->opc1) {
         case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
             if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
-                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
-                           : ARMMMUIdx_Stage1_E1_PAN);
+                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
             } else {
-                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+                mmu_idx = ARMMMUIdx_Stage1_E1;
             }
             break;
         case 4: /* AT S1E2R, AT S1E2W */
-            mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
+            mmu_idx = ARMMMUIdx_E2;
             break;
         case 6: /* AT S1E3R, AT S1E3W */
-            mmu_idx = ARMMMUIdx_SE3;
+            mmu_idx = ARMMMUIdx_E3;
+            secure = true;
             break;
         default:
             g_assert_not_reached();
         }
         break;
     case 2: /* AT S1E0R, AT S1E0W */
-        mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+        mmu_idx = ARMMMUIdx_Stage1_E0;
         break;
     case 4: /* AT S12E1R, AT S12E1W */
-        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
+        mmu_idx = ARMMMUIdx_E10_1;
         break;
     case 6: /* AT S12E0R, AT S12E0W */
-        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
+        mmu_idx = ARMMMUIdx_E10_0;
         break;
     default:
         g_assert_not_reached();
     }

-    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
+    env->cp15.par_el[1] = do_ats_write(env, value, access_type,
+                                       mmu_idx, secure);
 #else
     /* Handled by hardware accelerator. */
     g_assert_not_reached();

@@ -3753,11 +3773,6 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
         uint16_t mask = ARMMMUIdxBit_E20_2 |
                         ARMMMUIdxBit_E20_2_PAN |
                         ARMMMUIdxBit_E20_0;
-
-        if (arm_is_secure_below_el3(env)) {
-            mask >>= ARM_MMU_IDX_A_NS;
-        }
-
         tlb_flush_by_mmuidx(env_cpu(env), mask);
     }
     raw_write(env, ri, value);

@@ -3777,11 +3792,6 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
         uint16_t mask = ARMMMUIdxBit_E10_1 |
                         ARMMMUIdxBit_E10_1_PAN |
                         ARMMMUIdxBit_E10_0;
-
-        if (arm_is_secure_below_el3(env)) {
-            mask >>= ARM_MMU_IDX_A_NS;
-        }
-
         tlb_flush_by_mmuidx(cs, mask);
     raw_write(env, ri, value);
 }

@@ -4252,11 +4262,6 @@ static int vae1_tlbmask(CPUARMState *env)
                ARMMMUIdxBit_E10_1_PAN |
                ARMMMUIdxBit_E10_0;
     }
-
-    if (arm_is_secure_below_el3(env)) {
-        mask >>= ARM_MMU_IDX_A_NS;
-    }
-
     return mask;
 }

@@ -4283,10 +4288,6 @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
         mmu_idx = ARMMMUIdx_E10_0;
     }
-
-    if (arm_is_secure_below_el3(env)) {
-        mmu_idx &= ~ARM_MMU_IDX_A_NS;
-    }
-
     return tlbbits_for_regime(env, mmu_idx, addr);
 }

@@ -4319,30 +4320,17 @@ static int alle1_tlbmask(CPUARMState *env)
      * stage 2 translations, whereas most other scopes only invalidate
      * stage 1 translations.
      */
-    if (arm_is_secure_below_el3(env)) {
-        return ARMMMUIdxBit_SE10_1 |
-               ARMMMUIdxBit_SE10_1_PAN |
-               ARMMMUIdxBit_SE10_0;
-    } else {
-        return ARMMMUIdxBit_E10_1 |
-               ARMMMUIdxBit_E10_1_PAN |
-               ARMMMUIdxBit_E10_0;
-    }
+    return (ARMMMUIdxBit_E10_1 |
+            ARMMMUIdxBit_E10_1_PAN |
+            ARMMMUIdxBit_E10_0);
 }

 static int e2_tlbmask(CPUARMState *env)
 {
-    if (arm_is_secure_below_el3(env)) {
-        return ARMMMUIdxBit_SE20_0 |
-               ARMMMUIdxBit_SE20_2 |
-               ARMMMUIdxBit_SE20_2_PAN |
-               ARMMMUIdxBit_SE2;
-    } else {
-        return ARMMMUIdxBit_E20_0 |
-               ARMMMUIdxBit_E20_2 |
-               ARMMMUIdxBit_E20_2_PAN |
-               ARMMMUIdxBit_E2;
-    }
+    return (ARMMMUIdxBit_E20_0 |
+            ARMMMUIdxBit_E20_2 |
+            ARMMMUIdxBit_E20_2_PAN |
+            ARMMMUIdxBit_E2);
 }

 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,

@@ -4369,7 +4357,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
     ARMCPU *cpu = env_archcpu(env);
     CPUState *cs = CPU(cpu);

-    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
+    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
 }

 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,

@@ -4395,7 +4383,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);

-    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
 }

 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,

@@ -4423,7 +4411,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);

-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
+    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
 }

 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,

@@ -4462,12 +4450,10 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
-    bool secure = arm_is_secure_below_el3(env);
-    int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
-    int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
-                                  pageaddr);
+    int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);

-    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                  ARMMMUIdxBit_E2, bits);
 }

 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,

@@ -4475,10 +4461,10 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
-    int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+    int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);

     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                                  ARMMMUIdxBit_SE3, bits);
+                                                  ARMMMUIdxBit_E3, bits);
 }

 #ifdef TARGET_AARCH64
@@ -4487,6 +4473,24 @@ typedef struct {
     uint64_t length;
 } TLBIRange;

+static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
+{
+    /*
+     * Note that the TLBI range TG field encoding differs from both
+     * TG0 and TG1 encodings.
+     */
+    switch (tg) {
+    case 1:
+        return Gran4K;
+    case 2:
+        return Gran16K;
+    case 3:
+        return Gran64K;
+    default:
+        return GranInvalid;
+    }
+}
+
 static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
                                      uint64_t value)
 {
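As the comment in the new helper notes, there are three distinct granule encodings in play. Collected from the three conversion helpers in this series (field value -> granule):

    /* TLBI range TG field: 1 -> 4K,  2 -> 16K, 3 -> 64K */
    /* TCR TG0 field:       0 -> 4K,  1 -> 64K, 2 -> 16K */
    /* TCR TG1 field:       1 -> 16K, 2 -> 4K,  3 -> 64K */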
@@ -4495,17 +4499,19 @@ static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
     uint64_t select = sextract64(value, 36, 1);
     ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
     TLBIRange ret = { };
+    ARMGranuleSize gran;

     page_size_granule = extract64(value, 46, 2);
+    gran = tlbi_range_tg_to_gran_size(page_size_granule);

     /* The granule encoded in value must match the granule in use. */
-    if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
+    if (gran != param.gran) {
         qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
                       page_size_granule);
         return ret;
     }

-    page_shift = (page_size_granule - 1) * 2 + 12;
+    page_shift = arm_granule_bits(gran);
     num = extract64(value, 39, 5);
     scale = extract64(value, 44, 2);
     exponent = (5 * scale) + 1;

@@ -4584,8 +4590,7 @@ static void tlbi_aa64_rvae1is_write(CPUARMState *env,

 static int vae2_tlbmask(CPUARMState *env)
 {
-    return (arm_is_secure_below_el3(env)
-            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
+    return ARMMMUIdxBit_E2;
 }

 static void tlbi_aa64_rvae2_write(CPUARMState *env,

@@ -4631,8 +4636,7 @@ static void tlbi_aa64_rvae3_write(CPUARMState *env,
      * flush-last-level-only.
      */

-    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
-                  tlb_force_broadcast(env));
+    do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
 }

 static void tlbi_aa64_rvae3is_write(CPUARMState *env,

@@ -4646,7 +4650,7 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
      * flush-last-level-only or inner/outer specific flushes.
      */

-    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
+    do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
 }
 #endif

@@ -5245,15 +5249,15 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
 }

 /*
- * Return the effective value of HCR_EL2.
+ * Return the effective value of HCR_EL2, at the given security state.
  * Bits that are not included here:
  * RW (read from SCR_EL3.RW as needed)
  */
-uint64_t arm_hcr_el2_eff(CPUARMState *env)
+uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)
 {
     uint64_t ret = env->cp15.hcr_el2;

-    if (!arm_is_el2_enabled(env)) {
+    if (!arm_is_el2_enabled_secstate(env, secure)) {
         /*
          * "This register has no effect if EL2 is not enabled in the
          * current Security state".  This is ARMv8.4-SecEL2 speak for

@@ -5312,6 +5316,11 @@
     return ret;
 }

+uint64_t arm_hcr_el2_eff(CPUARMState *env)
+{
+    return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
+}
+
 /*
  * Corresponds to ARM pseudocode function ELIsInHost().
  */

@@ -10259,8 +10268,7 @@ uint64_t arm_sctlr(CPUARMState *env, int el)
     /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
     if (el == 0) {
         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
-        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
-             ? 2 : 1;
+        el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
     }
     return env->cp15.sctlr_el[el];
 }
@@ -10299,20 +10307,105 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
     }
 }

+static ARMGranuleSize tg0_to_gran_size(int tg)
+{
+    switch (tg) {
+    case 0:
+        return Gran4K;
+    case 1:
+        return Gran64K;
+    case 2:
+        return Gran16K;
+    default:
+        return GranInvalid;
+    }
+}
+
+static ARMGranuleSize tg1_to_gran_size(int tg)
+{
+    switch (tg) {
+    case 1:
+        return Gran16K;
+    case 2:
+        return Gran4K;
+    case 3:
+        return Gran64K;
+    default:
+        return GranInvalid;
+    }
+}
+
+static inline bool have4k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
+        : cpu_isar_feature(aa64_tgran4, cpu);
+}
+
+static inline bool have16k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
+        : cpu_isar_feature(aa64_tgran16, cpu);
+}
+
+static inline bool have64k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
+        : cpu_isar_feature(aa64_tgran64, cpu);
+}
+
+static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
+                                         bool stage2)
+{
+    switch (gran) {
+    case Gran4K:
+        if (have4k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case Gran16K:
+        if (have16k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case Gran64K:
+        if (have64k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case GranInvalid:
+        break;
+    }
+    /*
+     * If the guest selects a granule size that isn't implemented,
+     * the architecture requires that we behave as if it selected one
+     * that is (with an IMPDEF choice of which one to pick). We choose
+     * to implement the smallest supported granule size.
+     */
+    if (have4k(cpu, stage2)) {
+        return Gran4K;
+    }
+    if (have16k(cpu, stage2)) {
+        return Gran16K;
+    }
+    assert(have64k(cpu, stage2));
+    return Gran64K;
+}
+
 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                    ARMMMUIdx mmu_idx, bool data)
 {
     uint64_t tcr = regime_tcr(env, mmu_idx);
-    bool epd, hpd, using16k, using64k, tsz_oob, ds;
+    bool epd, hpd, tsz_oob, ds;
     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
+    ARMGranuleSize gran;
     ARMCPU *cpu = env_archcpu(env);
+    bool stage2 = mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;

     if (!regime_has_2_ranges(mmu_idx)) {
         select = 0;
         tsz = extract32(tcr, 0, 6);
-        using64k = extract32(tcr, 14, 1);
-        using16k = extract32(tcr, 15, 1);
-        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
+        if (stage2) {
             /* VTCR_EL2 */
             hpd = false;
         } else {
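The sanitize_gran_size() fallback above is the architecture's "behave as if an implemented granule was selected" rule. A standalone mirror of that rule, under the assumption of a CPU implementing only 4K and 64K granules (hypothetical helper names, not QEMU API):

    #include <assert.h>
    #include <stdbool.h>

    typedef enum { Gran4K, Gran64K, Gran16K, GranInvalid } GranuleSize;

    /* If the requested granule is unimplemented, fall back to the
     * smallest implemented one (the IMPDEF choice made above). */
    static GranuleSize pick_granule(GranuleSize want,
                                    bool has4k, bool has16k, bool has64k)
    {
        if ((want == Gran4K && has4k) ||
            (want == Gran16K && has16k) ||
            (want == Gran64K && has64k)) {
            return want;
        }
        return has4k ? Gran4K : (has16k ? Gran16K : Gran64K);
    }

    int main(void)
    {
        /* A guest asking for 16K on a 4K/64K-only CPU is coerced to 4K. */
        assert(pick_granule(Gran16K, true, false, true) == Gran4K);
        return 0;
    }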
@@ -10330,16 +10423,13 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         select = extract64(va, 55, 1);
         if (!select) {
             tsz = extract32(tcr, 0, 6);
+            gran = tg0_to_gran_size(extract32(tcr, 14, 2));
             epd = extract32(tcr, 7, 1);
             sh = extract32(tcr, 12, 2);
-            using64k = extract32(tcr, 14, 1);
-            using16k = extract32(tcr, 15, 1);
             hpd = extract64(tcr, 41, 1);
         } else {
-            int tg = extract32(tcr, 30, 2);
-            using16k = tg == 1;
-            using64k = tg == 3;
             tsz = extract32(tcr, 16, 6);
+            gran = tg1_to_gran_size(extract32(tcr, 30, 2));
             epd = extract32(tcr, 23, 1);
             sh = extract32(tcr, 28, 2);
             hpd = extract64(tcr, 42, 1);

@@ -10348,8 +10438,10 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         ds = extract64(tcr, 59, 1);
     }

+    gran = sanitize_gran_size(cpu, gran, stage2);
+
     if (cpu_isar_feature(aa64_st, cpu)) {
-        max_tsz = 48 - using64k;
+        max_tsz = 48 - (gran == Gran64K);
     } else {
         max_tsz = 39;
     }

@@ -10359,7 +10451,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
      * adjust the effective value of DS, as documented.
      */
     min_tsz = 16;
-    if (using64k) {
+    if (gran == Gran64K) {
         if (cpu_isar_feature(aa64_lva, cpu)) {
             min_tsz = 12;
         }

@@ -10368,14 +10460,14 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         switch (mmu_idx) {
         case ARMMMUIdx_Stage2:
         case ARMMMUIdx_Stage2_S:
-            if (using16k) {
+            if (gran == Gran16K) {
                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
             } else {
                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
             }
             break;
         default:
-            if (using16k) {
+            if (gran == Gran16K) {
                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
             } else {
                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);

@@ -10412,10 +10504,9 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         .tbi = tbi,
         .epd = epd,
         .hpd = hpd,
-        .using16k = using16k,
-        .using64k = using64k,
         .tsz_oob = tsz_oob,
         .ds = ds,
+        .gran = gran,
     };
 }

@@ -10804,22 +10895,15 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
     switch (mmu_idx) {
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E20_0:
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_SE20_0:
         return 0;
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
         return 1;
     case ARMMMUIdx_E2:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE2:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         return 2;
-    case ARMMMUIdx_SE3:
+    case ARMMMUIdx_E3:
         return 3;
     default:
         g_assert_not_reached();

@@ -10872,15 +10956,11 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
         }
         break;
     case 3:
-        return ARMMMUIdx_SE3;
+        return ARMMMUIdx_E3;
     default:
         g_assert_not_reached();
     }

-    if (arm_is_secure_below_el3(env)) {
-        idx &= ~ARM_MMU_IDX_A_NS;
-    }
-
     return idx;
 }

@@ -10945,6 +11025,10 @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
         DP_TBFLAG_M32(flags, STACKCHECK, 1);
     }

+    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
+        DP_TBFLAG_M32(flags, SECURE, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }

@@ -11079,15 +11163,11 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     switch (mmu_idx) {
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
         /* TODO: ARMv8.3-NV */
         DP_TBFLAG_A64(flags, UNPRIV, 1);
         break;
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         /*
          * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
          * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
target/arm/internals.h

@@ -649,78 +649,24 @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_Stage1_SE0:
-    case ARMMMUIdx_Stage1_SE1:
-    case ARMMMUIdx_Stage1_SE1_PAN:
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
     case ARMMMUIdx_E20_0:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         return true;
     default:
         return false;
     }
 }

-/* Return true if this address translation regime is secure */
-static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
-    switch (mmu_idx) {
-    case ARMMMUIdx_E10_0:
-    case ARMMMUIdx_E10_1:
-    case ARMMMUIdx_E10_1_PAN:
-    case ARMMMUIdx_E20_0:
-    case ARMMMUIdx_E20_2:
-    case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_Stage1_E0:
-    case ARMMMUIdx_Stage1_E1:
-    case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_E2:
-    case ARMMMUIdx_Stage2:
-    case ARMMMUIdx_MPrivNegPri:
-    case ARMMMUIdx_MUserNegPri:
-    case ARMMMUIdx_MPriv:
-    case ARMMMUIdx_MUser:
-        return false;
-    case ARMMMUIdx_SE3:
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
-    case ARMMMUIdx_Stage1_SE0:
-    case ARMMMUIdx_Stage1_SE1:
-    case ARMMMUIdx_Stage1_SE1_PAN:
-    case ARMMMUIdx_SE2:
-    case ARMMMUIdx_Stage2_S:
-    case ARMMMUIdx_MSPrivNegPri:
-    case ARMMMUIdx_MSUserNegPri:
-    case ARMMMUIdx_MSPriv:
-    case ARMMMUIdx_MSUser:
-        return true;
-    default:
-        g_assert_not_reached();
-    }
-}
-
 static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
     case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_Stage1_SE1_PAN:
     case ARMMMUIdx_E10_1_PAN:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE10_1_PAN:
-    case ARMMMUIdx_SE20_2_PAN:
         return true;
     default:
         return false;

@@ -731,30 +677,20 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
     case ARMMMUIdx_E20_0:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
     case ARMMMUIdx_Stage2:
     case ARMMMUIdx_Stage2_S:
-    case ARMMMUIdx_SE2:
     case ARMMMUIdx_E2:
         return 2;
-    case ARMMMUIdx_SE3:
+    case ARMMMUIdx_E3:
         return 3;
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_Stage1_SE0:
-        return arm_el_is_aa64(env, 3) ? 1 : 3;
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_Stage1_E0:
+        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_Stage1_SE1:
-    case ARMMMUIdx_Stage1_SE1_PAN:
-    case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
     case ARMMMUIdx_MPrivNegPri:

@@ -996,9 +932,6 @@ static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_Stage1_SE0:
-    case ARMMMUIdx_Stage1_SE1:
-    case ARMMMUIdx_Stage1_SE1_PAN:
         return true;
     default:
         return false;

@@ -1065,6 +998,35 @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
     return valid;
 }

+/* Granule size (i.e. page size) */
+typedef enum ARMGranuleSize {
+    /* Same order as TG0 encoding */
+    Gran4K,
+    Gran64K,
+    Gran16K,
+    GranInvalid,
+} ARMGranuleSize;
+
+/**
+ * arm_granule_bits: Return address size of the granule in bits
+ *
+ * Return the address size of the granule in bits. This corresponds
+ * to the pseudocode TGxGranuleBits().
+ */
+static inline int arm_granule_bits(ARMGranuleSize gran)
+{
+    switch (gran) {
+    case Gran64K:
+        return 16;
+    case Gran16K:
+        return 14;
+    case Gran4K:
+        return 12;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 /*
  * Parameters of a given virtual address, as extracted from the
  * translation control register (TCR) for a given regime.
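arm_granule_bits() matches the pseudocode TGxGranuleBits(): the granule's byte size is two to the power of the returned bit count. A quick standalone check of the three cases above:

    #include <assert.h>

    int main(void)
    {
        assert((1 << 12) == 4096);   /* Gran4K  -> 12 bits */
        assert((1 << 14) == 16384);  /* Gran16K -> 14 bits */
        assert((1 << 16) == 65536);  /* Gran64K -> 16 bits */
        return 0;
    }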
@@ -1077,10 +1039,9 @@ typedef struct ARMVAParameters {
     bool tbi : 1;
     bool epd : 1;
     bool hpd : 1;
-    bool using16k : 1;
-    bool using64k : 1;
     bool tsz_oob : 1;  /* tsz has been clamped to legal range */
     bool ds : 1;
+    ARMGranuleSize gran : 2;
 } ARMVAParameters;

 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,

@@ -1138,13 +1099,50 @@ typedef struct ARMCacheAttrs {

 /* Fields that are valid upon success. */
 typedef struct GetPhysAddrResult {
-    hwaddr phys;
-    target_ulong page_size;
-    int prot;
-    MemTxAttrs attrs;
+    CPUTLBEntryFull f;
     ARMCacheAttrs cacheattrs;
 } GetPhysAddrResult;

+/**
+ * get_phys_addr_with_secure: get the physical address for a virtual address
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @is_secure: security state for the access
+ * @result: set on translation success.
+ * @fi: set to fault info if the translation fails
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
+ * information on why the translation aborted, in the format of a
+ * DFSR/IFSR fault register, with the following caveats:
+ *  * we honour the short vs long DFSR format differences.
+ *  * the WnR bit is never set (the caller must do this).
+ *  * for PSMAv5 based systems we don't bother to return a full FSR format
+ *    value.
+ */
+bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
+                               MMUAccessType access_type,
+                               ARMMMUIdx mmu_idx, bool is_secure,
+                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+    __attribute__((nonnull));
+
 /**
  * get_phys_addr: get the physical address for a virtual address
  * @env: CPUARMState
  * @address: virtual address to get physical address for
  * @access_type: 0 for read, 1 for write, 2 for execute
  * @mmu_idx: MMU index indicating required translation regime
  * @result: set on translation success.
  * @fi: set to fault info if the translation fails
  *
+ * Similarly, but use the security regime of @mmu_idx.
  */
 bool get_phys_addr(CPUARMState *env, target_ulong address,
                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
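Replacing the four discrete fields with an embedded CPUTLBEntryFull is what later lets arm_cpu_tlb_fill() hand the result straight to tlb_set_page_full(). The field renames used throughout the rest of this series:

    /* res.phys      -> res.f.phys_addr                       */
    /* res.attrs     -> res.f.attrs                           */
    /* res.prot      -> res.f.prot                            */
    /* res.page_size -> res.f.lg_page_size (log2 of the size) */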
target/arm/kvm.c

@@ -79,7 +79,9 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
     if (max_vm_pa_size < 0) {
         max_vm_pa_size = 0;
     }
-    vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
+    do {
+        vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
+    } while (vmfd == -1 && errno == EINTR);
     if (vmfd < 0) {
         goto err;
     }
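The loop above is the standard retry-on-EINTR idiom: a syscall interrupted by a signal fails with errno == EINTR and can simply be reissued. A standalone sketch of the same pattern (generic wrapper, not the QEMU code):

    #include <errno.h>

    /* Retry fn() until it either succeeds or fails for a real reason. */
    static long retry_on_eintr(long (*fn)(void *), void *arg)
    {
        long ret;
        do {
            ret = fn(arg);
        } while (ret == -1 && errno == EINTR);
        return ret;
    }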
target/arm/m_helper.c

@@ -223,8 +223,8 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
         }
         goto pend_fault;
     }
-    address_space_stl_le(arm_addressspace(cs, res.attrs), res.phys, value,
-                         res.attrs, &txres);
+    address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
+                         value, res.f.attrs, &txres);
     if (txres != MEMTX_OK) {
         /* BusFault trying to write the data */
         if (mode == STACK_LAZYFP) {

@@ -298,8 +298,8 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
         goto pend_fault;
     }

-    value = address_space_ldl(arm_addressspace(cs, res.attrs), res.phys,
-                              res.attrs, &txres);
+    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
+                              res.f.phys_addr, res.f.attrs, &txres);
     if (txres != MEMTX_OK) {
         /* BusFault trying to read the data */
         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");

@@ -1981,7 +1981,7 @@ static bool do_v7m_function_return(ARMCPU *cpu)
     return true;
 }

-static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
                                uint32_t addr, uint16_t *insn)
 {
     /*

@@ -2003,8 +2003,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
     ARMMMUFaultInfo fi = {};
     MemTxResult txres;

-    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx,
-                        regime_is_secure(env, mmu_idx), &sattrs);
+    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
     if (!sattrs.nsc || sattrs.ns) {
         /*
          * This must be the second half of the insn, and it straddles a

@@ -2023,8 +2022,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
         return false;
     }
-    *insn = address_space_lduw_le(arm_addressspace(cs, res.attrs), res.phys,
-                                  res.attrs, &txres);
+    *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
+                                  res.f.phys_addr, res.f.attrs, &txres);
     if (txres != MEMTX_OK) {
         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);

@@ -2070,8 +2069,8 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
         }
         return false;
     }
-    value = address_space_ldl(arm_addressspace(cs, res.attrs), res.phys,
-                              res.attrs, &txres);
+    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
+                              res.f.phys_addr, res.f.attrs, &txres);
     if (txres != MEMTX_OK) {
         /* BusFault trying to read the data */
         qemu_log_mask(CPU_LOG_INT,

@@ -2109,7 +2108,7 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

-    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
+    if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
         return false;
     }

@@ -2125,7 +2124,7 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
         goto gen_invep;
     }

-    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
+    if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
         return false;
     }

@@ -2818,8 +2817,8 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
         } else {
             mrvalid = true;
         }
-        r = res.prot & PAGE_READ;
-        rw = res.prot & PAGE_WRITE;
+        r = res.f.prot & PAGE_READ;
+        rw = res.f.prot & PAGE_WRITE;
     } else {
         r = false;
         rw = false;
target/arm/ptw.c (570 changes)

(File diff suppressed because it is too large.)
target/arm/tlb_helper.c

@@ -227,17 +227,16 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
          * target page size are handled specially, so for those we
          * pass in the exact addresses.
          */
-        if (res.page_size >= TARGET_PAGE_SIZE) {
-            res.phys &= TARGET_PAGE_MASK;
+        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
+            res.f.phys_addr &= TARGET_PAGE_MASK;
             address &= TARGET_PAGE_MASK;
         }
         /* Notice and record tagged memory. */
         if (cpu_isar_feature(aa64_mte, cpu) && res.cacheattrs.attrs == 0xf0) {
-            arm_tlb_mte_tagged(&res.attrs) = true;
+            arm_tlb_mte_tagged(&res.f.attrs) = true;
         }

-        tlb_set_page_with_attrs(cs, address, res.phys, res.attrs,
-                                res.prot, mmu_idx, res.page_size);
+        tlb_set_page_full(cs, mmu_idx, address, &res.f);
         return true;
     } else if (probe) {
         return false;
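The page-size test changes form but not meaning: for power-of-two sizes, comparing log2 values is equivalent to comparing the sizes themselves.

    /* Equivalence used above (sizes are powers of two):
     *   res.page_size >= TARGET_PAGE_SIZE
     *   <=> res.f.lg_page_size >= TARGET_PAGE_BITS
     * because (1 << lg) >= (1 << bits) exactly when lg >= bits. */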
target/arm/translate-a64.c

@@ -111,14 +111,6 @@ static int get_a64_user_mem_index(DisasContext *s)
     case ARMMMUIdx_E20_2_PAN:
         useridx = ARMMMUIdx_E20_0;
         break;
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
-        useridx = ARMMMUIdx_SE10_0;
-        break;
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
-        useridx = ARMMMUIdx_SE20_0;
-        break;
     default:
         g_assert_not_reached();
     }
target/arm/translate.c

@@ -237,16 +237,12 @@ static inline int get_a32_user_mem_index(DisasContext *s)
      * otherwise, access as if at PL0.
      */
     switch (s->mmu_idx) {
+    case ARMMMUIdx_E3:
     case ARMMMUIdx_E2:        /* this one is UNPREDICTABLE */
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
         return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
-    case ARMMMUIdx_SE3:
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
-        return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
     case ARMMMUIdx_MUser:
     case ARMMMUIdx_MPriv:
         return arm_to_core_mmu_idx(ARMMMUIdx_MUser);

@@ -9351,8 +9347,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
         dc->vfp_enabled = 1;
         dc->be_data = MO_TE;
         dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
-        dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
-            regime_is_secure(env, dc->mmu_idx);
+        dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE);
         dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
        dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
        dc->v7m_new_fp_ctxt_needed =