target/arm: Recover 4 bits from TBFLAGs

We had completely run out of TBFLAG bits.  Split A- and M-profile bits
into two overlapping buckets.  This results in 4 free bits.

We used to initialize all of the a32 and m32 fields in DisasContext
by assignment, in arm_tr_init_disas_context.  Now we only initialize
either the a32 or m32 fields by assignment, because the bits overlap
in tbflags.  So zero the entire structure in gen_intermediate_code.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent e013b74113
commit 79cabf1f47
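
The space saving comes from the observation that A-profile-only and M-profile-only
TB flags are never needed for the same translation block, so the two groups can
share bit positions. Below is a minimal, self-contained C sketch of that idea; the
bit positions mirror the new TBFLAG_AM32/TBFLAG_A32/TBFLAG_M32 layout in the cpu.h
hunk further down, but the helpers and enum names here are simplified stand-ins for
QEMU's FIELD()/FIELD_EX32()/FIELD_DP32() machinery, not the real definitions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for QEMU's extract32()/deposit32() helpers. */
    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t fieldval)
    {
        uint32_t mask = (~0u >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    /* Bit positions taken from the new layout in the cpu.h hunk below. */
    enum {
        AM32_CONDEXEC = 0,  /* 8 bits, shared by A- and M-profile      */
        AM32_THUMB    = 8,  /* 1 bit, shared                           */
        A32_VECLEN    = 9,  /* 3 bits, A-profile only                  */
        M32_HANDLER   = 9,  /* 1 bit, M-profile only: overlaps VECLEN  */
    };

    int main(void)
    {
        uint32_t flags = 0;

        /* An M-profile TB deposits M-profile flags ... */
        flags = deposit32(flags, AM32_THUMB, 1, 1);
        flags = deposit32(flags, M32_HANDLER, 1, 1);

        /*
         * ... and only ever extracts them as M-profile flags, so the fact
         * that bit 9 would decode as part of VECLEN on A-profile is
         * harmless: the two meanings are never live for the same TB.
         */
        assert(extract32(flags, M32_HANDLER, 1) == 1);
        printf("flags = 0x%08x\n", (unsigned)flags);
        return 0;
    }
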
target/arm/cpu.h

@@ -3214,6 +3214,16 @@ typedef ARMCPU ArchCPU;
  * We put flags which are shared between 32 and 64 bit mode at the top
  * of the word, and flags which apply to only one mode at the bottom.
  *
+ *  31           21    18    14          9              0
+ * +--------------+-----+-----+----------+--------------+
+ * |              |     |   TBFLAG_A32   |              |
+ * |              |     +-----+----------+  TBFLAG_AM32 |
+ * |  TBFLAG_ANY  |           |TBFLAG_M32|              |
+ * |              |     +-------------------------------|
+ * |              |           |          TBFLAG_A64     |
+ * +--------------+-----------+-------------------------+
+ *  31           21          14                         0
+ *
  * Unless otherwise noted, these bits are cached in env->hflags.
  */
 FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
@@ -3223,46 +3233,54 @@ FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)     /* Not cached. */
 /* Target EL if we take a floating-point-disabled exception */
 FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
 FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
-/*
- * For A-profile only, target EL for debug exceptions.
- * Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
- */
+/* For A-profile only, target EL for debug exceptions. */
 FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
 
-/* Bit usage when in AArch32 state: */
-FIELD(TBFLAG_A32, THUMB, 0, 1)          /* Not cached. */
-FIELD(TBFLAG_A32, VECLEN, 1, 3)         /* Not cached. */
-FIELD(TBFLAG_A32, VECSTRIDE, 4, 2)      /* Not cached. */
+/*
+ * Bit usage when in AArch32 state, both A- and M-profile.
+ */
+FIELD(TBFLAG_AM32, CONDEXEC, 0, 8)      /* Not cached. */
+FIELD(TBFLAG_AM32, THUMB, 8, 1)         /* Not cached. */
+
+/*
+ * Bit usage when in AArch32 state, for A-profile only.
+ */
+FIELD(TBFLAG_A32, VECLEN, 9, 3)         /* Not cached. */
+FIELD(TBFLAG_A32, VECSTRIDE, 12, 2)     /* Not cached. */
 /*
  * We store the bottom two bits of the CPAR as TB flags and handle
  * checks on the other bits at runtime. This shares the same bits as
  * VECSTRIDE, which is OK as no XScale CPU has VFP.
  * Not cached, because VECLEN+VECSTRIDE are not cached.
  */
-FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
+FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
+FIELD(TBFLAG_A32, VFPEN, 14, 1)         /* Partially cached, minus FPEXC. */
+FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
 /*
  * Indicates whether cp register reads and writes by guest code should access
  * the secure or nonsecure bank of banked registers; note that this is not
  * the same thing as the current security state of the processor!
  */
-FIELD(TBFLAG_A32, NS, 6, 1)
-FIELD(TBFLAG_A32, VFPEN, 7, 1)          /* Partially cached, minus FPEXC. */
-FIELD(TBFLAG_A32, CONDEXEC, 8, 8)       /* Not cached. */
-FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
-FIELD(TBFLAG_A32, HSTR_ACTIVE, 17, 1)
+FIELD(TBFLAG_A32, NS, 17, 1)
 
-/* For M profile only, set if FPCCR.LSPACT is set */
-FIELD(TBFLAG_A32, LSPACT, 18, 1)        /* Not cached. */
-/* For M profile only, set if we must create a new FP context */
-FIELD(TBFLAG_A32, NEW_FP_CTXT_NEEDED, 19, 1) /* Not cached. */
-/* For M profile only, set if FPCCR.S does not match current security state */
-FIELD(TBFLAG_A32, FPCCR_S_WRONG, 20, 1) /* Not cached. */
-/* For M profile only, Handler (ie not Thread) mode */
-FIELD(TBFLAG_A32, HANDLER, 21, 1)
-/* For M profile only, whether we should generate stack-limit checks */
-FIELD(TBFLAG_A32, STACKCHECK, 22, 1)
+/*
+ * Bit usage when in AArch32 state, for M-profile only.
+ */
+/* Handler (ie not Thread) mode */
+FIELD(TBFLAG_M32, HANDLER, 9, 1)
+/* Whether we should generate stack-limit checks */
+FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
+/* Set if FPCCR.LSPACT is set */
+FIELD(TBFLAG_M32, LSPACT, 11, 1)                /* Not cached. */
+/* Set if we must create a new FP context */
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1)    /* Not cached. */
+/* Set if FPCCR.S does not match current security state */
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1)         /* Not cached. */
 
-/* Bit usage when in AArch64 state */
+/*
+ * Bit usage when in AArch64 state
+ */
 FIELD(TBFLAG_A64, TBII, 0, 2)
 FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
 FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
target/arm/helper.c

@@ -11353,11 +11353,8 @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
 {
     uint32_t flags = 0;
 
-    /* v8M always enables the fpu.  */
-    flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
-
     if (arm_v7m_is_handler_mode(env)) {
-        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
+        flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
     }
 
     /*
@@ -11368,7 +11365,7 @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
     if (arm_feature(env, ARM_FEATURE_V8) &&
         !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
           (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
-        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
+        flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
     }
 
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
@@ -11561,7 +11558,7 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
         if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
             FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
             != env->v7m.secure) {
-            flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
+            flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
         }
 
         if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
@@ -11573,12 +11570,12 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
              * active FP context; we must create a new FP context before
              * executing any FP insn.
              */
-            flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
+            flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
         }
 
         bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
         if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
-            flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
+            flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
         }
     } else {
         /*
@@ -11599,8 +11596,8 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
         }
     }
 
-    flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
-    flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
+    flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
+    flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
     pstate_for_ss = env->uncached_cpsr;
 }
 
target/arm/translate.c

@@ -10848,38 +10848,48 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
      */
     dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                                !arm_el_is_aa64(env, 3);
-    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
-    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
-    dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
+    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
     dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
-    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
+    condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
     dc->condexec_mask = (condexec & 0xf) << 1;
     dc->condexec_cond = condexec >> 4;
 
     core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
     dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
     dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
 #if !defined(CONFIG_USER_ONLY)
     dc->user = (dc->current_el == 0);
 #endif
-    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
     dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
-    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
-    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
-    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
-        dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
-        dc->vec_stride = 0;
+
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        dc->vfp_enabled = 1;
+        dc->be_data = MO_TE;
+        dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
+        dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+            regime_is_secure(env, dc->mmu_idx);
+        dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
+        dc->v8m_fpccr_s_wrong =
+            FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
+        dc->v7m_new_fp_ctxt_needed =
+            FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
+        dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
     } else {
-        dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
-        dc->c15_cpar = 0;
+        dc->be_data =
+            FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
+        dc->debug_target_el =
+            FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+        dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
+        dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
+        dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
+        dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
+        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+            dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
+        } else {
+            dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
+            dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
+        }
     }
-    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
-    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
-        regime_is_secure(env, dc->mmu_idx);
-    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
-    dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
-    dc->v7m_new_fp_ctxt_needed =
-        FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
-    dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
+
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
@@ -10901,9 +10911,6 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
     dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
     dc->is_ldex = false;
-    if (!arm_feature(env, ARM_FEATURE_M)) {
-        dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
-    }
 
     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
 
@@ -11340,10 +11347,10 @@ static const TranslatorOps thumb_translator_ops = {
 /* generate intermediate code for basic block 'tb'.  */
 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
 {
-    DisasContext dc;
+    DisasContext dc = { };
     const TranslatorOps *ops = &arm_translator_ops;
 
-    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
+    if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
         ops = &thumb_translator_ops;
     }
 #ifdef TARGET_AARCH64
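
The last hunk replaces the uninitialized "DisasContext dc;" with "DisasContext dc = { };".
Because only one of the two overlapping flag groups is decoded per TB,
arm_tr_init_disas_context no longer assigns every a32/m32 field, and the empty-brace
initializer guarantees that whatever it leaves untouched reads as zero. A tiny
stand-alone sketch of that C behaviour; the struct here is a hypothetical stand-in,
not the real DisasContext:

    #include <assert.h>

    /*
     * Hypothetical stand-in for DisasContext: the translator fills in only
     * one of the two profile-specific members, depending on the TB.
     */
    struct Ctx {
        int thumb;            /* shared (TBFLAG_AM32) */
        int vec_len;          /* A-profile only       */
        int v7m_handler_mode; /* M-profile only       */
    };

    int main(void)
    {
        /*
         * "= { 0 }" (spelled "= { }" in QEMU, a GNU/C2x extension) zero-
         * initializes every member, so whichever group the init code does
         * not assign is still guaranteed to be zero.
         */
        struct Ctx dc = { 0 };

        dc.v7m_handler_mode = 1;   /* pretend this is an M-profile TB */
        assert(dc.thumb == 0 && dc.vec_len == 0);
        return 0;
    }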