target/arm: Implement helper_mte_check1
Fill out the stub that was added earlier.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-26-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in: parent 73ceeb0011, commit 2e34ff45f3
target/arm/internals.h
@@ -1318,6 +1318,10 @@ FIELD(MTEDESC, WRITE, 8, 1)
 FIELD(MTEDESC, ESIZE, 9, 5)
 FIELD(MTEDESC, TSIZE, 14, 10) /* mte_checkN only */
 
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
+uint64_t mte_check1(CPUARMState *env, uint32_t desc,
+                    uint64_t ptr, uintptr_t ra);
+
 static inline int allocation_tag_from_addr(uint64_t ptr)
 {
     return extract64(ptr, 56, 4);
@@ -1328,4 +1332,48 @@ static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
     return deposit64(ptr, 56, 4, rtag);
 }
 
+/* Return true if tbi bits mean that the access is checked. */
+static inline bool tbi_check(uint32_t desc, int bit55)
+{
+    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
+}
+
+/* Return true if tcma bits mean that the access is unchecked. */
+static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
+{
+    /*
+     * We had extracted bit55 and ptr_tag for other reasons, so fold
+     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
+     */
+    bool match = ((ptr_tag + bit55) & 0xf) == 0;
+    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
+    return tcma && match;
+}
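
To see why the fold in tcma_check works: ptr<59:55> is ptr_tag:bit55, and ptr_tag + bit55 is a multiple of 16 only for (ptr_tag, bit55) = (0x0, 0) or (0xf, 1), i.e. when ptr<59:55> is all zeros or all ones. A standalone sketch (plain C, not part of the patch) that checks the folded test against the direct comparison for all 32 values:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
    for (int bits = 0; bits < 32; bits++) {
        int bit55 = bits & 1;        /* ptr<55> */
        int ptr_tag = bits >> 1;     /* ptr<59:56> */
        bool direct = (bits == 0) || (bits == 31);   /* all zeros/ones */
        bool folded = ((ptr_tag + bit55) & 0xf) == 0;
        assert(direct == folded);
    }
    return 0;
}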
+
+/*
+ * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
+ * for the tag to be present in the FAR_ELx register.  But for user-only
+ * mode, we do not have a TLB with which to implement this, so we must
+ * remove the top byte.
+ */
+static inline uint64_t useronly_clean_ptr(uint64_t ptr)
+{
+    /* TBI is known to be enabled. */
+#ifdef CONFIG_USER_ONLY
+    ptr = sextract64(ptr, 0, 56);
+#endif
+    return ptr;
+}
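
To illustrate what useronly_clean_ptr does: sign-extending from bit 55 zeroes the top byte of a low-half (bit-55-clear) address and sets it to 0xff for a high-half address, so the cleaned pointer is again canonical. A standalone sketch (not part of the patch), with sextract64 copied in locally so it compiles outside QEMU (in-tree it comes from qemu/bitops.h):

#include <assert.h>
#include <stdint.h>

/* Local copy of QEMU's sextract64, so this sketch is self-contained. */
static int64_t sextract64(uint64_t value, int start, int length)
{
    return ((int64_t)(value << (64 - length - start))) >> (64 - length);
}

int main(void)
{
    /* Dirty top byte 0x5a, bit 55 clear: top byte becomes 0x00. */
    assert((uint64_t)sextract64(0x5a007fffffff1234ull, 0, 56)
           == 0x00007fffffff1234ull);
    /* Dirty top byte 0x34, bit 55 set: top byte becomes 0xff. */
    assert((uint64_t)sextract64(0x34ffffff80001234ull, 0, 56)
           == 0xffffffff80001234ull);
    return 0;
}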
+
+static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
+{
+#ifdef CONFIG_USER_ONLY
+    int64_t clean_ptr = sextract64(ptr, 0, 56);
+    if (tbi_check(desc, clean_ptr < 0)) {
+        ptr = clean_ptr;
+    }
+#endif
+    return ptr;
+}
+
 #endif

target/arm/mte_helper.c
@@ -359,12 +359,142 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
     }
 }
 
+/* Record a tag check failure. */
+static void mte_check_fail(CPUARMState *env, int mmu_idx,
+                           uint64_t dirty_ptr, uintptr_t ra)
+{
+    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
+    int el, reg_el, tcf, select;
+    uint64_t sctlr;
+
+    reg_el = regime_el(env, arm_mmu_idx);
+    sctlr = env->cp15.sctlr_el[reg_el];
+
+    switch (arm_mmu_idx) {
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_E20_0:
+        el = 0;
+        tcf = extract64(sctlr, 38, 2);
+        break;
+    default:
+        el = reg_el;
+        tcf = extract64(sctlr, 40, 2);
+    }
+
+    switch (tcf) {
+    case 1:
+        /*
+         * Tag check fail causes a synchronous exception.
+         *
+         * In restore_state_to_opc, we set the exception syndrome
+         * for the load or store operation.  Unwind first so we
+         * may overwrite that with the syndrome for the tag check.
+         */
+        cpu_restore_state(env_cpu(env), ra, true);
+        env->exception.vaddress = dirty_ptr;
+        raise_exception(env, EXCP_DATA_ABORT,
+                        syn_data_abort_no_iss(el != 0, 0, 0, 0, 0, 0, 0x11),
+                        exception_target_el(env));
+        /* noreturn, but fall through to the assert anyway */
+
+    case 0:
+        /*
+         * Tag check fail does not affect the PE.
+         * We eliminate this case by not setting MTE_ACTIVE
+         * in tb_flags, so that we never make this runtime call.
+         */
+        g_assert_not_reached();
+
+    case 2:
+        /* Tag check fail causes asynchronous flag set. */
+        mmu_idx = arm_mmu_idx_el(env, el);
+        if (regime_has_2_ranges(mmu_idx)) {
+            select = extract64(dirty_ptr, 55, 1);
+        } else {
+            select = 0;
+        }
+        env->cp15.tfsr_el[el] |= 1 << select;
+        break;
+
+    default:
+        /* Case 3: Reserved. */
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Tag check failure with SCTLR_EL%d.TCF%s "
+                      "set to reserved value %d\n",
+                      reg_el, el ? "" : "0", tcf);
+        break;
+    }
+}
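
mte_check_fail selects between SCTLR_ELx.TCF0 (bits [39:38], governing EL0 accesses) and SCTLR_ELx.TCF (bits [41:40], governing EL1 accesses); that is what the extract64(sctlr, 38, 2) and extract64(sctlr, 40, 2) calls read. A standalone sketch (not part of the patch) of the same decode:

#include <stdint.h>
#include <stdio.h>

/* TCF/TCF0 encodings: 0 = no effect, 1 = synchronous exception,
 * 2 = asynchronous accumulation into TFSR_ELx, 3 = reserved. */
static const char *tcf_mode(int tcf)
{
    switch (tcf) {
    case 0:  return "no effect";
    case 1:  return "synchronous exception";
    case 2:  return "asynchronous accumulation into TFSR";
    default: return "reserved";
    }
}

int main(void)
{
    uint64_t sctlr = (1ull << 38) | (2ull << 40);  /* TCF0 = 1, TCF = 2 */
    printf("EL0: %s\n", tcf_mode((sctlr >> 38) & 3));
    printf("EL1: %s\n", tcf_mode((sctlr >> 40) & 3));
    return 0;
}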
+
+/*
+ * Perform an MTE checked access for a single logical or atomic access.
+ */
+static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
+                           uintptr_t ra, int bit55)
+{
+    int mem_tag, mmu_idx, ptr_tag, size;
+    MMUAccessType type;
+    uint8_t *mem;
+
+    ptr_tag = allocation_tag_from_addr(ptr);
+
+    if (tcma_check(desc, bit55, ptr_tag)) {
+        return true;
+    }
+
+    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
+    size = FIELD_EX32(desc, MTEDESC, ESIZE);
+
+    mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
+                             MMU_DATA_LOAD, 1, ra);
+    if (!mem) {
+        return true;
+    }
+
+    mem_tag = load_tag1(ptr, mem);
+    return ptr_tag == mem_tag;
+}
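
allocation_tag_mem (added earlier in this series) returns a pointer into the tag storage covering ptr, and load_tag1 extracts one 4-bit tag from it: each tag byte covers two 16-byte granules, so bit 4 of the address selects the nibble. A self-contained sketch of that selection (the low-nibble-first packing is an assumption based on the earlier patches, not shown in this diff):

#include <assert.h>
#include <stdint.h>

#define LOG2_TAG_GRANULE 4   /* one allocation tag per 16-byte granule */

/* Sketch of the nibble selection performed by load_tag1: the even
 * granule's tag sits in the low nibble, the odd granule's in the high. */
static int load_tag1_sketch(uint64_t ptr, const uint8_t *mem)
{
    int ofs = ((ptr >> LOG2_TAG_GRANULE) & 1) * 4;
    return (*mem >> ofs) & 0xf;
}

int main(void)
{
    uint8_t tag_byte = 0xa3;  /* granule 0 -> tag 0x3, granule 1 -> tag 0xa */
    assert(load_tag1_sketch(0x1000, &tag_byte) == 0x3);
    assert(load_tag1_sketch(0x1010, &tag_byte) == 0xa);
    return 0;
}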
+
+/*
+ * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
+ * Returns false if the access is Checked and the check failed.  This
+ * is only intended to probe the tag -- the validity of the page must
+ * be checked beforehand.
+ */
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    int bit55 = extract64(ptr, 55, 1);
+
+    /* If TBI is disabled, the access is unchecked. */
+    if (unlikely(!tbi_check(desc, bit55))) {
+        return true;
+    }
+
+    return mte_probe1_int(env, desc, ptr, 0, bit55);
+}
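
The comment above states the contract: the caller has already probed page validity, and a false return means the element should be suppressed rather than faulted. A hypothetical caller fragment (only mte_probe1 is from this patch; the surrounding function and names are illustrative):

/* Hypothetical no-fault load step: on a tag mismatch, suppress the
 * element instead of raising a tag check fault. */
static bool nofault_element_ok(CPUARMState *env, uint32_t desc,
                               uint64_t addr, bool *suppress)
{
    if (!mte_probe1(env, desc, addr)) {
        *suppress = true;   /* checked access, tag mismatch */
        return false;
    }
    return true;            /* unchecked, or tag matched */
}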
+
+uint64_t mte_check1(CPUARMState *env, uint32_t desc,
+                    uint64_t ptr, uintptr_t ra)
+{
+    int bit55 = extract64(ptr, 55, 1);
+
+    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
+    if (unlikely(!tbi_check(desc, bit55))) {
+        return ptr;
+    }
+
+    if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
+        int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+        mte_check_fail(env, mmu_idx, ptr, ra);
+    }
+
+    return useronly_clean_ptr(ptr);
+}
+
 uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
-    return ptr;
+    return mte_check1(env, desc, ptr, GETPC());
 }
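
For reference, desc packs the MTEDESC fields declared in internals.h. The positions of WRITE, ESIZE and TSIZE are visible in this patch's first hunk; MIDX, TBI and TCMA were declared in an earlier patch in the series, so their positions below are an assumption. A sketch of the packing with open-coded shifts (QEMU itself uses the FIELD_DP32 macros from hw/registerfields.h):

#include <stdint.h>
#include <stdbool.h>

/* Assumed layout: MIDX [3:0], TBI [5:4], TCMA [7:6] (earlier patch);
 * WRITE [8], ESIZE [13:9] (this patch). */
static uint32_t make_mte_desc(int mmu_idx, int tbi, int tcma,
                              bool is_write, int esize)
{
    return (mmu_idx & 0xf)
         | ((tbi & 3) << 4)
         | ((tcma & 3) << 6)
         | ((unsigned)is_write << 8)
         | ((esize & 0x1f) << 9);
}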