target/arm: Implement TCGCPUOps.tlb_fill_align
Fill in the tlb_fill_align hook.  Handle alignment not due to memory
type, since that's no longer handled by generic code.  Pass memop to
get_phys_addr.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 64bda5106c
commit 1ba3cb8877
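The hook's calling convention differs from the old tlb_fill in two ways that the hunks below make concrete: it receives the MemOp, so alignment can be checked before translation, and it returns the finished CPUTLBEntryFull through *out instead of installing it with tlb_set_page_full(), leaving installation to the caller. A minimal sketch of that handshake, using stand-in names and types (EntryStub, stub_tlb_fill_align) rather than the real QEMU declarations:

/*
 * Sketch only: stand-in types and names, not the real accel/tcg or
 * target/arm code.  Shows the shape of the contract: the target-side
 * hook fills in an entry and returns true, returns false on a failed
 * probe, and (in the real code) delivers a guest fault otherwise.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t phys_addr;              /* stand-in for CPUTLBEntryFull */
} EntryStub;

/* Hypothetical per-target hook. */
static bool stub_tlb_fill_align(EntryStub *out, uint64_t addr, bool probe)
{
    if (addr >= 0x2000) {            /* pretend everything above 8KiB is unmapped */
        if (!probe) {
            /* The real hook would restore state and deliver the fault here. */
            printf("fault for 0x%" PRIx64 "\n", addr);
        }
        return false;
    }
    out->phys_addr = addr;           /* pretend translation is an identity map */
    return true;
}

int main(void)
{
    EntryStub full;

    /* Caller side: on success the caller, not the hook, installs the entry. */
    if (stub_tlb_fill_align(&full, 0x1000, false)) {
        printf("install TLB entry for 0x%" PRIx64 "\n", full.phys_addr);
    }
    /* A probe of an unmapped address simply reports false. */
    printf("probe unmapped -> %d\n", stub_tlb_fill_align(&full, 0x3000, true));
    return 0;
}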
@@ -2663,7 +2663,7 @@ static const TCGCPUOps arm_tcg_ops = {
     .record_sigsegv = arm_cpu_record_sigsegv,
     .record_sigbus = arm_cpu_record_sigbus,
 #else
-    .tlb_fill = arm_cpu_tlb_fill,
+    .tlb_fill_align = arm_cpu_tlb_fill_align,
     .cpu_exec_interrupt = arm_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
     .do_interrupt = arm_cpu_do_interrupt,
@@ -816,9 +816,9 @@ void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
 void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type, uintptr_t ra);
 #else
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr);
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                            MMUAccessType access_type, int mmu_idx,
+                            MemOp memop, int size, bool probe, uintptr_t ra);
 #endif
 
 static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
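The prototype change above captures the interface difference: the old hook received only an int size, while the new one also receives the MemOp, whose encoded alignment requirement is separate from the access size. A toy illustration of that distinction, using a made-up FakeMemOp and fake_alignment_bits() rather than QEMU's real MO_* encoding or memop_alignment_bits():

/*
 * Illustration only: a made-up encoding, not QEMU's MO_* layout.  The
 * point is that the alignment requirement travels with the memory
 * operation and is distinct from its size, which is why the hook now
 * takes a MemOp instead of just a size.
 */
#include <stdio.h>

typedef struct {
    unsigned size_bits;    /* log2 of the access size */
    unsigned align_bits;   /* log2 of the required alignment (0 = none) */
} FakeMemOp;

/* Stand-in for memop_alignment_bits(): just reports the requirement. */
static unsigned fake_alignment_bits(FakeMemOp op)
{
    return op.align_bits;
}

int main(void)
{
    FakeMemOp ldq_aligned = { 3, 3 };   /* 8-byte load, naturally aligned */
    FakeMemOp ldq_unaligned = { 3, 0 }; /* 8-byte load, no alignment required */

    /* The low-bit masks that the fault check further below builds from these. */
    printf("0x%x\n", (1u << fake_alignment_bits(ldq_aligned)) - 1);   /* 0x7 */
    printf("0x%x\n", (1u << fake_alignment_bits(ldq_unaligned)) - 1); /* 0x0 */
    return 0;
}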
@@ -242,7 +242,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
     .record_sigsegv = arm_cpu_record_sigsegv,
     .record_sigbus = arm_cpu_record_sigbus,
 #else
-    .tlb_fill = arm_cpu_tlb_fill,
+    .tlb_fill_align = arm_cpu_tlb_fill_align,
     .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
     .do_interrupt = arm_v7m_cpu_do_interrupt,
@@ -318,14 +318,13 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
 }
 
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr)
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
+                            MMUAccessType access_type, int mmu_idx,
+                            MemOp memop, int size, bool probe, uintptr_t ra)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     GetPhysAddrResult res = {};
     ARMMMUFaultInfo local_fi, *fi;
-    int ret;
 
     /*
      * Allow S1_ptw_translate to see any fault generated here.
@@ -339,37 +338,27 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     }
 
     /*
-     * Walk the page table and (if the mapping exists) add the page
-     * to the TLB.  On success, return true.  Otherwise, if probing,
-     * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
-     * register format, and signal the fault.
+     * Per R_XCHFJ, alignment fault not due to memory type has
+     * highest precedence.  Otherwise, walk the page table and
+     * collect the page description.
      */
-    ret = get_phys_addr(&cpu->env, address, access_type, 0,
-                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
-                        &res, fi);
-    if (likely(!ret)) {
-        /*
-         * Map a single [sub]page. Regions smaller than our declared
-         * target page size are handled specially, so for those we
-         * pass in the exact addresses.
-         */
-        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
-            res.f.phys_addr &= TARGET_PAGE_MASK;
-            address &= TARGET_PAGE_MASK;
-        }
-
+    if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
+        fi->type = ARMFault_Alignment;
+    } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
+                              core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+                              &res, fi)) {
         res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
         res.f.extra.arm.shareability = res.cacheattrs.shareability;
-
-        tlb_set_page_full(cs, mmu_idx, address, &res.f);
+        *out = res.f;
         return true;
-    } else if (probe) {
-        return false;
-    } else {
-        /* now we have a real cpu fault */
-        cpu_restore_state(cs, retaddr);
-        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
     }
+    if (probe) {
+        return false;
+    }
+
+    /* Now we have a real cpu fault. */
+    cpu_restore_state(cs, ra);
+    arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
 }
 #else
 void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
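The comment's precedence claim (R_XCHFJ) is visible in the new control flow: the alignment mask test runs before get_phys_addr(), so a misaligned access reports ARMFault_Alignment even when the address would also have failed to translate. A small self-contained sketch of that ordering, with stand-in names (fake_walk_ok, classify) in place of the real translation code:

/*
 * Sketch of the precedence rule cited above: because the alignment check
 * runs before the page-table walk, a misaligned access to an address
 * that would not even translate still reports an alignment fault.
 * Names and types are stand-ins, not the real code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { FAULT_NONE, FAULT_ALIGNMENT, FAULT_TRANSLATION } FaultKind;

/* Pretend only addresses below 0x2000 are mapped. */
static bool fake_walk_ok(uint64_t addr) { return addr < 0x2000; }

static FaultKind classify(uint64_t addr, unsigned align_bits)
{
    if (addr & ((UINT64_C(1) << align_bits) - 1)) {
        return FAULT_ALIGNMENT;        /* checked first, as in the hook */
    }
    if (!fake_walk_ok(addr)) {
        return FAULT_TRANSLATION;
    }
    return FAULT_NONE;
}

int main(void)
{
    /* Misaligned *and* unmapped: alignment wins because it is tested first. */
    printf("%d\n", classify(0x3001, 2) == FAULT_ALIGNMENT);   /* prints 1 */
    /* Aligned but unmapped: translation fault. */
    printf("%d\n", classify(0x3000, 2) == FAULT_TRANSLATION); /* prints 1 */
    return 0;
}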