Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20140929' into staging

target-arm:
 * more EL2/EL3 preparation work
 * don't handle c15_cpar changes via tb_flush()
 * fix some unused function warnings in ARM devices
 * build the GDB XML for 32 bit CPUs into qemu-*-aarch64
 * implement guest breakpoint support

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJUKaQhAAoJEDwlJe0UNgzeDcQQAKwLgp/Z3G+uZ8r3eh/J1iJ7
RRfkiM9WqM92sZ6VvS6bJ48Q8ESiSF1csk2PKW1tuDx2pbpAG6MDJvFs//YLZWx2
gsSGZJ8grlvEZ/Tv10iJ5ttU1vOWicbw5FgvIrdel/94wr/gChvu64p2DIrXDslD
GPbr5wVNb/XXa3HuUNX66L2soxpf+YJc2fGunycp0aRRbBL8G+4dnPAt89iVFN2R
PGo/VL2qSGmyTpUk7JSQK12YBE/4BFNu+8KUgiUqVS1rIan8NkVU97VVKRlcd6e9
wl5s0Bb1sAow6hZMoNpnXPnDD8LF3w1SCXPM3mLUq4/iO8t2uuITq89UCNsjFrYy
QRkSq6PC0MLry+31Bfw9loDbEHMxs2fMwCQCJEdtVfZgHY/bRXAPLU/0luwvnPRU
gofqBtypakRzJpaDjA/y0KapaMtJr0KOrlcbntNztPFOf4X9QtGZaQ2fzrAfxtSR
7VbHdXKAwmWN2Q4r4GE0ixA0eCZWS9ji87LHv380pPkuOaYnoRq+A1KQlypkRnNr
ifR/Iz6QXB1GPFpbUiyh9WIltIIS4mwsOi/Y0rS7dGc40okItceb+ezulCNDPKNZ
IX+XDR5YFEKxWzoklMgLLZPFPjxSRKMNG6Grpowl+PH+fFsBz4hObkFMn2zajn8g
zp0lcq++iyjIE/GNkBXw
=LA6N
-----END PGP SIGNATURE-----

# gpg: Signature made Mon 29 Sep 2014 19:25:37 BST using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"

* remotes/pmaydell/tags/pull-target-arm-20140929:
  target-arm: Add support for VIRQ and VFIQ
  target-arm: Add IRQ and FIQ routing to EL2 and 3
  target-arm: A64: Emulate the SMC insn
  target-arm: Add a Hypervisor Trap exception type
  target-arm: A64: Emulate the HVC insn
  target-arm: A64: Correct updates to FAR and ESR on exceptions
  target-arm: Don't take interrupts targeting lower ELs
  target-arm: Break out exception masking to a separate func
  target-arm: A64: Refactor aarch64_cpu_do_interrupt
  target-arm: Add SCR_EL3
  target-arm: Add HCR_EL2
  target-arm: Don't handle c15_cpar changes via tb_flush()
  hw/input/tsc210x.c: Delete unused array tsc2101_rates
  hw/display/pxa2xx_lcd.c: Remove unused function pxa2xx_dma_rdst_set
  hw/intc/imx_avic.c: Remove unused function imx_avic_set_prio()
  hw/display/blizzard.c: Delete unused function blizzard_rgb2yuv
  configure: Build GDB XML for 32 bit ARM CPUs into qemu aarch64 binaries
  target-arm: Implement handling of breakpoint firing
  target-arm: Implement setting guest breakpoints

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 29429c7244
configure

@@ -5028,7 +5028,7 @@ case "$target_name" in
   aarch64)
     TARGET_BASE_ARCH=arm
     bflt="yes"
-    gdb_xml_files="aarch64-core.xml aarch64-fpu.xml"
+    gdb_xml_files="aarch64-core.xml aarch64-fpu.xml arm-core.xml arm-vfp.xml arm-vfp3.xml arm-neon.xml"
   ;;
   cris)
   ;;
hw/display/blizzard.c

@@ -134,14 +134,6 @@ static const int blizzard_iformat_bpp[0x10] = {
     0, 0, 0, 0, 0, 0,
 };
 
-static inline void blizzard_rgb2yuv(int r, int g, int b,
-                int *y, int *u, int *v)
-{
-    *y = 0x10 + ((0x838 * r + 0x1022 * g + 0x322 * b) >> 13);
-    *u = 0x80 + ((0xe0e * b - 0x04c1 * r - 0x94e * g) >> 13);
-    *v = 0x80 + ((0xe0e * r - 0x0bc7 * g - 0x247 * b) >> 13);
-}
-
 static void blizzard_window(BlizzardState *s)
 {
     DisplaySurface *surface = qemu_console_surface(s->con);
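The helper deleted above is a fixed-point RGB-to-YUV conversion: each constant is a colour weight scaled by 2^13, which the trailing `>> 13` undoes, and the recovered weights look like the usual BT.601 studio-swing coefficients. The following standalone program is illustrative only (it is not part of the patch or of QEMU); it just prints the floating-point weights hidden in those constants.

    #include <stdio.h>

    /* Illustrative only: recover the colour weights encoded by the Q13
     * fixed-point constants of the deleted blizzard_rgb2yuv()
     * (each weight is constant / 2^13 = constant / 8192).
     */
    int main(void)
    {
        printf("Y: %.3f %.3f %.3f\n",
               0x838 / 8192.0, 0x1022 / 8192.0, 0x322 / 8192.0);   /* ~0.257 0.504 0.098 */
        printf("U: %.3f %.3f %.3f\n",
               -0x04c1 / 8192.0, -0x94e / 8192.0, 0xe0e / 8192.0); /* ~-0.148 -0.291 0.439 */
        printf("V: %.3f %.3f %.3f\n",
               0xe0e / 8192.0, -0x0bc7 / 8192.0, -0x247 / 8192.0); /* ~0.439 -0.368 -0.071 */
        return 0;
    }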
hw/display/pxa2xx_lcd.c

@@ -279,14 +279,6 @@ static inline void pxa2xx_dma_ber_set(PXA2xxLCDState *s, int ch)
     s->liidr = s->dma_ch[ch].id;
 }
 
-/* Set Read Status interrupt high and poke associated registers */
-static inline void pxa2xx_dma_rdst_set(PXA2xxLCDState *s)
-{
-    s->status[0] |= LCSR0_RDST;
-    if (s->irqlevel && !(s->control[0] & LCCR0_RDSTM))
-        s->status[0] |= LCSR0_SINT;
-}
-
 /* Load new Frame Descriptors from DMA */
 static void pxa2xx_descriptor_load(PXA2xxLCDState *s)
 {
hw/input/tsc210x.c

@@ -215,36 +215,6 @@ typedef struct {
     int fsref;
 } TSC210xRateInfo;
 
-/*  { rate,  dsor,  fsref } */
-static const TSC210xRateInfo tsc2101_rates[] = {
-    /* Fsref / 6.0 */
-    { 7350,     7,      1 },
-    { 8000,     7,      0 },
-    /* Fsref / 5.5 */
-    { 8018,     6,      1 },
-    { 8727,     6,      0 },
-    /* Fsref / 5.0 */
-    { 8820,     5,      1 },
-    { 9600,     5,      0 },
-    /* Fsref / 4.0 */
-    { 11025,    4,      1 },
-    { 12000,    4,      0 },
-    /* Fsref / 3.0 */
-    { 14700,    3,      1 },
-    { 16000,    3,      0 },
-    /* Fsref / 2.0 */
-    { 22050,    2,      1 },
-    { 24000,    2,      0 },
-    /* Fsref / 1.5 */
-    { 29400,    1,      1 },
-    { 32000,    1,      0 },
-    /* Fsref */
-    { 44100,    0,      1 },
-    { 48000,    0,      0 },
-
-    { 0,        0,      0 },
-};
-
 /*  { rate,  dsor,  fsref } */
 static const TSC210xRateInfo tsc2102_rates[] = {
     /* Fsref / 6.0 */
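Every entry in the deleted tsc2101_rates table is simply the audio reference clock divided by the ladder of divisors named in its comments, with the fsref column selecting a 44.1 kHz or 48 kHz reference. The short standalone check below is illustrative only (not part of the patch); it regenerates the rate column from those divisors.

    #include <stdio.h>

    /* Illustrative only: rebuild the rate column of the deleted
     * tsc2101_rates[] from the divisor ladder in its comments.
     * fsref == 1 selects the 44.1 kHz reference, fsref == 0 the 48 kHz one.
     */
    int main(void)
    {
        static const double divisor[] = { 6.0, 5.5, 5.0, 4.0, 3.0, 2.0, 1.5, 1.0 };

        for (int i = 0; i < 8; i++) {
            printf("%5.0f  %5.0f\n", 44100.0 / divisor[i], 48000.0 / divisor[i]);
        }
        return 0;   /* 7350 8000, 8018 8727, ... 44100 48000 */
    }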
hw/intc/imx_avic.c

@@ -97,15 +97,6 @@ static inline int imx_avic_prio(IMXAVICState *s, int irq)
     return 0xf & (s->prio[word] >> part);
 }
 
-static inline void imx_avic_set_prio(IMXAVICState *s, int irq, int prio)
-{
-    uint32_t word = irq / PRIO_PER_WORD;
-    uint32_t part = 4 * (irq % PRIO_PER_WORD);
-    uint32_t mask = ~(0xf << part);
-    s->prio[word] &= mask;
-    s->prio[word] |= prio << part;
-}
-
 /* Update interrupts.  */
 static void imx_avic_update(IMXAVICState *s)
 {
@@ -41,7 +41,9 @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
 static bool arm_cpu_has_work(CPUState *cs)
 {
     return cs->interrupt_request &
-        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
+        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
+         | CPU_INTERRUPT_EXITTB);
 }
 
 static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
@@ -173,11 +175,6 @@ static void arm_cpu_reset(CPUState *s)
     set_float_detect_tininess(float_tininess_before_rounding,
                               &env->vfp.standard_fp_status);
     tlb_flush(s, 1);
-    /* Reset is a state change for some CPUARMState fields which we
-     * bake assumptions about into translated code, so we need to
-     * tb_flush().
-     */
-    tb_flush(env);
 
 #ifndef CONFIG_USER_ONLY
     if (kvm_enabled()) {
@@ -185,18 +182,17 @@ static void arm_cpu_reset(CPUState *s)
     }
 #endif
 
+    hw_breakpoint_update_all(cpu);
     hw_watchpoint_update_all(cpu);
 }
 
 bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     CPUClass *cc = CPU_GET_CLASS(cs);
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
     bool ret = false;
 
     if (interrupt_request & CPU_INTERRUPT_FIQ
-        && !(env->daif & PSTATE_F)) {
+        && arm_excp_unmasked(cs, EXCP_FIQ)) {
         cs->exception_index = EXCP_FIQ;
         cc->do_interrupt(cs);
         ret = true;
@@ -211,12 +207,23 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
        We avoid this by disabling interrupts when
        pc contains a magic address.  */
     if (interrupt_request & CPU_INTERRUPT_HARD
-        && !(env->daif & PSTATE_I)
-        && (!IS_M(env) || env->regs[15] < 0xfffffff0)) {
+        && arm_excp_unmasked(cs, EXCP_IRQ)) {
         cs->exception_index = EXCP_IRQ;
         cc->do_interrupt(cs);
         ret = true;
     }
+    if (interrupt_request & CPU_INTERRUPT_VIRQ
+        && arm_excp_unmasked(cs, EXCP_VIRQ)) {
+        cs->exception_index = EXCP_VIRQ;
+        cc->do_interrupt(cs);
+        ret = true;
+    }
+    if (interrupt_request & CPU_INTERRUPT_VFIQ
+        && arm_excp_unmasked(cs, EXCP_VFIQ)) {
+        cs->exception_index = EXCP_VFIQ;
+        cc->do_interrupt(cs);
+        ret = true;
+    }
 
     return ret;
 }
@@ -225,21 +232,29 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 static void arm_cpu_set_irq(void *opaque, int irq, int level)
 {
     ARMCPU *cpu = opaque;
+    CPUARMState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
+    static const int mask[] = {
+        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
+        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
+        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
+        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+    };
 
     switch (irq) {
-    case ARM_CPU_IRQ:
-        if (level) {
-            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
-        } else {
-            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+    case ARM_CPU_VIRQ:
+    case ARM_CPU_VFIQ:
+        if (!arm_feature(env, ARM_FEATURE_EL2)) {
+            hw_error("%s: Virtual interrupt line %d with no EL2 support\n",
+                     __func__, irq);
         }
-        break;
+        /* fall through */
+    case ARM_CPU_IRQ:
     case ARM_CPU_FIQ:
         if (level) {
-            cpu_interrupt(cs, CPU_INTERRUPT_FIQ);
+            cpu_interrupt(cs, mask[irq]);
         } else {
-            cpu_reset_interrupt(cs, CPU_INTERRUPT_FIQ);
+            cpu_reset_interrupt(cs, mask[irq]);
         }
         break;
     default:
@@ -289,9 +304,12 @@ static void arm_cpu_initfn(Object *obj)
 #ifndef CONFIG_USER_ONLY
     /* Our inbound IRQ and FIQ lines */
     if (kvm_enabled()) {
-        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 2);
+        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
+         * the same interface as non-KVM CPUs.
+         */
+        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
     } else {
-        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 2);
+        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
     }
 
     cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
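The arm_cpu_set_irq() change above (presumably in target-arm/cpu.c) replaces the per-line switch cases with a lookup table indexed by the GPIO line number, so raising or lowering any of the four inputs becomes a single cpu_interrupt()/cpu_reset_interrupt() call. The standalone sketch below mirrors that table-driven pattern with stand-in bit values (the real CPU_INTERRUPT_* constants are QEMU-internal), purely to illustrate the refactoring.

    #include <stdio.h>

    /* Illustrative only: the table-driven raise/lower pattern used by the
     * new arm_cpu_set_irq().  Bit values here are stand-ins.
     */
    enum { ARM_CPU_IRQ, ARM_CPU_FIQ, ARM_CPU_VIRQ, ARM_CPU_VFIQ };

    int main(void)
    {
        static const int mask[] = {
            [ARM_CPU_IRQ]  = 1 << 0,
            [ARM_CPU_FIQ]  = 1 << 1,
            [ARM_CPU_VIRQ] = 1 << 2,
            [ARM_CPU_VFIQ] = 1 << 3,
        };
        int interrupt_request = 0;

        interrupt_request |= mask[ARM_CPU_VIRQ];    /* level high: raise */
        printf("raised:  0x%x\n", interrupt_request);
        interrupt_request &= ~mask[ARM_CPU_VIRQ];   /* level low: lower */
        printf("lowered: 0x%x\n", interrupt_request);
        return 0;
    }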
target-arm/cpu.h
@@ -51,6 +51,11 @@
 #define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
 #define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
 #define EXCP_STREX          10
+#define EXCP_HVC            11   /* HyperVisor Call */
+#define EXCP_HYP_TRAP       12
+#define EXCP_SMC            13   /* Secure Monitor Call */
+#define EXCP_VIRQ           14
+#define EXCP_VFIQ           15
 
 #define ARMV7M_EXCP_RESET   1
 #define ARMV7M_EXCP_NMI     2
@@ -65,6 +70,8 @@
 
 /* ARM-specific interrupt pending bits.  */
 #define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
 
 /* The usual mapping for an AArch64 system register to its AArch32
  * counterpart is for the 32 bit world to have access to the lower
@@ -80,9 +87,11 @@
 #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
 #endif
 
-/* Meanings of the ARMCPU object's two inbound GPIO lines */
+/* Meanings of the ARMCPU object's four inbound GPIO lines */
 #define ARM_CPU_IRQ 0
 #define ARM_CPU_FIQ 1
+#define ARM_CPU_VIRQ 2
+#define ARM_CPU_VFIQ 3
 
 typedef void ARMWriteCPFunc(void *opaque, int cp_info,
                             int srcreg, int operand, uint32_t value);
@@ -172,7 +181,6 @@ typedef struct CPUARMState {
         uint64_t c1_sys; /* System control register.  */
         uint64_t c1_coproc; /* Coprocessor access register.  */
         uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
-        uint32_t c1_scr; /* secure config register.  */
         uint64_t ttbr0_el1; /* MMU translation table base 0.  */
         uint64_t ttbr1_el1; /* MMU translation table base 1.  */
         uint64_t c2_control; /* MMU translation table base control.  */
@@ -184,6 +192,8 @@ typedef struct CPUARMState {
                                MPU write buffer control.  */
         uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
         uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
+        uint64_t hcr_el2; /* Hypervisor configuration register */
+        uint64_t scr_el3; /* Secure configuration register.  */
         uint32_t ifsr_el2; /* Fault status registers.  */
         uint64_t esr_el[4];
         uint32_t c6_region[8]; /* MPU base/size registers.  */
@@ -323,6 +333,7 @@ typedef struct CPUARMState {
     int eabi;
 #endif
 
+    struct CPUBreakpoint *cpu_breakpoint[16];
     struct CPUWatchpoint *cpu_watchpoint[16];
 
     CPU_COMMON
@@ -498,6 +509,12 @@ void pmccntr_sync(CPUARMState *env);
 #define PSTATE_MODE_EL1t 4
 #define PSTATE_MODE_EL0t 0
 
+/* Map EL and handler into a PSTATE_MODE.  */
+static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
+{
+    return (el << 2) | handler;
+}
+
 /* Return the current PSTATE value. For the moment we don't support 32<->64 bit
  * interprocessing, so we don't attempt to sync with the cpsr state used by
  * the 32 bit decoder.
@@ -565,6 +582,58 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
     }
 }
 
+#define HCR_VM        (1ULL << 0)
+#define HCR_SWIO      (1ULL << 1)
+#define HCR_PTW       (1ULL << 2)
+#define HCR_FMO       (1ULL << 3)
+#define HCR_IMO       (1ULL << 4)
+#define HCR_AMO       (1ULL << 5)
+#define HCR_VF        (1ULL << 6)
+#define HCR_VI        (1ULL << 7)
+#define HCR_VSE       (1ULL << 8)
+#define HCR_FB        (1ULL << 9)
+#define HCR_BSU_MASK  (3ULL << 10)
+#define HCR_DC        (1ULL << 12)
+#define HCR_TWI       (1ULL << 13)
+#define HCR_TWE       (1ULL << 14)
+#define HCR_TID0      (1ULL << 15)
+#define HCR_TID1      (1ULL << 16)
+#define HCR_TID2      (1ULL << 17)
+#define HCR_TID3      (1ULL << 18)
+#define HCR_TSC       (1ULL << 19)
+#define HCR_TIDCP     (1ULL << 20)
+#define HCR_TACR      (1ULL << 21)
+#define HCR_TSW       (1ULL << 22)
+#define HCR_TPC       (1ULL << 23)
+#define HCR_TPU       (1ULL << 24)
+#define HCR_TTLB      (1ULL << 25)
+#define HCR_TVM       (1ULL << 26)
+#define HCR_TGE       (1ULL << 27)
+#define HCR_TDZ       (1ULL << 28)
+#define HCR_HCD       (1ULL << 29)
+#define HCR_TRVM      (1ULL << 30)
+#define HCR_RW        (1ULL << 31)
+#define HCR_CD        (1ULL << 32)
+#define HCR_ID        (1ULL << 33)
+#define HCR_MASK      ((1ULL << 34) - 1)
+
+#define SCR_NS                (1U << 0)
+#define SCR_IRQ               (1U << 1)
+#define SCR_FIQ               (1U << 2)
+#define SCR_EA                (1U << 3)
+#define SCR_FW                (1U << 4)
+#define SCR_AW                (1U << 5)
+#define SCR_NET               (1U << 6)
+#define SCR_SMD               (1U << 7)
+#define SCR_HCE               (1U << 8)
+#define SCR_SIF               (1U << 9)
+#define SCR_RW                (1U << 10)
+#define SCR_ST                (1U << 11)
+#define SCR_TWI               (1U << 12)
+#define SCR_TWE               (1U << 13)
+#define SCR_AARCH32_MASK      (0x3fff & ~(SCR_RW | SCR_ST))
+#define SCR_AARCH64_MASK      (0x3fff & ~SCR_NET)
+
 /* Return the current FPSCR value.  */
 uint32_t vfp_get_fpscr(CPUARMState *env);
 void vfp_set_fpscr(CPUARMState *env, uint32_t val);
@@ -701,6 +770,7 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el)
 }
 
 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx);
 
 /* Interface between CPU and Interrupt controller.  */
 void armv7m_nvic_set_pending(void *opaque, int irq);
@@ -1111,6 +1181,61 @@ bool write_cpustate_to_list(ARMCPU *cpu);
 # define TARGET_VIRT_ADDR_SPACE_BITS 32
 #endif
 
+static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
+{
+    CPUARMState *env = cs->env_ptr;
+    unsigned int cur_el = arm_current_pl(env);
+    unsigned int target_el = arm_excp_target_el(cs, excp_idx);
+    /* FIXME: Use actual secure state.  */
+    bool secure = false;
+    /* If in EL1/0, Physical IRQ routing to EL2 only happens from NS state.  */
+    bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2;
+    /* ARMv7-M interrupt return works by loading a magic value
+     * into the PC.  On real hardware the load causes the
+     * return to occur.  The qemu implementation performs the
+     * jump normally, then does the exception return when the
+     * CPU tries to execute code at the magic address.
+     * This will cause the magic PC value to be pushed to
+     * the stack if an interrupt occurred at the wrong time.
+     * We avoid this by disabling interrupts when
+     * pc contains a magic address.
+     */
+    bool irq_unmasked = !(env->daif & PSTATE_I)
+                        && (!IS_M(env) || env->regs[15] < 0xfffffff0);
+
+    /* Don't take exceptions if they target a lower EL.  */
+    if (cur_el > target_el) {
+        return false;
+    }
+
+    switch (excp_idx) {
+    case EXCP_FIQ:
+        if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_FMO)) {
+            return true;
+        }
+        return !(env->daif & PSTATE_F);
+    case EXCP_IRQ:
+        if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) {
+            return true;
+        }
+        return irq_unmasked;
+    case EXCP_VFIQ:
+        if (!secure && !(env->cp15.hcr_el2 & HCR_FMO)) {
+            /* VFIQs are only taken when hypervized and non-secure.  */
+            return false;
+        }
+        return !(env->daif & PSTATE_F);
+    case EXCP_VIRQ:
+        if (!secure && !(env->cp15.hcr_el2 & HCR_IMO)) {
+            /* VIRQs are only taken when hypervized and non-secure.  */
+            return false;
+        }
+        return irq_unmasked;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static inline CPUARMState *cpu_init(const char *cpu_model)
 {
     ARMCPU *cpu = cpu_arm_init(cpu_model);
@@ -1223,6 +1348,11 @@ static inline bool arm_singlestep_active(CPUARMState *env)
 #define ARM_TBFLAG_SS_ACTIVE_MASK   (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
 #define ARM_TBFLAG_PSTATE_SS_SHIFT  19
 #define ARM_TBFLAG_PSTATE_SS_MASK   (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
+/* We store the bottom two bits of the CPAR as TB flags and handle
+ * checks on the other bits at runtime
+ */
+#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20
+#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
 
 /* Bit usage when in AArch64 state */
 #define ARM_TBFLAG_AA64_EL_SHIFT    0
@@ -1257,6 +1387,8 @@ static inline bool arm_singlestep_active(CPUARMState *env)
     (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
 #define ARM_TBFLAG_PSTATE_SS(F) \
     (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
+#define ARM_TBFLAG_XSCALE_CPAR(F) \
+    (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
 #define ARM_TBFLAG_AA64_EL(F) \
     (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
 #define ARM_TBFLAG_AA64_FPEN(F) \
@@ -1334,6 +1466,8 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                 *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
             }
         }
+        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
+                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
     }
 
     *cs_base = 0;
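The new arm_excp_unmasked() above combines two ideas: an exception is never taken to a lower EL than the current one, and a physical IRQ or FIQ that HCR_EL2.IMO/FMO routes to EL2 is not subject to the PSTATE.I/F masking bits when running in non-secure EL0/EL1. The standalone sketch below (not QEMU code) restates just the IRQ branch of that decision under the same assumptions (non-secure state, ARMv7-M magic-address special case ignored).

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative only: the IRQ branch of arm_excp_unmasked() above. */
    static bool irq_unmasked(unsigned cur_el, unsigned target_el,
                             bool hcr_imo, bool pstate_i)
    {
        bool irq_can_hyp = cur_el < 2 && target_el == 2;

        if (cur_el > target_el) {
            return false;   /* never take an exception to a lower EL */
        }
        if (irq_can_hyp && hcr_imo) {
            return true;    /* routed to EL2: PSTATE.I does not mask it */
        }
        return !pstate_i;
    }

    int main(void)
    {
        printf("%d\n", irq_unmasked(1, 2, true, true));  /* 1: taken to EL2 */
        printf("%d\n", irq_unmasked(1, 1, false, true)); /* 0: masked by PSTATE.I */
        return 0;
    }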
@@ -443,10 +443,12 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
-    target_ulong addr = env->cp15.vbar_el[1];
+    unsigned int new_el = arm_excp_target_el(cs, cs->exception_index);
+    target_ulong addr = env->cp15.vbar_el[new_el];
+    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
     int i;
 
-    if (arm_current_pl(env) == 0) {
+    if (arm_current_pl(env) < new_el) {
         if (env->aarch64) {
             addr += 0x400;
         } else {
@@ -464,23 +466,27 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
                       env->exception.syndrome);
     }
 
-    env->cp15.esr_el[1] = env->exception.syndrome;
-    env->cp15.far_el[1] = env->exception.vaddress;
-
     switch (cs->exception_index) {
     case EXCP_PREFETCH_ABORT:
     case EXCP_DATA_ABORT:
+        env->cp15.far_el[new_el] = env->exception.vaddress;
         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
-                      env->cp15.far_el[1]);
-        break;
+                      env->cp15.far_el[new_el]);
+        /* fall through */
     case EXCP_BKPT:
     case EXCP_UDEF:
     case EXCP_SWI:
+    case EXCP_HVC:
+    case EXCP_HYP_TRAP:
+    case EXCP_SMC:
+        env->cp15.esr_el[new_el] = env->exception.syndrome;
         break;
     case EXCP_IRQ:
+    case EXCP_VIRQ:
         addr += 0x80;
         break;
     case EXCP_FIQ:
+    case EXCP_VFIQ:
         addr += 0x100;
         break;
     default:
@@ -488,15 +494,15 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
     }
 
     if (is_a64(env)) {
-        env->banked_spsr[aarch64_banked_spsr_index(1)] = pstate_read(env);
+        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
         aarch64_save_sp(env, arm_current_pl(env));
-        env->elr_el[1] = env->pc;
+        env->elr_el[new_el] = env->pc;
     } else {
         env->banked_spsr[0] = cpsr_read(env);
         if (!env->thumb) {
-            env->cp15.esr_el[1] |= 1 << 25;
+            env->cp15.esr_el[new_el] |= 1 << 25;
         }
-        env->elr_el[1] = env->regs[15];
+        env->elr_el[new_el] = env->regs[15];
 
         for (i = 0; i < 15; i++) {
             env->xregs[i] = env->regs[i];
@@ -505,9 +511,9 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
         env->condexec_bits = 0;
     }
 
-    pstate_write(env, PSTATE_DAIF | PSTATE_MODE_EL1h);
+    pstate_write(env, PSTATE_DAIF | new_mode);
     env->aarch64 = 1;
-    aarch64_restore_sp(env, 1);
+    aarch64_restore_sp(env, new_el);
 
     env->pc = addr;
     cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
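In the refactored aarch64_cpu_do_interrupt() above (presumably target-arm/helper-a64.c) the vector address is now built from VBAR_ELx of the target EL: 0x400 is added when the exception comes from a lower EL that was using AArch64, and 0x80 or 0x100 selects the IRQ or FIQ entry within the chosen group. A standalone sketch of that arithmetic (not QEMU code; the AArch32-lower-EL and remaining vector offsets are omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: vector-address selection as done above. */
    static uint64_t vector_addr(uint64_t vbar, unsigned cur_el, unsigned new_el,
                                int is_irq, int is_fiq)
    {
        uint64_t addr = vbar;

        if (cur_el < new_el) {
            addr += 0x400;          /* taken from a lower EL using AArch64 */
        }
        if (is_irq) {
            addr += 0x80;
        } else if (is_fiq) {
            addr += 0x100;
        }
        return addr;
    }

    int main(void)
    {
        /* IRQ from EL0 to EL1 with VBAR_EL1 = 0x40000000 -> 0x40000480 */
        printf("0x%llx\n",
               (unsigned long long)vector_addr(0x40000000, 0, 1, 1, 0));
        return 0;
    }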
@ -747,6 +747,32 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
raw_write(env, ri, value & ~0x1FULL);
|
||||
}
|
||||
|
||||
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
|
||||
{
|
||||
/* We only mask off bits that are RES0 both for AArch64 and AArch32.
|
||||
* For bits that vary between AArch32/64, code needs to check the
|
||||
* current execution mode before directly using the feature bit.
|
||||
*/
|
||||
uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
|
||||
|
||||
if (!arm_feature(env, ARM_FEATURE_EL2)) {
|
||||
valid_mask &= ~SCR_HCE;
|
||||
|
||||
/* On ARMv7, SMD (or SCD as it is called in v7) is only
|
||||
* supported if EL2 exists. The bit is UNK/SBZP when
|
||||
* EL2 is unavailable. In QEMU ARMv7, we force it to always zero
|
||||
* when EL2 is unavailable.
|
||||
*/
|
||||
if (arm_feature(env, ARM_FEATURE_V7)) {
|
||||
valid_mask &= ~SCR_SMD;
|
||||
}
|
||||
}
|
||||
|
||||
/* Clear all-context RES0 bits. */
|
||||
value &= valid_mask;
|
||||
raw_write(env, ri, value);
|
||||
}
|
||||
|
||||
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
|
||||
{
|
||||
ARMCPU *cpu = arm_env_get_cpu(env);
|
||||
@ -873,8 +899,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[1]),
|
||||
.resetvalue = 0 },
|
||||
{ .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
|
||||
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
|
||||
.resetvalue = 0, },
|
||||
.access = PL1_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
|
||||
.resetvalue = 0, .writefn = scr_write },
|
||||
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
|
||||
.access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
|
||||
@ -1714,12 +1740,7 @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
|
||||
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
uint64_t value)
|
||||
{
|
||||
value &= 0x3fff;
|
||||
if (env->cp15.c15_cpar != value) {
|
||||
/* Changes cp0 to cp13 behavior, so needs a TB flush. */
|
||||
tb_flush(env);
|
||||
env->cp15.c15_cpar = value;
|
||||
}
|
||||
env->cp15.c15_cpar = value & 0x3fff;
|
||||
}
|
||||
|
||||
static const ARMCPRegInfo xscale_cp_reginfo[] = {
|
||||
@ -2230,10 +2251,44 @@ static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
|
||||
.opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
|
||||
.access = PL2_RW,
|
||||
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
|
||||
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
|
||||
.type = ARM_CP_NO_MIGRATE,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
|
||||
.access = PL2_RW,
|
||||
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
|
||||
REGINFO_SENTINEL
|
||||
};
|
||||
|
||||
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
|
||||
{
|
||||
ARMCPU *cpu = arm_env_get_cpu(env);
|
||||
uint64_t valid_mask = HCR_MASK;
|
||||
|
||||
if (arm_feature(env, ARM_FEATURE_EL3)) {
|
||||
valid_mask &= ~HCR_HCD;
|
||||
} else {
|
||||
valid_mask &= ~HCR_TSC;
|
||||
}
|
||||
|
||||
/* Clear RES0 bits. */
|
||||
value &= valid_mask;
|
||||
|
||||
/* These bits change the MMU setup:
|
||||
* HCR_VM enables stage 2 translation
|
||||
* HCR_PTW forbids certain page-table setups
|
||||
* HCR_DC Disables stage1 and enables stage2 translation
|
||||
*/
|
||||
if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
|
||||
tlb_flush(CPU(cpu), 1);
|
||||
}
|
||||
raw_write(env, ri, value);
|
||||
}
|
||||
|
||||
static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
|
||||
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
|
||||
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
|
||||
.writefn = hcr_write },
|
||||
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
|
||||
.type = ARM_CP_NO_MIGRATE,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
|
||||
@ -2280,6 +2335,11 @@ static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
|
||||
.access = PL3_RW, .writefn = vbar_write,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
|
||||
.resetvalue = 0 },
|
||||
{ .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
|
||||
.type = ARM_CP_NO_MIGRATE,
|
||||
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
|
||||
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
|
||||
.writefn = scr_write },
|
||||
REGINFO_SENTINEL
|
||||
};
|
||||
|
||||
@@ -2492,6 +2552,124 @@ static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     hw_watchpoint_update(cpu, i);
 }
 
+void hw_breakpoint_update(ARMCPU *cpu, int n)
+{
+    CPUARMState *env = &cpu->env;
+    uint64_t bvr = env->cp15.dbgbvr[n];
+    uint64_t bcr = env->cp15.dbgbcr[n];
+    vaddr addr;
+    int bt;
+    int flags = BP_CPU;
+
+    if (env->cpu_breakpoint[n]) {
+        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
+        env->cpu_breakpoint[n] = NULL;
+    }
+
+    if (!extract64(bcr, 0, 1)) {
+        /* E bit clear : watchpoint disabled */
+        return;
+    }
+
+    bt = extract64(bcr, 20, 4);
+
+    switch (bt) {
+    case 4: /* unlinked address mismatch (reserved if AArch64) */
+    case 5: /* linked address mismatch (reserved if AArch64) */
+        qemu_log_mask(LOG_UNIMP,
+                      "arm: address mismatch breakpoint types not implemented");
+        return;
+    case 0: /* unlinked address match */
+    case 1: /* linked address match */
+    {
+        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
+         * we behave as if the register was sign extended. Bits [1:0] are
+         * RES0. The BAS field is used to allow setting breakpoints on 16
+         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
+         * a bp will fire if the addresses covered by the bp and the addresses
+         * covered by the insn overlap but the insn doesn't start at the
+         * start of the bp address range. We choose to require the insn and
+         * the bp to have the same address. The constraints on writing to
+         * BAS enforced in dbgbcr_write mean we have only four cases:
+         *  0b0000  => no breakpoint
+         *  0b0011  => breakpoint on addr
+         *  0b1100  => breakpoint on addr + 2
+         *  0b1111  => breakpoint on addr
+         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
+         */
+        int bas = extract64(bcr, 5, 4);
+        addr = sextract64(bvr, 0, 49) & ~3ULL;
+        if (bas == 0) {
+            return;
+        }
+        if (bas == 0xc) {
+            addr += 2;
+        }
+        break;
+    }
+    case 2: /* unlinked context ID match */
+    case 8: /* unlinked VMID match (reserved if no EL2) */
+    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
+        qemu_log_mask(LOG_UNIMP,
+                      "arm: unlinked context breakpoint types not implemented");
+        return;
+    case 9: /* linked VMID match (reserved if no EL2) */
+    case 11: /* linked context ID and VMID match (reserved if no EL2) */
+    case 3: /* linked context ID match */
+    default:
+        /* We must generate no events for Linked context matches (unless
+         * they are linked to by some other bp/wp, which is handled in
+         * updates for the linking bp/wp). We choose to also generate no events
+         * for reserved values.
+         */
+        return;
+    }
+
+    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
+}
+
+void hw_breakpoint_update_all(ARMCPU *cpu)
+{
+    int i;
+    CPUARMState *env = &cpu->env;
+
+    /* Completely clear out existing QEMU breakpoints and our array, to
+     * avoid possible stale entries following migration load.
+     */
+    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
+    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
+
+    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
+        hw_breakpoint_update(cpu, i);
+    }
+}
+
+static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                         uint64_t value)
+{
+    ARMCPU *cpu = arm_env_get_cpu(env);
+    int i = ri->crm;
+
+    raw_write(env, ri, value);
+    hw_breakpoint_update(cpu, i);
+}
+
+static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                         uint64_t value)
+{
+    ARMCPU *cpu = arm_env_get_cpu(env);
+    int i = ri->crm;
+
+    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
+     * copy of BAS[0].
+     */
+    value = deposit64(value, 6, 1, extract64(value, 5, 1));
+    value = deposit64(value, 8, 1, extract64(value, 7, 1));
+
+    raw_write(env, ri, value);
+    hw_breakpoint_update(cpu, i);
+}
+
 static void define_debug_regs(ARMCPU *cpu)
 {
     /* Define v7 and v8 architectural debug registers.
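hw_breakpoint_update() above converts a DBGBVR/DBGBCR pair into a QEMU breakpoint: bits [48:2] of the value register are sign-extended to form the address, and the byte-address-select field BAS (DBGBCR bits [8:5]) decides whether the breakpoint sits on that address or on the following halfword. A standalone sketch of that decode, illustrative only and using a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: mirror the address derivation done above.  Bit 48 of
     * DBGBVR is replicated upwards, and BAS == 0b1100 selects addr + 2.
     */
    static uint64_t sextract49(uint64_t v)
    {
        uint64_t sign = 1ULL << 48;

        v &= (1ULL << 49) - 1;
        return (v ^ sign) - sign;
    }

    int main(void)
    {
        uint64_t bvr = 0x80001234;                 /* made-up guest value */
        uint64_t bcr = (0xcULL << 5) | 1;          /* E = 1, BAS = 0b1100 */
        int bas = (bcr >> 5) & 0xf;
        uint64_t addr = sextract49(bvr) & ~3ULL;

        if (bas == 0xc) {
            addr += 2;
        }
        printf("0x%llx\n", (unsigned long long)addr); /* prints 0x80001236 */
        return 0;
    }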
@@ -2533,11 +2711,15 @@ static void define_debug_regs(ARMCPU *cpu)
             { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
               .access = PL1_RW,
-              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
+              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
+              .writefn = dbgbvr_write, .raw_writefn = raw_write
+            },
             { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
               .access = PL1_RW,
-              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
+              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
+              .writefn = dbgbcr_write, .raw_writefn = raw_write
+            },
             REGINFO_SENTINEL
         };
         define_arm_cp_regs(cpu, dbgregs);
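The dbgbcr_write() hook registered above keeps BAS in one of the four shapes hw_breakpoint_update() knows how to handle, by forcing BAS[3] to mirror BAS[2] and BAS[1] to mirror BAS[0] on every guest write. A standalone sketch of that folding, illustrative only, with QEMU's deposit64()/extract64() replaced by plain shifts:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: bit 6 (BAS[1]) is forced to a copy of bit 5 (BAS[0])
     * and bit 8 (BAS[3]) to a copy of bit 7 (BAS[2]).
     */
    static uint64_t fold_bas(uint64_t bcr)
    {
        bcr = (bcr & ~(1ULL << 6)) | (((bcr >> 5) & 1) << 6);
        bcr = (bcr & ~(1ULL << 8)) | (((bcr >> 7) & 1) << 8);
        return bcr;
    }

    int main(void)
    {
        uint64_t bcr = 0x1ULL << 5;   /* guest tries to write BAS = 0b0001 */

        printf("BAS = 0x%llx\n",
               (unsigned long long)((fold_bas(bcr) >> 5) & 0xf)); /* 0x3 */
        return 0;
    }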
@@ -3522,6 +3704,11 @@ uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
     return 0;
 }
 
+unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
+{
+    return 1;
+}
+
 #else
 
 /* Map CPU modes onto saved register banks.  */
@@ -3577,6 +3764,57 @@ void switch_mode(CPUARMState *env, int mode)
     env->spsr = env->banked_spsr[i];
 }
 
+/*
+ * Determine the target EL for a given exception type.
+ */
+unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    unsigned int cur_el = arm_current_pl(env);
+    unsigned int target_el;
+    /* FIXME: Use actual secure state.  */
+    bool secure = false;
+
+    if (!env->aarch64) {
+        /* TODO: Add EL2 and 3 exception handling for AArch32.  */
+        return 1;
+    }
+
+    switch (excp_idx) {
+    case EXCP_HVC:
+    case EXCP_HYP_TRAP:
+        target_el = 2;
+        break;
+    case EXCP_SMC:
+        target_el = 3;
+        break;
+    case EXCP_FIQ:
+    case EXCP_IRQ:
+    {
+        const uint64_t hcr_mask = excp_idx == EXCP_FIQ ? HCR_FMO : HCR_IMO;
+        const uint32_t scr_mask = excp_idx == EXCP_FIQ ? SCR_FIQ : SCR_IRQ;
+
+        target_el = 1;
+        if (!secure && (env->cp15.hcr_el2 & hcr_mask)) {
+            target_el = 2;
+        }
+        if (env->cp15.scr_el3 & scr_mask) {
+            target_el = 3;
+        }
+        break;
+    }
+    case EXCP_VIRQ:
+    case EXCP_VFIQ:
+        target_el = 1;
+        break;
+    default:
+        target_el = MAX(cur_el, 1);
+        break;
+    }
+    return target_el;
+}
+
 static void v7m_push(CPUARMState *env, uint32_t val)
 {
     CPUState *cs = CPU(arm_env_get_cpu(env));
@@ -50,6 +50,8 @@ DEF_HELPER_2(exception_internal, void, env, i32)
 DEF_HELPER_3(exception_with_syndrome, void, env, i32, i32)
 DEF_HELPER_1(wfi, void, env)
 DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_1(pre_hvc, void, env)
+DEF_HELPER_2(pre_smc, void, env, i32)
 
 DEF_HELPER_3(cpsr_write, void, env, i32, i32)
 DEF_HELPER_1(cpsr_read, i32, env)
@@ -53,6 +53,11 @@ static const char * const excnames[] = {
     [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
     [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
     [EXCP_STREX] = "QEMU intercept of STREX",
+    [EXCP_HVC] = "Hypervisor Call",
+    [EXCP_HYP_TRAP] = "Hypervisor Trap",
+    [EXCP_SMC] = "Secure Monitor Call",
+    [EXCP_VIRQ] = "Virtual IRQ",
+    [EXCP_VFIQ] = "Virtual FIQ",
 };
 
 static inline void arm_log_exception(int idx)
@@ -215,6 +220,16 @@ static inline uint32_t syn_aa64_svc(uint32_t imm16)
     return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
 }
 
+static inline uint32_t syn_aa64_hvc(uint32_t imm16)
+{
+    return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa64_smc(uint32_t imm16)
+{
+    return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
 static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb)
 {
     return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
@@ -313,6 +328,12 @@ static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
         | (cm << 8) | (wnr << 6) | 0x22;
 }
 
+static inline uint32_t syn_breakpoint(int same_el)
+{
+    return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+        | ARM_EL_IL | 0x22;
+}
+
 /* Update a QEMU watchpoint based on the information the guest has set in the
  * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
@@ -322,6 +343,15 @@ void hw_watchpoint_update(ARMCPU *cpu, int n);
 * suitable for use after migration or on reset.
 */
 void hw_watchpoint_update_all(ARMCPU *cpu);
+/* Update a QEMU breakpoint based on the information the guest has set in the
+ * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
+ */
+void hw_breakpoint_update(ARMCPU *cpu, int n);
+/* Update the QEMU breakpoints for every guest breakpoint. This does a
+ * complete delete-and-reinstate of the QEMU breakpoint list and so is
+ * suitable for use after migration or on reset.
+ */
+void hw_breakpoint_update_all(ARMCPU *cpu);
 
 /* Callback function for when a watchpoint or breakpoint triggers.  */
 void arm_debug_excp_handler(CPUState *cs);
@@ -214,6 +214,7 @@ static int cpu_post_load(void *opaque, int version_id)
         }
     }
 
+    hw_breakpoint_update_all(cpu);
     hw_watchpoint_update_all(cpu);
 
     return 0;
@@ -301,6 +301,17 @@ void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
 void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
 {
     const ARMCPRegInfo *ri = rip;
+
+    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
+        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
+        env->exception.syndrome = syndrome;
+        raise_exception(env, EXCP_UDEF);
+    }
+
+    if (!ri->accessfn) {
+        return;
+    }
+
     switch (ri->accessfn(env, ri)) {
     case CP_ACCESS_OK:
         return;
@@ -374,6 +385,63 @@ void HELPER(clear_pstate_ss)(CPUARMState *env)
     env->pstate &= ~PSTATE_SS;
 }
 
+void HELPER(pre_hvc)(CPUARMState *env)
+{
+    int cur_el = arm_current_pl(env);
+    /* FIXME: Use actual secure state.  */
+    bool secure = false;
+    bool undef;
+
+    /* We've already checked that EL2 exists at translation time.
+     * EL3.HCE has priority over EL2.HCD.
+     */
+    if (arm_feature(env, ARM_FEATURE_EL3)) {
+        undef = !(env->cp15.scr_el3 & SCR_HCE);
+    } else {
+        undef = env->cp15.hcr_el2 & HCR_HCD;
+    }
+
+    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
+     * For ARMv8/AArch64, HVC is allowed in EL3.
+     * Note that we've already trapped HVC from EL0 at translation
+     * time.
+     */
+    if (secure && (!is_a64(env) || cur_el == 1)) {
+        undef = true;
+    }
+
+    if (undef) {
+        env->exception.syndrome = syn_uncategorized();
+        raise_exception(env, EXCP_UDEF);
+    }
+}
+
+void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
+{
+    int cur_el = arm_current_pl(env);
+    /* FIXME: Use real secure state.  */
+    bool secure = false;
+    bool smd = env->cp15.scr_el3 & SCR_SMD;
+    /* On ARMv8 AArch32, SMD only applies to NS state.
+     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
+     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
+     * the EL2 condition here.
+     */
+    bool undef = is_a64(env) ? smd : (!secure && smd);
+
+    /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.  */
+    if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
+        env->exception.syndrome = syndrome;
+        raise_exception(env, EXCP_HYP_TRAP);
+    }
+
+    /* We've already checked that EL3 exists at translation time.  */
+    if (undef) {
+        env->exception.syndrome = syn_uncategorized();
+        raise_exception(env, EXCP_UDEF);
+    }
+}
+
 void HELPER(exception_return)(CPUARMState *env)
 {
     int cur_el = arm_current_pl(env);
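The pre_hvc() helper above boils down to a small enable check: when EL3 is implemented, SCR_EL3.HCE must be set for HVC to be legal, and without EL3 the HCR_EL2.HCD bit can disable it. The standalone sketch below (not QEMU code) restates that decision table.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative only: the HVC enable decision made by pre_hvc() above. */
    static bool hvc_is_undef(bool have_el3, bool scr_hce, bool hcr_hcd)
    {
        return have_el3 ? !scr_hce : hcr_hcd;
    }

    int main(void)
    {
        printf("%d\n", hvc_is_undef(true, false, false));   /* 1: HCE clear */
        printf("%d\n", hvc_is_undef(false, false, true));   /* 1: HCD set */
        printf("%d\n", hvc_is_undef(false, false, false));  /* 0: HVC allowed */
        return 0;
    }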
@@ -511,32 +579,43 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
     return false;
 }
 
-static bool wp_matches(ARMCPU *cpu, int n)
+static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
 {
     CPUARMState *env = &cpu->env;
-    uint64_t wcr = env->cp15.dbgwcr[n];
+    uint64_t cr;
     int pac, hmc, ssc, wt, lbn;
     /* TODO: check against CPU security state when we implement TrustZone */
     bool is_secure = false;
 
-    if (!env->cpu_watchpoint[n]
-        || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
-        return false;
-    }
+    if (is_wp) {
+        if (!env->cpu_watchpoint[n]
+            || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
+            return false;
+        }
+        cr = env->cp15.dbgwcr[n];
+    } else {
+        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
 
+        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
+            return false;
+        }
+        cr = env->cp15.dbgbcr[n];
+    }
     /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
-     * enabled and that the address and access type match; check the
-     * remaining fields, including linked breakpoints.
-     * Note that some combinations of {PAC, HMC SSC} are reserved and
+     * enabled and that the address and access type match; for breakpoints
+     * we know the address matched; check the remaining fields, including
+     * linked breakpoints. We rely on WCR and BCR having the same layout
+     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
+     * Note that some combinations of {PAC, HMC, SSC} are reserved and
      * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
-    pac = extract64(wcr, 1, 2);
-    hmc = extract64(wcr, 13, 1);
-    ssc = extract64(wcr, 14, 2);
+    pac = extract64(cr, 1, 2);
+    hmc = extract64(cr, 13, 1);
+    ssc = extract64(cr, 14, 2);
 
     switch (ssc) {
     case 0:
@@ -560,6 +639,7 @@ static bool wp_matches(ARMCPU *cpu, int n)
      * Implementing this would require reworking the core watchpoint code
      * to plumb the mmu_idx through to this point. Luckily Linux does not
      * rely on this behaviour currently.
+     * For breakpoints we do want to use the current CPU state.
      */
     switch (arm_current_pl(env)) {
     case 3:
@@ -582,8 +662,8 @@ static bool wp_matches(ARMCPU *cpu, int n)
         g_assert_not_reached();
     }
 
-    wt = extract64(wcr, 20, 1);
-    lbn = extract64(wcr, 16, 4);
+    wt = extract64(cr, 20, 1);
+    lbn = extract64(cr, 16, 4);
 
     if (wt && !linked_bp_matches(cpu, lbn)) {
         return false;
@@ -606,7 +686,28 @@ static bool check_watchpoints(ARMCPU *cpu)
     }
 
     for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
-        if (wp_matches(cpu, n)) {
+        if (bp_wp_matches(cpu, n, true)) {
             return true;
         }
     }
+    return false;
+}
+
+static bool check_breakpoints(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    int n;
+
+    /* If breakpoints are disabled globally or we can't take debug
+     * exceptions here then breakpoint firings are ignored.
+     */
+    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+        || !arm_generate_debug_exceptions(env)) {
+        return false;
+    }
+
+    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
+        if (bp_wp_matches(cpu, n, false)) {
+            return true;
+        }
+    }
@@ -641,6 +742,18 @@ void arm_debug_excp_handler(CPUState *cs)
                 cpu_resume_from_signal(cs, NULL);
             }
         }
+    } else {
+        if (check_breakpoints(cpu)) {
+            bool same_el = (arm_debug_target_el(env) == arm_current_pl(env));
+            env->exception.syndrome = syn_breakpoint(same_el);
+            if (extended_addresses_enabled(env)) {
+                env->exception.fsr = (1 << 9) | 0x22;
+            } else {
+                env->exception.fsr = 0x2;
+            }
+            /* FAR is UNKNOWN, so doesn't need setting */
+            raise_exception(env, EXCP_PREFETCH_ABORT);
+        }
     }
 }
 
|
||||
int opc = extract32(insn, 21, 3);
|
||||
int op2_ll = extract32(insn, 0, 5);
|
||||
int imm16 = extract32(insn, 5, 16);
|
||||
TCGv_i32 tmp;
|
||||
|
||||
switch (opc) {
|
||||
case 0:
|
||||
/* SVC, HVC, SMC; since we don't support the Virtualization
|
||||
* or TrustZone extensions these all UNDEF except SVC.
|
||||
*/
|
||||
if (op2_ll != 1) {
|
||||
unallocated_encoding(s);
|
||||
break;
|
||||
}
|
||||
/* For SVC, HVC and SMC we advance the single-step state
|
||||
* machine before taking the exception. This is architecturally
|
||||
* mandated, to ensure that single-stepping a system call
|
||||
* instruction works properly.
|
||||
*/
|
||||
gen_ss_advance(s);
|
||||
gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
|
||||
switch (op2_ll) {
|
||||
case 1:
|
||||
gen_ss_advance(s);
|
||||
gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
|
||||
break;
|
||||
case 2:
|
||||
if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_pl == 0) {
|
||||
unallocated_encoding(s);
|
||||
break;
|
||||
}
|
||||
/* The pre HVC helper handles cases when HVC gets trapped
|
||||
* as an undefined insn by runtime configuration.
|
||||
*/
|
||||
gen_a64_set_pc_im(s->pc - 4);
|
||||
gen_helper_pre_hvc(cpu_env);
|
||||
gen_ss_advance(s);
|
||||
gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16));
|
||||
break;
|
||||
case 3:
|
||||
if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->current_pl == 0) {
|
||||
unallocated_encoding(s);
|
||||
break;
|
||||
}
|
||||
gen_a64_set_pc_im(s->pc - 4);
|
||||
tmp = tcg_const_i32(syn_aa64_smc(imm16));
|
||||
gen_helper_pre_smc(cpu_env, tmp);
|
||||
tcg_temp_free_i32(tmp);
|
||||
gen_ss_advance(s);
|
||||
gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16));
|
||||
break;
|
||||
default:
|
||||
unallocated_encoding(s);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
if (op2_ll != 0) {
|
||||
|
@@ -7001,22 +7001,18 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
     const ARMCPRegInfo *ri;
 
     cpnum = (insn >> 8) & 0xf;
-    if (arm_feature(env, ARM_FEATURE_XSCALE)
-            && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
-        return 1;
 
-    /* First check for coprocessor space used for actual instructions */
-    switch (cpnum) {
-    case 0:
-    case 1:
-        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
-            return disas_iwmmxt_insn(env, s, insn);
-        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
-            return disas_dsp_insn(env, s, insn);
-        }
-        return 1;
-    default:
-        break;
+    /* First check for coprocessor space used for XScale/iwMMXt insns */
+    if (arm_feature(env, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
+        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
+            return 1;
+        }
+        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+            return disas_iwmmxt_insn(env, s, insn);
+        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+            return disas_dsp_insn(env, s, insn);
+        }
+        return 1;
     }
 
     /* Otherwise treat as a generic register access */
@@ -7049,9 +7045,12 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
         return 1;
     }
 
-    if (ri->accessfn) {
+    if (ri->accessfn ||
+        (arm_feature(env, ARM_FEATURE_XSCALE) && cpnum < 14)) {
         /* Emit code to perform further access permissions checks at
          * runtime; this may result in an exception.
+         * Note that on XScale all cp0..c13 registers do an access check
+         * call in order to handle c15_cpar.
          */
         TCGv_ptr tmpptr;
         TCGv_i32 tcg_syn;
@@ -7675,9 +7674,11 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
     } else if ((insn & 0x0e000f00) == 0x0c000100) {
         if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
             /* iWMMXt register transfer.  */
-            if (env->cp15.c15_cpar & (1 << 1))
-                if (!disas_iwmmxt_insn(env, s, insn))
+            if (extract32(s->c15_cpar, 1, 1)) {
+                if (!disas_iwmmxt_insn(env, s, insn)) {
                     return;
+                }
+            }
         }
     } else if ((insn & 0x0fe00000) == 0x0c400000) {
         /* Coprocessor double register transfer.  */
@@ -10942,6 +10943,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
     dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
     dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
     dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
+    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
     dc->cp_regs = cpu->cp_regs;
     dc->current_pl = arm_current_pl(env);
     dc->features = env->features;
@@ -52,6 +52,8 @@ typedef struct DisasContext {
     bool is_ldex;
     /* True if a single-step exception will be taken to the current EL */
     bool ss_same_el;
+    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
+    int c15_cpar;
 #define TMP_A64_MAX 16
     int tmp_a64_count;
     TCGv_i64 tmp_a64[TMP_A64_MAX];