602f6e42cf
Instead of gating the A32/T32 FP16 conversion instructions on the ARM_FEATURE_VFP_FP16 flag, switch to our new approach of looking at ID register bits. In this case MVFR1 fields FPHP and SIMDHP indicate the presence of these insns. This change doesn't alter behaviour for any of our CPUs.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190222170936.13268-2-peter.maydell@linaro.org
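The change described above amounts to testing an ID register field rather than a feature flag. The sketch below illustrates that style of check; it is not part of this file or of the patch, and the helper name and field positions are assumptions for illustration only (QEMU's real tests are the cpu_isar_feature() helpers in target/arm/cpu.h).

    #include <stdint.h>
    #include <stdbool.h>

    /*
     * Illustrative sketch only: decide whether the A32/T32 FP16 conversion
     * instructions are present by looking at an MVFR1 field instead of a
     * feature flag. Field positions here are assumptions.
     */
    static inline bool aa32_fp16_conv_present(uint32_t mvfr1)
    {
        /*
         * Nonzero FPHP indicates the VFP half-precision conversion
         * instructions; the Neon variants are indicated by SIMDHP in the
         * same register.
         */
        return ((mvfr1 >> 24) & 0xf) != 0;   /* MVFR1.FPHP, assumed bits [27:24] */
    }
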
/*
 * QEMU ARM CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "cpu.h"
#include "internals.h"
#include "qemu-common.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
#include "hw/arm/arm.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "kvm_arm.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"

static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        env->pc = value;
        env->thumb = 0;
    } else {
        env->regs[15] = value & ~1;
        env->thumb = value & 1;
    }
}

static void arm_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * It's OK to look at env for the current mode here, because it's
     * never possible for an AArch64 TB to chain to an AArch32 TB.
     */
    if (is_a64(env)) {
        env->pc = tb->pc;
    } else {
        env->regs[15] = tb->pc;
    }
}

static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
         | CPU_INTERRUPT_EXITTB);
}

void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}

void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}

static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}

static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}

/* CPUClass::reset() */
static void arm_cpu_reset(CPUState *s)
{
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    acc->parent_reset(s);

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
    s->halted = cpu->start_powered_off;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = 1;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys. */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Enable all PAC instructions */
        env->cp15.hcr_el2 |= HCR_API;
        env->cp15.scr_el3 |= SCR_API;
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
        /* and to the SVE instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
        env->cp15.cptr_el[3] |= CPTR_EZ;
        /* with maximum vector length */
        env->vfp.zcr_el[1] = cpu->sve_max_vq - 1;
        env->vfp.zcr_el[2] = env->vfp.zcr_el[1];
        env->vfp.zcr_el[3] = env->vfp.zcr_el[1];
        /*
         * Enable TBI0 and TBI1. While the real kernel only enables TBI0,
         * turning on both here will produce smaller code and otherwise
         * make no difference to the user-level emulation.
         */
        env->cp15.tcr_el[1].raw_tcr = (3ULL << 37);
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }
        env->pc = cpu->rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
#endif
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else

    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr(vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
    }

    /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }
        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);
}

bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cs->env_ptr;
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint32_t target_el;
    uint32_t excp_idx;
    bool ret = false;

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }

    return ret;
}

#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    bool ret = false;

    /* ARMv7-M interrupt masking works differently than -A or -R.
     * There is no FIQ/IRQ distinction. Instead of I and F bits
     * masking FIQ and IRQ interrupts, an exception is taken only
     * if it is higher priority than the current execution priority
     * (which depends on state like BASEPRI, FAULTMASK and the
     * currently active exception).
     */
    if (interrupt_request & CPU_INTERRUPT_HARD
        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
        cs->exception_index = EXCP_IRQ;
        cc->do_interrupt(cs);
        ret = true;
    }
    return ret;
}
#endif

void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}

void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}

#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        assert(arm_feature(env, ARM_FEATURE_EL2));
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        assert(arm_feature(env, ARM_FEATURE_EL2));
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    int kvm_irq = KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT;
    uint32_t linestate_bit;

    switch (irq) {
    case ARM_CPU_IRQ:
        kvm_irq |= KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        kvm_irq |= KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }

    kvm_irq |= cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT;
    kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
#endif
}

static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}

#endif

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1ULL << feature;
}

static inline void unset_feature(CPUARMState *env, int feature)
{
    env->features &= ~(1ULL << feature);
}

static int
print_insn_thumb1(bfd_vma pc, disassemble_info *info)
{
    return print_insn_arm(pc | 1, info);
}

static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        /* We might not be compiled with the A64 disassembler
         * because it needs a C++ compiler. Leave print_insn
         * unset in this case to use the caller default behaviour.
         */
#if defined(CONFIG_ARM_A64_DIS)
        info->print_insn = print_insn_arm_a64;
#endif
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->print_insn = print_insn_thumb1;
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->print_insn = print_insn_arm;
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#ifdef TARGET_WORDS_BIGENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}

static void cpreg_hashtable_data_destroy(gpointer data)
{
    /*
     * Destroy function for cpu->cp_regs hashtable data entries.
     * We must free the name string because it was g_strdup()ed in
     * add_cpreg_to_hashtable(). It's OK to cast away the 'const'
     * from r->name because we know we definitely allocated it.
     */
    ARMCPRegInfo *r = data;

    g_free((void *)r->name);
    g_free(r);
}

static void arm_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    ARMCPU *cpu = ARM_CPU(obj);

    cs->env_ptr = &cpu->env;
    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
                                         g_free, cpreg_hashtable_data_destroy);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifndef CONFIG_USER_ONLY
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = 1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled()) {
        cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
    }
}

static Property arm_cpu_reset_cbar_property =
|
|
DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
|
|
|
|
static Property arm_cpu_reset_hivecs_property =
|
|
DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
|
|
|
|
static Property arm_cpu_rvbar_property =
|
|
DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
|
|
|
|
static Property arm_cpu_has_el2_property =
|
|
DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
|
|
|
|
static Property arm_cpu_has_el3_property =
|
|
DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
|
|
|
|
static Property arm_cpu_cfgend_property =
|
|
DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
|
|
|
|
/* use property name "pmu" to match other archs and virt tools */
|
|
static Property arm_cpu_has_pmu_property =
|
|
DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
|
|
|
|
static Property arm_cpu_has_mpu_property =
|
|
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
|
|
|
|
/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
|
|
* because the CPU initfn will have already set cpu->pmsav7_dregion to
|
|
* the right value for that particular CPU type, and we don't want
|
|
* to override that with an incorrect constant value.
|
|
*/
|
|
static Property arm_cpu_pmsav7_dregion_property =
|
|
DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
|
|
pmsav7_dregion,
|
|
qdev_prop_uint32, uint32_t);
|
|
|
|
static void arm_get_init_svtor(Object *obj, Visitor *v, const char *name,
|
|
void *opaque, Error **errp)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
visit_type_uint32(v, name, &cpu->init_svtor, errp);
|
|
}
|
|
|
|
static void arm_set_init_svtor(Object *obj, Visitor *v, const char *name,
|
|
void *opaque, Error **errp)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
visit_type_uint32(v, name, &cpu->init_svtor, errp);
|
|
}
|
|
|
|
void arm_cpu_post_init(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
/* M profile implies PMSA. We have to do this here rather than
|
|
* in realize with the other feature-implication checks because
|
|
* we look at the PMSA bit to see if we should add some properties.
|
|
*/
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
|
|
set_feature(&cpu->env, ARM_FEATURE_PMSA);
|
|
}
|
|
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
|
|
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
|
|
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
|
|
&error_abort);
|
|
}
|
|
|
|
if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
|
|
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
|
|
&error_abort);
|
|
}
|
|
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
|
|
qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property,
|
|
&error_abort);
|
|
}
|
|
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
|
|
/* Add the has_el3 state CPU property only if EL3 is allowed. This will
|
|
* prevent "has_el3" from existing on CPUs which cannot support EL3.
|
|
*/
|
|
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
|
|
&error_abort);
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
object_property_add_link(obj, "secure-memory",
|
|
TYPE_MEMORY_REGION,
|
|
(Object **)&cpu->secure_memory,
|
|
qdev_prop_allow_set_link_before_realize,
|
|
OBJ_PROP_LINK_STRONG,
|
|
&error_abort);
|
|
#endif
|
|
}
|
|
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
|
|
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property,
|
|
&error_abort);
|
|
}
|
|
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
|
|
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
|
|
&error_abort);
|
|
}
|
|
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
|
|
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
|
|
&error_abort);
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
|
|
qdev_property_add_static(DEVICE(obj),
|
|
&arm_cpu_pmsav7_dregion_property,
|
|
&error_abort);
|
|
}
|
|
}
|
|
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
|
|
object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
|
|
qdev_prop_allow_set_link_before_realize,
|
|
OBJ_PROP_LINK_STRONG,
|
|
&error_abort);
|
|
/*
|
|
* M profile: initial value of the Secure VTOR. We can't just use
|
|
* a simple DEFINE_PROP_UINT32 for this because we want to permit
|
|
* the property to be set after realize.
|
|
*/
|
|
object_property_add(obj, "init-svtor", "uint32",
|
|
arm_get_init_svtor, arm_set_init_svtor,
|
|
NULL, NULL, &error_abort);
|
|
}
|
|
|
|
qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property,
|
|
&error_abort);
|
|
}
|
|
|
|
static void arm_cpu_finalizefn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
ARMELChangeHook *hook, *next;
|
|
|
|
g_hash_table_destroy(cpu->cp_regs);
|
|
|
|
QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
|
|
QLIST_REMOVE(hook, node);
|
|
g_free(hook);
|
|
}
|
|
QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
|
|
QLIST_REMOVE(hook, node);
|
|
g_free(hook);
|
|
}
|
|
#ifndef CONFIG_USER_ONLY
|
|
if (cpu->pmu_timer) {
|
|
timer_del(cpu->pmu_timer);
|
|
timer_deinit(cpu->pmu_timer);
|
|
timer_free(cpu->pmu_timer);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
|
|
{
|
|
CPUState *cs = CPU(dev);
|
|
ARMCPU *cpu = ARM_CPU(dev);
|
|
ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
|
|
CPUARMState *env = &cpu->env;
|
|
int pagebits;
|
|
Error *local_err = NULL;
|
|
bool no_aa32 = false;
|
|
|
|
/* If we needed to query the host kernel for the CPU features
|
|
* then it's possible that might have failed in the initfn, but
|
|
* this is the first point where we can report it.
|
|
*/
|
|
if (cpu->host_cpu_probe_failed) {
|
|
if (!kvm_enabled()) {
|
|
error_setg(errp, "The 'host' CPU type can only be used with KVM");
|
|
} else {
|
|
error_setg(errp, "Failed to retrieve host CPU features");
|
|
}
|
|
return;
|
|
}
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
/* The NVIC and M-profile CPU are two halves of a single piece of
|
|
* hardware; trying to use one without the other is a command line
|
|
* error and will result in segfaults if not caught here.
|
|
*/
|
|
if (arm_feature(env, ARM_FEATURE_M)) {
|
|
if (!env->nvic) {
|
|
error_setg(errp, "This board cannot be used with Cortex-M CPUs");
|
|
return;
|
|
}
|
|
} else {
|
|
if (env->nvic) {
|
|
error_setg(errp, "This board can only be used with Cortex-M CPUs");
|
|
return;
|
|
}
|
|
}
|
|
|
|
cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
|
|
arm_gt_ptimer_cb, cpu);
|
|
cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
|
|
arm_gt_vtimer_cb, cpu);
|
|
cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
|
|
arm_gt_htimer_cb, cpu);
|
|
cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
|
|
arm_gt_stimer_cb, cpu);
|
|
#endif
|
|
|
|
cpu_exec_realizefn(cs, &local_err);
|
|
if (local_err != NULL) {
|
|
error_propagate(errp, local_err);
|
|
return;
|
|
}
|
|
|
|
/* Some features automatically imply others: */
|
|
if (arm_feature(env, ARM_FEATURE_V8)) {
|
|
if (arm_feature(env, ARM_FEATURE_M)) {
|
|
set_feature(env, ARM_FEATURE_V7);
|
|
} else {
|
|
set_feature(env, ARM_FEATURE_V7VE);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* There exist AArch64 cpus without AArch32 support. When KVM
|
|
* queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
|
|
* Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
|
|
*/
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
|
|
no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
|
|
}
|
|
|
|
if (arm_feature(env, ARM_FEATURE_V7VE)) {
|
|
/* v7 Virtualization Extensions. In real hardware this implies
|
|
* EL2 and also the presence of the Security Extensions.
|
|
* For QEMU, for backwards-compatibility we implement some
|
|
* CPUs or CPU configs which have no actual EL2 or EL3 but do
|
|
* include the various other features that V7VE implies.
|
|
* Presence of EL2 itself is ARM_FEATURE_EL2, and of the
|
|
* Security Extensions is ARM_FEATURE_EL3.
|
|
*/
|
|
assert(no_aa32 || cpu_isar_feature(arm_div, cpu));
|
|
set_feature(env, ARM_FEATURE_LPAE);
|
|
set_feature(env, ARM_FEATURE_V7);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_V7)) {
|
|
set_feature(env, ARM_FEATURE_VAPA);
|
|
set_feature(env, ARM_FEATURE_THUMB2);
|
|
set_feature(env, ARM_FEATURE_MPIDR);
|
|
if (!arm_feature(env, ARM_FEATURE_M)) {
|
|
set_feature(env, ARM_FEATURE_V6K);
|
|
} else {
|
|
set_feature(env, ARM_FEATURE_V6);
|
|
}
|
|
|
|
/* Always define VBAR for V7 CPUs even if it doesn't exist in
|
|
* non-EL3 configs. This is needed by some legacy boards.
|
|
*/
|
|
set_feature(env, ARM_FEATURE_VBAR);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_V6K)) {
|
|
set_feature(env, ARM_FEATURE_V6);
|
|
set_feature(env, ARM_FEATURE_MVFR);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_V6)) {
|
|
set_feature(env, ARM_FEATURE_V5);
|
|
if (!arm_feature(env, ARM_FEATURE_M)) {
|
|
assert(no_aa32 || cpu_isar_feature(jazelle, cpu));
|
|
set_feature(env, ARM_FEATURE_AUXCR);
|
|
}
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_V5)) {
|
|
set_feature(env, ARM_FEATURE_V4T);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_VFP4)) {
|
|
set_feature(env, ARM_FEATURE_VFP3);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_VFP3)) {
|
|
set_feature(env, ARM_FEATURE_VFP);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_LPAE)) {
|
|
set_feature(env, ARM_FEATURE_V7MP);
|
|
set_feature(env, ARM_FEATURE_PXN);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
|
|
set_feature(env, ARM_FEATURE_CBAR);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_THUMB2) &&
|
|
!arm_feature(env, ARM_FEATURE_M)) {
|
|
set_feature(env, ARM_FEATURE_THUMB_DSP);
|
|
}
|
|
|
|
if (arm_feature(env, ARM_FEATURE_V7) &&
|
|
!arm_feature(env, ARM_FEATURE_M) &&
|
|
!arm_feature(env, ARM_FEATURE_PMSA)) {
|
|
/* v7VMSA drops support for the old ARMv5 tiny pages, so we
|
|
* can use 4K pages.
|
|
*/
|
|
pagebits = 12;
|
|
} else {
|
|
/* For CPUs which might have tiny 1K pages, or which have an
|
|
* MPU and might have small region sizes, stick with 1K pages.
|
|
*/
|
|
pagebits = 10;
|
|
}
|
|
if (!set_preferred_target_page_bits(pagebits)) {
|
|
/* This can only ever happen for hotplugging a CPU, or if
|
|
* the board code incorrectly creates a CPU which it has
|
|
* promised via minimum_page_size that it will not.
|
|
*/
|
|
error_setg(errp, "This CPU requires a smaller page size than the "
|
|
"system is using");
|
|
return;
|
|
}
|
|
|
|
/* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
|
|
* We don't support setting cluster ID ([16..23]) (known as Aff2
|
|
* in later ARM ARM versions), or any of the higher affinity level fields,
|
|
* so these bits always RAZ.
|
|
*/
|
|
if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
|
|
cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
|
|
ARM_DEFAULT_CPUS_PER_CLUSTER);
|
|
}
|
|
|
|
if (cpu->reset_hivecs) {
|
|
cpu->reset_sctlr |= (1 << 13);
|
|
}
|
|
|
|
if (cpu->cfgend) {
|
|
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
|
|
cpu->reset_sctlr |= SCTLR_EE;
|
|
} else {
|
|
cpu->reset_sctlr |= SCTLR_B;
|
|
}
|
|
}
|
|
|
|
if (!cpu->has_el3) {
|
|
/* If the has_el3 CPU property is disabled then we need to disable the
|
|
* feature.
|
|
*/
|
|
unset_feature(env, ARM_FEATURE_EL3);
|
|
|
|
/* Disable the security extension feature bits in the processor feature
|
|
* registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
|
|
*/
|
|
cpu->id_pfr1 &= ~0xf0;
|
|
cpu->isar.id_aa64pfr0 &= ~0xf000;
|
|
}
|
|
|
|
if (!cpu->has_el2) {
|
|
unset_feature(env, ARM_FEATURE_EL2);
|
|
}
|
|
|
|
if (!cpu->has_pmu) {
|
|
unset_feature(env, ARM_FEATURE_PMU);
|
|
}
|
|
if (arm_feature(env, ARM_FEATURE_PMU)) {
|
|
pmu_init(cpu);
|
|
|
|
if (!kvm_enabled()) {
|
|
arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
|
|
arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
|
|
}
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
|
|
cpu);
|
|
#endif
|
|
} else {
|
|
cpu->id_aa64dfr0 &= ~0xf00;
|
|
cpu->pmceid0 = 0;
|
|
cpu->pmceid1 = 0;
|
|
}
|
|
|
|
if (!arm_feature(env, ARM_FEATURE_EL2)) {
|
|
/* Disable the hypervisor feature bits in the processor feature
|
|
* registers if we don't have EL2. These are id_pfr1[15:12] and
|
|
* id_aa64pfr0_el1[11:8].
|
|
*/
|
|
cpu->isar.id_aa64pfr0 &= ~0xf00;
|
|
cpu->id_pfr1 &= ~0xf000;
|
|
}
|
|
|
|
/* MPU can be configured out of a PMSA CPU either by setting has-mpu
|
|
* to false or by setting pmsav7-dregion to 0.
|
|
*/
|
|
if (!cpu->has_mpu) {
|
|
cpu->pmsav7_dregion = 0;
|
|
}
|
|
if (cpu->pmsav7_dregion == 0) {
|
|
cpu->has_mpu = false;
|
|
}
|
|
|
|
if (arm_feature(env, ARM_FEATURE_PMSA) &&
|
|
arm_feature(env, ARM_FEATURE_V7)) {
|
|
uint32_t nr = cpu->pmsav7_dregion;
|
|
|
|
if (nr > 0xff) {
|
|
error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
|
|
return;
|
|
}
|
|
|
|
if (nr) {
|
|
if (arm_feature(env, ARM_FEATURE_V8)) {
|
|
/* PMSAv8 */
|
|
env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
|
|
env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
|
|
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
|
|
env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
|
|
env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
|
|
}
|
|
} else {
|
|
env->pmsav7.drbar = g_new0(uint32_t, nr);
|
|
env->pmsav7.drsr = g_new0(uint32_t, nr);
|
|
env->pmsav7.dracr = g_new0(uint32_t, nr);
|
|
}
|
|
}
|
|
}
|
|
|
|
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
|
|
uint32_t nr = cpu->sau_sregion;
|
|
|
|
if (nr > 0xff) {
|
|
error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
|
|
return;
|
|
}
|
|
|
|
if (nr) {
|
|
env->sau.rbar = g_new0(uint32_t, nr);
|
|
env->sau.rlar = g_new0(uint32_t, nr);
|
|
}
|
|
}
|
|
|
|
if (arm_feature(env, ARM_FEATURE_EL3)) {
|
|
set_feature(env, ARM_FEATURE_VBAR);
|
|
}
|
|
|
|
register_cp_regs_for_features(cpu);
|
|
arm_cpu_register_gdb_regs_for_features(cpu);
|
|
|
|
init_cpreg_list(cpu);
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) {
|
|
cs->num_ases = 2;
|
|
|
|
if (!cpu->secure_memory) {
|
|
cpu->secure_memory = cs->memory;
|
|
}
|
|
cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
|
|
cpu->secure_memory);
|
|
} else {
|
|
cs->num_ases = 1;
|
|
}
|
|
cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
|
|
|
|
/* No core_count specified, default to smp_cpus. */
|
|
if (cpu->core_count == -1) {
|
|
cpu->core_count = smp_cpus;
|
|
}
|
|
#endif
|
|
|
|
qemu_init_vcpu(cs);
|
|
cpu_reset(cs);
|
|
|
|
acc->parent_realize(dev, errp);
|
|
}
|
|
|
|
static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
|
|
{
|
|
ObjectClass *oc;
|
|
char *typename;
|
|
char **cpuname;
|
|
const char *cpunamestr;
|
|
|
|
cpuname = g_strsplit(cpu_model, ",", 1);
|
|
cpunamestr = cpuname[0];
|
|
#ifdef CONFIG_USER_ONLY
|
|
/* For backwards compatibility usermode emulation allows "-cpu any",
|
|
* which has the same semantics as "-cpu max".
|
|
*/
|
|
if (!strcmp(cpunamestr, "any")) {
|
|
cpunamestr = "max";
|
|
}
|
|
#endif
|
|
typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
|
|
oc = object_class_by_name(typename);
|
|
g_strfreev(cpuname);
|
|
g_free(typename);
|
|
if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
|
|
object_class_is_abstract(oc)) {
|
|
return NULL;
|
|
}
|
|
return oc;
|
|
}
|
|
|
|
/* CPU models. These are not needed for the AArch64 linux-user build. */
|
|
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
|
|
|
|
static void arm926_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cpu->dtb_compatible = "arm,arm926";
|
|
set_feature(&cpu->env, ARM_FEATURE_V5);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP);
|
|
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
|
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
|
|
cpu->midr = 0x41069265;
|
|
cpu->reset_fpsid = 0x41011090;
|
|
cpu->ctr = 0x1dd20d2;
|
|
cpu->reset_sctlr = 0x00090078;
|
|
|
|
/*
|
|
* ARMv5 does not have the ID_ISAR registers, but we can still
|
|
* set the field to indicate Jazelle support within QEMU.
|
|
*/
|
|
cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
|
|
}
|
|
|
|
static void arm946_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cpu->dtb_compatible = "arm,arm946";
|
|
set_feature(&cpu->env, ARM_FEATURE_V5);
|
|
set_feature(&cpu->env, ARM_FEATURE_PMSA);
|
|
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
|
cpu->midr = 0x41059461;
|
|
cpu->ctr = 0x0f004006;
|
|
cpu->reset_sctlr = 0x00000078;
|
|
}
|
|
|
|
static void arm1026_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cpu->dtb_compatible = "arm,arm1026";
|
|
set_feature(&cpu->env, ARM_FEATURE_V5);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP);
|
|
set_feature(&cpu->env, ARM_FEATURE_AUXCR);
|
|
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
|
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
|
|
cpu->midr = 0x4106a262;
|
|
cpu->reset_fpsid = 0x410110a0;
|
|
cpu->ctr = 0x1dd20d2;
|
|
cpu->reset_sctlr = 0x00090078;
|
|
cpu->reset_auxcr = 1;
|
|
|
|
/*
|
|
* ARMv5 does not have the ID_ISAR registers, but we can still
|
|
* set the field to indicate Jazelle support within QEMU.
|
|
*/
|
|
cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
|
|
|
|
{
|
|
/* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
|
|
ARMCPRegInfo ifar = {
|
|
.name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
|
|
.access = PL1_RW,
|
|
.fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
|
|
.resetvalue = 0
|
|
};
|
|
define_one_arm_cp_reg(cpu, &ifar);
|
|
}
|
|
}
|
|
|
|
static void arm1136_r2_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
/* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
|
|
* older core than plain "arm1136". In particular this does not
|
|
* have the v6K features.
|
|
* These ID register values are correct for 1136 but may be wrong
|
|
* for 1136_r2 (in particular r0p2 does not actually implement most
|
|
* of the ID registers).
|
|
*/
|
|
|
|
cpu->dtb_compatible = "arm,arm1136";
|
|
set_feature(&cpu->env, ARM_FEATURE_V6);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP);
|
|
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
|
set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
|
|
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
|
|
cpu->midr = 0x4107b362;
|
|
cpu->reset_fpsid = 0x410120b4;
|
|
cpu->isar.mvfr0 = 0x11111111;
|
|
cpu->isar.mvfr1 = 0x00000000;
|
|
cpu->ctr = 0x1dd20d2;
|
|
cpu->reset_sctlr = 0x00050078;
|
|
cpu->id_pfr0 = 0x111;
|
|
cpu->id_pfr1 = 0x1;
|
|
cpu->id_dfr0 = 0x2;
|
|
cpu->id_afr0 = 0x3;
|
|
cpu->id_mmfr0 = 0x01130003;
|
|
cpu->id_mmfr1 = 0x10030302;
|
|
cpu->id_mmfr2 = 0x01222110;
|
|
cpu->isar.id_isar0 = 0x00140011;
|
|
cpu->isar.id_isar1 = 0x12002111;
|
|
cpu->isar.id_isar2 = 0x11231111;
|
|
cpu->isar.id_isar3 = 0x01102131;
|
|
cpu->isar.id_isar4 = 0x141;
|
|
cpu->reset_auxcr = 7;
|
|
}
|
|
|
|
static void arm1136_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cpu->dtb_compatible = "arm,arm1136";
|
|
set_feature(&cpu->env, ARM_FEATURE_V6K);
|
|
set_feature(&cpu->env, ARM_FEATURE_V6);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP);
|
|
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
|
set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
|
|
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
|
|
cpu->midr = 0x4117b363;
|
|
cpu->reset_fpsid = 0x410120b4;
|
|
cpu->isar.mvfr0 = 0x11111111;
|
|
cpu->isar.mvfr1 = 0x00000000;
|
|
cpu->ctr = 0x1dd20d2;
|
|
cpu->reset_sctlr = 0x00050078;
|
|
cpu->id_pfr0 = 0x111;
|
|
cpu->id_pfr1 = 0x1;
|
|
cpu->id_dfr0 = 0x2;
|
|
cpu->id_afr0 = 0x3;
|
|
cpu->id_mmfr0 = 0x01130003;
|
|
cpu->id_mmfr1 = 0x10030302;
|
|
cpu->id_mmfr2 = 0x01222110;
|
|
cpu->isar.id_isar0 = 0x00140011;
|
|
cpu->isar.id_isar1 = 0x12002111;
|
|
cpu->isar.id_isar2 = 0x11231111;
|
|
cpu->isar.id_isar3 = 0x01102131;
|
|
cpu->isar.id_isar4 = 0x141;
|
|
cpu->reset_auxcr = 7;
|
|
}
|
|
|
|
static void arm1176_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cpu->dtb_compatible = "arm,arm1176";
|
|
set_feature(&cpu->env, ARM_FEATURE_V6K);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP);
|
|
set_feature(&cpu->env, ARM_FEATURE_VAPA);
|
|
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
|
set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
|
|
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
|
|
set_feature(&cpu->env, ARM_FEATURE_EL3);
|
|
cpu->midr = 0x410fb767;
|
|
cpu->reset_fpsid = 0x410120b5;
|
|
cpu->isar.mvfr0 = 0x11111111;
|
|
cpu->isar.mvfr1 = 0x00000000;
|
|
cpu->ctr = 0x1dd20d2;
|
|
cpu->reset_sctlr = 0x00050078;
|
|
cpu->id_pfr0 = 0x111;
|
|
cpu->id_pfr1 = 0x11;
|
|
cpu->id_dfr0 = 0x33;
|
|
cpu->id_afr0 = 0;
|
|
cpu->id_mmfr0 = 0x01130003;
|
|
cpu->id_mmfr1 = 0x10030302;
|
|
cpu->id_mmfr2 = 0x01222100;
|
|
cpu->isar.id_isar0 = 0x0140011;
|
|
cpu->isar.id_isar1 = 0x12002111;
|
|
cpu->isar.id_isar2 = 0x11231121;
|
|
cpu->isar.id_isar3 = 0x01102131;
|
|
cpu->isar.id_isar4 = 0x01141;
|
|
cpu->reset_auxcr = 7;
|
|
}
|
|
|
|
static void arm11mpcore_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cpu->dtb_compatible = "arm,arm11mpcore";
|
|
set_feature(&cpu->env, ARM_FEATURE_V6K);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP);
|
|
set_feature(&cpu->env, ARM_FEATURE_VAPA);
|
|
set_feature(&cpu->env, ARM_FEATURE_MPIDR);
|
|
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
|
cpu->midr = 0x410fb022;
|
|
cpu->reset_fpsid = 0x410120b4;
|
|
cpu->isar.mvfr0 = 0x11111111;
|
|
cpu->isar.mvfr1 = 0x00000000;
|
|
cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
|
|
cpu->id_pfr0 = 0x111;
|
|
cpu->id_pfr1 = 0x1;
|
|
cpu->id_dfr0 = 0;
|
|
cpu->id_afr0 = 0x2;
|
|
cpu->id_mmfr0 = 0x01100103;
|
|
cpu->id_mmfr1 = 0x10020302;
|
|
cpu->id_mmfr2 = 0x01222000;
|
|
cpu->isar.id_isar0 = 0x00100011;
|
|
cpu->isar.id_isar1 = 0x12002111;
|
|
cpu->isar.id_isar2 = 0x11221011;
|
|
cpu->isar.id_isar3 = 0x01102131;
|
|
cpu->isar.id_isar4 = 0x141;
|
|
cpu->reset_auxcr = 1;
|
|
}
|
|
|
|
static void cortex_m0_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
set_feature(&cpu->env, ARM_FEATURE_V6);
|
|
set_feature(&cpu->env, ARM_FEATURE_M);
|
|
|
|
cpu->midr = 0x410cc200;
|
|
}
|
|
|
|
static void cortex_m3_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
set_feature(&cpu->env, ARM_FEATURE_V7);
|
|
set_feature(&cpu->env, ARM_FEATURE_M);
|
|
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
|
|
cpu->midr = 0x410fc231;
|
|
cpu->pmsav7_dregion = 8;
|
|
cpu->id_pfr0 = 0x00000030;
|
|
cpu->id_pfr1 = 0x00000200;
|
|
cpu->id_dfr0 = 0x00100000;
|
|
cpu->id_afr0 = 0x00000000;
|
|
cpu->id_mmfr0 = 0x00000030;
|
|
cpu->id_mmfr1 = 0x00000000;
|
|
cpu->id_mmfr2 = 0x00000000;
|
|
cpu->id_mmfr3 = 0x00000000;
|
|
cpu->isar.id_isar0 = 0x01141110;
|
|
cpu->isar.id_isar1 = 0x02111000;
|
|
cpu->isar.id_isar2 = 0x21112231;
|
|
cpu->isar.id_isar3 = 0x01111110;
|
|
cpu->isar.id_isar4 = 0x01310102;
|
|
cpu->isar.id_isar5 = 0x00000000;
|
|
cpu->isar.id_isar6 = 0x00000000;
|
|
}
|
|
|
|
static void cortex_m4_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
set_feature(&cpu->env, ARM_FEATURE_V7);
|
|
set_feature(&cpu->env, ARM_FEATURE_M);
|
|
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
|
|
set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
|
|
cpu->midr = 0x410fc240; /* r0p0 */
|
|
cpu->pmsav7_dregion = 8;
|
|
cpu->id_pfr0 = 0x00000030;
|
|
cpu->id_pfr1 = 0x00000200;
|
|
cpu->id_dfr0 = 0x00100000;
|
|
cpu->id_afr0 = 0x00000000;
|
|
cpu->id_mmfr0 = 0x00000030;
|
|
cpu->id_mmfr1 = 0x00000000;
|
|
cpu->id_mmfr2 = 0x00000000;
|
|
cpu->id_mmfr3 = 0x00000000;
|
|
cpu->isar.id_isar0 = 0x01141110;
|
|
cpu->isar.id_isar1 = 0x02111000;
|
|
cpu->isar.id_isar2 = 0x21112231;
|
|
cpu->isar.id_isar3 = 0x01111110;
|
|
cpu->isar.id_isar4 = 0x01310102;
|
|
cpu->isar.id_isar5 = 0x00000000;
|
|
cpu->isar.id_isar6 = 0x00000000;
|
|
}
|
|
|
|
static void cortex_m33_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
set_feature(&cpu->env, ARM_FEATURE_V8);
|
|
set_feature(&cpu->env, ARM_FEATURE_M);
|
|
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
|
|
set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
|
|
set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
|
|
cpu->midr = 0x410fd213; /* r0p3 */
|
|
cpu->pmsav7_dregion = 16;
|
|
cpu->sau_sregion = 8;
|
|
cpu->id_pfr0 = 0x00000030;
|
|
cpu->id_pfr1 = 0x00000210;
|
|
cpu->id_dfr0 = 0x00200000;
|
|
cpu->id_afr0 = 0x00000000;
|
|
cpu->id_mmfr0 = 0x00101F40;
|
|
cpu->id_mmfr1 = 0x00000000;
|
|
cpu->id_mmfr2 = 0x01000000;
|
|
cpu->id_mmfr3 = 0x00000000;
|
|
cpu->isar.id_isar0 = 0x01101110;
|
|
cpu->isar.id_isar1 = 0x02212000;
|
|
cpu->isar.id_isar2 = 0x20232232;
|
|
cpu->isar.id_isar3 = 0x01111131;
|
|
cpu->isar.id_isar4 = 0x01310132;
|
|
cpu->isar.id_isar5 = 0x00000000;
|
|
cpu->isar.id_isar6 = 0x00000000;
|
|
cpu->clidr = 0x00000000;
|
|
cpu->ctr = 0x8000c000;
|
|
}
|
|
|
|
static void arm_v7m_class_init(ObjectClass *oc, void *data)
|
|
{
|
|
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
|
|
CPUClass *cc = CPU_CLASS(oc);
|
|
|
|
acc->info = data;
|
|
#ifndef CONFIG_USER_ONLY
|
|
cc->do_interrupt = arm_v7m_cpu_do_interrupt;
|
|
#endif
|
|
|
|
cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
|
|
}
|
|
|
|
static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
|
|
/* Dummy the TCM region regs for the moment */
|
|
{ .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
|
|
.access = PL1_RW, .type = ARM_CP_CONST },
|
|
{ .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
|
|
.access = PL1_RW, .type = ARM_CP_CONST },
|
|
{ .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
|
|
.opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
|
|
REGINFO_SENTINEL
|
|
};
|
|
|
|
static void cortex_r5_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
set_feature(&cpu->env, ARM_FEATURE_V7);
|
|
set_feature(&cpu->env, ARM_FEATURE_V7MP);
|
|
set_feature(&cpu->env, ARM_FEATURE_PMSA);
|
|
cpu->midr = 0x411fc153; /* r1p3 */
|
|
cpu->id_pfr0 = 0x0131;
|
|
cpu->id_pfr1 = 0x001;
|
|
cpu->id_dfr0 = 0x010400;
|
|
cpu->id_afr0 = 0x0;
|
|
cpu->id_mmfr0 = 0x0210030;
|
|
cpu->id_mmfr1 = 0x00000000;
|
|
cpu->id_mmfr2 = 0x01200000;
|
|
cpu->id_mmfr3 = 0x0211;
|
|
cpu->isar.id_isar0 = 0x02101111;
|
|
cpu->isar.id_isar1 = 0x13112111;
|
|
cpu->isar.id_isar2 = 0x21232141;
|
|
cpu->isar.id_isar3 = 0x01112131;
|
|
cpu->isar.id_isar4 = 0x0010142;
|
|
cpu->isar.id_isar5 = 0x0;
|
|
cpu->isar.id_isar6 = 0x0;
|
|
cpu->mp_is_up = true;
|
|
cpu->pmsav7_dregion = 16;
|
|
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
|
|
}
|
|
|
|
static void cortex_r5f_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cortex_r5_initfn(obj);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP3);
|
|
}
|
|
|
|
static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
|
|
{ .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
|
|
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
|
|
{ .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
|
|
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
|
|
REGINFO_SENTINEL
|
|
};
|
|
|
|
static void cortex_a8_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cpu->dtb_compatible = "arm,cortex-a8";
|
|
set_feature(&cpu->env, ARM_FEATURE_V7);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP3);
|
|
set_feature(&cpu->env, ARM_FEATURE_NEON);
|
|
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
|
|
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
|
set_feature(&cpu->env, ARM_FEATURE_EL3);
|
|
cpu->midr = 0x410fc080;
|
|
cpu->reset_fpsid = 0x410330c0;
|
|
cpu->isar.mvfr0 = 0x11110222;
|
|
cpu->isar.mvfr1 = 0x00011111;
|
|
cpu->ctr = 0x82048004;
|
|
cpu->reset_sctlr = 0x00c50078;
|
|
cpu->id_pfr0 = 0x1031;
|
|
cpu->id_pfr1 = 0x11;
|
|
cpu->id_dfr0 = 0x400;
|
|
cpu->id_afr0 = 0;
|
|
cpu->id_mmfr0 = 0x31100003;
|
|
cpu->id_mmfr1 = 0x20000000;
|
|
cpu->id_mmfr2 = 0x01202000;
|
|
cpu->id_mmfr3 = 0x11;
|
|
cpu->isar.id_isar0 = 0x00101111;
|
|
cpu->isar.id_isar1 = 0x12112111;
|
|
cpu->isar.id_isar2 = 0x21232031;
|
|
cpu->isar.id_isar3 = 0x11112131;
|
|
cpu->isar.id_isar4 = 0x00111142;
|
|
cpu->dbgdidr = 0x15141000;
|
|
cpu->clidr = (1 << 27) | (2 << 24) | 3;
|
|
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
|
|
cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
|
|
cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
|
|
cpu->reset_auxcr = 2;
|
|
define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
|
|
}
|
|
|
|
static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
|
|
/* power_control should be set to maximum latency. Again,
|
|
* default to 0 and set by private hook
|
|
*/
|
|
{ .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
|
|
.access = PL1_RW, .resetvalue = 0,
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
|
|
{ .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
|
|
.access = PL1_RW, .resetvalue = 0,
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
|
|
{ .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
|
|
.access = PL1_RW, .resetvalue = 0,
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
|
|
{ .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
|
|
.access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
|
|
/* TLB lockdown control */
|
|
{ .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
|
|
.access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
|
|
{ .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
|
|
.access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
|
|
{ .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
|
|
.access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
|
|
{ .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
|
|
.access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
|
|
{ .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
|
|
.access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
|
|
REGINFO_SENTINEL
|
|
};
|
|
|
|
static void cortex_a9_initfn(Object *obj)
|
|
{
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
cpu->dtb_compatible = "arm,cortex-a9";
|
|
set_feature(&cpu->env, ARM_FEATURE_V7);
|
|
set_feature(&cpu->env, ARM_FEATURE_VFP3);
|
|
set_feature(&cpu->env, ARM_FEATURE_NEON);
|
|
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
|
|
set_feature(&cpu->env, ARM_FEATURE_EL3);
|
|
/* Note that A9 supports the MP extensions even for
|
|
* A9UP and single-core A9MP (which are both different
|
|
* and valid configurations; we don't model A9UP).
|
|
*/
|
|
set_feature(&cpu->env, ARM_FEATURE_V7MP);
|
|
set_feature(&cpu->env, ARM_FEATURE_CBAR);
|
|
cpu->midr = 0x410fc090;
|
|
cpu->reset_fpsid = 0x41033090;
|
|
cpu->isar.mvfr0 = 0x11110222;
|
|
cpu->isar.mvfr1 = 0x01111111;
|
|
cpu->ctr = 0x80038003;
|
|
cpu->reset_sctlr = 0x00c50078;
|
|
cpu->id_pfr0 = 0x1031;
|
|
cpu->id_pfr1 = 0x11;
|
|
cpu->id_dfr0 = 0x000;
|
|
cpu->id_afr0 = 0;
|
|
cpu->id_mmfr0 = 0x00100103;
|
|
cpu->id_mmfr1 = 0x20000000;
|
|
cpu->id_mmfr2 = 0x01230000;
|
|
cpu->id_mmfr3 = 0x00002111;
|
|
cpu->isar.id_isar0 = 0x00101111;
|
|
cpu->isar.id_isar1 = 0x13112111;
|
|
cpu->isar.id_isar2 = 0x21232041;
|
|
cpu->isar.id_isar3 = 0x11112131;
|
|
cpu->isar.id_isar4 = 0x00111142;
|
|
cpu->dbgdidr = 0x35141000;
|
|
cpu->clidr = (1 << 27) | (1 << 24) | 3;
|
|
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
|
|
cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
|
|
define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
|
|
}
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
|
|
{
|
|
/* Linux wants the number of processors from here.
|
|
* Might as well set the interrupt-controller bit too.
|
|
*/
|
|
return ((smp_cpus - 1) << 24) | (1 << 23);
|
|
}
|
|
#endif
|
|
|
|
static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
|
|
#ifndef CONFIG_USER_ONLY
|
|
{ .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
|
|
.access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
|
|
.writefn = arm_cp_write_ignore, },
|
|
#endif
|
|
{ .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
|
|
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
|
|
REGINFO_SENTINEL
|
|
};
|
|
|
|
static void cortex_a7_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a7";
    set_feature(&cpu->env, ARM_FEATURE_V7VE);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
    cpu->midr = 0x410fc075;
    cpu->reset_fpsid = 0x41023075;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x11111111;
    cpu->ctr = 0x84448003;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x00001131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x02010555;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10101105;
    cpu->id_mmfr1 = 0x40000000;
    cpu->id_mmfr2 = 0x01240000;
    cpu->id_mmfr3 = 0x02102211;
    /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
     * table 4-41 gives 0x02101110, which includes the arm div insns.
     */
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232041;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x10011142;
    cpu->dbgdidr = 0x3515f005;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
    cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
    define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
}

static void cortex_a15_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a15";
    set_feature(&cpu->env, ARM_FEATURE_V7VE);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
    cpu->midr = 0x412fc0f1;
    cpu->reset_fpsid = 0x410430f0;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x11111111;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x00001131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x02010555;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10201105;
    cpu->id_mmfr1 = 0x20000000;
    cpu->id_mmfr2 = 0x01240000;
    cpu->id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232041;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x10011142;
    cpu->dbgdidr = 0x3515f021;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
    cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
    define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
}

static void ti925t_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_V4T);
    set_feature(&cpu->env, ARM_FEATURE_OMAPCP);
    cpu->midr = ARM_CPUID_TI925T;
    cpu->ctr = 0x5109149;
    cpu->reset_sctlr = 0x00000070;
}

static void sa1100_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "intel,sa1100";
    set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x4401A11B;
    cpu->reset_sctlr = 0x00000070;
}

static void sa1110_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x6901B119;
    cpu->reset_sctlr = 0x00000070;
}

static void pxa250_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052100;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa255_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d00;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa260_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052903;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa261_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d05;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa262_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d06;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270a0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054110;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270a1_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054111;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270b0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054112;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270b1_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054113;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270c0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054114;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270c5_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054117;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

#ifndef TARGET_AARCH64
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
 * otherwise, a CPU with as many features enabled as our emulation supports.
 * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
 * this only needs to handle 32 bits.
 */
static void arm_max_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (kvm_enabled()) {
        kvm_arm_set_cpu_features_from_host(cpu);
    } else {
        cortex_a15_initfn(obj);
#ifdef CONFIG_USER_ONLY
        /* We don't set these in system emulation mode for the moment,
         * since we don't correctly set (all of) the ID registers to
         * advertise them.
         */
        set_feature(&cpu->env, ARM_FEATURE_V8);
        {
            uint32_t t;

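            /* FIELD_DP32(reg, REG, FIELD, val) returns 'reg' with the named
             * ID register field replaced by 'val'; each assignment below
             * advertises one optional feature (AES, SHA1/SHA2, CRC32, ...)
             * to the guest via the ISAR/MMFR registers.
             */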
            t = cpu->isar.id_isar5;
            t = FIELD_DP32(t, ID_ISAR5, AES, 2);
            t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
            t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
            t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
            t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
            t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
            cpu->isar.id_isar5 = t;

            t = cpu->isar.id_isar6;
            t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
            t = FIELD_DP32(t, ID_ISAR6, DP, 1);
            cpu->isar.id_isar6 = t;

            t = cpu->id_mmfr4;
            t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
            cpu->id_mmfr4 = t;
        }
#endif
    }
}
#endif

#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */

struct ARMCPUInfo {
    const char *name;
    void (*initfn)(Object *obj);
    void (*class_init)(ObjectClass *oc, void *data);
};

static const ARMCPUInfo arm_cpus[] = {
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
    { .name = "arm926", .initfn = arm926_initfn },
    { .name = "arm946", .initfn = arm946_initfn },
    { .name = "arm1026", .initfn = arm1026_initfn },
    /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
     * older core than plain "arm1136". In particular this does not
     * have the v6K features.
     */
    { .name = "arm1136-r2", .initfn = arm1136_r2_initfn },
    { .name = "arm1136", .initfn = arm1136_initfn },
    { .name = "arm1176", .initfn = arm1176_initfn },
    { .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
    { .name = "cortex-m0", .initfn = cortex_m0_initfn,
      .class_init = arm_v7m_class_init },
    { .name = "cortex-m3", .initfn = cortex_m3_initfn,
      .class_init = arm_v7m_class_init },
    { .name = "cortex-m4", .initfn = cortex_m4_initfn,
      .class_init = arm_v7m_class_init },
    { .name = "cortex-m33", .initfn = cortex_m33_initfn,
      .class_init = arm_v7m_class_init },
    { .name = "cortex-r5", .initfn = cortex_r5_initfn },
    { .name = "cortex-r5f", .initfn = cortex_r5f_initfn },
    { .name = "cortex-a7", .initfn = cortex_a7_initfn },
    { .name = "cortex-a8", .initfn = cortex_a8_initfn },
    { .name = "cortex-a9", .initfn = cortex_a9_initfn },
    { .name = "cortex-a15", .initfn = cortex_a15_initfn },
    { .name = "ti925t", .initfn = ti925t_initfn },
    { .name = "sa1100", .initfn = sa1100_initfn },
    { .name = "sa1110", .initfn = sa1110_initfn },
    { .name = "pxa250", .initfn = pxa250_initfn },
    { .name = "pxa255", .initfn = pxa255_initfn },
    { .name = "pxa260", .initfn = pxa260_initfn },
    { .name = "pxa261", .initfn = pxa261_initfn },
    { .name = "pxa262", .initfn = pxa262_initfn },
    /* "pxa270" is an alias for "pxa270-a0" */
    { .name = "pxa270", .initfn = pxa270a0_initfn },
    { .name = "pxa270-a0", .initfn = pxa270a0_initfn },
    { .name = "pxa270-a1", .initfn = pxa270a1_initfn },
    { .name = "pxa270-b0", .initfn = pxa270b0_initfn },
    { .name = "pxa270-b1", .initfn = pxa270b1_initfn },
    { .name = "pxa270-c0", .initfn = pxa270c0_initfn },
    { .name = "pxa270-c5", .initfn = pxa270c5_initfn },
#ifndef TARGET_AARCH64
    { .name = "max", .initfn = arm_max_initfn },
#endif
#ifdef CONFIG_USER_ONLY
    { .name = "any", .initfn = arm_max_initfn },
#endif
#endif
    { .name = NULL }
};

static Property arm_cpu_properties[] = {
    DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
    DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
    DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
    DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
                       mp_affinity, ARM64_AFFINITY_INVALID),
    DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
    DEFINE_PROP_END_OF_LIST()
};

#ifdef CONFIG_USER_ONLY
static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                                    int rw, int mmu_idx)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception.vaddress = address;
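    /* rw == 2 denotes an instruction fetch; other values are data accesses. */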
    if (rw == 2) {
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        cs->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
#endif

static gchar *arm_gdb_arch_name(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return g_strdup("iwmmxt");
    }
    return g_strdup("arm");
}

static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, arm_cpu_realizefn,
                                    &acc->parent_realize);
    dc->props = arm_cpu_properties;

    acc->parent_reset = cc->reset;
    cc->reset = arm_cpu_reset;

    cc->class_by_name = arm_cpu_class_by_name;
    cc->has_work = arm_cpu_has_work;
    cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
    cc->dump_state = arm_cpu_dump_state;
    cc->set_pc = arm_cpu_set_pc;
    cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
    cc->gdb_read_register = arm_cpu_gdb_read_register;
    cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
#else
    cc->do_interrupt = arm_cpu_do_interrupt;
    cc->do_unaligned_access = arm_cpu_do_unaligned_access;
    cc->do_transaction_failed = arm_cpu_do_transaction_failed;
    cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
    cc->asidx_from_attrs = arm_asidx_from_attrs;
    cc->vmsd = &vmstate_arm_cpu;
    cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
    cc->write_elf64_note = arm_cpu_write_elf64_note;
    cc->write_elf32_note = arm_cpu_write_elf32_note;
#endif
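    /* gdbstub hooks: 26 core registers, described by arm-core.xml plus
     * any dynamically generated XML.
     */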
    cc->gdb_num_core_regs = 26;
    cc->gdb_core_xml_file = "arm-core.xml";
    cc->gdb_arch_name = arm_gdb_arch_name;
    cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
    cc->gdb_stop_before_watchpoint = true;
    cc->debug_excp_handler = arm_debug_excp_handler;
    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
#if !defined(CONFIG_USER_ONLY)
    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
#endif

    cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
    cc->tcg_initialize = arm_translate_init;
#endif
}

#ifdef CONFIG_KVM
static void arm_host_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    kvm_arm_set_cpu_features_from_host(cpu);
    arm_cpu_post_init(obj);
}

static const TypeInfo host_arm_cpu_type_info = {
    .name = TYPE_ARM_HOST_CPU,
#ifdef TARGET_AARCH64
    .parent = TYPE_AARCH64_CPU,
#else
    .parent = TYPE_ARM_CPU,
#endif
    .instance_init = arm_host_initfn,
};

#endif

static void arm_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

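    /* Invoke the per-model initfn; the class_init hook stored it in
     * acc->info from the class_data supplied by cpu_register().
     */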
    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}

static void cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_ARM_CPU,
        .instance_size = sizeof(ARMCPU),
        .instance_init = arm_cpu_instance_init,
        .class_size = sizeof(ARMCPUClass),
        .class_init = info->class_init ?: cpu_register_class_init,
        .class_data = (void *)info,
    };

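    /* Each model becomes its own QOM type named "<model>-" TYPE_ARM_CPU,
     * e.g. "cortex-a15" registers as "cortex-a15-arm-cpu".
     */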
    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}

static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_finalize = arm_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};

static const TypeInfo idau_interface_type_info = {
    .name = TYPE_IDAU_INTERFACE,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(IDAUInterfaceClass),
};

static void arm_cpu_register_types(void)
{
    const ARMCPUInfo *info = arm_cpus;

    type_register_static(&arm_cpu_type_info);
    type_register_static(&idau_interface_type_info);

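    /* Register one concrete type per entry in arm_cpus[]; the list is
     * terminated by a NULL-name sentinel.
     */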
    while (info->name) {
        cpu_register(info);
        info++;
    }

#ifdef CONFIG_KVM
    type_register_static(&host_arm_cpu_type_info);
#endif
}

type_init(arm_cpu_register_types)