/*
 * QEMU ARM CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/log.h"
#include "exec/page-vary.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#include "hw/boards.h"
#ifdef CONFIG_TCG
#include "hw/intc/armv7m_nvic.h"
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "sysemu/hw_accel.h"
#include "kvm_arm.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
#include "cpregs.h"

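/*
 * Setting the PC: AArch64 takes the full vaddr, while on AArch32
 * bit 0 of the branch target selects the Thumb instruction set state.
 */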
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        env->pc = value;
        env->thumb = false;
    } else {
        env->regs[15] = value & ~1;
        env->thumb = value & 1;
    }
}

static vaddr arm_cpu_get_pc(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        return env->pc;
    } else {
        return env->regs[15];
    }
}

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
                                 const TranslationBlock *tb)
{
    /* The program counter is always up to date with CF_PCREL. */
    if (!(tb_cflags(tb) & CF_PCREL)) {
        CPUARMState *env = cpu_env(cs);
        /*
         * It's OK to look at env for the current mode here, because it's
         * never possible for an AArch64 TB to chain to an AArch32 TB.
         */
        if (is_a64(env)) {
            env->pc = tb->pc;
        } else {
            env->regs[15] = tb->pc;
        }
    }
}

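/*
 * Unwind CPU state from the insn_start data recorded at translation time:
 * data[0] is the PC (page offset only under CF_PCREL), data[1] the
 * IT-block state (AArch32 only) and data[2] the syndrome template.
 */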
void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data)
{
    CPUARMState *env = cpu_env(cs);

    if (is_a64(env)) {
        if (tb_cflags(tb) & CF_PCREL) {
            env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
        } else {
            env->pc = data[0];
        }
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        if (tb_cflags(tb) & CF_PCREL) {
            env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
        } else {
            env->regs[15] = data[0];
        }
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}
#endif /* CONFIG_TCG */

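/*
 * A powered-off CPU never has work; otherwise, any pending physical or
 * virtual interrupt (or a TB-exit request) makes it runnable.
 */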
static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
         | CPU_INTERRUPT_EXITTB);
}

void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}

void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}

static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}

static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}

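/* The "hold" phase of the three-phase Resettable reset sequence. */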
static void arm_cpu_reset_hold(Object *obj)
{
    CPUState *s = CPU(obj);
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    if (acc->parent_phases.hold) {
        acc->parent_phases.hold(obj);
    }

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = true;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys. */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Trap on btype=3 for PACIxSP. */
        env->cp15.sctlr_el[1] |= SCTLR_BT0;
        /* Trap on implementation defined registers. */
        if (cpu_isar_feature(aa64_tidcp1, cpu)) {
            env->cp15.sctlr_el[1] |= SCTLR_TIDCP;
        }
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR_EL1, FPEN, 3);
        /* and to the SVE instructions, with default vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, ZEN, 3);
            env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
        }
        /* and for SME instructions, with default vector length, and TPIDR2 */
        if (cpu_isar_feature(aa64_sme, cpu)) {
            env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, SMEN, 3);
            env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
            if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
                env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
                                                 SMCR, FA64, 1);
            }
        }
        /*
         * Enable 48-bit address space (TODO: take reserved_va into account).
         * Enable TBI0 but not TBI1.
         * Note that this must match useronly_clean_ptr.
         */
        env->cp15.tcr_el[1] = 5 | (1ULL << 37);

        /* Enable MTE */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            /* Enable tag access, but leave TCF0 as No Effect (0). */
            env->cp15.sctlr_el[1] |= SCTLR_ATA0;
            /*
             * Exclude all tags, so that tag 0 is always used.
             * This corresponds to Linux current->thread.gcr_incl = 0.
             *
             * Set RRND, so that helper_irg() will generate a seed later.
             * Here in cpu_reset(), the crypto subsystem has not yet been
             * initialized.
             */
            env->cp15.gcr_el1 = 0x1ffff;
        }
        /*
         * Disable access to SCXTNUM_EL0 from CSV2_1p2.
         * This is not yet exposed from the Linux kernel in any way.
         */
        env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
        /* Disable access to Debug Communication Channel (DCC). */
        env->cp15.mdscr_el1 |= 1 << 12;
        /* Enable FEAT_MOPS */
        env->cp15.sctlr_el[1] |= SCTLR_MSCEN;
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }

        /* Sample rvbar at reset. */
        env->cp15.rvbar = cpu->rvbar_prop;
        env->pc = env->cp15.rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP10, 3);
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP11, 3);
#endif
        if (arm_feature(env, ARM_FEATURE_V8)) {
            env->cp15.rvbar = cpu->rvbar_prop;
            env->regs[15] = cpu->rvbar_prop;
        }
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else

    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif

    if (arm_feature(env, ARM_FEATURE_M)) {
#ifndef CONFIG_USER_ONLY
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;
#endif

        if (cpu_isar_feature(aa32_lob, cpu)) {
            /*
             * LTPSIZE is constant 4 if MVE not implemented, and resets
             * to an UNKNOWN value if MVE is implemented. We choose to
             * always reset to 4.
             */
            env->v7m.ltpsize = 4;
            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
            /*
             * Set NSACR to indicate "NS access permitted to everything";
             * this avoids having to have all the tests of it being
             * conditional on ARM_FEATURE_M_SECURITY. Note also that from
             * v8.1M the guest-visible value of NSACR in a CPU without the
             * Security Extension is 0xcff.
             */
            env->v7m.nsacr = 0xcff;
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
            env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
        }

#ifndef CONFIG_USER_ONLY
        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr_for_as(s->as, vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        qemu_log_mask(CPU_LOG_INT,
                      "Loaded reset SP 0x%x PC 0x%x from vector table\n",
                      initial_msp, initial_pc);

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
#else
        /*
         * For user mode we run non-secure and with access to the FPU.
         * The FPU context is active (ie does not need further setup)
         * and is owned by non-secure.
         */
        env->v7m.secure = false;
        env->v7m.nsacr = 0xcff;
        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
        env->v7m.fpccr[M_REG_S] &=
            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
#endif
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }

        if (cpu->pmsav8r_hdregion > 0) {
            memset(env->pmsav8.hprbar, 0,
                   sizeof(*env->pmsav8.hprbar) * cpu->pmsav8r_hdregion);
            memset(env->pmsav8.hprlar, 0,
                   sizeof(*env->pmsav8.hprlar) * cpu->pmsav8r_hdregion);
        }

        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);

        arm_rebuild_hflags(env);
    }
}

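/*
 * Emulate a firmware-style reset: bring the CPU up in target_el,
 * setting up the EL3/EL2 state that real firmware would have
 * established for running at a lower EL.
 */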
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
{
    ARMCPU *cpu = ARM_CPU(cpustate);
    CPUARMState *env = &cpu->env;
    bool have_el3 = arm_feature(env, ARM_FEATURE_EL3);
    bool have_el2 = arm_feature(env, ARM_FEATURE_EL2);

    /*
     * Check we have the EL we're aiming for. If that is the
     * highest implemented EL, then cpu_reset has already done
     * all the work.
     */
    switch (target_el) {
    case 3:
        assert(have_el3);
        return;
    case 2:
        assert(have_el2);
        if (!have_el3) {
            return;
        }
        break;
    case 1:
        if (!have_el3 && !have_el2) {
            return;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (have_el3) {
        /*
         * Set the EL3 state so code can run at EL2. This should match
         * the requirements set by Linux in its booting spec.
         */
        if (env->aarch64) {
            env->cp15.scr_el3 |= SCR_RW;
            if (cpu_isar_feature(aa64_pauth, cpu)) {
                env->cp15.scr_el3 |= SCR_API | SCR_APK;
            }
            if (cpu_isar_feature(aa64_mte, cpu)) {
                env->cp15.scr_el3 |= SCR_ATA;
            }
            if (cpu_isar_feature(aa64_sve, cpu)) {
                env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
                env->vfp.zcr_el[3] = 0xf;
            }
            if (cpu_isar_feature(aa64_sme, cpu)) {
                env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
                env->cp15.scr_el3 |= SCR_ENTP2;
                env->vfp.smcr_el[3] = 0xf;
            }
            if (cpu_isar_feature(aa64_hcx, cpu)) {
                env->cp15.scr_el3 |= SCR_HXEN;
            }
            if (cpu_isar_feature(aa64_fgt, cpu)) {
                env->cp15.scr_el3 |= SCR_FGTEN;
            }
        }

        if (target_el == 2) {
            /* If the guest is at EL2 then Linux expects the HVC insn to work */
            env->cp15.scr_el3 |= SCR_HCE;
        }

        /* Put CPU into non-secure state */
        env->cp15.scr_el3 |= SCR_NS;
        /* Set NSACR.{CP11,CP10} so NS can access the FPU */
        env->cp15.nsacr |= 3 << 10;
    }

    if (have_el2 && target_el < 2) {
        /* Set EL2 state so code can run at EL1. */
        if (env->aarch64) {
            env->cp15.hcr_el2 |= HCR_RW;
        }
    }

    /* Set the CPU to the desired state */
    if (env->aarch64) {
        env->pstate = aarch64_pstate_mode(target_el, true);
    } else {
        static const uint32_t mode_for_el[] = {
            0,
            ARM_CPU_MODE_SVC,
            ARM_CPU_MODE_HYP,
            ARM_CPU_MODE_SVC,
        };

        cpsr_write(env, mode_for_el[target_el], CPSR_M, CPSRWriteRaw);
    }
}

#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)

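/*
 * Return true if the exception is not masked at the current EL: the
 * PSTATE.{A,I,F} bits can be overridden by SCR/HCR routing controls
 * when the exception targets a higher EL.
 */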
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el,
                                     unsigned int cur_el, bool secure,
                                     uint64_t hcr_el2)
{
    CPUARMState *env = cpu_env(cs);
    bool pstate_unmasked;
    bool unmasked = false;

    /*
     * Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_I);
    case EXCP_VSERR:
        if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
            /* VSErrs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_A);
    default:
        g_assert_not_reached();
    }

    /*
     * Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            switch (target_el) {
            case 2:
                /*
                 * According to ARM DDI 0487H.a, an interrupt can be masked
                 * when HCR_E2H and HCR_TGE are both set regardless of the
                 * current Security state. Note that we need to revisit this
                 * part again once we need to support NMI.
                 */
                if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                    unmasked = true;
                }
                break;
            case 3:
                /* Interrupt cannot be masked when the target EL is 3 */
                unmasked = true;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /*
             * The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /*
                 * If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = hcr_el2 & HCR_FMO;
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /*
                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /*
                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = hcr_el2 & HCR_IMO;
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = true;
            }
        }
    }

    /*
     * The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}

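/*
 * Check pending interrupts against the masking rules above, in
 * (IMPLEMENTATION DEFINED) priority order, and take the first
 * deliverable one.
 */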
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cpu_env(cs);
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint32_t target_el;
    uint32_t excp_idx;

    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VSERR) {
        excp_idx = EXCP_VSERR;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            /* Taking a virtual abort clears HCR_EL2.VSE */
            env->cp15.hcr_el2 &= ~HCR_VSE;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
            goto found;
        }
    }
    return false;

 found:
    cs->exception_index = excp_idx;
    env->exception.target_el = target_el;
    cc->tcg_ops->do_interrupt(cs);
    return true;
}

#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */

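/*
 * Propagate changes in the virtual interrupt inputs (the HCR_EL2.{VI,VF,VSE}
 * bits and the GIC's virtual lines) into the CPU's pending-interrupt state.
 */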
void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}

void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}

void arm_cpu_update_vserr(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = env->cp15.hcr_el2 & HCR_VSE;

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
        }
    }
}

#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    if (!arm_feature(env, ARM_FEATURE_EL2) &&
        (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) {
        /*
         * The GIC might tell us about VIRQ and VFIQ state, but if we don't
         * have EL2 support we don't care. (Unless the guest is doing something
         * silly this will only be calls saying "level is still 0".)
         */
        return;
    }

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t linestate_bit;
    int irq_id;

    switch (irq) {
    case ARM_CPU_IRQ:
        irq_id = KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        irq_id = KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }
    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
#endif
}

static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}

#endif

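/* Configure the disassembler (Capstone) to match the current CPU state. */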
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#if TARGET_BIG_ENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}

2019-07-01 19:26:20 +03:00
|
|
|
#ifdef TARGET_AARCH64
|
|
|
|
|
|
|
|
static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = ARM_CPU(cs);
|
|
|
|
CPUARMState *env = &cpu->env;
|
|
|
|
uint32_t psr = pstate_read(env);
|
2023-07-04 16:08:47 +03:00
|
|
|
int i, j;
|
2019-07-01 19:26:20 +03:00
|
|
|
int el = arm_current_el(env);
|
|
|
|
const char *ns_status;
|
2022-07-08 18:14:56 +03:00
|
|
|
bool sve;
|
2019-07-01 19:26:20 +03:00
|
|
|
|
|
|
|
qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
|
|
|
|
for (i = 0; i < 32; i++) {
|
|
|
|
if (i == 31) {
|
|
|
|
qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
|
|
|
|
} else {
|
|
|
|
qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
|
|
|
|
(i + 2) % 3 ? " " : "\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
|
|
|
|
ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
|
|
|
|
} else {
|
|
|
|
ns_status = "";
|
|
|
|
}
|
|
|
|
qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
|
|
|
|
psr,
|
|
|
|
psr & PSTATE_N ? 'N' : '-',
|
|
|
|
psr & PSTATE_Z ? 'Z' : '-',
|
|
|
|
psr & PSTATE_C ? 'C' : '-',
|
|
|
|
psr & PSTATE_V ? 'V' : '-',
|
|
|
|
ns_status,
|
|
|
|
el,
|
|
|
|
psr & PSTATE_SP ? 'h' : 't');
|
|
|
|
|
2022-07-08 18:14:56 +03:00
|
|
|
if (cpu_isar_feature(aa64_sme, cpu)) {
|
|
|
|
qemu_fprintf(f, " SVCR=%08" PRIx64 " %c%c",
|
|
|
|
env->svcr,
|
|
|
|
(FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
|
|
|
|
(FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
|
|
|
|
}
|
2019-07-01 19:26:20 +03:00
|
|
|
if (cpu_isar_feature(aa64_bti, cpu)) {
|
|
|
|
qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
|
|
|
|
}
|
|
|
|
if (!(flags & CPU_DUMP_FPU)) {
|
|
|
|
qemu_fprintf(f, "\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (fp_exception_el(env, el) != 0) {
|
|
|
|
qemu_fprintf(f, " FPU disabled\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
|
|
|
|
vfp_get_fpcr(env), vfp_get_fpsr(env));
|
|
|
|
|
2022-07-08 18:14:56 +03:00
|
|
|
if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
|
|
|
|
sve = sme_exception_el(env, el) == 0;
|
|
|
|
} else if (cpu_isar_feature(aa64_sve, cpu)) {
|
|
|
|
sve = sve_exception_el(env, el) == 0;
|
|
|
|
} else {
|
|
|
|
sve = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sve) {
|
2023-07-04 16:08:47 +03:00
|
|
|
int zcr_len = sve_vqm1_for_el(env, el);
|
2019-07-01 19:26:20 +03:00
|
|
|
|
|
|
|
for (i = 0; i <= FFR_PRED_NUM; i++) {
|
|
|
|
bool eol;
|
|
|
|
if (i == FFR_PRED_NUM) {
|
|
|
|
qemu_fprintf(f, "FFR=");
|
|
|
|
/* It's last, so end the line. */
|
|
|
|
eol = true;
|
|
|
|
} else {
|
|
|
|
qemu_fprintf(f, "P%02d=", i);
|
|
|
|
switch (zcr_len) {
|
|
|
|
case 0:
|
|
|
|
eol = i % 8 == 7;
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
eol = i % 6 == 5;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
case 3:
|
|
|
|
eol = i % 3 == 2;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* More than one quadword per predicate. */
|
|
|
|
eol = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (j = zcr_len / 4; j >= 0; j--) {
|
|
|
|
int digits;
|
|
|
|
if (j * 4 + 4 <= zcr_len + 1) {
|
|
|
|
digits = 16;
|
|
|
|
} else {
|
|
|
|
digits = (zcr_len % 4 + 1) * 4;
|
|
|
|
}
|
|
|
|
qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
|
|
|
|
env->vfp.pregs[i].p[j],
|
|
|
|
j ? ":" : eol ? "\n" : " ");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-07-04 16:08:47 +03:00
|
|
|
if (zcr_len == 0) {
|
|
|
|
/*
|
|
|
|
* With vl=16, there are only 37 columns per register,
|
|
|
|
* so output two registers per line.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < 32; i++) {
|
2019-07-01 19:26:20 +03:00
|
|
|
qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
|
|
|
|
i, env->vfp.zregs[i].d[1],
|
|
|
|
env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
|
2023-07-04 16:08:47 +03:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (i = 0; i < 32; i++) {
|
|
|
|
qemu_fprintf(f, "Z%02d=", i);
|
2019-07-01 19:26:20 +03:00
|
|
|
for (j = zcr_len; j >= 0; j--) {
|
|
|
|
qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
|
|
|
|
env->vfp.zregs[i].d[j * 2 + 1],
|
2023-07-04 16:08:47 +03:00
|
|
|
env->vfp.zregs[i].d[j * 2 + 0],
|
|
|
|
j ? ":" : "\n");
|
2019-07-01 19:26:20 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (i = 0; i < 32; i++) {
|
|
|
|
uint64_t *q = aa64_vfp_qreg(env, i);
|
|
|
|
qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
|
|
|
|
i, q[1], q[0], (i & 1 ? "\n" : " "));
|
|
|
|
}
|
|
|
|
}
|
2023-07-04 16:08:48 +03:00
|
|
|
|
|
|
|
if (cpu_isar_feature(aa64_sme, cpu) &&
|
|
|
|
FIELD_EX64(env->svcr, SVCR, ZA) &&
|
|
|
|
sme_exception_el(env, el) == 0) {
|
|
|
|
int zcr_len = sve_vqm1_for_el_sm(env, el, true);
|
|
|
|
int svl = (zcr_len + 1) * 16;
|
|
|
|
int svl_lg10 = svl < 100 ? 2 : 3;
|
|
|
|
|
|
|
|
for (i = 0; i < svl; i++) {
|
|
|
|
qemu_fprintf(f, "ZA[%0*d]=", svl_lg10, i);
|
|
|
|
for (j = zcr_len; j >= 0; --j) {
|
|
|
|
qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%c",
|
|
|
|
env->zarray[i].d[2 * j + 1],
|
|
|
|
env->zarray[i].d[2 * j],
|
|
|
|
j ? ':' : '\n');
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-07-01 19:26:20 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|
|
|
{
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif

static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            numvfpregs = 32;
        } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            numvfpregs = 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
        if (cpu_isar_feature(aa32_mve, cpu)) {
            qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
        }
    }
}
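
/*
 * Illustrative A-profile example (derived from the format string above):
 * a Non-secure usr-mode CPU with PSR 0x60000010 dumps as
 * "PSR=60000010 -ZC- A NS usr32".
 */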

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}
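
/*
 * Worked example: with a cluster size of 8 (ARM_DEFAULT_CPUS_PER_CLUSTER,
 * as used by arm_cpu_realizefn() below), cpu index 10 yields Aff1 = 1 and
 * Aff0 = 2, i.e. an MPIDR affinity value of 0x102 since Aff1 occupies
 * bits [15:8].
 */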

static void arm_cpu_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                         NULL, g_free);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifdef CONFIG_USER_ONLY
# ifdef TARGET_AARCH64
    /*
     * The linux kernel defaults to 512-bit for SVE, and 256-bit for SME.
     * These values were chosen to fit within the default signal frame.
     * See documentation for /proc/sys/abi/{sve,sme}_default_vector_length,
     * and our corresponding cpu property.
     */
    cpu->sve_default_vq = 4;
    cpu->sme_default_vq = 2;
# endif
#else
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled() || hvf_enabled()) {
        /* TCG and HVF implement PSCI 1.1 */
        cpu->psci_version = QEMU_PSCI_VERSION_1_1;
    }
}

static Property arm_cpu_gt_cntfrq_property =
            DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz,
                               NANOSECONDS_PER_SECOND / GTIMER_SCALE);

static Property arm_cpu_reset_cbar_property =
            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);

static Property arm_cpu_reset_hivecs_property =
            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

#ifndef CONFIG_USER_ONLY
static Property arm_cpu_has_el2_property =
            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

static Property arm_cpu_has_el3_property =
            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
#endif

static Property arm_cpu_cfgend_property =
            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

static Property arm_cpu_has_vfp_property =
            DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);

static Property arm_cpu_has_vfp_d32_property =
            DEFINE_PROP_BOOL("vfp-d32", ARMCPU, has_vfp_d32, true);

static Property arm_cpu_has_neon_property =
            DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);

static Property arm_cpu_has_dsp_property =
            DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);

static Property arm_cpu_has_mpu_property =
            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 * the right value for that particular CPU type, and we don't want
 * to override that with an incorrect constant value.
 */
static Property arm_cpu_pmsav7_dregion_property =
            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                           pmsav7_dregion,
                                           qdev_prop_uint32, uint32_t);
2019-08-02 15:25:27 +03:00
|
|
|
static bool arm_get_pmu(Object *obj, Error **errp)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
|
|
|
|
return cpu->has_pmu;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void arm_set_pmu(Object *obj, bool value, Error **errp)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = ARM_CPU(obj);
|
|
|
|
|
|
|
|
if (value) {
|
target/arm: Check supported KVM features globally (not per vCPU)
Since commit d70c996df23f, when enabling the PMU we get:
$ qemu-system-aarch64 -cpu host,pmu=on -M virt,accel=kvm,gic-version=3
Segmentation fault (core dumped)
Thread 1 "qemu-system-aar" received signal SIGSEGV, Segmentation fault.
0x0000aaaaaae356d0 in kvm_ioctl (s=0x0, type=44547) at accel/kvm/kvm-all.c:2588
2588 ret = ioctl(s->fd, type, arg);
(gdb) bt
#0 0x0000aaaaaae356d0 in kvm_ioctl (s=0x0, type=44547) at accel/kvm/kvm-all.c:2588
#1 0x0000aaaaaae31568 in kvm_check_extension (s=0x0, extension=126) at accel/kvm/kvm-all.c:916
#2 0x0000aaaaaafce254 in kvm_arm_pmu_supported (cpu=0xaaaaac214ab0) at target/arm/kvm.c:213
#3 0x0000aaaaaafc0f94 in arm_set_pmu (obj=0xaaaaac214ab0, value=true, errp=0xffffffffe438) at target/arm/cpu.c:1111
#4 0x0000aaaaab5533ac in property_set_bool (obj=0xaaaaac214ab0, v=0xaaaaac223a80, name=0xaaaaac11a970 "pmu", opaque=0xaaaaac222730, errp=0xffffffffe438) at qom/object.c:2170
#5 0x0000aaaaab5512f0 in object_property_set (obj=0xaaaaac214ab0, v=0xaaaaac223a80, name=0xaaaaac11a970 "pmu", errp=0xffffffffe438) at qom/object.c:1328
#6 0x0000aaaaab551e10 in object_property_parse (obj=0xaaaaac214ab0, string=0xaaaaac11b4c0 "on", name=0xaaaaac11a970 "pmu", errp=0xffffffffe438) at qom/object.c:1561
#7 0x0000aaaaab54ee8c in object_apply_global_props (obj=0xaaaaac214ab0, props=0xaaaaac018e20, errp=0xaaaaabd6fd88 <error_fatal>) at qom/object.c:407
#8 0x0000aaaaab1dd5a4 in qdev_prop_set_globals (dev=0xaaaaac214ab0) at hw/core/qdev-properties.c:1218
#9 0x0000aaaaab1d9fac in device_post_init (obj=0xaaaaac214ab0) at hw/core/qdev.c:1050
...
#15 0x0000aaaaab54f310 in object_initialize_with_type (obj=0xaaaaac214ab0, size=52208, type=0xaaaaabe237f0) at qom/object.c:512
#16 0x0000aaaaab54fa24 in object_new_with_type (type=0xaaaaabe237f0) at qom/object.c:687
#17 0x0000aaaaab54fa80 in object_new (typename=0xaaaaabe23970 "host-arm-cpu") at qom/object.c:702
#18 0x0000aaaaaaf04a74 in machvirt_init (machine=0xaaaaac0a8550) at hw/arm/virt.c:1770
#19 0x0000aaaaab1e8720 in machine_run_board_init (machine=0xaaaaac0a8550) at hw/core/machine.c:1138
#20 0x0000aaaaaaf95394 in qemu_init (argc=5, argv=0xffffffffea58, envp=0xffffffffea88) at softmmu/vl.c:4348
#21 0x0000aaaaaada3f74 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at softmmu/main.c:48
This is because in frame #2, cpu->kvm_state is still NULL
(the vCPU is not yet realized).
KVM has a hard requirement of all cores supporting the same
feature set. We only need to check if the accelerator supports
a feature, not each vCPU individually.
Fix by removing the 'CPUState *cpu' argument from the
kvm_arm_<FEATURE>_supported() functions.
Fixes: d70c996df23f ('Use CPUState::kvm_state in kvm_arm_pmu_supported')
Reported-by: Haibo Xu <haibo.xu@linaro.org>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2020-06-23 12:06:22 +03:00
|
|
|
if (kvm_enabled() && !kvm_arm_pmu_supported()) {
|
2019-08-02 15:25:27 +03:00
|
|
|
error_setg(errp, "'pmu' feature not supported by KVM on this host");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
set_feature(&cpu->env, ARM_FEATURE_PMU);
|
|
|
|
} else {
|
|
|
|
unset_feature(&cpu->env, ARM_FEATURE_PMU);
|
|
|
|
}
|
|
|
|
cpu->has_pmu = value;
|
|
|
|
}
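
/*
 * These accessors back the "pmu" QOM property registered in
 * arm_cpu_post_init(), so the feature can be toggled from the command
 * line, e.g. "-cpu max,pmu=off" (illustrative invocation).
 */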

unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
    /*
     * The exact approach to calculating guest ticks is:
     *
     *     muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
     *              NANOSECONDS_PER_SECOND);
     *
     * We don't do that. Rather we intentionally use integer division
     * truncation below and in the caller for the conversion of host monotonic
     * time to guest ticks to provide the exact inverse for the semantics of
     * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
     * it loses precision when representing frequencies where
     * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq_hz) > 0` holds. Failing to
     * provide an exact inverse leads to scheduling timers with negative
     * periods, which in turn leads to sticky behaviour in the guest.
     *
     * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
     * cannot become zero.
     */
    return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
        NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
}
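
/*
 * For example, the default CNTFRQ of NANOSECONDS_PER_SECOND / GTIMER_SCALE
 * yields a period of exactly GTIMER_SCALE nanoseconds, while any CNTFRQ
 * above 1GHz is clamped to a 1ns period so the QEMUTimer scale factor used
 * in arm_cpu_realizefn() never becomes zero.
 */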

static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    bool no_aa32 = false;

    /*
     * Some features automatically imply others: set the feature
     * bits explicitly for these cases.
     */

    if (arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_PMSA);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V7);
        } else {
            set_feature(env, ARM_FEATURE_V7VE);
        }
    }

    /*
     * There exist AArch64 cpus without AArch32 support. When KVM
     * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
     * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
     * As a general principle, we also do not make ID register
     * consistency checks anywhere unless using TCG, because only
     * for TCG would a consistency-check failure be a QEMU bug.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
    }

    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        /*
         * v7 Virtualization Extensions. In real hardware this implies
         * EL2 and also the presence of the Security Extensions.
         * For QEMU, for backwards-compatibility we implement some
         * CPUs or CPU configs which have no actual EL2 or EL3 but do
         * include the various other features that V7VE implies.
         * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
         * Security Extensions is ARM_FEATURE_EL3.
         */
        assert(!tcg_enabled() || no_aa32 ||
               cpu_isar_feature(aa32_arm_div, cpu));
        set_feature(env, ARM_FEATURE_LPAE);
        set_feature(env, ARM_FEATURE_V7);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        set_feature(env, ARM_FEATURE_VAPA);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_MPIDR);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V6K);
        } else {
            set_feature(env, ARM_FEATURE_V6);
        }

        /*
         * Always define VBAR for V7 CPUs even if it doesn't exist in
         * non-EL3 configs. This is needed by some legacy boards.
         */
        set_feature(env, ARM_FEATURE_VBAR);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_MVFR);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        set_feature(env, ARM_FEATURE_V5);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            assert(!tcg_enabled() || no_aa32 ||
                   cpu_isar_feature(aa32_jazelle, cpu));
            set_feature(env, ARM_FEATURE_AUXCR);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V5)) {
        set_feature(env, ARM_FEATURE_V4T);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        set_feature(env, ARM_FEATURE_V7MP);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
        set_feature(env, ARM_FEATURE_CBAR);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2) &&
        !arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_THUMB_DSP);
    }
}
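
/*
 * For example, a non-M-profile CPU model that sets only ARM_FEATURE_V8
 * leaves this function with V7VE, LPAE, V7MP, V7, VAPA, THUMB2, MPIDR,
 * VBAR, V6K, MVFR, V6, V5, AUXCR, V4T and THUMB_DSP all set as well,
 * via the transitive checks above.
 */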

void arm_cpu_post_init(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /*
     * Some features imply others. Figure this out now, because we
     * are going to look at the feature bits in deciding which
     * properties to add.
     */
    arm_cpu_propagate_feature_implications(cpu);

    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
    }

    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
        object_property_add_uint64_ptr(obj, "rvbar",
                                       &cpu->rvbar_prop,
                                       OBJ_PROP_FLAG_READWRITE);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
        /* Add the has_el3 state CPU property only if EL3 is allowed. This will
         * prevent "has_el3" from existing on CPUs which cannot support EL3.
         */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property);

        object_property_add_link(obj, "secure-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->secure_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property);
    }
#endif

    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
        cpu->has_pmu = true;
        object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu);
    }

    /*
     * Allow user to turn off VFP and Neon support, but only for TCG --
     * KVM does not currently allow us to lie to the guest about its
     * ID/feature registers, so the guest always sees what the host has.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_fp_simd, cpu)) {
            cpu->has_vfp = true;
            cpu->has_vfp_d32 = true;
            if (tcg_enabled() || qtest_enabled()) {
                qdev_property_add_static(DEVICE(obj),
                                         &arm_cpu_has_vfp_property);
            }
        }
    } else if (cpu_isar_feature(aa32_vfp, cpu)) {
        cpu->has_vfp = true;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            cpu->has_vfp_d32 = true;
            /*
             * The permitted values of the SIMDReg bits [3:0] on
             * Armv8-A are either 0b0000 or 0b0010. On such CPUs,
             * make sure that has_vfp_d32 cannot be set to false.
             */
            if ((tcg_enabled() || qtest_enabled())
                && !(arm_feature(&cpu->env, ARM_FEATURE_V8)
                     && !arm_feature(&cpu->env, ARM_FEATURE_M))) {
                qdev_property_add_static(DEVICE(obj),
                                         &arm_cpu_has_vfp_d32_property);
            }
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
        cpu->has_neon = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
        arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            qdev_property_add_static(DEVICE(obj),
                                     &arm_cpu_pmsav7_dregion_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
        object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
        /*
         * M profile: initial value of the Secure VTOR. We can't just use
         * a simple DEFINE_PROP_UINT32 for this because we want to permit
         * the property to be set after realize.
         */
        object_property_add_uint32_ptr(obj, "init-svtor",
                                       &cpu->init_svtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        /*
         * Initial value of the NS VTOR (for cores without the Security
         * extension, this is the only VTOR)
         */
        object_property_add_uint32_ptr(obj, "init-nsvtor",
                                       &cpu->init_nsvtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }

    /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
    object_property_add_uint32_ptr(obj, "psci-conduit",
                                   &cpu->psci_conduit,
                                   OBJ_PROP_FLAG_READWRITE);

    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);

    if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
        qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
    }

    if (kvm_enabled()) {
        kvm_arm_add_vcpu_properties(cpu);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
        cpu_isar_feature(aa64_mte, cpu)) {
        object_property_add_link(obj, "tag-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->tag_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);

        if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
            object_property_add_link(obj, "secure-tag-memory",
                                     TYPE_MEMORY_REGION,
                                     (Object **)&cpu->secure_tag_memory,
                                     qdev_prop_allow_set_link_before_realize,
                                     OBJ_PROP_LINK_STRONG);
        }
    }
#endif
}
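
/*
 * The QOM properties registered above are settable via -cpu options;
 * for instance "-cpu cortex-m33,vfp=off,dsp=off" (illustrative) models
 * a Cortex-M33 configured without its optional FP and DSP extensions.
 */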

static void arm_cpu_finalizefn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMELChangeHook *hook, *next;

    g_hash_table_destroy(cpu->cp_regs);

    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->pmu_timer) {
        timer_free(cpu->pmu_timer);
    }
#endif
}

void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifdef TARGET_AARCH64
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        arm_cpu_sve_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /*
         * FEAT_SME is not architecturally dependent on FEAT_SVE (unless
         * FEAT_SME_FA64 is present). However our implementation currently
         * assumes it, so if the user asked for sve=off then turn off SME also.
         * (KVM doesn't currently support SME at all.)
         */
        if (cpu_isar_feature(aa64_sme, cpu) && !cpu_isar_feature(aa64_sve, cpu)) {
            object_property_set_bool(OBJECT(cpu), "sme", false, &error_abort);
        }

        arm_cpu_sme_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_pauth_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_lpa2_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
#endif

    if (kvm_enabled()) {
        kvm_arm_steal_time_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
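
/*
 * arm_cpu_sve_finalize() above validates the per-length sve<N> properties;
 * for example "-cpu max,sve-max-vq=4,sve384=off" leaves a guest with the
 * 128, 256 and 512 bit vector lengths enabled.
 */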

static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    ARMCPU *cpu = ARM_CPU(dev);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
    CPUARMState *env = &cpu->env;
    int pagebits;
    Error *local_err = NULL;

    /* Use pc-relative instructions in system-mode */
#ifndef CONFIG_USER_ONLY
    cs->tcg_cflags |= CF_PCREL;
#endif

    /* If we needed to query the host kernel for the CPU features
     * then it's possible that this might have failed in the initfn, but
     * this is the first point where we can report it.
     */
    if (cpu->host_cpu_probe_failed) {
        if (!kvm_enabled() && !hvf_enabled()) {
            error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
        } else {
            error_setg(errp, "Failed to retrieve host CPU features");
        }
        return;
    }

#ifndef CONFIG_USER_ONLY
    /* The NVIC and M-profile CPU are two halves of a single piece of
     * hardware; trying to use one without the other is a command line
     * error and will result in segfaults if not caught here.
     */
    if (arm_feature(env, ARM_FEATURE_M)) {
        if (!env->nvic) {
            error_setg(errp, "This board cannot be used with Cortex-M CPUs");
            return;
        }
    } else {
        if (env->nvic) {
            error_setg(errp, "This board can only be used with Cortex-M CPUs");
            return;
        }
    }

    if (!tcg_enabled() && !qtest_enabled()) {
        /*
         * We assume that no accelerator except TCG (and the "not really an
         * accelerator" qtest) can handle these features, because Arm hardware
         * virtualization can't virtualize them.
         *
         * Catch all the cases which might cause us to create more than one
         * address space for the CPU (otherwise we will assert() later in
         * cpu_address_space_init()).
         */
        if (arm_feature(env, ARM_FEATURE_M)) {
            error_setg(errp,
                       "Cannot enable %s when using an M-profile guest CPU",
                       current_accel_name());
            return;
        }
        if (cpu->has_el3) {
            error_setg(errp,
                       "Cannot enable %s when guest CPU has EL3 enabled",
                       current_accel_name());
            return;
        }
        if (cpu->tag_memory) {
            error_setg(errp,
                       "Cannot enable %s when guest CPU has MTE enabled",
                       current_accel_name());
            return;
        }
    }

    {
        uint64_t scale;

        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
            if (!cpu->gt_cntfrq_hz) {
                error_setg(errp, "Invalid CNTFRQ: %"PRId64"Hz",
                           cpu->gt_cntfrq_hz);
                return;
            }
            scale = gt_cntfrq_period_ns(cpu);
        } else {
            scale = GTIMER_SCALE;
        }

        cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                               arm_gt_ptimer_cb, cpu);
        cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                               arm_gt_vtimer_cb, cpu);
        cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                              arm_gt_htimer_cb, cpu);
        cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                              arm_gt_stimer_cb, cpu);
        cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                                  arm_gt_hvtimer_cb, cpu);
    }
#endif

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    arm_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
2023-07-04 16:08:48 +03:00
|
|
|
#ifdef CONFIG_USER_ONLY
|
|
|
|
/*
|
|
|
|
* User mode relies on IC IVAU instructions to catch modification of
|
|
|
|
* dual-mapped code.
|
|
|
|
*
|
|
|
|
* Clear CTR_EL0.DIC to ensure that software that honors these flags uses
|
|
|
|
* IC IVAU even if the emulated processor does not normally require it.
|
|
|
|
*/
|
|
|
|
cpu->ctr = FIELD_DP64(cpu->ctr, CTR_EL0, DIC, 0);
|
|
|
|
#endif
|
|
|
|
|
2019-05-17 20:40:43 +03:00
|
|
|
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
|
|
|
|
cpu->has_vfp != cpu->has_neon) {
|
|
|
|
/*
|
|
|
|
* This is an architectural requirement for AArch64; AArch32 is
|
|
|
|
* more flexible and permits VFP-no-Neon and Neon-no-VFP.
|
|
|
|
*/
|
|
|
|
error_setg(errp,
|
|
|
|
"AArch64 CPUs must have both VFP and Neon or neither");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-06-07 07:39:43 +03:00
|
|
|
if (cpu->has_vfp_d32 != cpu->has_neon) {
|
|
|
|
error_setg(errp, "ARM CPUs must have both VFP-D32 and Neon or neither");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!cpu->has_vfp_d32) {
|
|
|
|
uint32_t u;
|
|
|
|
|
|
|
|
u = cpu->isar.mvfr0;
|
|
|
|
u = FIELD_DP32(u, MVFR0, SIMDREG, 1); /* 16 registers */
|
|
|
|
cpu->isar.mvfr0 = u;
|
|
|
|
}
|
|
|
|
|
2019-05-17 20:40:43 +03:00
|
|
|
if (!cpu->has_vfp) {
|
|
|
|
uint64_t t;
|
|
|
|
uint32_t u;
|
|
|
|
|
|
|
|
t = cpu->isar.id_aa64isar1;
|
|
|
|
t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
|
|
|
|
cpu->isar.id_aa64isar1 = t;
|
|
|
|
|
|
|
|
t = cpu->isar.id_aa64pfr0;
|
|
|
|
t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
|
|
|
|
cpu->isar.id_aa64pfr0 = t;
|
|
|
|
|
|
|
|
u = cpu->isar.id_isar6;
|
|
|
|
u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
|
2021-05-26 01:58:17 +03:00
|
|
|
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
|
2019-05-17 20:40:43 +03:00
|
|
|
cpu->isar.id_isar6 = u;
|
|
|
|
|
|
|
|
u = cpu->isar.mvfr0;
|
|
|
|
u = FIELD_DP32(u, MVFR0, FPSP, 0);
|
|
|
|
u = FIELD_DP32(u, MVFR0, FPDP, 0);
|
|
|
|
u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
|
|
|
|
u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
|
|
|
|
u = FIELD_DP32(u, MVFR0, FPROUND, 0);
|
2020-10-19 18:12:59 +03:00
|
|
|
if (!arm_feature(env, ARM_FEATURE_M)) {
|
|
|
|
u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
|
|
|
|
u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
|
|
|
|
}
|
2019-05-17 20:40:43 +03:00
|
|
|
cpu->isar.mvfr0 = u;
|
|
|
|
|
|
|
|
u = cpu->isar.mvfr1;
|
|
|
|
u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
|
|
|
|
u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
|
|
|
|
u = FIELD_DP32(u, MVFR1, FPHP, 0);
|
2020-10-19 18:12:59 +03:00
|
|
|
if (arm_feature(env, ARM_FEATURE_M)) {
|
|
|
|
u = FIELD_DP32(u, MVFR1, FP16, 0);
|
|
|
|
}
|
2019-05-17 20:40:43 +03:00
|
|
|
cpu->isar.mvfr1 = u;
|
|
|
|
|
|
|
|
u = cpu->isar.mvfr2;
|
|
|
|
u = FIELD_DP32(u, MVFR2, FPMISC, 0);
|
|
|
|
cpu->isar.mvfr2 = u;
|
|
|
|
}
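
    /*
     * Note: ID_AA64PFR0.FP (set to 0xf above) and ID_AA64PFR0.ADVSIMD
     * (set to 0xf below) are signed ID register fields, so 0xf is the
     * architected -1 ("not implemented") encoding, not a bitmask.
     */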

    if (!cpu->has_neon) {
        uint64_t t;
        uint32_t u;

        unset_feature(env, ARM_FEATURE_NEON);

        t = cpu->isar.id_aa64isar0;
        t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
        cpu->isar.id_aa64isar0 = t;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
        t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
        t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
        cpu->isar.id_aa64isar1 = t;

        t = cpu->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
        cpu->isar.id_aa64pfr0 = t;

        u = cpu->isar.id_isar5;
        u = FIELD_DP32(u, ID_ISAR5, AES, 0);
        u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
        u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
        u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
        u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
        cpu->isar.id_isar5 = u;

        u = cpu->isar.id_isar6;
        u = FIELD_DP32(u, ID_ISAR6, DP, 0);
        u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
        u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
        u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
        cpu->isar.id_isar6 = u;

        if (!arm_feature(env, ARM_FEATURE_M)) {
            u = cpu->isar.mvfr1;
            u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
            u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
            u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
            u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
            cpu->isar.mvfr1 = u;

            u = cpu->isar.mvfr2;
            u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
            cpu->isar.mvfr2 = u;
        }
    }
|
|
|
|
|
|
|
|
if (!cpu->has_neon && !cpu->has_vfp) {
|
|
|
|
uint64_t t;
|
|
|
|
uint32_t u;
|
|
|
|
|
|
|
|
t = cpu->isar.id_aa64isar0;
|
|
|
|
t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
|
|
|
|
cpu->isar.id_aa64isar0 = t;
|
|
|
|
|
|
|
|
t = cpu->isar.id_aa64isar1;
|
|
|
|
t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
|
|
|
|
cpu->isar.id_aa64isar1 = t;
|
|
|
|
|
|
|
|
u = cpu->isar.mvfr0;
|
|
|
|
u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
|
|
|
|
cpu->isar.mvfr0 = u;
|
2020-02-25 01:22:24 +03:00
|
|
|
|
|
|
|
/* Despite the name, this field covers both VFP and Neon */
|
|
|
|
u = cpu->isar.mvfr1;
|
|
|
|
u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
|
|
|
|
cpu->isar.mvfr1 = u;
|
2019-05-17 20:40:43 +03:00
|
|
|
}
|
|
|
|
|
2019-05-17 20:40:44 +03:00
|
|
|
if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
|
|
|
|
uint32_t u;
|
|
|
|
|
|
|
|
unset_feature(env, ARM_FEATURE_THUMB_DSP);
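
        /*
         * Writing 1 (rather than 0) to Extend, MultU, MultS and SIMD
         * keeps the baseline forms of those instruction groups while
         * dropping the DSP-only variants; Saturate == 0 removes the
         * saturating arithmetic instructions entirely.
         */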
        u = cpu->isar.id_isar1;
        u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
        cpu->isar.id_isar1 = u;

        u = cpu->isar.id_isar2;
        u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
        u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
        cpu->isar.id_isar2 = u;

        u = cpu->isar.id_isar3;
        u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
        u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
        cpu->isar.id_isar3 = u;
    }

    /*
     * We rely on no XScale CPU having VFP so we can use the same bits in the
     * TB flags field for VECSTRIDE and XSCALE_CPAR.
     */
    assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) ||
           !cpu_isar_feature(aa32_vfp_simd, cpu) ||
           !arm_feature(env, ARM_FEATURE_XSCALE));

    if (arm_feature(env, ARM_FEATURE_V7) &&
        !arm_feature(env, ARM_FEATURE_M) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        /* v7VMSA drops support for the old ARMv5 tiny pages, so we
         * can use 4K pages.
         */
        pagebits = 12;
    } else {
        /* For CPUs which might have tiny 1K pages, or which have an
         * MPU and might have small region sizes, stick with 1K pages.
         */
        pagebits = 10;
    }
    if (!set_preferred_target_page_bits(pagebits)) {
        /* This can only ever happen for hotplugging a CPU, or if
         * the board code incorrectly creates a CPU which it has
         * promised via minimum_page_size that it will not.
         */
        error_setg(errp, "This CPU requires a smaller page size than the "
                   "system is using");
        return;
    }

    /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
     * We don't support setting cluster ID ([16..23]) (known as Aff2
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
        cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
                                               ARM_DEFAULT_CPUS_PER_CLUSTER);
    }

    if (cpu->reset_hivecs) {
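        /* SCTLR.V (bit 13): reset with HIVECS, vectors at 0xffff0000. */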
        cpu->reset_sctlr |= (1 << 13);
    }

    if (cpu->cfgend) {
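        /*
         * The cfgend property models the CFGEND input signal: reset into
         * big-endian mode, using SCTLR.EE on v7 and later and the legacy
         * BE-32 SCTLR.B bit on older cores.
         */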
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            cpu->reset_sctlr |= SCTLR_EE;
        } else {
            cpu->reset_sctlr |= SCTLR_B;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
        /* If the has_el3 CPU property is disabled then we need to disable the
         * feature.
         */
        unset_feature(env, ARM_FEATURE_EL3);

        /*
         * Disable the security extension feature bits in the processor
         * feature registers as well.
         */
        cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
        cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
                                           ID_AA64PFR0, EL3, 0);

        /* Disable the realm management extension, which requires EL3. */
        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
                                           ID_AA64PFR0, RME, 0);
    }

    if (!cpu->has_el2) {
        unset_feature(env, ARM_FEATURE_EL2);
    }

    if (!cpu->has_pmu) {
        unset_feature(env, ARM_FEATURE_PMU);
    }
    if (arm_feature(env, ARM_FEATURE_PMU)) {
        pmu_init(cpu);

        if (!kvm_enabled()) {
            arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
            arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
        }

#ifndef CONFIG_USER_ONLY
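        /* Timer used to model PMU counter overflow in system emulation. */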
        cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
                                      cpu);
#endif
    } else {
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
        cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
        cpu->pmceid0 = 0;
        cpu->pmceid1 = 0;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * Disable the hypervisor feature bits in the processor feature
         * registers if we don't have EL2.
         */
        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
                                           ID_AA64PFR0, EL2, 0);
        cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
                                       ID_PFR1, VIRTUALIZATION, 0);
    }

    if (cpu_isar_feature(aa64_mte, cpu)) {
        /*
         * The architectural range of GM blocksize is 2-6, however QEMU
         * doesn't support a blocksize of 2 (see HELPER(ldgm)).
         */
        if (tcg_enabled()) {
            assert(cpu->gm_blocksize >= 3 && cpu->gm_blocksize <= 6);
        }

#ifndef CONFIG_USER_ONLY
        /*
         * If we do not have tag-memory provided by the machine,
         * reduce MTE support to instructions enabled at EL0.
         * This matches Cortex-A710 BROADCASTMTE input being LOW.
         */
        if (cpu->tag_memory == NULL) {
            cpu->isar.id_aa64pfr1 =
                FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
        }
#endif
    }

    if (tcg_enabled()) {
        /*
         * Don't report some architectural features in the ID registers
         * where TCG does not yet implement them (not even a minimal
         * stub version). This avoids guests falling over when they
         * try to access the non-existent system registers for them.
         */
        /* FEAT_SPE (Statistical Profiling Extension) */
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
        /* FEAT_TRBE (Trace Buffer Extension) */
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
        /* FEAT_TRF (Self-hosted Trace Extension) */
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
        cpu->isar.id_dfr0 =
            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
        /* Trace Macrocell system register access */
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
        cpu->isar.id_dfr0 =
            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
        /* Memory mapped trace */
        cpu->isar.id_dfr0 =
            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
        /* FEAT_AMU (Activity Monitors Extension) */
        cpu->isar.id_aa64pfr0 =
            FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
        cpu->isar.id_pfr0 =
            FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
        /* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
        cpu->isar.id_aa64pfr0 =
            FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
        /* FEAT_NV2 (Enhanced Nested Virtualization support) */
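        /* Squash the NV field to 1: advertise FEAT_NV but not FEAT_NV2. */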
        if (FIELD_EX64(cpu->isar.id_aa64mmfr2, ID_AA64MMFR2, NV) > 1) {
            cpu->isar.id_aa64mmfr2 =
                FIELD_DP64(cpu->isar.id_aa64mmfr2, ID_AA64MMFR2, NV, 1);
        }
    }

    /* MPU can be configured out of a PMSA CPU either by setting has-mpu
     * to false or by setting pmsav7-dregion to 0.
     */
    if (!cpu->has_mpu || cpu->pmsav7_dregion == 0) {
        cpu->has_mpu = false;
        cpu->pmsav7_dregion = 0;
        cpu->pmsav8r_hdregion = 0;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V7)) {
        uint32_t nr = cpu->pmsav7_dregion;

        if (nr > 0xff) {
            error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                /* PMSAv8 */
                env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
                env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
                    env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
                }
            } else {
                env->pmsav7.drbar = g_new0(uint32_t, nr);
                env->pmsav7.drsr = g_new0(uint32_t, nr);
                env->pmsav7.dracr = g_new0(uint32_t, nr);
            }
        }
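
        /*
         * PMSAv8r cores (e.g. Cortex-R52) add a second, EL2-controlled
         * MPU; pmsav8r_hdregion is its region count.
         */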
        if (cpu->pmsav8r_hdregion > 0xff) {
            error_setg(errp, "PMSAv8 MPU EL2 #regions invalid %" PRIu32,
                       cpu->pmsav8r_hdregion);
            return;
        }

        if (cpu->pmsav8r_hdregion) {
            env->pmsav8.hprbar = g_new0(uint32_t, cpu->pmsav8r_hdregion);
            env->pmsav8.hprlar = g_new0(uint32_t, cpu->pmsav8r_hdregion);
        }
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        uint32_t nr = cpu->sau_sregion;

        if (nr > 0xff) {
            error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            env->sau.rbar = g_new0(uint32_t, nr);
            env->sau.rlar = g_new0(uint32_t, nr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        set_feature(env, ARM_FEATURE_VBAR);
    }

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled() && cpu_isar_feature(aa64_rme, cpu)) {
        arm_register_el_change_hook(cpu, &gt_rme_post_el_change, 0);
    }
#endif

    register_cp_regs_for_features(cpu);
    arm_cpu_register_gdb_regs_for_features(cpu);

    init_cpreg_list(cpu);

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    bool has_secure = cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY);

    /*
     * We must set cs->num_ases to the final value before
     * the first call to cpu_address_space_init.
     */
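    /* The ARMASIdx_* values are fixed, so count the highest index in use. */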
    if (cpu->tag_memory != NULL) {
        cs->num_ases = 3 + has_secure;
    } else {
        cs->num_ases = 1 + has_secure;
    }

    if (has_secure) {
        if (!cpu->secure_memory) {
            cpu->secure_memory = cs->memory;
        }
        cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
                               cpu->secure_memory);
    }

    if (cpu->tag_memory != NULL) {
        cpu_address_space_init(cs, ARMASIdx_TagNS, "cpu-tag-memory",
                               cpu->tag_memory);
        if (has_secure) {
            cpu_address_space_init(cs, ARMASIdx_TagS, "cpu-tag-memory",
                                   cpu->secure_tag_memory);
        }
    }

    cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);

    /* No core_count specified, default to smp_cpus. */
    if (cpu->core_count == -1) {
        cpu->core_count = smp_cpus;
    }
#endif

    if (tcg_enabled()) {
        int dcz_blocklen = 4 << cpu->dcz_blocksize;
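        /* dcz_blocksize is log2 of the block length in 4-byte words. */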

        /*
         * We only support DCZ blocklen that fits on one page.
         *
         * Architecturally this is always true. However TARGET_PAGE_SIZE
         * is variable and, for compatibility with -machine virt-2.7,
         * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
         * But even then, while the largest architectural DCZ blocklen
         * is 2KiB, no cpu actually uses such a large blocklen.
         */
        assert(dcz_blocklen <= TARGET_PAGE_SIZE);

        /*
         * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
         * both nibbles of each byte storing tag data may be written at once.
         * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
         */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            assert(dcz_blocklen >= 2 * TAG_GRANULE);
        }
    }

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    acc->parent_realize(dev, errp);
}

static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;
    const char *cpunamestr;

    cpuname = g_strsplit(cpu_model, ",", 1);
    cpunamestr = cpuname[0];
#ifdef CONFIG_USER_ONLY
    /* For backwards compatibility usermode emulation allows "-cpu any",
     * which has the same semantics as "-cpu max".
     */
    if (!strcmp(cpunamestr, "any")) {
        cpunamestr = "max";
    }
#endif
    typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

static Property arm_cpu_properties[] = {
    DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
    DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
                       mp_affinity, ARM64_AFFINITY_INVALID),
    DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
    DEFINE_PROP_END_OF_LIST()
};

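/* Pick the architecture name reported to gdb for this core. */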
static const gchar *arm_gdb_arch_name(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return "iwmmxt";
    }
    return "arm";
}

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps arm_sysemu_ops = {
    .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
    .asidx_from_attrs = arm_asidx_from_attrs,
    .write_elf32_note = arm_cpu_write_elf32_note,
    .write_elf64_note = arm_cpu_write_elf64_note,
    .virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
    .legacy_vmsd = &vmstate_arm_cpu,
};
#endif

#ifdef CONFIG_TCG
static const struct TCGCPUOps arm_tcg_ops = {
    .initialize = arm_translate_init,
    .synchronize_from_tb = arm_cpu_synchronize_from_tb,
    .debug_excp_handler = arm_debug_excp_handler,
    .restore_state_to_opc = arm_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
    .record_sigsegv = arm_cpu_record_sigsegv,
    .record_sigbus = arm_cpu_record_sigbus,
#else
    .tlb_fill = arm_cpu_tlb_fill,
    .cpu_exec_interrupt = arm_cpu_exec_interrupt,
    .do_interrupt = arm_cpu_do_interrupt,
    .do_transaction_failed = arm_cpu_do_transaction_failed,
    .do_unaligned_access = arm_cpu_do_unaligned_access,
    .adjust_watchpoint_address = arm_adjust_watchpoint_address,
    .debug_check_watchpoint = arm_debug_check_watchpoint,
    .debug_check_breakpoint = arm_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */

static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);
    DeviceClass *dc = DEVICE_CLASS(oc);
    ResettableClass *rc = RESETTABLE_CLASS(oc);

    device_class_set_parent_realize(dc, arm_cpu_realizefn,
                                    &acc->parent_realize);

    device_class_set_props(dc, arm_cpu_properties);

    resettable_class_set_parent_phases(rc, NULL, arm_cpu_reset_hold, NULL,
                                       &acc->parent_phases);

    cc->class_by_name = arm_cpu_class_by_name;
    cc->has_work = arm_cpu_has_work;
    cc->dump_state = arm_cpu_dump_state;
    cc->set_pc = arm_cpu_set_pc;
    cc->get_pc = arm_cpu_get_pc;
    cc->gdb_read_register = arm_cpu_gdb_read_register;
    cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &arm_sysemu_ops;
#endif
    cc->gdb_num_core_regs = 26;
    cc->gdb_arch_name = arm_gdb_arch_name;
    cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = arm_disas_set_info;

#ifdef CONFIG_TCG
    cc->tcg_ops = &arm_tcg_ops;
#endif /* CONFIG_TCG */
}

static void arm_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);

    acc->info = data;
    cc->gdb_core_xml_file = "arm-core.xml";
}

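/*
 * Register one leaf QOM type per CPU model: the type name is built from
 * info->name plus the "-" TYPE_ARM_CPU suffix, and per-model setup runs
 * through arm_cpu_instance_init.
 */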
void arm_cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_ARM_CPU,
        .instance_init = arm_cpu_instance_init,
        .class_init = info->class_init ?: cpu_register_class_init,
        .class_data = (void *)info,
    };

    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}

static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_align = __alignof__(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_finalize = arm_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};

static void arm_cpu_register_types(void)
{
    type_register_static(&arm_cpu_type_info);
}

type_init(arm_cpu_register_types)