/*
 * QEMU support -- ARM Power Control specific functions.
 *
 * Copyright (c) 2016 Jean-Christophe Dubois
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-qom.h"
#include "internals.h"
#include "arm-powerctl.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "sysemu/tcg.h"
#include "target/arm/multiprocessing.h"

#ifndef DEBUG_ARM_POWERCTL
#define DEBUG_ARM_POWERCTL 0
#endif

#define DPRINTF(fmt, args...) \
    do { \
        if (DEBUG_ARM_POWERCTL) { \
            fprintf(stderr, "[ARM]%s: " fmt, __func__, ##args); \
        } \
    } while (0)
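
/*
 * arm_get_cpu_by_id:
 * @id: MP affinity value to look up
 *
 * Walk the list of CPUs and return the CPUState whose MP affinity
 * matches @id, or NULL (after logging a guest error) if no such CPU
 * exists.
 */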
CPUState *arm_get_cpu_by_id(uint64_t id)
{
    CPUState *cpu;

    DPRINTF("cpu %" PRId64 "\n", id);

    CPU_FOREACH(cpu) {
        ARMCPU *armcpu = ARM_CPU(cpu);

        if (arm_cpu_mp_affinity(armcpu) == id) {
            return cpu;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR,
                  "[ARM]%s: Requesting unknown CPU %" PRId64 "\n",
                  __func__, id);

    return NULL;
}
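
/*
 * Power-on request parameters, allocated by arm_set_cpu_on() and
 * consumed (then freed) by arm_set_cpu_on_async_work() in the target
 * vCPU's context.
 */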
struct CpuOnInfo {
    uint64_t entry;
    uint64_t context_id;
    uint32_t target_el;
    bool target_aa64;
};
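
/*
 * Async work item queued by arm_set_cpu_on(); it runs in the target
 * vCPU's own context, so the reset and register setup below cannot
 * race with that vCPU executing guest code.
 */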
static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
                                      run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
    struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr;

    /* Initialize the cpu we are turning on */
    cpu_reset(target_cpu_state);
    arm_emulate_firmware_reset(target_cpu_state, info->target_el);
    target_cpu_state->halted = 0;

    /* We check if the started CPU is now at the correct level */
    assert(info->target_el == arm_current_el(&target_cpu->env));

    if (info->target_aa64) {
        target_cpu->env.xregs[0] = info->context_id;
    } else {
        target_cpu->env.regs[0] = info->context_id;
    }

    if (tcg_enabled()) {
        /* CP15 update requires rebuilding hflags */
        arm_rebuild_hflags(&target_cpu->env);
    }

    /* Start the new CPU at the requested address */
    cpu_set_pc(target_cpu_state, info->entry);

    g_free(info);

    /* Finally set the power status */
    assert(bql_locked());
    target_cpu->power_state = PSCI_ON;
}
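
/*
 * arm_set_cpu_on:
 * @cpuid: MP affinity of the CPU to power on
 * @entry: entry point address for the new CPU
 * @context_id: value to place in R0/X0 at entry
 * @target_el: exception level (1 to 3) the CPU should start in
 * @target_aa64: true to start the CPU in AArch64 state
 *
 * Validate the request, then queue the actual power-on work to run in
 * the target vCPU's context. Returns one of the QEMU_ARM_POWERCTL_*
 * codes; the caller must hold the BQL.
 */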
int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
                   uint32_t target_el, bool target_aa64)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    struct CpuOnInfo *info;

    assert(bql_locked());

    DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
            "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
            context_id);

    /* The requested EL needs to be in the 1 to 3 range */
    assert((target_el > 0) && (target_el < 4));

    if (target_aa64 && (entry & 3)) {
        /*
         * If we are booting in AArch64 mode then "entry" needs to be
         * 4-byte aligned.
         */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    /* Retrieve the cpu we are powering up */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        /* The cpu was not found */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_ON) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ALREADY_ON;
    }

    /*
     * The newly brought-up CPU is requested to enter the exception level
     * "target_el" and to be in the requested mode (AArch64 or AArch32).
     */

    if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) ||
        ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) {
        /* The CPU does not support the requested level */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) {
        /*
         * For now we don't support booting an AArch64 CPU in AArch32 mode
         * TODO: We should add this support later
         */
        qemu_log_mask(LOG_UNIMP,
                      "[ARM]%s: Starting AArch64 CPU %" PRId64
                      " in AArch32 mode is not supported yet\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    /*
     * If another CPU has powered the target on we are in the state
     * ON_PENDING and additional attempts to power on the CPU should
     * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
     * spec)
     */
    if (target_cpu->power_state == PSCI_ON_PENDING) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already powering on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ON_PENDING;
    }

    /*
     * To avoid racing with a CPU we are just kicking off, we do the
     * final bit of preparation for the work in the target CPU's
     * context.
     */
    info = g_new(struct CpuOnInfo, 1);
    info->entry = entry;
    info->context_id = context_id;
    info->target_el = target_el;
    info->target_aa64 = target_aa64;

    async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work,
                     RUN_ON_CPU_HOST_PTR(info));

    /* We are good to go */
    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
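
/*
 * Async work item queued by arm_set_cpu_on_and_reset(): reset the
 * target CPU, unhalt it and mark it powered on, without touching its
 * entry point or registers.
 */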
static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state,
                                                run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);

    /* Initialize the cpu we are turning on */
    cpu_reset(target_cpu_state);
    target_cpu_state->halted = 0;

    /* Finally set the power status */
    assert(bql_locked());
    target_cpu->power_state = PSCI_ON;
}
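
/*
 * arm_set_cpu_on_and_reset:
 * @cpuid: MP affinity of the CPU to power on
 *
 * Power on a CPU and leave it in its post-reset state, without
 * setting an entry point or context value. Returns one of the
 * QEMU_ARM_POWERCTL_* codes; the caller must hold the BQL.
 */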
int arm_set_cpu_on_and_reset(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    assert(bql_locked());

    /* Retrieve the cpu we are powering up */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        /* The cpu was not found */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_ON) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ALREADY_ON;
    }

    /*
     * If another CPU has powered the target on we are in the state
     * ON_PENDING and additional attempts to power on the CPU should
     * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
     * spec)
     */
    if (target_cpu->power_state == PSCI_ON_PENDING) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already powering on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ON_PENDING;
    }

    async_run_on_cpu(target_cpu_state, arm_set_cpu_on_and_reset_async_work,
                     RUN_ON_CPU_NULL);

    /* We are good to go */
    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
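
/*
 * Async work item queued by arm_set_cpu_off(): mark the target CPU
 * powered off and halt it in its own context.
 */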
static void arm_set_cpu_off_async_work(CPUState *target_cpu_state,
                                       run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);

    assert(bql_locked());
    target_cpu->power_state = PSCI_OFF;
    target_cpu_state->halted = 1;
    target_cpu_state->exception_index = EXCP_HLT;
}
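
/*
 * arm_set_cpu_off:
 * @cpuid: MP affinity of the CPU to power off
 *
 * Queue work to power off the given CPU in its own vCPU context.
 * Returns one of the QEMU_ARM_POWERCTL_* codes; the caller must hold
 * the BQL.
 */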
int arm_set_cpu_off(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    assert(bql_locked());

    DPRINTF("cpu %" PRId64 "\n", cpuid);

    /* Retrieve the cpu we are powering down */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }
    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_OFF) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already off\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_IS_OFF;
    }

    /* Queue work to run under the target vCPU's context */
    async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work,
                     RUN_ON_CPU_NULL);

    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
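
/*
 * Async work item queued by arm_reset_cpu(): reset the target CPU in
 * its own context.
 */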
static void arm_reset_cpu_async_work(CPUState *target_cpu_state,
                                     run_on_cpu_data data)
{
    /* Reset the cpu */
    cpu_reset(target_cpu_state);
}
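
/*
 * arm_reset_cpu:
 * @cpuid: MP affinity of the CPU to reset
 *
 * Queue a reset of the given CPU to run in its own vCPU context; the
 * CPU must currently be powered on. Returns one of the
 * QEMU_ARM_POWERCTL_* codes; the caller must hold the BQL.
 */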
int arm_reset_cpu(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    assert(bql_locked());

    DPRINTF("cpu %" PRId64 "\n", cpuid);

    /* Retrieve the cpu we are resetting */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }
    target_cpu = ARM_CPU(target_cpu_state);

    if (target_cpu->power_state == PSCI_OFF) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is off\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_IS_OFF;
    }

    /* Queue work to run under the target vCPU's context */
    async_run_on_cpu(target_cpu_state, arm_reset_cpu_async_work,
                     RUN_ON_CPU_NULL);

    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}