target-arm queue:
 * hw/arm: Move raspberrypi-fw-defs.h to the include/hw/arm/ folder
 * hw/arm/exynos4210: Get arm_boot_info declaration from 'hw/arm/boot'
 * xlnx devices: remove deprecated device reset
 * xlnx-bbram: hw/nvram: Use dot in device type name
 * elf2dmp: fix coverity issues
 * elf2dmp: convert to g_malloc, g_new and g_free
 * target/arm: Fix CNTPCT_EL0 trapping from EL0 when HCR_EL2.E2H is 0
 * hw/arm: refactor virt PPI logic
 * arm/kvm: convert to kvm_set_one_reg, kvm_get_one_reg
 * target/arm: Permit T32 LDM with single register
 * smmuv3: Advertise SMMUv3.1-XNX
 * target/arm: Implement FEAT_HPMN0
 * Remove some unnecessary include lines
 * target/arm/arm-powerctl: Correctly init CPUs when powered on to lower EL
 * hw/timer/npcm7xx_timer: Prevent timer from counting down past zero

-----BEGIN PGP SIGNATURE-----

iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmUxMF4ZHHBldGVyLm1h
eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3oJND/4p64q0Wxq8x8yXCDUZAHME
lZe2liBPBkqZusGfK0O4CpClwGbM5+8tMeLaRgSOUgJ/WGFiLCGAKEKB0S7EiCa5
1bNvVn+a7cdDj7FdYf+Dvp5fNZZIus4w+CUlUaiRyDhIfYquz53J1RD1wN5+SQ/I
g6JQRp2gONeqGM5hT+0v2J/wGMmhuI5XO+PtQ1QNGoUnAA4QNof1thYjqdTJxzfz
V2CUSOKnAT/PDcUWoy8BVPDDE+wYTnjTO1j/ZsQvnNQm7r18OiMUn85teLq1JtB+
T3vyVZ2f2gc8lAgkKy5n3NH5fmLVgbO0WXgpWLHNkcp+shZMM6J5J/u/P6B/wk95
DMzQy4slu/UfWMvsaxq+OjejhAtbdiIOeNfF6dAMy2NAyZplEAjlP8dsFrqAdACL
9m/DA4ODAV6OJ3E0zQ0dI4o6kr+/wbPVseLklqn3Ss0dndjU1K9XR0qpC8OruUJq
4h6kl5q6V3BHAoELvBtAqb0yHYdqhLqznpO8HsrUEmU5eTjDaOyyI4HW+AY5GG1R
dtvrCLSiPe0EMartMMtezaB2GxQb9O7e+OI3XL2zVxb1F+QQ+vRZE3zVIdXm+Ev4
oBztF1peZC3c8zurjr7/MxnDSnzynpkSR1zOY8+WJnAqpQ+C1YvdF6/Llwn7IMHw
ZHh6sGzQsaAu7u/DW9yY5w==
=WreO
-----END PGP SIGNATURE-----

Merge tag 'pull-target-arm-20231019' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * hw/arm: Move raspberrypi-fw-defs.h to the include/hw/arm/ folder
 * hw/arm/exynos4210: Get arm_boot_info declaration from 'hw/arm/boot'
 * xlnx devices: remove deprecated device reset
 * xlnx-bbram: hw/nvram: Use dot in device type name
 * elf2dmp: fix coverity issues
 * elf2dmp: convert to g_malloc, g_new and g_free
 * target/arm: Fix CNTPCT_EL0 trapping from EL0 when HCR_EL2.E2H is 0
 * hw/arm: refactor virt PPI logic
 * arm/kvm: convert to kvm_set_one_reg, kvm_get_one_reg
 * target/arm: Permit T32 LDM with single register
 * smmuv3: Advertise SMMUv3.1-XNX
 * target/arm: Implement FEAT_HPMN0
 * Remove some unnecessary include lines
 * target/arm/arm-powerctl: Correctly init CPUs when powered on to lower EL
 * hw/timer/npcm7xx_timer: Prevent timer from counting down past zero

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmUxMF4ZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3oJND/4p64q0Wxq8x8yXCDUZAHME
# lZe2liBPBkqZusGfK0O4CpClwGbM5+8tMeLaRgSOUgJ/WGFiLCGAKEKB0S7EiCa5
# 1bNvVn+a7cdDj7FdYf+Dvp5fNZZIus4w+CUlUaiRyDhIfYquz53J1RD1wN5+SQ/I
# g6JQRp2gONeqGM5hT+0v2J/wGMmhuI5XO+PtQ1QNGoUnAA4QNof1thYjqdTJxzfz
# V2CUSOKnAT/PDcUWoy8BVPDDE+wYTnjTO1j/ZsQvnNQm7r18OiMUn85teLq1JtB+
# T3vyVZ2f2gc8lAgkKy5n3NH5fmLVgbO0WXgpWLHNkcp+shZMM6J5J/u/P6B/wk95
# DMzQy4slu/UfWMvsaxq+OjejhAtbdiIOeNfF6dAMy2NAyZplEAjlP8dsFrqAdACL
# 9m/DA4ODAV6OJ3E0zQ0dI4o6kr+/wbPVseLklqn3Ss0dndjU1K9XR0qpC8OruUJq
# 4h6kl5q6V3BHAoELvBtAqb0yHYdqhLqznpO8HsrUEmU5eTjDaOyyI4HW+AY5GG1R
# dtvrCLSiPe0EMartMMtezaB2GxQb9O7e+OI3XL2zVxb1F+QQ+vRZE3zVIdXm+Ev4
# oBztF1peZC3c8zurjr7/MxnDSnzynpkSR1zOY8+WJnAqpQ+C1YvdF6/Llwn7IMHw
# ZHh6sGzQsaAu7u/DW9yY5w==
# =WreO
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 19 Oct 2023 06:34:22 PDT
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg: aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20231019' of https://git.linaro.org/people/pmaydell/qemu-arm: (24 commits)
  contrib/elf2dmp: Use g_malloc(), g_new() and g_free()
  hw/timer/npcm7xx_timer: Prevent timer from counting down past zero
  target/arm/arm-powerctl: Correctly init CPUs when powered on to lower EL
  target/arm/common-semi-target.h: Remove unnecessary boot.h include
  target/arm/kvm64.c: Remove unused include
  target/arm: Implement FEAT_HPMN0
  hw/arm/smmuv3: Advertise SMMUv3.1-XNX feature
  hw/arm/smmuv3: Sort ID register setting into field order
  hw/arm/smmuv3: Update ID register bit field definitions
  target/arm: Permit T32 LDM with single register
  arm/kvm: convert to kvm_get_one_reg
  arm/kvm: convert to kvm_set_one_reg
  hw/arm/sbsa-ref: use bsa.h for PPI definitions
  include/hw/arm: move BSA definitions to bsa.h
  {include/}hw/arm: refactor virt PPI logic
  target/arm: Fix CNTPCT_EL0 trapping from EL0 when HCR_EL2.E2H is 0
  elf2dmp: check array bounds in pdb_get_file_size
  elf2dmp: limit print length for sign_rsds
  xlnx-bbram: hw/nvram: Use dot in device type name
  xlnx-versal-efuse: hw/nvram: Remove deprecated device reset
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit caa75cc56e
@ -885,7 +885,7 @@ S: Odd Fixes
F: hw/arm/raspi.c
F: hw/arm/raspi_platform.h
F: hw/*/bcm283*
F: include/hw/arm/raspi*
F: include/hw/arm/rasp*
F: include/hw/*/bcm283*
F: docs/system/arm/raspi.rst
@ -72,10 +72,7 @@ int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf)
}
}

ps->block = malloc(sizeof(*ps->block) * ps->block_nr);
if (!ps->block) {
return 1;
}
ps->block = g_new(struct pa_block, ps->block_nr);

for (i = 0; i < phdr_nr; i++) {
if (phdr[i].p_type == PT_LOAD) {
@ -97,7 +94,7 @@ int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf)
void pa_space_destroy(struct pa_space *ps)
{
ps->block_nr = 0;
free(ps->block);
g_free(ps->block);
}

void va_space_set_dtb(struct va_space *vs, uint64_t dtb)
@ -120,14 +120,11 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb,
|
||||
}
|
||||
}
|
||||
|
||||
kdbg = malloc(kdbg_hdr.Size);
|
||||
if (!kdbg) {
|
||||
return NULL;
|
||||
}
|
||||
kdbg = g_malloc(kdbg_hdr.Size);
|
||||
|
||||
if (va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) {
|
||||
eprintf("Failed to extract entire KDBG\n");
|
||||
free(kdbg);
|
||||
g_free(kdbg);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -478,7 +475,7 @@ static bool pe_check_pdb_name(uint64_t base, void *start_addr,
|
||||
}
|
||||
|
||||
if (memcmp(&rsds->Signature, sign_rsds, sizeof(sign_rsds))) {
|
||||
eprintf("CodeView signature is \'%.4s\', \'%s\' expected\n",
|
||||
eprintf("CodeView signature is \'%.4s\', \'%.4s\' expected\n",
|
||||
rsds->Signature, sign_rsds);
|
||||
return false;
|
||||
}
|
||||
@ -643,7 +640,7 @@ int main(int argc, char *argv[])
|
||||
}
|
||||
|
||||
out_kdbg:
|
||||
free(kdbg);
|
||||
g_free(kdbg);
|
||||
out_pdb:
|
||||
pdb_exit(&pdb);
|
||||
out_pdb_file:
|
||||
|
@ -25,6 +25,10 @@
|
||||
|
||||
static uint32_t pdb_get_file_size(const struct pdb_reader *r, unsigned idx)
|
||||
{
|
||||
if (idx >= r->ds.toc->num_files) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return r->ds.toc->file_size[idx];
|
||||
}
|
||||
|
||||
@ -90,18 +94,18 @@ uint64_t pdb_resolve(uint64_t img_base, struct pdb_reader *r, const char *name)
|
||||
|
||||
static void pdb_reader_ds_exit(struct pdb_reader *r)
|
||||
{
|
||||
free(r->ds.toc);
|
||||
g_free(r->ds.toc);
|
||||
}
|
||||
|
||||
static void pdb_exit_symbols(struct pdb_reader *r)
|
||||
{
|
||||
free(r->modimage);
|
||||
free(r->symbols);
|
||||
g_free(r->modimage);
|
||||
g_free(r->symbols);
|
||||
}
|
||||
|
||||
static void pdb_exit_segments(struct pdb_reader *r)
|
||||
{
|
||||
free(r->segs);
|
||||
g_free(r->segs);
|
||||
}
|
||||
|
||||
static void *pdb_ds_read(const PDB_DS_HEADER *header,
|
||||
@ -116,10 +120,7 @@ static void *pdb_ds_read(const PDB_DS_HEADER *header,
|
||||
|
||||
nBlocks = (size + header->block_size - 1) / header->block_size;
|
||||
|
||||
buffer = malloc(nBlocks * header->block_size);
|
||||
if (!buffer) {
|
||||
return NULL;
|
||||
}
|
||||
buffer = g_malloc(nBlocks * header->block_size);
|
||||
|
||||
for (i = 0; i < nBlocks; i++) {
|
||||
memcpy(buffer + i * header->block_size, (const char *)header +
|
||||
@ -159,16 +160,17 @@ static void *pdb_ds_read_file(struct pdb_reader* r, uint32_t file_number)
|
||||
|
||||
static int pdb_init_segments(struct pdb_reader *r)
|
||||
{
|
||||
char *segs;
|
||||
unsigned stream_idx = r->segments;
|
||||
|
||||
segs = pdb_ds_read_file(r, stream_idx);
|
||||
if (!segs) {
|
||||
r->segs = pdb_ds_read_file(r, stream_idx);
|
||||
if (!r->segs) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
r->segs = segs;
|
||||
r->segs_size = pdb_get_file_size(r, stream_idx);
|
||||
if (!r->segs_size) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -201,7 +203,7 @@ static int pdb_init_symbols(struct pdb_reader *r)
|
||||
return 0;
|
||||
|
||||
out_symbols:
|
||||
free(symbols);
|
||||
g_free(symbols);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -258,7 +260,7 @@ static int pdb_reader_init(struct pdb_reader *r, void *data)
|
||||
out_sym:
|
||||
pdb_exit_symbols(r);
|
||||
out_root:
|
||||
free(r->ds.root);
|
||||
g_free(r->ds.root);
|
||||
out_ds:
|
||||
pdb_reader_ds_exit(r);
|
||||
|
||||
@ -269,7 +271,7 @@ static void pdb_reader_exit(struct pdb_reader *r)
|
||||
{
|
||||
pdb_exit_segments(r);
|
||||
pdb_exit_symbols(r);
|
||||
free(r->ds.root);
|
||||
g_free(r->ds.root);
|
||||
pdb_reader_ds_exit(r);
|
||||
}
|
||||
|
||||
|
@ -94,10 +94,7 @@ static int init_states(QEMU_Elf *qe)
|
||||
|
||||
printf("%zu CPU states has been found\n", cpu_nr);
|
||||
|
||||
qe->state = malloc(sizeof(*qe->state) * cpu_nr);
|
||||
if (!qe->state) {
|
||||
return 1;
|
||||
}
|
||||
qe->state = g_new(QEMUCPUState*, cpu_nr);
|
||||
|
||||
cpu_nr = 0;
|
||||
|
||||
@ -115,7 +112,7 @@ static int init_states(QEMU_Elf *qe)
|
||||
|
||||
static void exit_states(QEMU_Elf *qe)
|
||||
{
|
||||
free(qe->state);
|
||||
g_free(qe->state);
|
||||
}
|
||||
|
||||
static bool check_ehdr(QEMU_Elf *qe)
|
||||
|
@ -46,6 +46,7 @@ the following architecture extensions:
|
||||
- FEAT_HCX (Support for the HCRX_EL2 register)
|
||||
- FEAT_HPDS (Hierarchical permission disables)
|
||||
- FEAT_HPDS2 (Translation table page-based hardware attributes)
|
||||
- FEAT_HPMN0 (Setting of MDCR_EL2.HPMN to zero)
|
||||
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
|
||||
- FEAT_IDST (ID space trap handling)
|
||||
- FEAT_IESB (Implicit error synchronization event)
|
||||
|
@ -722,84 +722,35 @@ static void do_cpu_reset(void *opaque)
|
||||
|
||||
cpu_set_pc(cs, entry);
|
||||
} else {
|
||||
/* If we are booting Linux then we need to check whether we are
|
||||
* booting into secure or non-secure state and adjust the state
|
||||
* accordingly. Out of reset, ARM is defined to be in secure state
|
||||
* (SCR.NS = 0), we change that here if non-secure boot has been
|
||||
* requested.
|
||||
/*
|
||||
* If we are booting Linux then we might need to do so at:
|
||||
* - AArch64 NS EL2 or NS EL1
|
||||
* - AArch32 Secure SVC (EL3)
|
||||
* - AArch32 NS Hyp (EL2)
|
||||
* - AArch32 NS SVC (EL1)
|
||||
* Configure the CPU in the way boot firmware would do to
|
||||
* drop us down to the appropriate level.
|
||||
*/
|
||||
if (arm_feature(env, ARM_FEATURE_EL3)) {
|
||||
/* AArch64 is defined to come out of reset into EL3 if enabled.
|
||||
* If we are booting Linux then we need to adjust our EL as
|
||||
* Linux expects us to be in EL2 or EL1. AArch32 resets into
|
||||
* SVC, which Linux expects, so no privilege/exception level to
|
||||
* adjust.
|
||||
*/
|
||||
if (env->aarch64) {
|
||||
env->cp15.scr_el3 |= SCR_RW;
|
||||
if (arm_feature(env, ARM_FEATURE_EL2)) {
|
||||
env->cp15.hcr_el2 |= HCR_RW;
|
||||
env->pstate = PSTATE_MODE_EL2h;
|
||||
} else {
|
||||
env->pstate = PSTATE_MODE_EL1h;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_pauth, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_API | SCR_APK;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_mte, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_ATA;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_sve, cpu)) {
|
||||
env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
|
||||
env->vfp.zcr_el[3] = 0xf;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_sme, cpu)) {
|
||||
env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
|
||||
env->cp15.scr_el3 |= SCR_ENTP2;
|
||||
env->vfp.smcr_el[3] = 0xf;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_hcx, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_HXEN;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_fgt, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_FGTEN;
|
||||
}
|
||||
int target_el = arm_feature(env, ARM_FEATURE_EL2) ? 2 : 1;
|
||||
|
||||
/* AArch64 kernels never boot in secure mode */
|
||||
assert(!info->secure_boot);
|
||||
/* This hook is only supported for AArch32 currently:
|
||||
* bootloader_aarch64[] will not call the hook, and
|
||||
* the code above has already dropped us into EL2 or EL1.
|
||||
*/
|
||||
assert(!info->secure_board_setup);
|
||||
}
|
||||
|
||||
if (arm_feature(env, ARM_FEATURE_EL2)) {
|
||||
/* If we have EL2 then Linux expects the HVC insn to work */
|
||||
env->cp15.scr_el3 |= SCR_HCE;
|
||||
}
|
||||
|
||||
/* Set to non-secure if not a secure boot */
|
||||
if (!info->secure_boot &&
|
||||
(cs != first_cpu || !info->secure_board_setup)) {
|
||||
/* Linux expects non-secure state */
|
||||
env->cp15.scr_el3 |= SCR_NS;
|
||||
/* Set NSACR.{CP11,CP10} so NS can access the FPU */
|
||||
env->cp15.nsacr |= 3 << 10;
|
||||
}
|
||||
}
|
||||
|
||||
if (!env->aarch64 && !info->secure_boot &&
|
||||
arm_feature(env, ARM_FEATURE_EL2)) {
|
||||
if (env->aarch64) {
|
||||
/*
|
||||
* This is an AArch32 boot not to Secure state, and
|
||||
* we have Hyp mode available, so boot the kernel into
|
||||
* Hyp mode. This is not how the CPU comes out of reset,
|
||||
* so we need to manually put it there.
|
||||
* AArch64 kernels never boot in secure mode, and we don't
|
||||
* support the secure_board_setup hook for AArch64.
|
||||
*/
|
||||
cpsr_write(env, ARM_CPU_MODE_HYP, CPSR_M, CPSRWriteRaw);
|
||||
assert(!info->secure_boot);
|
||||
assert(!info->secure_board_setup);
|
||||
} else {
|
||||
if (arm_feature(env, ARM_FEATURE_EL3) &&
|
||||
(info->secure_boot ||
|
||||
(info->secure_board_setup && cs == first_cpu))) {
|
||||
/* Start this CPU in Secure SVC */
|
||||
target_el = 3;
|
||||
}
|
||||
}
|
||||
|
||||
arm_emulate_firmware_reset(cs, target_el);
|
||||
|
||||
if (cs == first_cpu) {
|
||||
AddressSpace *as = arm_boot_address_space(cpu, info);
|
||||
|
||||
|
@ -2,6 +2,7 @@
|
||||
* ARM SBSA Reference Platform emulation
|
||||
*
|
||||
* Copyright (c) 2018 Linaro Limited
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Written by Hongbo Zhang <hongbo.zhang@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
@ -30,6 +31,7 @@
|
||||
#include "exec/hwaddr.h"
|
||||
#include "kvm_arm.h"
|
||||
#include "hw/arm/boot.h"
|
||||
#include "hw/arm/bsa.h"
|
||||
#include "hw/arm/fdt.h"
|
||||
#include "hw/arm/smmuv3.h"
|
||||
#include "hw/block/flash.h"
|
||||
@ -55,14 +57,6 @@
|
||||
#define NUM_SMMU_IRQS 4
|
||||
#define NUM_SATA_PORTS 6
|
||||
|
||||
#define VIRTUAL_PMU_IRQ 7
|
||||
#define ARCH_GIC_MAINT_IRQ 9
|
||||
#define ARCH_TIMER_VIRT_IRQ 11
|
||||
#define ARCH_TIMER_S_EL1_IRQ 13
|
||||
#define ARCH_TIMER_NS_EL1_IRQ 14
|
||||
#define ARCH_TIMER_NS_EL2_IRQ 10
|
||||
#define ARCH_TIMER_NS_EL2_VIRT_IRQ 12
|
||||
|
||||
enum {
|
||||
SBSA_FLASH,
|
||||
SBSA_MEM,
|
||||
@ -479,7 +473,7 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
|
||||
*/
|
||||
for (i = 0; i < smp_cpus; i++) {
|
||||
DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
|
||||
int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
|
||||
int intidbase = NUM_IRQS + i * GIC_INTERNAL;
|
||||
int irq;
|
||||
/*
|
||||
* Mapping from the output timer irq lines from the CPU to the
|
||||
@ -496,14 +490,17 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
|
||||
for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
|
||||
qdev_connect_gpio_out(cpudev, irq,
|
||||
qdev_get_gpio_in(sms->gic,
|
||||
ppibase + timer_irq[irq]));
|
||||
intidbase + timer_irq[irq]));
|
||||
}
|
||||
|
||||
qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
|
||||
qdev_get_gpio_in(sms->gic, ppibase
|
||||
qdev_get_gpio_in(sms->gic,
|
||||
intidbase
|
||||
+ ARCH_GIC_MAINT_IRQ));
|
||||
|
||||
qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
|
||||
qdev_get_gpio_in(sms->gic, ppibase
|
||||
qdev_get_gpio_in(sms->gic,
|
||||
intidbase
|
||||
+ VIRTUAL_PMU_IRQ));
|
||||
|
||||
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
|
||||
|
@ -38,33 +38,71 @@ REG32(IDR0, 0x0)
|
||||
FIELD(IDR0, S1P, 1 , 1)
|
||||
FIELD(IDR0, TTF, 2 , 2)
|
||||
FIELD(IDR0, COHACC, 4 , 1)
|
||||
FIELD(IDR0, BTM, 5 , 1)
|
||||
FIELD(IDR0, HTTU, 6 , 2)
|
||||
FIELD(IDR0, DORMHINT, 8 , 1)
|
||||
FIELD(IDR0, HYP, 9 , 1)
|
||||
FIELD(IDR0, ATS, 10, 1)
|
||||
FIELD(IDR0, NS1ATS, 11, 1)
|
||||
FIELD(IDR0, ASID16, 12, 1)
|
||||
FIELD(IDR0, MSI, 13, 1)
|
||||
FIELD(IDR0, SEV, 14, 1)
|
||||
FIELD(IDR0, ATOS, 15, 1)
|
||||
FIELD(IDR0, PRI, 16, 1)
|
||||
FIELD(IDR0, VMW, 17, 1)
|
||||
FIELD(IDR0, VMID16, 18, 1)
|
||||
FIELD(IDR0, CD2L, 19, 1)
|
||||
FIELD(IDR0, VATOS, 20, 1)
|
||||
FIELD(IDR0, TTENDIAN, 21, 2)
|
||||
FIELD(IDR0, ATSRECERR, 23, 1)
|
||||
FIELD(IDR0, STALL_MODEL, 24, 2)
|
||||
FIELD(IDR0, TERM_MODEL, 26, 1)
|
||||
FIELD(IDR0, STLEVEL, 27, 2)
|
||||
FIELD(IDR0, RME_IMPL, 30, 1)
|
||||
|
||||
REG32(IDR1, 0x4)
|
||||
FIELD(IDR1, SIDSIZE, 0 , 6)
|
||||
FIELD(IDR1, SSIDSIZE, 6 , 5)
|
||||
FIELD(IDR1, PRIQS, 11, 5)
|
||||
FIELD(IDR1, EVENTQS, 16, 5)
|
||||
FIELD(IDR1, CMDQS, 21, 5)
|
||||
FIELD(IDR1, ATTR_PERMS_OVR, 26, 1)
|
||||
FIELD(IDR1, ATTR_TYPES_OVR, 27, 1)
|
||||
FIELD(IDR1, REL, 28, 1)
|
||||
FIELD(IDR1, QUEUES_PRESET, 29, 1)
|
||||
FIELD(IDR1, TABLES_PRESET, 30, 1)
|
||||
FIELD(IDR1, ECMDQ, 31, 1)
|
||||
|
||||
#define SMMU_IDR1_SIDSIZE 16
|
||||
#define SMMU_CMDQS 19
|
||||
#define SMMU_EVENTQS 19
|
||||
|
||||
REG32(IDR2, 0x8)
|
||||
FIELD(IDR2, BA_VATOS, 0, 10)
|
||||
|
||||
REG32(IDR3, 0xc)
|
||||
FIELD(IDR3, HAD, 2, 1);
|
||||
FIELD(IDR3, PBHA, 3, 1);
|
||||
FIELD(IDR3, XNX, 4, 1);
|
||||
FIELD(IDR3, PPS, 5, 1);
|
||||
FIELD(IDR3, MPAM, 7, 1);
|
||||
FIELD(IDR3, FWB, 8, 1);
|
||||
FIELD(IDR3, STT, 9, 1);
|
||||
FIELD(IDR3, RIL, 10, 1);
|
||||
FIELD(IDR3, BBML, 11, 2);
|
||||
FIELD(IDR3, E0PD, 13, 1);
|
||||
FIELD(IDR3, PTWNNC, 14, 1);
|
||||
FIELD(IDR3, DPT, 15, 1);
|
||||
|
||||
REG32(IDR4, 0x10)
|
||||
|
||||
REG32(IDR5, 0x14)
|
||||
FIELD(IDR5, OAS, 0, 3);
|
||||
FIELD(IDR5, GRAN4K, 4, 1);
|
||||
FIELD(IDR5, GRAN16K, 5, 1);
|
||||
FIELD(IDR5, GRAN64K, 6, 1);
|
||||
FIELD(IDR5, VAX, 10, 2);
|
||||
FIELD(IDR5, STALL_MAX, 16, 16);
|
||||
|
||||
#define SMMU_IDR5_OAS 4
|
||||
|
||||
|
@ -278,15 +278,19 @@ static void smmuv3_init_regs(SMMUv3State *s)
|
||||
s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
|
||||
s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);
|
||||
|
||||
s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
|
||||
s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
|
||||
if (FIELD_EX32(s->idr[0], IDR0, S2P)) {
|
||||
/* XNX is a stage-2-specific feature */
|
||||
s->idr[3] = FIELD_DP32(s->idr[3], IDR3, XNX, 1);
|
||||
}
|
||||
s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
|
||||
s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);
|
||||
|
||||
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
|
||||
/* 4K, 16K and 64K granule support */
|
||||
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
|
||||
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
|
||||
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
|
||||
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
|
||||
|
||||
s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
|
||||
s->cmdq.prod = 0;
|
||||
|
@ -601,21 +601,21 @@ build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
|
||||
* The interrupt values are the same with the device tree when adding 16
|
||||
*/
|
||||
/* Secure EL1 timer GSIV */
|
||||
build_append_int_noprefix(table_data, ARCH_TIMER_S_EL1_IRQ + 16, 4);
|
||||
build_append_int_noprefix(table_data, ARCH_TIMER_S_EL1_IRQ, 4);
|
||||
/* Secure EL1 timer Flags */
|
||||
build_append_int_noprefix(table_data, irqflags, 4);
|
||||
/* Non-Secure EL1 timer GSIV */
|
||||
build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL1_IRQ + 16, 4);
|
||||
build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL1_IRQ, 4);
|
||||
/* Non-Secure EL1 timer Flags */
|
||||
build_append_int_noprefix(table_data, irqflags |
|
||||
1UL << 2, /* Always-on Capability */
|
||||
4);
|
||||
/* Virtual timer GSIV */
|
||||
build_append_int_noprefix(table_data, ARCH_TIMER_VIRT_IRQ + 16, 4);
|
||||
build_append_int_noprefix(table_data, ARCH_TIMER_VIRT_IRQ, 4);
|
||||
/* Virtual Timer Flags */
|
||||
build_append_int_noprefix(table_data, irqflags, 4);
|
||||
/* Non-Secure EL2 timer GSIV */
|
||||
build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL2_IRQ + 16, 4);
|
||||
build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL2_IRQ, 4);
|
||||
/* Non-Secure EL2 timer Flags */
|
||||
build_append_int_noprefix(table_data, irqflags, 4);
|
||||
/* CntReadBase Physical address */
|
||||
@ -729,9 +729,9 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
|
||||
for (i = 0; i < MACHINE(vms)->smp.cpus; i++) {
|
||||
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));
|
||||
uint64_t physical_base_address = 0, gich = 0, gicv = 0;
|
||||
uint32_t vgic_interrupt = vms->virt ? PPI(ARCH_GIC_MAINT_IRQ) : 0;
|
||||
uint32_t vgic_interrupt = vms->virt ? ARCH_GIC_MAINT_IRQ : 0;
|
||||
uint32_t pmu_interrupt = arm_feature(&armcpu->env, ARM_FEATURE_PMU) ?
|
||||
PPI(VIRTUAL_PMU_IRQ) : 0;
|
||||
VIRTUAL_PMU_IRQ : 0;
|
||||
|
||||
if (vms->gic_version == VIRT_GIC_VERSION_2) {
|
||||
physical_base_address = memmap[VIRT_GIC_CPU].base;
|
||||
|
@ -366,10 +366,14 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
|
||||
}
|
||||
qemu_fdt_setprop(ms->fdt, "/timer", "always-on", NULL, 0);
|
||||
qemu_fdt_setprop_cells(ms->fdt, "/timer", "interrupts",
|
||||
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_S_EL1_IRQ, irqflags,
|
||||
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL1_IRQ, irqflags,
|
||||
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_VIRT_IRQ, irqflags,
|
||||
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags);
|
||||
GIC_FDT_IRQ_TYPE_PPI,
|
||||
INTID_TO_PPI(ARCH_TIMER_S_EL1_IRQ), irqflags,
|
||||
GIC_FDT_IRQ_TYPE_PPI,
|
||||
INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ), irqflags,
|
||||
GIC_FDT_IRQ_TYPE_PPI,
|
||||
INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ), irqflags,
|
||||
GIC_FDT_IRQ_TYPE_PPI,
|
||||
INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), irqflags);
|
||||
}
|
||||
|
||||
static void fdt_add_cpu_nodes(const VirtMachineState *vms)
|
||||
@ -800,7 +804,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
|
||||
*/
|
||||
for (i = 0; i < smp_cpus; i++) {
|
||||
DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
|
||||
int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
|
||||
int intidbase = NUM_IRQS + i * GIC_INTERNAL;
|
||||
/* Mapping from the output timer irq lines from the CPU to the
|
||||
* GIC PPI inputs we use for the virt board.
|
||||
*/
|
||||
@ -814,22 +818,22 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
|
||||
for (unsigned irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
|
||||
qdev_connect_gpio_out(cpudev, irq,
|
||||
qdev_get_gpio_in(vms->gic,
|
||||
ppibase + timer_irq[irq]));
|
||||
intidbase + timer_irq[irq]));
|
||||
}
|
||||
|
||||
if (vms->gic_version != VIRT_GIC_VERSION_2) {
|
||||
qemu_irq irq = qdev_get_gpio_in(vms->gic,
|
||||
ppibase + ARCH_GIC_MAINT_IRQ);
|
||||
intidbase + ARCH_GIC_MAINT_IRQ);
|
||||
qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
|
||||
0, irq);
|
||||
} else if (vms->virt) {
|
||||
qemu_irq irq = qdev_get_gpio_in(vms->gic,
|
||||
ppibase + ARCH_GIC_MAINT_IRQ);
|
||||
intidbase + ARCH_GIC_MAINT_IRQ);
|
||||
sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq);
|
||||
}
|
||||
|
||||
qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
|
||||
qdev_get_gpio_in(vms->gic, ppibase
|
||||
qdev_get_gpio_in(vms->gic, intidbase
|
||||
+ VIRTUAL_PMU_IRQ));
|
||||
|
||||
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
|
||||
@ -1989,7 +1993,7 @@ static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
|
||||
if (pmu) {
|
||||
assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU));
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ));
|
||||
kvm_arm_pmu_set_irq(cpu, VIRTUAL_PMU_IRQ);
|
||||
}
|
||||
kvm_arm_pmu_init(cpu);
|
||||
}
|
||||
|
@ -12,7 +12,7 @@
|
||||
#include "migration/vmstate.h"
|
||||
#include "hw/irq.h"
|
||||
#include "hw/misc/bcm2835_mbox_defs.h"
|
||||
#include "hw/misc/raspberrypi-fw-defs.h"
|
||||
#include "hw/arm/raspberrypi-fw-defs.h"
|
||||
#include "sysemu/dma.h"
|
||||
#include "qemu/log.h"
|
||||
#include "qemu/module.h"
|
||||
|
@ -2,6 +2,7 @@
|
||||
* QEMU model of the Xilinx BBRAM Battery Backed RAM
|
||||
*
|
||||
* Copyright (c) 2014-2021 Xilinx Inc.
|
||||
* Copyright (c) 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
@ -416,9 +417,9 @@ static RegisterAccessInfo bbram_ctrl_regs_info[] = {
|
||||
}
|
||||
};
|
||||
|
||||
static void bbram_ctrl_reset(DeviceState *dev)
|
||||
static void bbram_ctrl_reset_hold(Object *obj)
|
||||
{
|
||||
XlnxBBRam *s = XLNX_BBRAM(dev);
|
||||
XlnxBBRam *s = XLNX_BBRAM(obj);
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
|
||||
@ -522,8 +523,9 @@ static Property bbram_ctrl_props[] = {
|
||||
static void bbram_ctrl_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
ResettableClass *rc = RESETTABLE_CLASS(klass);
|
||||
|
||||
dc->reset = bbram_ctrl_reset;
|
||||
rc->phases.hold = bbram_ctrl_reset_hold;
|
||||
dc->realize = bbram_ctrl_realize;
|
||||
dc->vmsd = &vmstate_bbram_ctrl;
|
||||
device_class_set_props(dc, bbram_ctrl_props);
|
||||
|
@ -2,6 +2,7 @@
|
||||
* QEMU model of the Versal eFuse controller
|
||||
*
|
||||
* Copyright (c) 2020 Xilinx Inc.
|
||||
* Copyright (c) 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
@ -657,9 +658,9 @@ static void efuse_ctrl_register_reset(RegisterInfo *reg)
|
||||
register_reset(reg);
|
||||
}
|
||||
|
||||
static void efuse_ctrl_reset(DeviceState *dev)
|
||||
static void efuse_ctrl_reset_hold(Object *obj)
|
||||
{
|
||||
XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev);
|
||||
XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj);
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
|
||||
@ -749,8 +750,9 @@ static Property efuse_ctrl_props[] = {
|
||||
static void efuse_ctrl_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
ResettableClass *rc = RESETTABLE_CLASS(klass);
|
||||
|
||||
dc->reset = efuse_ctrl_reset;
|
||||
rc->phases.hold = efuse_ctrl_reset_hold;
|
||||
dc->realize = efuse_ctrl_realize;
|
||||
dc->vmsd = &vmstate_efuse_ctrl;
|
||||
device_class_set_props(dc, efuse_ctrl_props);
|
||||
|
@ -2,6 +2,7 @@
|
||||
* QEMU model of the ZynqMP eFuse
|
||||
*
|
||||
* Copyright (c) 2015 Xilinx Inc.
|
||||
* Copyright (c) 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Written by Edgar E. Iglesias <edgari@xilinx.com>
|
||||
*
|
||||
@ -769,9 +770,9 @@ static void zynqmp_efuse_register_reset(RegisterInfo *reg)
|
||||
register_reset(reg);
|
||||
}
|
||||
|
||||
static void zynqmp_efuse_reset(DeviceState *dev)
|
||||
static void zynqmp_efuse_reset_hold(Object *obj)
|
||||
{
|
||||
XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(dev);
|
||||
XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj);
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
|
||||
@ -837,8 +838,9 @@ static Property zynqmp_efuse_props[] = {
|
||||
static void zynqmp_efuse_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
ResettableClass *rc = RESETTABLE_CLASS(klass);
|
||||
|
||||
dc->reset = zynqmp_efuse_reset;
|
||||
rc->phases.hold = zynqmp_efuse_reset_hold;
|
||||
dc->realize = zynqmp_efuse_realize;
|
||||
dc->vmsd = &vmstate_efuse;
|
||||
device_class_set_props(dc, zynqmp_efuse_props);
|
||||
|
@ -138,6 +138,9 @@ static int64_t npcm7xx_timer_count_to_ns(NPCM7xxTimer *t, uint32_t count)
|
||||
/* Convert a time interval in nanoseconds to a timer cycle count. */
|
||||
static uint32_t npcm7xx_timer_ns_to_count(NPCM7xxTimer *t, int64_t ns)
|
||||
{
|
||||
if (ns < 0) {
|
||||
return 0;
|
||||
}
|
||||
return clock_ns_to_ticks(t->ctrl->clock, ns) /
|
||||
npcm7xx_tcsr_prescaler(t->tcsr);
|
||||
}
|
||||
|
include/hw/arm/bsa.h (new file, 35 lines)
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Common definitions for Arm Base System Architecture (BSA) platforms.
|
||||
*
|
||||
* Copyright (c) 2015 Linaro Limited
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2 or later, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef QEMU_ARM_BSA_H
|
||||
#define QEMU_ARM_BSA_H
|
||||
|
||||
/* These are architectural INTID values */
|
||||
#define VIRTUAL_PMU_IRQ 23
|
||||
#define ARCH_GIC_MAINT_IRQ 25
|
||||
#define ARCH_TIMER_NS_EL2_IRQ 26
|
||||
#define ARCH_TIMER_VIRT_IRQ 27
|
||||
#define ARCH_TIMER_NS_EL2_VIRT_IRQ 28
|
||||
#define ARCH_TIMER_S_EL1_IRQ 29
|
||||
#define ARCH_TIMER_NS_EL1_IRQ 30
|
||||
|
||||
#define INTID_TO_PPI(irq) ((irq) - 16)
|
||||
|
||||
#endif /* QEMU_ARM_BSA_H */
|
@ -30,7 +30,7 @@
|
||||
#include "hw/intc/exynos4210_gic.h"
|
||||
#include "hw/intc/exynos4210_combiner.h"
|
||||
#include "hw/core/split-irq.h"
|
||||
#include "target/arm/cpu-qom.h"
|
||||
#include "hw/arm/boot.h"
|
||||
#include "qom/object.h"
|
||||
|
||||
#define EXYNOS4210_NCPUS 2
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include "qemu/notify.h"
|
||||
#include "hw/boards.h"
|
||||
#include "hw/arm/boot.h"
|
||||
#include "hw/arm/bsa.h"
|
||||
#include "hw/block/flash.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "hw/intc/arm_gicv3_common.h"
|
||||
@ -43,17 +44,6 @@
|
||||
#define NUM_VIRTIO_TRANSPORTS 32
|
||||
#define NUM_SMMU_IRQS 4
|
||||
|
||||
#define ARCH_GIC_MAINT_IRQ 9
|
||||
|
||||
#define ARCH_TIMER_VIRT_IRQ 11
|
||||
#define ARCH_TIMER_S_EL1_IRQ 13
|
||||
#define ARCH_TIMER_NS_EL1_IRQ 14
|
||||
#define ARCH_TIMER_NS_EL2_IRQ 10
|
||||
|
||||
#define VIRTUAL_PMU_IRQ 7
|
||||
|
||||
#define PPI(irq) ((irq) + 16)
|
||||
|
||||
/* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */
|
||||
#define PVTIME_SIZE_PER_CPU 64
|
||||
|
||||
|
@ -34,7 +34,7 @@
|
||||
|
||||
#define RMAX_XLNX_BBRAM ((0x4c / 4) + 1)
|
||||
|
||||
#define TYPE_XLNX_BBRAM "xlnx,bbram-ctrl"
|
||||
#define TYPE_XLNX_BBRAM "xlnx.bbram-ctrl"
|
||||
OBJECT_DECLARE_SIMPLE_TYPE(XlnxBBRam, XLNX_BBRAM);
|
||||
|
||||
struct XlnxBBRam {
|
||||
|
@ -65,60 +65,9 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
|
||||
|
||||
/* Initialize the cpu we are turning on */
|
||||
cpu_reset(target_cpu_state);
|
||||
arm_emulate_firmware_reset(target_cpu_state, info->target_el);
|
||||
target_cpu_state->halted = 0;
|
||||
|
||||
if (info->target_aa64) {
|
||||
if ((info->target_el < 3) && arm_feature(&target_cpu->env,
|
||||
ARM_FEATURE_EL3)) {
|
||||
/*
|
||||
* As target mode is AArch64, we need to set lower
|
||||
* exception level (the requested level 2) to AArch64
|
||||
*/
|
||||
target_cpu->env.cp15.scr_el3 |= SCR_RW;
|
||||
}
|
||||
|
||||
if ((info->target_el < 2) && arm_feature(&target_cpu->env,
|
||||
ARM_FEATURE_EL2)) {
|
||||
/*
|
||||
* As target mode is AArch64, we need to set lower
|
||||
* exception level (the requested level 1) to AArch64
|
||||
*/
|
||||
target_cpu->env.cp15.hcr_el2 |= HCR_RW;
|
||||
}
|
||||
|
||||
target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true);
|
||||
} else {
|
||||
/* We are requested to boot in AArch32 mode */
|
||||
static const uint32_t mode_for_el[] = { 0,
|
||||
ARM_CPU_MODE_SVC,
|
||||
ARM_CPU_MODE_HYP,
|
||||
ARM_CPU_MODE_SVC };
|
||||
|
||||
cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M,
|
||||
CPSRWriteRaw);
|
||||
}
|
||||
|
||||
if (info->target_el == 3) {
|
||||
/* Processor is in secure mode */
|
||||
target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
|
||||
} else {
|
||||
/* Processor is not in secure mode */
|
||||
target_cpu->env.cp15.scr_el3 |= SCR_NS;
|
||||
|
||||
/* Set NSACR.{CP11,CP10} so NS can access the FPU */
|
||||
target_cpu->env.cp15.nsacr |= 3 << 10;
|
||||
|
||||
/*
|
||||
* If QEMU is providing the equivalent of EL3 firmware, then we need
|
||||
* to make sure a CPU targeting EL2 comes out of reset with a
|
||||
* functional HVC insn.
|
||||
*/
|
||||
if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3)
|
||||
&& info->target_el == 2) {
|
||||
target_cpu->env.cp15.scr_el3 |= SCR_HCE;
|
||||
}
|
||||
}
|
||||
|
||||
/* We check if the started CPU is now at the correct level */
|
||||
assert(info->target_el == arm_current_el(&target_cpu->env));
|
||||
|
||||
|
@ -10,9 +10,7 @@
|
||||
#ifndef TARGET_ARM_COMMON_SEMI_TARGET_H
|
||||
#define TARGET_ARM_COMMON_SEMI_TARGET_H
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
#include "hw/arm/boot.h"
|
||||
#endif
|
||||
#include "target/arm/cpu-qom.h"
|
||||
|
||||
static inline target_ulong common_semi_arg(CPUState *cs, int argno)
|
||||
{
|
||||
|
@ -23,8 +23,6 @@
|
||||
#include "hw/core/cpu.h"
|
||||
#include "qom/object.h"
|
||||
|
||||
struct arm_boot_info;
|
||||
|
||||
#define TYPE_ARM_CPU "arm-cpu"
|
||||
|
||||
OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU)
|
||||
|
@ -553,6 +553,101 @@ static void arm_cpu_reset_hold(Object *obj)
|
||||
}
|
||||
}
|
||||
|
||||
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cpustate);
|
||||
CPUARMState *env = &cpu->env;
|
||||
bool have_el3 = arm_feature(env, ARM_FEATURE_EL3);
|
||||
bool have_el2 = arm_feature(env, ARM_FEATURE_EL2);
|
||||
|
||||
/*
|
||||
* Check we have the EL we're aiming for. If that is the
|
||||
* highest implemented EL, then cpu_reset has already done
|
||||
* all the work.
|
||||
*/
|
||||
switch (target_el) {
|
||||
case 3:
|
||||
assert(have_el3);
|
||||
return;
|
||||
case 2:
|
||||
assert(have_el2);
|
||||
if (!have_el3) {
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
if (!have_el3 && !have_el2) {
|
||||
return;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
if (have_el3) {
|
||||
/*
|
||||
* Set the EL3 state so code can run at EL2. This should match
|
||||
* the requirements set by Linux in its booting spec.
|
||||
*/
|
||||
if (env->aarch64) {
|
||||
env->cp15.scr_el3 |= SCR_RW;
|
||||
if (cpu_isar_feature(aa64_pauth, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_API | SCR_APK;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_mte, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_ATA;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_sve, cpu)) {
|
||||
env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
|
||||
env->vfp.zcr_el[3] = 0xf;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_sme, cpu)) {
|
||||
env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
|
||||
env->cp15.scr_el3 |= SCR_ENTP2;
|
||||
env->vfp.smcr_el[3] = 0xf;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_hcx, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_HXEN;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_fgt, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_FGTEN;
|
||||
}
|
||||
}
|
||||
|
||||
if (target_el == 2) {
|
||||
/* If the guest is at EL2 then Linux expects the HVC insn to work */
|
||||
env->cp15.scr_el3 |= SCR_HCE;
|
||||
}
|
||||
|
||||
/* Put CPU into non-secure state */
|
||||
env->cp15.scr_el3 |= SCR_NS;
|
||||
/* Set NSACR.{CP11,CP10} so NS can access the FPU */
|
||||
env->cp15.nsacr |= 3 << 10;
|
||||
}
|
||||
|
||||
if (have_el2 && target_el < 2) {
|
||||
/* Set EL2 state so code can run at EL1. */
|
||||
if (env->aarch64) {
|
||||
env->cp15.hcr_el2 |= HCR_RW;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set the CPU to the desired state */
|
||||
if (env->aarch64) {
|
||||
env->pstate = aarch64_pstate_mode(target_el, true);
|
||||
} else {
|
||||
static const uint32_t mode_for_el[] = {
|
||||
0,
|
||||
ARM_CPU_MODE_SVC,
|
||||
ARM_CPU_MODE_HYP,
|
||||
ARM_CPU_MODE_SVC,
|
||||
};
|
||||
|
||||
cpsr_write(env, mode_for_el[target_el], CPSR_M, CPSRWriteRaw);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
|
||||
|
||||
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
|
||||
|
@ -1149,6 +1149,28 @@ int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
|
||||
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
|
||||
int cpuid, DumpState *s);
|
||||
|
||||
/**
|
||||
* arm_emulate_firmware_reset: Emulate firmware CPU reset handling
|
||||
* @cpu: CPU (which must have been freshly reset)
|
||||
* @target_el: exception level to put the CPU into
|
||||
* @secure: whether to put the CPU in secure state
|
||||
*
|
||||
* When QEMU is directly running a guest kernel at a lower level than
|
||||
* EL3 it implicitly emulates some aspects of the guest firmware.
|
||||
* This includes that on reset we need to configure the parts of the
|
||||
* CPU corresponding to EL3 so that the real guest code can run at its
|
||||
* lower exception level. This function does that post-reset CPU setup,
|
||||
* for when we do direct boot of a guest kernel, and for when we
|
||||
* emulate PSCI and similar firmware interfaces starting a CPU at a
|
||||
* lower exception level.
|
||||
*
|
||||
* @target_el must be an EL implemented by the CPU between 1 and 3.
|
||||
* We do not support dropping into a Secure EL other than 3.
|
||||
*
|
||||
* It is the responsibility of the caller to call arm_rebuild_hflags().
|
||||
*/
|
||||
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);
|
||||
|
||||
#ifdef TARGET_AARCH64
|
||||
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
|
||||
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
|
||||
|
@ -1283,7 +1283,7 @@ static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
|
||||
bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
|
||||
int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
|
||||
|
||||
if (hpmn != 0 && counter >= hpmn) {
|
||||
if (counter >= hpmn) {
|
||||
return hlp;
|
||||
}
|
||||
}
|
||||
@ -2475,22 +2475,7 @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
|
||||
if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
|
||||
return CP_ACCESS_TRAP;
|
||||
}
|
||||
|
||||
/* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
|
||||
if (hcr & HCR_E2H) {
|
||||
if (timeridx == GTIMER_PHYS &&
|
||||
!extract32(env->cp15.cnthctl_el2, 10, 1)) {
|
||||
return CP_ACCESS_TRAP_EL2;
|
||||
}
|
||||
} else {
|
||||
/* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
|
||||
if (has_el2 && timeridx == GTIMER_PHYS &&
|
||||
!extract32(env->cp15.cnthctl_el2, 1, 1)) {
|
||||
return CP_ACCESS_TRAP_EL2;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
/* fall through */
|
||||
case 1:
|
||||
/* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
|
||||
if (has_el2 && timeridx == GTIMER_PHYS &&
|
||||
|
@ -553,24 +553,19 @@ bool write_kvmstate_to_list(ARMCPU *cpu)
|
||||
bool ok = true;
|
||||
|
||||
for (i = 0; i < cpu->cpreg_array_len; i++) {
|
||||
struct kvm_one_reg r;
|
||||
uint64_t regidx = cpu->cpreg_indexes[i];
|
||||
uint32_t v32;
|
||||
int ret;
|
||||
|
||||
r.id = regidx;
|
||||
|
||||
switch (regidx & KVM_REG_SIZE_MASK) {
|
||||
case KVM_REG_SIZE_U32:
|
||||
r.addr = (uintptr_t)&v32;
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
|
||||
ret = kvm_get_one_reg(cs, regidx, &v32);
|
||||
if (!ret) {
|
||||
cpu->cpreg_values[i] = v32;
|
||||
}
|
||||
break;
|
||||
case KVM_REG_SIZE_U64:
|
||||
r.addr = (uintptr_t)(cpu->cpreg_values + i);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
|
||||
ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i);
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
@ -589,7 +584,6 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
|
||||
bool ok = true;
|
||||
|
||||
for (i = 0; i < cpu->cpreg_array_len; i++) {
|
||||
struct kvm_one_reg r;
|
||||
uint64_t regidx = cpu->cpreg_indexes[i];
|
||||
uint32_t v32;
|
||||
int ret;
|
||||
@ -598,19 +592,17 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
|
||||
continue;
|
||||
}
|
||||
|
||||
r.id = regidx;
|
||||
switch (regidx & KVM_REG_SIZE_MASK) {
|
||||
case KVM_REG_SIZE_U32:
|
||||
v32 = cpu->cpreg_values[i];
|
||||
r.addr = (uintptr_t)&v32;
|
||||
ret = kvm_set_one_reg(cs, regidx, &v32);
|
||||
break;
|
||||
case KVM_REG_SIZE_U64:
|
||||
r.addr = (uintptr_t)(cpu->cpreg_values + i);
|
||||
ret = kvm_set_one_reg(cs, regidx, cpu->cpreg_values + i);
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
|
||||
if (ret) {
|
||||
/* We might fail for "unknown register" and also for
|
||||
* "you tried to set a register which is constant with
|
||||
@ -709,17 +701,13 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
|
||||
void kvm_arm_get_virtual_time(CPUState *cs)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
struct kvm_one_reg reg = {
|
||||
.id = KVM_REG_ARM_TIMER_CNT,
|
||||
.addr = (uintptr_t)&cpu->kvm_vtime,
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (cpu->kvm_vtime_dirty) {
|
||||
return;
|
||||
}
|
||||
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
|
||||
if (ret) {
|
||||
error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
|
||||
abort();
|
||||
@ -731,17 +719,13 @@ void kvm_arm_get_virtual_time(CPUState *cs)
|
||||
void kvm_arm_put_virtual_time(CPUState *cs)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
struct kvm_one_reg reg = {
|
||||
.id = KVM_REG_ARM_TIMER_CNT,
|
||||
.addr = (uintptr_t)&cpu->kvm_vtime,
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (!cpu->kvm_vtime_dirty) {
|
||||
return;
|
||||
}
|
||||
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
|
||||
if (ret) {
|
||||
error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
|
||||
abort();
|
||||
|
@ -30,7 +30,6 @@
|
||||
#include "internals.h"
|
||||
#include "hw/acpi/acpi.h"
|
||||
#include "hw/acpi/ghes.h"
|
||||
#include "hw/arm/virt.h"
|
||||
|
||||
static bool have_guest_debug;
|
||||
|
||||
@ -540,14 +539,10 @@ static int kvm_arm_sve_set_vls(CPUState *cs)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
|
||||
struct kvm_one_reg reg = {
|
||||
.id = KVM_REG_ARM64_SVE_VLS,
|
||||
.addr = (uint64_t)&vls[0],
|
||||
};
|
||||
|
||||
assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
|
||||
|
||||
return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
return kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_VLS, &vls[0]);
|
||||
}
|
||||
|
||||
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
|
||||
@ -726,19 +721,17 @@ static void kvm_inject_arm_sea(CPUState *c)
|
||||
static int kvm_arch_put_fpsimd(CPUState *cs)
|
||||
{
|
||||
CPUARMState *env = &ARM_CPU(cs)->env;
|
||||
struct kvm_one_reg reg;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *q = aa64_vfp_qreg(env, i);
|
||||
#if HOST_BIG_ENDIAN
|
||||
uint64_t fp_val[2] = { q[1], q[0] };
|
||||
reg.addr = (uintptr_t)fp_val;
|
||||
ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]),
|
||||
fp_val);
|
||||
#else
|
||||
reg.addr = (uintptr_t)q;
|
||||
ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
|
||||
#endif
|
||||
reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -759,14 +752,11 @@ static int kvm_arch_put_sve(CPUState *cs)
|
||||
CPUARMState *env = &cpu->env;
|
||||
uint64_t tmp[ARM_MAX_VQ * 2];
|
||||
uint64_t *r;
|
||||
struct kvm_one_reg reg;
|
||||
int n, ret;
|
||||
|
||||
for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
|
||||
r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
|
||||
reg.addr = (uintptr_t)r;
|
||||
reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -775,9 +765,7 @@ static int kvm_arch_put_sve(CPUState *cs)
|
||||
for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
|
||||
r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
|
||||
DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
|
||||
reg.addr = (uintptr_t)r;
|
||||
reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -785,9 +773,7 @@ static int kvm_arch_put_sve(CPUState *cs)
|
||||
|
||||
r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
|
||||
DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
|
||||
reg.addr = (uintptr_t)r;
|
||||
reg.id = KVM_REG_ARM64_SVE_FFR(0);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -797,7 +783,6 @@ static int kvm_arch_put_sve(CPUState *cs)
|
||||
|
||||
int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
{
|
||||
struct kvm_one_reg reg;
|
||||
uint64_t val;
|
||||
uint32_t fpr;
|
||||
int i, ret;
|
||||
@ -814,9 +799,8 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
}
|
||||
|
||||
for (i = 0; i < 31; i++) {
|
||||
reg.id = AARCH64_CORE_REG(regs.regs[i]);
|
||||
reg.addr = (uintptr_t) &env->xregs[i];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
|
||||
&env->xregs[i]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -827,16 +811,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
*/
|
||||
aarch64_save_sp(env, 1);
|
||||
|
||||
reg.id = AARCH64_CORE_REG(regs.sp);
|
||||
reg.addr = (uintptr_t) &env->sp_el[0];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.id = AARCH64_CORE_REG(sp_el1);
|
||||
reg.addr = (uintptr_t) &env->sp_el[1];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -847,23 +827,17 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
} else {
|
||||
val = cpsr_read(env);
|
||||
}
|
||||
reg.id = AARCH64_CORE_REG(regs.pstate);
|
||||
reg.addr = (uintptr_t) &val;
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.id = AARCH64_CORE_REG(regs.pc);
|
||||
reg.addr = (uintptr_t) &env->pc;
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.id = AARCH64_CORE_REG(elr_el1);
|
||||
reg.addr = (uintptr_t) &env->elr_el[1];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -882,9 +856,8 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
|
||||
/* KVM 0-4 map to QEMU banks 1-5 */
|
||||
for (i = 0; i < KVM_NR_SPSR; i++) {
|
||||
reg.id = AARCH64_CORE_REG(spsr[i]);
|
||||
reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
|
||||
&env->banked_spsr[i + 1]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -899,18 +872,14 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
fpr = vfp_get_fpsr(env);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
fpr = vfp_get_fpcr(env);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -939,14 +908,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
static int kvm_arch_get_fpsimd(CPUState *cs)
|
||||
{
|
||||
CPUARMState *env = &ARM_CPU(cs)->env;
|
||||
struct kvm_one_reg reg;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *q = aa64_vfp_qreg(env, i);
|
||||
reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
|
||||
reg.addr = (uintptr_t)q;
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
|
||||
if (ret) {
|
||||
return ret;
|
||||
} else {
|
||||
@ -970,15 +936,12 @@ static int kvm_arch_get_sve(CPUState *cs)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
struct kvm_one_reg reg;
|
||||
uint64_t *r;
|
||||
int n, ret;
|
||||
|
||||
for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
|
||||
r = &env->vfp.zregs[n].d[0];
|
||||
reg.addr = (uintptr_t)r;
|
||||
reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -987,9 +950,7 @@ static int kvm_arch_get_sve(CPUState *cs)
|
||||
|
||||
for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
|
||||
r = &env->vfp.pregs[n].p[0];
|
||||
reg.addr = (uintptr_t)r;
|
||||
reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -997,9 +958,7 @@ static int kvm_arch_get_sve(CPUState *cs)
|
||||
}
|
||||
|
||||
r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
|
||||
reg.addr = (uintptr_t)r;
|
||||
reg.id = KVM_REG_ARM64_SVE_FFR(0);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -1010,7 +969,6 @@ static int kvm_arch_get_sve(CPUState *cs)
|
||||
|
||||
int kvm_arch_get_registers(CPUState *cs)
|
||||
{
|
||||
struct kvm_one_reg reg;
|
||||
uint64_t val;
|
||||
unsigned int el;
|
||||
uint32_t fpr;
|
||||
@ -1020,31 +978,24 @@ int kvm_arch_get_registers(CPUState *cs)
|
||||
CPUARMState *env = &cpu->env;
|
||||
|
||||
for (i = 0; i < 31; i++) {
|
||||
reg.id = AARCH64_CORE_REG(regs.regs[i]);
|
||||
reg.addr = (uintptr_t) &env->xregs[i];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
|
||||
&env->xregs[i]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
reg.id = AARCH64_CORE_REG(regs.sp);
|
||||
reg.addr = (uintptr_t) &env->sp_el[0];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.id = AARCH64_CORE_REG(sp_el1);
|
||||
reg.addr = (uintptr_t) &env->sp_el[1];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.id = AARCH64_CORE_REG(regs.pstate);
|
||||
reg.addr = (uintptr_t) &val;
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -1061,9 +1012,7 @@ int kvm_arch_get_registers(CPUState *cs)
|
||||
*/
|
||||
aarch64_restore_sp(env, 1);
|
||||
|
||||
reg.id = AARCH64_CORE_REG(regs.pc);
|
||||
reg.addr = (uintptr_t) &env->pc;
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -1077,9 +1026,7 @@ int kvm_arch_get_registers(CPUState *cs)
|
||||
aarch64_sync_64_to_32(env);
|
||||
}
|
||||
|
||||
reg.id = AARCH64_CORE_REG(elr_el1);
|
||||
reg.addr = (uintptr_t) &env->elr_el[1];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -1089,9 +1036,8 @@ int kvm_arch_get_registers(CPUState *cs)
|
||||
* KVM SPSRs 0-4 map to QEMU banks 1-5
|
||||
*/
|
||||
for (i = 0; i < KVM_NR_SPSR; i++) {
|
||||
reg.id = AARCH64_CORE_REG(spsr[i]);
|
||||
reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
|
||||
&env->banked_spsr[i + 1]);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -1112,17 +1058,13 @@ int kvm_arch_get_registers(CPUState *cs)
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
vfp_set_fpsr(env, fpr);
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
@ -89,6 +89,10 @@ void aa32_max_features(ARMCPU *cpu)
|
||||
t = FIELD_DP32(t, ID_DFR0, COPSDBG, 9); /* FEAT_Debugv8p4 */
|
||||
t = FIELD_DP32(t, ID_DFR0, PERFMON, 6); /* FEAT_PMUv3p5 */
|
||||
cpu->isar.id_dfr0 = t;
|
||||
|
||||
t = cpu->isar.id_dfr1;
|
||||
t = FIELD_DP32(t, ID_DFR1, HPMN0, 1); /* FEAT_HPMN0 */
|
||||
cpu->isar.id_dfr1 = t;
|
||||
}
|
||||
|
||||
/* CPU models. These are not needed for the AArch64 linux-user build. */
|
||||
|
@ -1109,6 +1109,7 @@ void aarch64_max_tcg_initfn(Object *obj)
|
||||
t = cpu->isar.id_aa64dfr0;
|
||||
t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */
|
||||
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */
|
||||
t = FIELD_DP64(t, ID_AA64DFR0, HPMN0, 1); /* FEAT_HPMN0 */
|
||||
cpu->isar.id_aa64dfr0 = t;
|
||||
|
||||
t = cpu->isar.id_aa64smfr0;
|
||||
|
@ -7882,7 +7882,7 @@ static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
|
||||
}
|
||||
}
|
||||
|
||||
static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
|
||||
static bool op_stm(DisasContext *s, arg_ldst_block *a)
|
||||
{
|
||||
int i, j, n, list, mem_idx;
|
||||
bool user = a->u;
|
||||
@ -7899,7 +7899,14 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
|
||||
|
||||
list = a->list;
|
||||
n = ctpop16(list);
|
||||
if (n < min_n || a->rn == 15) {
|
||||
/*
|
||||
* This is UNPREDICTABLE for n < 1 in all encodings, and we choose
|
||||
* to UNDEF. In the T32 STM encoding n == 1 is also UNPREDICTABLE,
|
||||
* but hardware treats it like the A32 version and implements the
|
||||
* single-register-store, and some in-the-wild (buggy) software
|
||||
* assumes that, so we don't UNDEF on that case.
|
||||
*/
|
||||
if (n < 1 || a->rn == 15) {
|
||||
unallocated_encoding(s);
|
||||
return true;
|
||||
}
|
||||
@ -7935,8 +7942,7 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
|
||||
|
||||
static bool trans_STM(DisasContext *s, arg_ldst_block *a)
|
||||
{
|
||||
/* BitCount(list) < 1 is UNPREDICTABLE */
|
||||
return op_stm(s, a, 1);
|
||||
return op_stm(s, a);
|
||||
}
|
||||
|
||||
static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
|
||||
@ -7946,11 +7952,10 @@ static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
|
||||
unallocated_encoding(s);
|
||||
return true;
|
||||
}
|
||||
/* BitCount(list) < 2 is UNPREDICTABLE */
|
||||
return op_stm(s, a, 2);
|
||||
return op_stm(s, a);
|
||||
}
|
||||
|
||||
static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
|
||||
static bool do_ldm(DisasContext *s, arg_ldst_block *a)
|
||||
{
|
||||
int i, j, n, list, mem_idx;
|
||||
bool loaded_base;
|
||||
@ -7979,7 +7984,14 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
|
||||
|
||||
list = a->list;
|
||||
n = ctpop16(list);
|
||||
if (n < min_n || a->rn == 15) {
|
||||
/*
|
||||
* This is UNPREDICTABLE for n < 1 in all encodings, and we choose
|
||||
* to UNDEF. In the T32 LDM encoding n == 1 is also UNPREDICTABLE,
|
||||
* but hardware treats it like the A32 version and implements the
|
||||
* single-register-load, and some in-the-wild (buggy) software
|
||||
* assumes that, so we don't UNDEF on that case.
|
||||
*/
|
||||
if (n < 1 || a->rn == 15) {
|
||||
unallocated_encoding(s);
|
||||
return true;
|
||||
}
|
||||
@ -8045,8 +8057,7 @@ static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
|
||||
unallocated_encoding(s);
|
||||
return true;
|
||||
}
|
||||
/* BitCount(list) < 1 is UNPREDICTABLE */
|
||||
return do_ldm(s, a, 1);
|
||||
return do_ldm(s, a);
|
||||
}
|
||||
|
||||
static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
|
||||
@ -8056,16 +8067,14 @@ static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
|
||||
unallocated_encoding(s);
|
||||
return true;
|
||||
}
|
||||
/* BitCount(list) < 2 is UNPREDICTABLE */
|
||||
return do_ldm(s, a, 2);
|
||||
return do_ldm(s, a);
|
||||
}
|
||||
|
||||
static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
|
||||
{
|
||||
/* Writeback is conditional on the base register not being loaded. */
|
||||
a->w = !(a->list & (1 << a->rn));
|
||||
/* BitCount(list) < 1 is UNPREDICTABLE */
|
||||
return do_ldm(s, a, 1);
|
||||
return do_ldm(s, a);
|
||||
}
|
||||
|
||||
static bool trans_CLRM(DisasContext *s, arg_CLRM *a)