target-arm:
 * collection of M profile cleanups and minor bugfixes
 * loader: handle ELF files with overlapping zero-init data
 * virt: allow PMU instantiation with userspace irqchip
 * wdt_aspeed: Add support for the reset width register
 * cpu: Define new cpu_transaction_failed() hook
 * Mark some SoC devices as not user-creatable
 * arm: Fix aa64 ldp register writeback
 * arm_gicv3_kvm: Fix compile warning
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJZrX1YAAoJEDwlJe0UNgzeEiAP/AnmzpihFG1tcinucQIBBtW+
JIPmb3x/wALmEWkdYsxKsvxpC98GIavI+ygM2iSb0HGoQu7T8Tm0cDc+7UEiNQsq
elsl6Rg6Um9WiFkMhkukjVOcjbYbHvblCd2GSXU89lT1+v6gK5wC0+hk/GjoebVM
8dUOPfoExXCz1eHCAv5J6us0Lz/powm2RlUKC454ACjxsDed5vkWwod10SMRd86j
ZfDjaxo0Lua878IE7bNpSqkMf0gMZ17hDoD6imqySK+V7jGbkcTGDGdeqimP9vjC
1fcP6eFJQveEPAr2IrHOY1GXSHhIZ0lSrYLiQIOd24uUIcWvXVB+7CKRGCcuRwPo
etyDMZ8+6i3/3baOoXiaAHNdjFFiR3iS3OdgVf5z9sX18IL3CvHYGpq8NESBTQJ9
le38fmzFx3deREhLmokb1Wy4U5U1xsS8FjIC9Ee9IQN+br5/6Ah6b1cURPk78aK4
RCj+oVc6qLelomNSe0ph9ux96vVegRW4nmEQ7+Ed4gkLwr13fHyKyMT3Rb0Zi4We
h92TYznKqeHKkJhibKQRCL3XXYtBHxrNjhZ7dyC37W2su3wDY4tvYgS5RPuAeys/
Ko9riHDmhZCaqBOsahxGXAHlCUOcadQxbGDUj/1xhpcwD123ZRPZxjyHQNEe0pLP
h+r0lmxn/U3Z2Bq/qX5I
=jTl2
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20170904-2' into staging

target-arm:
 * collection of M profile cleanups and minor bugfixes
 * loader: handle ELF files with overlapping zero-init data
 * virt: allow PMU instantiation with userspace irqchip
 * wdt_aspeed: Add support for the reset width register
 * cpu: Define new cpu_transaction_failed() hook
 * Mark some SoC devices as not user-creatable
 * arm: Fix aa64 ldp register writeback
 * arm_gicv3_kvm: Fix compile warning

# gpg: Signature made Mon 04 Sep 2017 17:20:40 BST
# gpg:                using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20170904-2: (33 commits)
  arm_gicv3_kvm: Fix compile warning
  target/arm: Fix aa64 ldp register writeback
  hw/arm/digic: Mark device with user_creatable = false
  hw/arm/aspeed_soc: Mark devices as user_creatable = false
  target/arm: Allow deliver_fault() caller to specify EA bit
  target/arm: Factor out fault delivery code
  cputlb: Support generating CPU exceptions on memory transaction failures
  cpu: Define new cpu_transaction_failed() hook
  memory.h: Move MemTxResult type to memattrs.h
  aspeed_soc: Propagate silicon-rev to watchdog
  watchdog: wdt_aspeed: Add support for the reset width register
  target/arm/kvm: pmu: improve error handling
  hw/arm/virt: allow pmu instantiation with userspace irqchip
  target/arm/kvm: pmu: split init and set-irq stages
  hw/arm/virt: add pmu interrupt state
  hw/arm: use defined type name instead of hard-coded string
  loader: Ignore zero-sized ELF segments
  loader: Handle ELF files with overlapping zero-initialized data
  nvic: Implement "user accesses BusFault" SCS region behaviour
  armv7m_nvic.h: Move from include/hw/arm to include/hw/intc
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

This commit is contained in: commit 2b48373979
@@ -747,6 +747,7 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
 }
 
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+                         int mmu_idx,
                          target_ulong addr, uintptr_t retaddr, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
@@ -754,6 +755,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
     uint64_t val;
     bool locked = false;
+    MemTxResult r;
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
@@ -767,7 +769,12 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
+    r = memory_region_dispatch_read(mr, physaddr,
+                                    &val, size, iotlbentry->attrs);
+    if (r != MEMTX_OK) {
+        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
+                               mmu_idx, iotlbentry->attrs, r, retaddr);
+    }
     if (locked) {
         qemu_mutex_unlock_iothread();
     }
@@ -776,6 +783,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 }
 
 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+                      int mmu_idx,
                       uint64_t val, target_ulong addr,
                       uintptr_t retaddr, int size)
 {
@@ -783,6 +791,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     hwaddr physaddr = iotlbentry->addr;
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
     bool locked = false;
+    MemTxResult r;
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
@@ -795,7 +804,12 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
+    r = memory_region_dispatch_write(mr, physaddr,
+                                     val, size, iotlbentry->attrs);
+    if (r != MEMTX_OK) {
+        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
+                               mmu_idx, iotlbentry->attrs, r, retaddr);
+    }
     if (locked) {
         qemu_mutex_unlock_iothread();
     }
@@ -845,6 +859,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
     MemoryRegion *mr;
     CPUState *cpu = ENV_GET_CPU(env);
     CPUIOTLBEntry *iotlbentry;
+    hwaddr physaddr;
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = cpu_mmu_index(env, true);
@@ -868,6 +883,19 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
         }
         qemu_mutex_unlock_iothread();
 
+        /* Give the new-style cpu_transaction_failed() hook first chance
+         * to handle this.
+         * This is not the ideal place to detect and generate CPU
+         * exceptions for instruction fetch failure (for instance
+         * we don't know the length of the access that the CPU would
+         * use, and it would be better to go ahead and try the access
+         * and use the MemTXResult it produced). However it is the
+         * simplest place we have currently available for the check.
+         */
+        physaddr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+        cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
+                               iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
+
         cpu_unassigned_access(cpu, addr, false, true, 0, 4);
         /* The CPU's unassigned access hook might have longjumped out
          * with an exception. If it didn't (or there was no hook) then
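The io_readx()/io_writex() changes above rely on MemTxResult being a small bitmask: MEMTX_OK is zero and every failure sets a bit, so a single comparison against MEMTX_OK catches both device errors and decode errors. A minimal standalone sketch of that convention, using the constants that the memattrs.h hunk later in this series defines:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t MemTxResult;
#define MEMTX_OK           0
#define MEMTX_ERROR        (1u << 0)   /* device returned an error */
#define MEMTX_DECODE_ERROR (1u << 1)   /* nothing at that address */

int main(void)
{
    /* The memory subsystem may OR partial results together when it
     * synthesizes one access from several smaller ones. */
    MemTxResult r = MEMTX_ERROR | MEMTX_DECODE_ERROR;

    if (r != MEMTX_OK) {
        /* This is the point where io_readx()/io_writex() above would
         * call cpu_transaction_failed(). */
        printf("transaction failed: 0x%x\n", r);
    }
    return 0;
}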
@@ -146,7 +146,7 @@ static void armv7m_instance_init(Object *obj)
                              &error_abort);
     memory_region_init(&s->container, obj, "armv7m-container", UINT64_MAX);
 
-    object_initialize(&s->nvic, sizeof(s->nvic), "armv7m_nvic");
+    object_initialize(&s->nvic, sizeof(s->nvic), TYPE_NVIC);
     qdev_set_parent_bus(DEVICE(&s->nvic), sysbus_get_default());
     object_property_add_alias(obj, "num-irq",
                               OBJECT(&s->nvic), "num-irq", &error_abort);
@@ -293,7 +293,7 @@ DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
         cpu_model = "cortex-m3";
     }
 
-    armv7m = qdev_create(NULL, "armv7m");
+    armv7m = qdev_create(NULL, TYPE_ARMV7M);
     qdev_prop_set_uint32(armv7m, "num-irq", num_irq);
     qdev_prop_set_string(armv7m, "cpu-model", cpu_model);
     object_property_set_link(OBJECT(armv7m), OBJECT(get_system_memory()),
@@ -183,6 +183,8 @@ static void aspeed_soc_init(Object *obj)
         object_initialize(&s->wdt[i], sizeof(s->wdt[i]), TYPE_ASPEED_WDT);
         object_property_add_child(obj, "wdt[*]", OBJECT(&s->wdt[i]), NULL);
         qdev_set_parent_bus(DEVICE(&s->wdt[i]), sysbus_get_default());
+        qdev_prop_set_uint32(DEVICE(&s->wdt[i]), "silicon-rev",
+                             sc->info->silicon_rev);
     }
 
     object_initialize(&s->ftgmac100, sizeof(s->ftgmac100), TYPE_FTGMAC100);
@@ -338,6 +340,8 @@ static void aspeed_soc_class_init(ObjectClass *oc, void *data)
 
     sc->info = (AspeedSoCInfo *) data;
     dc->realize = aspeed_soc_realize;
+    /* Reason: Uses serial_hds and nd_table in realize() directly */
+    dc->user_creatable = false;
 }
 
 static const TypeInfo aspeed_soc_type_info = {
@@ -101,6 +101,8 @@ static void digic_class_init(ObjectClass *oc, void *data)
     DeviceClass *dc = DEVICE_CLASS(oc);
 
     dc->realize = digic_realize;
+    /* Reason: Uses serial_hds in the realize function --> not usable twice */
+    dc->user_creatable = false;
 }
 
 static const TypeInfo digic_type_info = {
@@ -33,7 +33,7 @@
 #include "hw/arm/arm.h"
 #include "hw/loader.h"
 #include "hw/arm/exynos4210.h"
-#include "hw/sd/sd.h"
+#include "hw/sd/sdhci.h"
 #include "hw/usb/hcd-ehci.h"
 
 #define EXYNOS4210_CHIPID_ADDR         0x10000000
@@ -381,7 +381,7 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem)
         BlockBackend *blk;
         DriveInfo *di;
 
-        dev = qdev_create(NULL, "generic-sdhci");
+        dev = qdev_create(NULL, TYPE_SYSBUS_SDHCI);
         qdev_prop_set_uint32(dev, "capareg", EXYNOS4210_SDHCI_CAPABILITIES);
         qdev_init_nofail(dev);
 
@@ -31,6 +31,9 @@
 #include "exec/address-spaces.h"
 #include "qemu/error-report.h"
 #include "hw/char/pl011.h"
+#include "hw/ide/ahci.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/cpu/a15mpcore.h"
 
 #define SMP_BOOT_ADDR           0x100
 #define SMP_BOOT_REG            0x40
@@ -300,10 +303,10 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
         busdev = SYS_BUS_DEVICE(dev);
         sysbus_mmio_map(busdev, 0, 0xfff12000);
 
-        dev = qdev_create(NULL, "a9mpcore_priv");
+        dev = qdev_create(NULL, TYPE_A9MPCORE_PRIV);
         break;
     case CALXEDA_MIDWAY:
-        dev = qdev_create(NULL, "a15mpcore_priv");
+        dev = qdev_create(NULL, TYPE_A15MPCORE_PRIV);
         break;
     }
     qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
@@ -329,7 +332,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
     sysbus_connect_irq(busdev, 0, pic[18]);
     pl011_create(0xfff36000, pic[20], serial_hds[0]);
 
-    dev = qdev_create(NULL, "highbank-regs");
+    dev = qdev_create(NULL, TYPE_HIGHBANK_REGISTERS);
     qdev_init_nofail(dev);
     busdev = SYS_BUS_DEVICE(dev);
     sysbus_mmio_map(busdev, 0, 0xfff3c000);
@@ -341,7 +344,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
     sysbus_create_simple("pl031", 0xfff35000, pic[19]);
     sysbus_create_simple("pl022", 0xfff39000, pic[23]);
 
-    sysbus_create_simple("sysbus-ahci", 0xffe08000, pic[83]);
+    sysbus_create_simple(TYPE_SYSBUS_AHCI, 0xffe08000, pic[83]);
 
     if (nd_table[0].used) {
         qemu_check_nic_model(&nd_table[0], "xgmac");
@@ -24,6 +24,8 @@
 #include "exec/address-spaces.h"
 #include "qemu/error-report.h"
 #include "hw/char/pl011.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/intc/realview_gic.h"
 
 #define SMP_BOOT_ADDR 0xe0000000
 #define SMP_BOOTREG_ADDR 0x10000030
@@ -172,7 +174,7 @@ static void realview_init(MachineState *machine,
     sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000);
 
     if (is_mpcore) {
-        dev = qdev_create(NULL, is_pb ? "a9mpcore_priv": "realview_mpcore");
+        dev = qdev_create(NULL, is_pb ? TYPE_A9MPCORE_PRIV : "realview_mpcore");
         qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
         qdev_init_nofail(dev);
         busdev = SYS_BUS_DEVICE(dev);
@@ -186,7 +188,7 @@ static void realview_init(MachineState *machine,
     } else {
        uint32_t gic_addr = is_pb ? 0x1e000000 : 0x10040000;
        /* For now just create the nIRQ GIC, and ignore the others. */
-       dev = sysbus_create_simple("realview_gic", gic_addr, cpu_irq[0]);
+       dev = sysbus_create_simple(TYPE_REALVIEW_GIC, gic_addr, cpu_irq[0]);
     }
     for (n = 0; n < 64; n++) {
         pic[n] = qdev_get_gpio_in(dev, n);
@@ -40,6 +40,8 @@
 #include "qemu/error-report.h"
 #include <libfdt.h>
 #include "hw/char/pl011.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/cpu/a15mpcore.h"
 
 #define VEXPRESS_BOARD_ID 0x8e0
 #define VEXPRESS_FLASH_SIZE (64 * 1024 * 1024)
@@ -293,7 +295,7 @@ static void a9_daughterboard_init(const VexpressMachineState *vms,
     memory_region_add_subregion(sysmem, 0x60000000, ram);
 
     /* 0x1e000000 A9MPCore (SCU) private memory region */
-    init_cpus(cpu_model, "a9mpcore_priv", 0x1e000000, pic, vms->secure);
+    init_cpus(cpu_model, TYPE_A9MPCORE_PRIV, 0x1e000000, pic, vms->secure);
 
     /* Daughterboard peripherals : 0x10020000 .. 0x20000000 */
 
@@ -378,7 +380,7 @@ static void a15_daughterboard_init(const VexpressMachineState *vms,
     memory_region_add_subregion(sysmem, 0x80000000, ram);
 
     /* 0x2c000000 A15MPCore private memory region (GIC) */
-    init_cpus(cpu_model, "a15mpcore_priv", 0x2c000000, pic, vms->secure);
+    init_cpus(cpu_model, TYPE_A15MPCORE_PRIV, 0x2c000000, pic, vms->secure);
 
     /* A15 daughterboard peripherals: */
 
@@ -492,10 +492,15 @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms)
 
     CPU_FOREACH(cpu) {
         armcpu = ARM_CPU(cpu);
-        if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU) ||
-            (kvm_enabled() && !kvm_arm_pmu_create(cpu, PPI(VIRTUAL_PMU_IRQ)))) {
+        if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
             return;
         }
+        if (kvm_enabled()) {
+            if (kvm_irqchip_in_kernel()) {
+                kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ));
+            }
+            kvm_arm_pmu_init(cpu);
+        }
     }
 
     if (vms->gic_version == 2) {
@@ -610,6 +615,9 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
         qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
                                     qdev_get_gpio_in(gicdev, ppibase
                                                      + ARCH_GICV3_MAINT_IRQ));
+        qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
+                                    qdev_get_gpio_in(gicdev, ppibase
+                                                     + VIRTUAL_PMU_IRQ));
 
         sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
         sysbus_connect_irq(gicbusdev, i + smp_cpus,
@@ -31,8 +31,10 @@
 #include "hw/misc/zynq-xadc.h"
 #include "hw/ssi/ssi.h"
 #include "qemu/error-report.h"
-#include "hw/sd/sd.h"
+#include "hw/sd/sdhci.h"
 #include "hw/char/cadence_uart.h"
+#include "hw/net/cadence_gem.h"
+#include "hw/cpu/a9mpcore.h"
 
 #define NUM_SPI_FLASHES 4
 #define NUM_QSPI_FLASHES 2
@@ -96,9 +98,9 @@ static void gem_init(NICInfo *nd, uint32_t base, qemu_irq irq)
     DeviceState *dev;
     SysBusDevice *s;
 
-    dev = qdev_create(NULL, "cadence_gem");
+    dev = qdev_create(NULL, TYPE_CADENCE_GEM);
     if (nd->used) {
-        qemu_check_nic_model(nd, "cadence_gem");
+        qemu_check_nic_model(nd, TYPE_CADENCE_GEM);
         qdev_set_nic_properties(dev, nd);
     }
     qdev_init_nofail(dev);
@@ -222,7 +224,7 @@ static void zynq_init(MachineState *machine)
     qdev_init_nofail(dev);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xF8000000);
 
-    dev = qdev_create(NULL, "a9mpcore_priv");
+    dev = qdev_create(NULL, TYPE_A9MPCORE_PRIV);
     qdev_prop_set_uint32(dev, "num-cpu", 1);
     qdev_init_nofail(dev);
     busdev = SYS_BUS_DEVICE(dev);
@@ -252,7 +254,7 @@ static void zynq_init(MachineState *machine)
     gem_init(&nd_table[0], 0xE000B000, pic[54-IRQ_OFFSET]);
     gem_init(&nd_table[1], 0xE000C000, pic[77-IRQ_OFFSET]);
 
-    dev = qdev_create(NULL, "generic-sdhci");
+    dev = qdev_create(NULL, TYPE_SYSBUS_SDHCI);
     qdev_init_nofail(dev);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0100000);
     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[56-IRQ_OFFSET]);
@@ -263,7 +265,7 @@ static void zynq_init(MachineState *machine)
     qdev_prop_set_drive(carddev, "drive", blk, &error_fatal);
     object_property_set_bool(OBJECT(carddev), true, "realized", &error_fatal);
 
-    dev = qdev_create(NULL, "generic-sdhci");
+    dev = qdev_create(NULL, TYPE_SYSBUS_SDHCI);
     qdev_init_nofail(dev);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0101000);
     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[79-IRQ_OFFSET]);
@@ -293,7 +293,7 @@ static void kvm_arm_gicv3_put(GICv3State *s)
         kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, true);
 
         reg64 = c->gicr_pendbaser;
-        if (!c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) {
+        if (!(c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
            /* Setting PTZ is advised if LPIs are disabled, to reduce
             * GIC initialization time.
             */
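The one-character kvm_arm_gicv3_put() fix above is an operator-precedence bug: `!` binds tighter than `&`, so the old condition reduced c->gicr_ctlr to 0 or 1 before masking, instead of testing the EnableLPIs bit. A standalone sketch of the difference; the bit position is only illustrative, and most compilers warn about the old pattern, which is what the commit title refers to:

#include <stdio.h>
#include <stdint.h>

#define GICR_CTLR_ENABLE_LPIS (1u << 0)   /* illustrative bit position */

/* Old form: '!' applies to ctlr first, giving 0 or 1, which is then
 * ANDed with the flag -- almost never what was intended. */
static int old_check(uint32_t ctlr) { return !ctlr & GICR_CTLR_ENABLE_LPIS; }

/* Fixed form: test the flag bit, then negate the result. */
static int new_check(uint32_t ctlr) { return !(ctlr & GICR_CTLR_ENABLE_LPIS); }

int main(void)
{
    /* LPIs disabled, but some unrelated bit is set in the register. */
    uint32_t ctlr = 1u << 2;

    /* old: !ctlr == 0, 0 & flag == 0  -> treated as "LPIs enabled" (wrong)
     * new: flag bit clear -> 1        -> "LPIs disabled" (right) */
    printf("old=%d new=%d\n", old_check(ctlr), new_check(ctlr));
    return 0;
}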
@@ -17,7 +17,7 @@
 #include "hw/sysbus.h"
 #include "qemu/timer.h"
 #include "hw/arm/arm.h"
-#include "hw/arm/armv7m_nvic.h"
+#include "hw/intc/armv7m_nvic.h"
 #include "target/arm/cpu.h"
 #include "exec/exec-all.h"
 #include "qemu/log.h"
@@ -167,9 +167,9 @@ static inline int nvic_exec_prio(NVICState *s)
     CPUARMState *env = &s->cpu->env;
     int running;
 
-    if (env->daif & PSTATE_F) { /* FAULTMASK */
+    if (env->v7m.faultmask) {
         running = -1;
-    } else if (env->daif & PSTATE_I) { /* PRIMASK */
+    } else if (env->v7m.primask) {
         running = 0;
     } else if (env->v7m.basepri > 0) {
         running = env->v7m.basepri & nvic_gprio_mask(s);
@@ -733,11 +733,8 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value)
        }
    case 0xf00: /* Software Triggered Interrupt Register */
    {
-        /* user mode can only write to STIR if CCR.USERSETMPEND permits it */
        int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
-        if (excnum < s->num_irq &&
-            (arm_current_el(&cpu->env) ||
-             (cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK))) {
+        if (excnum < s->num_irq) {
            armv7m_nvic_set_pending(s, excnum);
        }
        break;
@@ -748,14 +745,32 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value)
    }
 }
 
-static uint64_t nvic_sysreg_read(void *opaque, hwaddr addr,
-                                 unsigned size)
+static bool nvic_user_access_ok(NVICState *s, hwaddr offset)
+{
+    /* Return true if unprivileged access to this register is permitted. */
+    switch (offset) {
+    case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
+        return s->cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK;
+    default:
+        /* All other user accesses cause a BusFault unconditionally */
+        return false;
+    }
+}
+
+static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
+                                    uint64_t *data, unsigned size,
+                                    MemTxAttrs attrs)
 {
     NVICState *s = (NVICState *)opaque;
     uint32_t offset = addr;
     unsigned i, startvec, end;
     uint32_t val;
 
+    if (attrs.user && !nvic_user_access_ok(s, addr)) {
+        /* Generate BusFault for unprivileged accesses */
+        return MEMTX_ERROR;
+    }
+
     switch (offset) {
     /* reads of set and clear both return the status */
     case 0x100 ... 0x13f: /* NVIC Set enable */
@@ -826,11 +841,13 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
     }
 
     trace_nvic_sysreg_read(addr, val, size);
-    return val;
+    *data = val;
+    return MEMTX_OK;
 }
 
-static void nvic_sysreg_write(void *opaque, hwaddr addr,
-                              uint64_t value, unsigned size)
+static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
+                                     uint64_t value, unsigned size,
+                                     MemTxAttrs attrs)
 {
     NVICState *s = (NVICState *)opaque;
     uint32_t offset = addr;
@@ -839,6 +856,11 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
 
     trace_nvic_sysreg_write(addr, value, size);
 
+    if (attrs.user && !nvic_user_access_ok(s, addr)) {
+        /* Generate BusFault for unprivileged accesses */
+        return MEMTX_ERROR;
+    }
+
     switch (offset) {
     case 0x100 ... 0x13f: /* NVIC Set enable */
         offset += 0x80;
@@ -853,7 +875,7 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
             }
         }
         nvic_irq_update(s);
-        return;
+        return MEMTX_OK;
     case 0x200 ... 0x23f: /* NVIC Set pend */
         /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
@@ -870,9 +892,9 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
            }
        }
        nvic_irq_update(s);
-        return;
+        return MEMTX_OK;
    case 0x300 ... 0x33f: /* NVIC Active */
-        return; /* R/O */
+        return MEMTX_OK; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
        startvec = 8 * (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
 
@@ -880,26 +902,28 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
            set_prio(s, startvec + i, (value >> (i * 8)) & 0xff);
        }
        nvic_irq_update(s);
-        return;
+        return MEMTX_OK;
    case 0xd18 ... 0xd23: /* System Handler Priority. */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            set_prio(s, hdlidx, (value >> (i * 8)) & 0xff);
        }
        nvic_irq_update(s);
-        return;
+        return MEMTX_OK;
    }
    if (size == 4) {
        nvic_writel(s, offset, value);
-        return;
+        return MEMTX_OK;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
+    /* This is UNPREDICTABLE; treat as RAZ/WI */
+    return MEMTX_OK;
 }
 
 static const MemoryRegionOps nvic_sysreg_ops = {
-    .read = nvic_sysreg_read,
-    .write = nvic_sysreg_write,
+    .read_with_attrs = nvic_sysreg_read,
+    .write_with_attrs = nvic_sysreg_write,
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
 
@@ -1036,10 +1060,6 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
      * 0xd00..0xd3c - SCS registers
      * 0xd40..0xeff - Reserved or Not implemented
      * 0xf00 - STIR
-     *
-     * At the moment there is only one thing in the container region,
-     * but we leave it in place to allow us to pull systick out into
-     * its own device object later.
      */
     memory_region_init(&s->container, OBJECT(s), "nvic", 0x1000);
     /* The system register region goes at the bottom of the priority
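The NVIC change above moves the system-register region to the *_with_attrs accessor form so the handlers can see who issued the transaction and fail unprivileged accesses with a bus error. A stripped-down, standalone model of that pattern follows; the types and the CCR.USERSETMPEND flag are simplified stand-ins for the QEMU definitions, and only the STIR offset is treated specially, as in the patch:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint32_t MemTxResult;            /* stand-in for the QEMU typedef */
#define MEMTX_OK    0
#define MEMTX_ERROR (1u << 0)

typedef struct { bool user; } MemTxAttrs;   /* only the field this sketch needs */

/* Illustrative policy: only STIR (0xf00) is reachable from user mode,
 * and only when a CCR.USERSETMPEND-style flag is set. */
static bool user_access_ok(uint32_t offset, bool usersetmpend)
{
    switch (offset) {
    case 0xf00:                 /* STIR */
        return usersetmpend;
    default:
        return false;           /* everything else faults for user accesses */
    }
}

static MemTxResult sysreg_read(uint32_t offset, uint64_t *data,
                               MemTxAttrs attrs, bool usersetmpend)
{
    if (attrs.user && !user_access_ok(offset, usersetmpend)) {
        return MEMTX_ERROR;     /* surfaces to the guest as a BusFault */
    }
    *data = 0;                  /* real code would decode the register here */
    return MEMTX_OK;
}

int main(void)
{
    uint64_t val;
    MemTxAttrs user = { .user = true };

    printf("user read of 0xd08 -> %u\n",
           sysreg_read(0xd08, &val, user, false));   /* MEMTX_ERROR */
    printf("user read of 0xf00 -> %u\n",
           sysreg_read(0xf00, &val, user, true));    /* MEMTX_OK */
    return 0;
}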
@@ -8,16 +8,19 @@
  */
 
 #include "qemu/osdep.h"
+
+#include "qapi/error.h"
 #include "qemu/log.h"
-#include "sysemu/watchdog.h"
-#include "hw/sysbus.h"
 #include "qemu/timer.h"
+#include "sysemu/watchdog.h"
+#include "hw/misc/aspeed_scu.h"
+#include "hw/sysbus.h"
 #include "hw/watchdog/wdt_aspeed.h"
 
 #define WDT_STATUS                      (0x00 / 4)
 #define WDT_RELOAD_VALUE                (0x04 / 4)
 #define WDT_RESTART                     (0x08 / 4)
 #define WDT_CTRL                        (0x0C / 4)
 #define WDT_CTRL_RESET_MODE_SOC         (0x00 << 5)
 #define WDT_CTRL_RESET_MODE_FULL_CHIP   (0x01 << 5)
 #define WDT_CTRL_1MHZ_CLK               BIT(4)
@@ -25,18 +28,41 @@
 #define WDT_CTRL_WDT_INTR               BIT(2)
 #define WDT_CTRL_RESET_SYSTEM           BIT(1)
 #define WDT_CTRL_ENABLE                 BIT(0)
+#define WDT_RESET_WIDTH                 (0x18 / 4)
+#define WDT_RESET_WIDTH_ACTIVE_HIGH     BIT(31)
+#define   WDT_POLARITY_MASK             (0xFF << 24)
+#define   WDT_ACTIVE_HIGH_MAGIC         (0xA5 << 24)
+#define   WDT_ACTIVE_LOW_MAGIC          (0x5A << 24)
+#define WDT_RESET_WIDTH_PUSH_PULL       BIT(30)
+#define   WDT_DRIVE_TYPE_MASK           (0xFF << 24)
+#define   WDT_PUSH_PULL_MAGIC           (0xA8 << 24)
+#define   WDT_OPEN_DRAIN_MAGIC          (0x8A << 24)
 
 #define WDT_TIMEOUT_STATUS              (0x10 / 4)
 #define WDT_TIMEOUT_CLEAR               (0x14 / 4)
-#define WDT_RESET_WDITH                 (0x18 / 4)
 
 #define WDT_RESTART_MAGIC               0x4755
 
 static bool aspeed_wdt_is_enabled(const AspeedWDTState *s)
 {
     return s->regs[WDT_CTRL] & WDT_CTRL_ENABLE;
 }
 
+static bool is_ast2500(const AspeedWDTState *s)
+{
+    switch (s->silicon_rev) {
+    case AST2500_A0_SILICON_REV:
+    case AST2500_A1_SILICON_REV:
+        return true;
+    case AST2400_A0_SILICON_REV:
+    case AST2400_A1_SILICON_REV:
+    default:
+        break;
+    }
+
+    return false;
+}
+
 static uint64_t aspeed_wdt_read(void *opaque, hwaddr offset, unsigned size)
 {
     AspeedWDTState *s = ASPEED_WDT(opaque);
@@ -55,9 +81,10 @@ static uint64_t aspeed_wdt_read(void *opaque, hwaddr offset, unsigned size)
         return 0;
     case WDT_CTRL:
         return s->regs[WDT_CTRL];
+    case WDT_RESET_WIDTH:
+        return s->regs[WDT_RESET_WIDTH];
     case WDT_TIMEOUT_STATUS:
     case WDT_TIMEOUT_CLEAR:
-    case WDT_RESET_WDITH:
         qemu_log_mask(LOG_UNIMP,
                       "%s: uninmplemented read at offset 0x%" HWADDR_PRIx "\n",
                       __func__, offset);
@@ -119,9 +146,27 @@ static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data,
             timer_del(s->timer);
         }
         break;
+    case WDT_RESET_WIDTH:
+    {
+        uint32_t property = data & WDT_POLARITY_MASK;
+
+        if (property && is_ast2500(s)) {
+            if (property == WDT_ACTIVE_HIGH_MAGIC) {
+                s->regs[WDT_RESET_WIDTH] |= WDT_RESET_WIDTH_ACTIVE_HIGH;
+            } else if (property == WDT_ACTIVE_LOW_MAGIC) {
+                s->regs[WDT_RESET_WIDTH] &= ~WDT_RESET_WIDTH_ACTIVE_HIGH;
+            } else if (property == WDT_PUSH_PULL_MAGIC) {
+                s->regs[WDT_RESET_WIDTH] |= WDT_RESET_WIDTH_PUSH_PULL;
+            } else if (property == WDT_OPEN_DRAIN_MAGIC) {
+                s->regs[WDT_RESET_WIDTH] &= ~WDT_RESET_WIDTH_PUSH_PULL;
+            }
+        }
+        s->regs[WDT_RESET_WIDTH] &= ~s->ext_pulse_width_mask;
+        s->regs[WDT_RESET_WIDTH] |= data & s->ext_pulse_width_mask;
+        break;
+    }
     case WDT_TIMEOUT_STATUS:
     case WDT_TIMEOUT_CLEAR:
-    case WDT_RESET_WDITH:
         qemu_log_mask(LOG_UNIMP,
                       "%s: uninmplemented write at offset 0x%" HWADDR_PRIx "\n",
                       __func__, offset);
@@ -167,6 +212,7 @@ static void aspeed_wdt_reset(DeviceState *dev)
     s->regs[WDT_RELOAD_VALUE] = 0x03EF1480;
     s->regs[WDT_RESTART] = 0;
     s->regs[WDT_CTRL] = 0;
+    s->regs[WDT_RESET_WIDTH] = 0xFF;
 
     timer_del(s->timer);
 }
@@ -187,6 +233,25 @@ static void aspeed_wdt_realize(DeviceState *dev, Error **errp)
     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
     AspeedWDTState *s = ASPEED_WDT(dev);
 
+    if (!is_supported_silicon_rev(s->silicon_rev)) {
+        error_setg(errp, "Unknown silicon revision: 0x%" PRIx32,
+                   s->silicon_rev);
+        return;
+    }
+
+    switch (s->silicon_rev) {
+    case AST2400_A0_SILICON_REV:
+    case AST2400_A1_SILICON_REV:
+        s->ext_pulse_width_mask = 0xff;
+        break;
+    case AST2500_A0_SILICON_REV:
+    case AST2500_A1_SILICON_REV:
+        s->ext_pulse_width_mask = 0xfffff;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
     s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, aspeed_wdt_timer_expired, dev);
 
     /* FIXME: This setting should be derived from the SCU hw strapping
@@ -199,6 +264,11 @@ static void aspeed_wdt_realize(DeviceState *dev, Error **errp)
     sysbus_init_mmio(sbd, &s->iomem);
 }
 
+static Property aspeed_wdt_properties[] = {
+    DEFINE_PROP_UINT32("silicon-rev", AspeedWDTState, silicon_rev, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void aspeed_wdt_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -207,6 +277,7 @@ static void aspeed_wdt_class_init(ObjectClass *klass, void *data)
     dc->reset = aspeed_wdt_reset;
     set_bit(DEVICE_CATEGORY_MISC, dc->categories);
     dc->vmsd = &vmstate_aspeed_wdt;
+    dc->props = aspeed_wdt_properties;
 }
 
 static const TypeInfo aspeed_wdt_info = {
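In the watchdog change above, writes to the new reset-width register carry a magic value in the top byte that flips pulse polarity or drive type (honoured only on AST2500 parts), while the low bits update the pulse width through ext_pulse_width_mask. A small standalone model of that decode, reusing the magic numbers from the patch:

#include <stdio.h>
#include <stdint.h>

#define RESET_WIDTH_ACTIVE_HIGH (1u << 31)
#define RESET_WIDTH_PUSH_PULL   (1u << 30)
#define POLARITY_MASK           (0xFFu << 24)
#define ACTIVE_HIGH_MAGIC       (0xA5u << 24)
#define ACTIVE_LOW_MAGIC        (0x5Au << 24)
#define PUSH_PULL_MAGIC         (0xA8u << 24)
#define OPEN_DRAIN_MAGIC        (0x8Au << 24)

/* ext_mask is 0xff on AST2400 and 0xfffff on AST2500, as in the realize hook. */
static uint32_t write_reset_width(uint32_t reg, uint32_t data,
                                  int is_ast2500, uint32_t ext_mask)
{
    uint32_t property = data & POLARITY_MASK;

    if (property && is_ast2500) {
        if (property == ACTIVE_HIGH_MAGIC) {
            reg |= RESET_WIDTH_ACTIVE_HIGH;
        } else if (property == ACTIVE_LOW_MAGIC) {
            reg &= ~RESET_WIDTH_ACTIVE_HIGH;
        } else if (property == PUSH_PULL_MAGIC) {
            reg |= RESET_WIDTH_PUSH_PULL;
        } else if (property == OPEN_DRAIN_MAGIC) {
            reg &= ~RESET_WIDTH_PUSH_PULL;
        }
    }
    reg &= ~ext_mask;           /* replace only the pulse-width field */
    reg |= data & ext_mask;
    return reg;
}

int main(void)
{
    uint32_t reg = 0xFF;        /* reset value used by the patch */

    reg = write_reset_width(reg, ACTIVE_HIGH_MAGIC | 0x10, 1, 0xfffff);
    printf("WDT_RESET_WIDTH = 0x%08x\n", (unsigned)reg);  /* bit 31 set, width 0x10 */
    return 0;
}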
@@ -46,4 +46,14 @@ typedef struct MemTxAttrs {
  */
 #define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })
 
+/* New-style MMIO accessors can indicate that the transaction failed.
+ * A zero (MEMTX_OK) response means success; anything else is a failure
+ * of some kind. The memory subsystem will bitwise-OR together results
+ * if it is synthesizing an operation from multiple smaller accesses.
+ */
+#define MEMTX_OK 0
+#define MEMTX_ERROR             (1U << 0) /* device returned an error */
+#define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
+typedef uint32_t MemTxResult;
+
 #endif
@@ -112,16 +112,6 @@ static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
     n->end = end;
 }
 
-/* New-style MMIO accessors can indicate that the transaction failed.
- * A zero (MEMTX_OK) response means success; anything else is a failure
- * of some kind. The memory subsystem will bitwise-OR together results
- * if it is synthesizing an operation from multiple smaller accesses.
- */
-#define MEMTX_OK 0
-#define MEMTX_ERROR             (1U << 0) /* device returned an error */
-#define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
-typedef uint32_t MemTxResult;
-
 /*
  * Memory region callbacks
  */
@@ -11,7 +11,7 @@
 #define HW_ARM_ARMV7M_H
 
 #include "hw/sysbus.h"
-#include "hw/arm/armv7m_nvic.h"
+#include "hw/intc/armv7m_nvic.h"
 
 #define TYPE_BITBAND "ARM,bitband-memory"
 #define BITBAND(obj) OBJECT_CHECK(BitBandState, (obj), TYPE_BITBAND)
@@ -362,6 +362,54 @@ static int glue(load_elf, SZ)(const char *name, int fd,
                 goto fail;
             }
         }
+
+        /* The ELF spec is somewhat vague about the purpose of the
+         * physical address field. One common use in the embedded world
+         * is that physical address field specifies the load address
+         * and the virtual address field specifies the execution address.
+         * Segments are packed into ROM or flash, and the relocation
+         * and zero-initialization of data is done at runtime. This
+         * means that the memsz header represents the runtime size of the
+         * segment, but the filesz represents the loadtime size. If
+         * we try to honour the memsz value for an ELF file like this
+         * we will end up with overlapping segments (which the
+         * loader.c code will later reject).
+         * We support ELF files using this scheme by by checking whether
+         * paddr + memsz for this segment would overlap with any other
+         * segment. If so, then we assume it's using this scheme and
+         * truncate the loaded segment to the filesz size.
+         * If the segment considered as being memsz size doesn't overlap
+         * then we use memsz for the segment length, to handle ELF files
+         * which assume that the loader will do the zero-initialization.
+         */
+        if (mem_size > file_size) {
+            /* If this segment's zero-init portion overlaps another
+             * segment's data or zero-init portion, then truncate this one.
+             * Invalid ELF files where the segments overlap even when
+             * only file_size bytes are loaded will be rejected by
+             * the ROM overlap check in loader.c, so we don't try to
+             * explicitly detect those here.
+             */
+            int j;
+            elf_word zero_start = ph->p_paddr + file_size;
+            elf_word zero_end = ph->p_paddr + mem_size;
+
+            for (j = 0; j < ehdr.e_phnum; j++) {
+                struct elf_phdr *jph = &phdr[j];
+
+                if (i != j && jph->p_type == PT_LOAD) {
+                    elf_word other_start = jph->p_paddr;
+                    elf_word other_end = jph->p_paddr + jph->p_memsz;
+
+                    if (!(other_start >= zero_end ||
+                          zero_start >= other_end)) {
+                        mem_size = file_size;
+                        break;
+                    }
+                }
+            }
+        }
+
         /* address_offset is hack for kernel images that are
            linked at the wrong physical address.  */
         if (translate_fn) {
@@ -403,14 +451,24 @@ static int glue(load_elf, SZ)(const char *name, int fd,
             *pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr;
         }
 
-        if (load_rom) {
-            snprintf(label, sizeof(label), "phdr #%d: %s", i, name);
-
-            /* rom_add_elf_program() seize the ownership of 'data' */
-            rom_add_elf_program(label, data, file_size, mem_size, addr, as);
-        } else {
-            cpu_physical_memory_write(addr, data, file_size);
+        if (mem_size == 0) {
+            /* Some ELF files really do have segments of zero size;
+             * just ignore them rather than trying to create empty
+             * ROM blobs, because the zero-length blob can falsely
+             * trigger the overlapping-ROM-blobs check.
+             */
             g_free(data);
+        } else {
+            if (load_rom) {
+                snprintf(label, sizeof(label), "phdr #%d: %s", i, name);
+
+                /* rom_add_elf_program() seize the ownership of 'data' */
+                rom_add_elf_program(label, data, file_size, mem_size,
+                                    addr, as);
+            } else {
+                cpu_physical_memory_write(addr, data, file_size);
+                g_free(data);
+            }
         }
 
         total_size += mem_size;
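A standalone illustration of the rule the loader hunk above introduces: when a segment's zero-initialized tail (paddr + filesz up to paddr + memsz) would collide with another PT_LOAD segment, the loaded size falls back to filesz; otherwise memsz is used. The two segments below are invented for the example:

#include <stdio.h>
#include <stdint.h>

typedef struct {
    uint64_t paddr;
    uint64_t filesz;
    uint64_t memsz;
} Seg;

int main(void)
{
    /* Hypothetical layout: initialized data packed in ROM, with the
     * zero-init (.bss-like) part expected to be set up at runtime. */
    Seg seg[2] = {
        { 0x08000000, 0x1000, 0x3000 },   /* filesz < memsz: has a zero-init tail */
        { 0x08001000, 0x2000, 0x2000 },   /* next blob packed right after filesz */
    };

    for (int i = 0; i < 2; i++) {
        uint64_t load_size = seg[i].memsz;

        if (seg[i].memsz > seg[i].filesz) {
            uint64_t zero_start = seg[i].paddr + seg[i].filesz;
            uint64_t zero_end = seg[i].paddr + seg[i].memsz;

            for (int j = 0; j < 2; j++) {
                if (i == j) {
                    continue;
                }
                uint64_t other_start = seg[j].paddr;
                uint64_t other_end = seg[j].paddr + seg[j].memsz;

                /* Same interval test as the patch. */
                if (!(other_start >= zero_end || zero_start >= other_end)) {
                    load_size = seg[i].filesz;   /* truncate to the file size */
                    break;
                }
            }
        }
        printf("segment %d: load 0x%llx bytes\n", i,
               (unsigned long long)load_size);
    }
    return 0;
}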
@@ -27,6 +27,8 @@ typedef struct AspeedWDTState {
     uint32_t regs[ASPEED_WDT_REGS_MAX];
 
     uint32_t pclk_freq;
+    uint32_t silicon_rev;
+    uint32_t ext_pulse_width_mask;
 } AspeedWDTState;
 
 #endif  /* ASPEED_WDT_H */
@@ -85,8 +85,11 @@ struct TranslationBlock;
  * @has_work: Callback for checking if there is work to do.
  * @do_interrupt: Callback for interrupt handling.
  * @do_unassigned_access: Callback for unassigned access handling.
+ * (this is deprecated: new targets should use do_transaction_failed instead)
  * @do_unaligned_access: Callback for unaligned access handling, if
  * the target defines #ALIGNED_ONLY.
+ * @do_transaction_failed: Callback for handling failed memory transactions
+ * (ie bus faults or external aborts; not MMU faults)
  * @virtio_is_big_endian: Callback to return %true if a CPU which supports
  * runtime configurable endianness is currently big-endian. Non-configurable
  * CPUs can use the default implementation of this method. This method should
@@ -153,6 +156,10 @@ typedef struct CPUClass {
     void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);
+    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
+                                  unsigned size, MMUAccessType access_type,
+                                  int mmu_idx, MemTxAttrs attrs,
+                                  MemTxResult response, uintptr_t retaddr);
     bool (*virtio_is_big_endian)(CPUState *cpu);
     int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                            uint8_t *buf, int len, bool is_write);
@@ -847,6 +854,21 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
 
     cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
 }
+
+static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
+                                          vaddr addr, unsigned size,
+                                          MMUAccessType access_type,
+                                          int mmu_idx, MemTxAttrs attrs,
+                                          MemTxResult response,
+                                          uintptr_t retaddr)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    if (cc->do_transaction_failed) {
+        cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
+                                  mmu_idx, attrs, response, retaddr);
+    }
+}
 #endif
 
 #endif /* NEED_CPU_H */
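A standalone mock-up of how the new CPUClass hook above is meant to be consumed: cpu_transaction_failed() is a no-op unless the target installs a handler, so existing targets keep their old unassigned-access behaviour. The struct layout and the demo handler below are simplified stand-ins, not the real QEMU types:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t MemTxResult;
#define MEMTX_OK           0
#define MEMTX_DECODE_ERROR (1u << 1)

typedef enum { MMU_DATA_LOAD, MMU_DATA_STORE, MMU_INST_FETCH } MMUAccessType;

typedef struct CPUState CPUState;

typedef struct {
    /* Optional hook: only targets that care about bus faults set this. */
    void (*do_transaction_failed)(CPUState *cpu, uint64_t physaddr,
                                  uint64_t addr, unsigned size,
                                  MMUAccessType access_type,
                                  MemTxResult response);
} CPUClass;

struct CPUState {
    CPUClass *cc;
};

/* Mirrors the cpu_transaction_failed() inline above: silently does
 * nothing unless the target registered a handler. */
static void cpu_transaction_failed(CPUState *cpu, uint64_t physaddr,
                                   uint64_t addr, unsigned size,
                                   MMUAccessType access_type,
                                   MemTxResult response)
{
    if (cpu->cc->do_transaction_failed) {
        cpu->cc->do_transaction_failed(cpu, physaddr, addr, size,
                                       access_type, response);
    }
}

/* A hypothetical target handler: a real target would raise the
 * architectural exception (e.g. an external abort) here. */
static void demo_do_transaction_failed(CPUState *cpu, uint64_t physaddr,
                                       uint64_t addr, unsigned size,
                                       MMUAccessType access_type,
                                       MemTxResult response)
{
    printf("bus error: pa=0x%llx va=0x%llx size=%u type=%d resp=%u\n",
           (unsigned long long)physaddr, (unsigned long long)addr,
           size, (int)access_type, response);
}

int main(void)
{
    CPUClass cc = { .do_transaction_failed = demo_do_transaction_failed };
    CPUState cpu = { .cc = &cc };

    /* What io_readx() in the cputlb.c hunk does when dispatch fails. */
    cpu_transaction_failed(&cpu, 0x40000000, 0x1000, 4,
                           MMU_DATA_LOAD, MEMTX_DECODE_ERROR);
    return 0;
}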
@@ -101,7 +101,7 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                               uintptr_t retaddr)
 {
     CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-    return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
+    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE);
 }
 #endif
 
@@ -262,7 +262,7 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                           uintptr_t retaddr)
 {
     CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-    return io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
+    return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE);
 }
 
 void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
@@ -185,11 +185,6 @@ static void arm_cpu_reset(CPUState *s)
         uint32_t initial_pc; /* Loaded from 0x4 */
         uint8_t *rom;
 
-        /* For M profile we store FAULTMASK and PRIMASK in the
-         * PSTATE F and I bits; these are both clear at reset.
-         */
-        env->daif &= ~(PSTATE_I | PSTATE_F);
-
         /* The reset value of this bit is IMPDEF, but ARM recommends
          * that it resets to 1, so QEMU always does that rather than making
          * it dependent on CPU model.
@@ -513,6 +508,8 @@ static void arm_cpu_initfn(Object *obj)
 
     qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                              "gicv3-maintenance-interrupt", 1);
+    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
+                             "pmu-interrupt", 1);
 #endif
 
     /* DTB consumers generally don't in fact care what the 'compatible'
@ -416,8 +416,10 @@ typedef struct CPUARMState {
|
|||||||
uint32_t dfsr; /* Debug Fault Status Register */
|
uint32_t dfsr; /* Debug Fault Status Register */
|
||||||
uint32_t mmfar; /* MemManage Fault Address */
|
uint32_t mmfar; /* MemManage Fault Address */
|
||||||
uint32_t bfar; /* BusFault Address */
|
uint32_t bfar; /* BusFault Address */
|
||||||
unsigned mpu_ctrl; /* MPU_CTRL (some bits kept in sctlr_el[1]) */
|
unsigned mpu_ctrl; /* MPU_CTRL */
|
||||||
int exception;
|
int exception;
|
||||||
|
uint32_t primask;
|
||||||
|
uint32_t faultmask;
|
||||||
} v7m;
|
} v7m;
|
||||||
|
|
||||||
/* Information associated with an exception about to be taken:
|
/* Information associated with an exception about to be taken:
|
||||||
@ -583,6 +585,8 @@ struct ARMCPU {
|
|||||||
qemu_irq gt_timer_outputs[NUM_GTIMERS];
|
qemu_irq gt_timer_outputs[NUM_GTIMERS];
|
||||||
/* GPIO output for GICv3 maintenance interrupt signal */
|
/* GPIO output for GICv3 maintenance interrupt signal */
|
||||||
qemu_irq gicv3_maintenance_interrupt;
|
qemu_irq gicv3_maintenance_interrupt;
|
||||||
|
/* GPIO output for the PMU interrupt */
|
||||||
|
qemu_irq pmu_interrupt;
|
||||||
|
|
||||||
/* MemoryRegion to use for secure physical accesses */
|
/* MemoryRegion to use for secure physical accesses */
|
||||||
MemoryRegion *secure_memory;
|
MemoryRegion *secure_memory;
|
||||||
@ -882,6 +886,22 @@ void pmccntr_sync(CPUARMState *env);
|
|||||||
/* Mask of bits which may be set by exception return copying them from SPSR */
|
/* Mask of bits which may be set by exception return copying them from SPSR */
|
||||||
#define CPSR_ERET_MASK (~CPSR_RESERVED)
|
#define CPSR_ERET_MASK (~CPSR_RESERVED)
|
||||||
|
|
||||||
|
/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
|
||||||
|
#define XPSR_EXCP 0x1ffU
|
||||||
|
#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
|
||||||
|
#define XPSR_IT_2_7 CPSR_IT_2_7
|
||||||
|
#define XPSR_GE CPSR_GE
|
||||||
|
#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
|
||||||
|
#define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */
|
||||||
|
#define XPSR_IT_0_1 CPSR_IT_0_1
|
||||||
|
#define XPSR_Q CPSR_Q
|
||||||
|
#define XPSR_V CPSR_V
|
||||||
|
#define XPSR_C CPSR_C
|
||||||
|
#define XPSR_Z CPSR_Z
|
||||||
|
#define XPSR_N CPSR_N
|
||||||
|
#define XPSR_NZCV CPSR_NZCV
|
||||||
|
#define XPSR_IT CPSR_IT
|
||||||
|
|
||||||
#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
|
#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
|
||||||
#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
|
#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
|
||||||
#define TTBCR_PD0 (1U << 4)
|
#define TTBCR_PD0 (1U << 4)
|
||||||
@@ -986,26 +1006,28 @@ static inline uint32_t xpsr_read(CPUARMState *env)
 /* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
 static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
 {
-    if (mask & CPSR_NZCV) {
-        env->ZF = (~val) & CPSR_Z;
+    if (mask & XPSR_NZCV) {
+        env->ZF = (~val) & XPSR_Z;
         env->NF = val;
         env->CF = (val >> 29) & 1;
         env->VF = (val << 3) & 0x80000000;
     }
-    if (mask & CPSR_Q)
-        env->QF = ((val & CPSR_Q) != 0);
-    if (mask & (1 << 24))
-        env->thumb = ((val & (1 << 24)) != 0);
-    if (mask & CPSR_IT_0_1) {
+    if (mask & XPSR_Q) {
+        env->QF = ((val & XPSR_Q) != 0);
+    }
+    if (mask & XPSR_T) {
+        env->thumb = ((val & XPSR_T) != 0);
+    }
+    if (mask & XPSR_IT_0_1) {
         env->condexec_bits &= ~3;
         env->condexec_bits |= (val >> 25) & 3;
     }
-    if (mask & CPSR_IT_2_7) {
+    if (mask & XPSR_IT_2_7) {
         env->condexec_bits &= 3;
         env->condexec_bits |= (val >> 8) & 0xfc;
     }
-    if (mask & 0x1ff) {
-        env->v7m.exception = val & 0x1ff;
+    if (mask & XPSR_EXCP) {
+        env->v7m.exception = val & XPSR_EXCP;
     }
 }
 
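A minimal standalone sketch of how the new XPSR_* masks are meant to be composed by callers such as xpsr_write() and the v7m_mrs/msr helpers. This is illustration only, not code from this series; the mask values are copied from the definitions introduced above.

    /* sketch: mirrors of the XPSR_* masks added by this series */
    #include <stdint.h>
    #include <stdio.h>

    #define XPSR_EXCP 0x1ffU          /* exception number (IPSR field) */
    #define XPSR_T    (1U << 24)      /* Thumb bit (EPSR field) */
    #define XPSR_Q    (1U << 27)      /* sticky saturation (APSR field) */
    #define XPSR_NZCV 0xf0000000U     /* condition flags (APSR field) */

    int main(void)
    {
        uint32_t xpsr = 0x61000003;   /* example: Z and C set, T set, exception 3 */

        printf("exception = %u\n", (unsigned)(xpsr & XPSR_EXCP));
        printf("thumb     = %d\n", (xpsr & XPSR_T) != 0);

        /* an MSR APSR_nzcvq write uses exactly this combination */
        printf("APSR mask = 0x%08x\n", (unsigned)(XPSR_NZCV | XPSR_Q));
        return 0;
    }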
@@ -1609,13 +1631,19 @@ static inline int arm_highest_el(CPUARMState *env)
     return 1;
 }
 
+/* Return true if a v7M CPU is in Handler mode */
+static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
+{
+    return env->v7m.exception != 0;
+}
+
 /* Return the current Exception Level (as per ARMv8; note that this differs
  * from the ARMv7 Privilege Level).
  */
 static inline int arm_current_el(CPUARMState *env)
 {
     if (arm_feature(env, ARM_FEATURE_M)) {
-        return !((env->v7m.exception == 0) && (env->v7m.control & 1));
+        return arm_v7m_is_handler_mode(env) || !(env->v7m.control & 1);
     }
 
     if (is_a64(env)) {
@@ -2160,7 +2188,7 @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
          * we're in a HardFault or NMI handler.
          */
         if ((env->v7m.exception > 0 && env->v7m.exception <= 3)
-            || env->daif & PSTATE_F) {
+            || env->v7m.faultmask) {
             return arm_to_core_mmu_idx(ARMMMUIdx_MNegPri);
         }
 
@@ -2615,7 +2643,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
     }
     *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
 
-    if (env->v7m.exception != 0) {
+    if (arm_v7m_is_handler_mode(env)) {
         *flags |= ARM_TBFLAG_HANDLER_MASK;
     }
 
@@ -20,13 +20,13 @@
 
 #ifndef CONFIG_USER_ONLY
 static bool get_phys_addr(CPUARMState *env, target_ulong address,
-                          int access_type, ARMMMUIdx mmu_idx,
+                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                           target_ulong *page_size, uint32_t *fsr,
                           ARMMMUFaultInfo *fi);
 
 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
-                               int access_type, ARMMMUIdx mmu_idx,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr, uint32_t *fsr,
                                ARMMMUFaultInfo *fi);
@@ -2135,7 +2135,7 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
 }
 
 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
-                             int access_type, ARMMMUIdx mmu_idx)
+                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
 {
     hwaddr phys_addr;
     target_ulong page_size;
@@ -2194,7 +2194,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
 
 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 {
-    int access_type = ri->opc2 & 1;
+    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
     uint64_t par64;
     ARMMMUIdx mmu_idx;
     int el = arm_current_el(env);
@@ -2253,7 +2253,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
 {
-    int access_type = ri->opc2 & 1;
+    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
     uint64_t par64;
 
     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
@@ -2273,7 +2273,7 @@ static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
 {
-    int access_type = ri->opc2 & 1;
+    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
     ARMMMUIdx mmu_idx;
     int secure = arm_is_secure_below_el3(env);
 
@@ -6114,7 +6114,7 @@ static void v7m_push_stack(ARMCPU *cpu)
     /* Align stack pointer if the guest wants that */
     if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
         env->regs[13] -= 4;
-        xpsr |= 0x200;
+        xpsr |= XPSR_SPREALIGN;
     }
     /* Switch to the handler mode. */
     v7m_push(env, xpsr);
@@ -6138,11 +6138,11 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     bool rettobase = false;
 
     /* We can only get here from an EXCP_EXCEPTION_EXIT, and
-     * arm_v7m_do_unassigned_access() enforces the architectural rule
+     * gen_bx_excret() enforces the architectural rule
      * that jumps to magic addresses don't have magic behaviour unless
      * we're in Handler mode (compare pseudocode BXWritePC()).
      */
-    assert(env->v7m.exception != 0);
+    assert(arm_v7m_is_handler_mode(env));
 
     /* In the spec pseudocode ExceptionReturn() is called directly
      * from BXWritePC() and gets the full target PC value including
@@ -6167,7 +6167,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
 
     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
         /* Auto-clear FAULTMASK on return from other than NMI */
-        env->daif &= ~PSTATE_F;
+        env->v7m.faultmask = 0;
     }
 
     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
@@ -6239,16 +6239,17 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         env->regs[15] &= ~1U;
     }
     xpsr = v7m_pop(env);
-    xpsr_write(env, xpsr, 0xfffffdff);
+    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
     /* Undo stack alignment. */
-    if (xpsr & 0x200)
+    if (xpsr & XPSR_SPREALIGN) {
         env->regs[13] |= 4;
+    }
 
     /* The restored xPSR exception field will be zero if we're
      * resuming in Thread mode. If that doesn't match what the
      * exception return type specified then this is a UsageFault.
      */
-    if (return_to_handler == (env->v7m.exception == 0)) {
+    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
         /* Take an INVPC UsageFault by pushing the stack again. */
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
         env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
@@ -6305,13 +6306,6 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
 
     arm_log_exception(cs->exception_index);
 
-    lr = 0xfffffff1;
-    if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
-        lr |= 4;
-    }
-    if (env->v7m.exception == 0)
-        lr |= 8;
-
     /* For exceptions we just mark as pending on the NVIC, and let that
        handle it. */
     switch (cs->exception_index) {
@@ -6402,6 +6396,14 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
         return; /* Never happens. Keep compiler happy. */
     }
 
+    lr = 0xfffffff1;
+    if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
+        lr |= 4;
+    }
+    if (!arm_v7m_is_handler_mode(env)) {
+        lr |= 8;
+    }
+
     v7m_push_stack(cpu);
     v7m_exception_taken(cpu, lr);
     qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
@@ -7505,7 +7507,7 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
 }
 
 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
-                             int access_type, ARMMMUIdx mmu_idx,
+                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size, uint32_t *fsr,
                              ARMMMUFaultInfo *fi)
@@ -7621,7 +7623,7 @@ do_fault:
 }
 
 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
-                             int access_type, ARMMMUIdx mmu_idx,
+                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                              target_ulong *page_size, uint32_t *fsr,
                              ARMMMUFaultInfo *fi)
@@ -7728,7 +7730,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
         if (pxn && !regime_is_user(env, mmu_idx)) {
             xn = 1;
         }
-        if (xn && access_type == 2)
+        if (xn && access_type == MMU_INST_FETCH)
             goto do_fault;
 
         if (arm_feature(env, ARM_FEATURE_V6K) &&
@@ -7843,7 +7845,7 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
 }
 
 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
-                               int access_type, ARMMMUIdx mmu_idx,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr, uint32_t *fsr,
                                ARMMMUFaultInfo *fi)
@@ -8251,7 +8253,7 @@ static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
 }
 
 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
-                                 int access_type, ARMMMUIdx mmu_idx,
+                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                  hwaddr *phys_ptr, int *prot, uint32_t *fsr)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
@@ -8410,7 +8412,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
 }
 
 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
-                                 int access_type, ARMMMUIdx mmu_idx,
+                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                  hwaddr *phys_ptr, int *prot, uint32_t *fsr)
 {
     int n;
@@ -8418,6 +8420,13 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
     uint32_t base;
     bool is_user = regime_is_user(env, mmu_idx);
 
+    if (regime_translation_disabled(env, mmu_idx)) {
+        /* MPU disabled. */
+        *phys_ptr = address;
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return false;
+    }
+
     *phys_ptr = address;
     for (n = 7; n >= 0; n--) {
         base = env->cp15.c6_region[n];
@@ -8437,7 +8446,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
         return true;
     }
 
-    if (access_type == 2) {
+    if (access_type == MMU_INST_FETCH) {
         mask = env->cp15.pmsav5_insn_ap;
     } else {
         mask = env->cp15.pmsav5_data_ap;
@@ -8508,7 +8517,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
  * @fsr: set to the DFSR/IFSR value on failure
  */
 static bool get_phys_addr(CPUARMState *env, target_ulong address,
-                          int access_type, ARMMMUIdx mmu_idx,
+                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                           target_ulong *page_size, uint32_t *fsr,
                           ARMMMUFaultInfo *fi)
@@ -8567,16 +8576,20 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
         }
     }
 
-    /* pmsav7 has special handling for when MPU is disabled so call it before
-     * the common MMU/MPU disabled check below.
-     */
-    if (arm_feature(env, ARM_FEATURE_PMSA) &&
-        arm_feature(env, ARM_FEATURE_V7)) {
+    if (arm_feature(env, ARM_FEATURE_PMSA)) {
         bool ret;
         *page_size = TARGET_PAGE_SIZE;
-        ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
-                                   phys_ptr, prot, fsr);
-        qemu_log_mask(CPU_LOG_MMU, "PMSAv7 MPU lookup for %s at 0x%08" PRIx32
+
+        if (arm_feature(env, ARM_FEATURE_V7)) {
+            /* PMSAv7 */
+            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+                                       phys_ptr, prot, fsr);
+        } else {
+            /* Pre-v7 MPU */
+            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
+                                       phys_ptr, prot, fsr);
+        }
+        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                       " mmu_idx %u -> %s (prot %c%c%c)\n",
                       access_type == MMU_DATA_LOAD ? "reading" :
                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
@@ -8589,21 +8602,16 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
         return ret;
     }
 
+    /* Definitely a real MMU, not an MPU */
+
     if (regime_translation_disabled(env, mmu_idx)) {
-        /* MMU/MPU disabled. */
+        /* MMU disabled. */
         *phys_ptr = address;
         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
         *page_size = TARGET_PAGE_SIZE;
         return 0;
     }
 
-    if (arm_feature(env, ARM_FEATURE_PMSA)) {
-        /* Pre-v7 MPU */
-        *page_size = TARGET_PAGE_SIZE;
-        return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
-                                    phys_ptr, prot, fsr);
-    }
-
     if (regime_using_lpae_format(env, mmu_idx)) {
         return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
                                   attrs, prot, page_size, fsr, fi);
|
|||||||
* fsr with ARM DFSR/IFSR fault register format value on failure.
|
* fsr with ARM DFSR/IFSR fault register format value on failure.
|
||||||
*/
|
*/
|
||||||
bool arm_tlb_fill(CPUState *cs, vaddr address,
|
bool arm_tlb_fill(CPUState *cs, vaddr address,
|
||||||
int access_type, int mmu_idx, uint32_t *fsr,
|
MMUAccessType access_type, int mmu_idx, uint32_t *fsr,
|
||||||
ARMMMUFaultInfo *fi)
|
ARMMMUFaultInfo *fi)
|
||||||
{
|
{
|
||||||
ARMCPU *cpu = ARM_CPU(cs);
|
ARMCPU *cpu = ARM_CPU(cs);
|
||||||
@@ -8682,10 +8690,10 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
     case 0 ... 7: /* xPSR sub-fields */
         mask = 0;
         if ((reg & 1) && el) {
-            mask |= 0x000001ff; /* IPSR (unpriv. reads as zero) */
+            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
         if (!(reg & 4)) {
-            mask |= 0xf8000000; /* APSR */
+            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
         }
         /* EPSR reads as zero */
         return xpsr_read(env) & mask;
@@ -8706,12 +8714,12 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
         return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
             env->regs[13] : env->v7m.other_sp;
     case 16: /* PRIMASK */
-        return (env->daif & PSTATE_I) != 0;
+        return env->v7m.primask;
     case 17: /* BASEPRI */
     case 18: /* BASEPRI_MAX */
         return env->v7m.basepri;
     case 19: /* FAULTMASK */
-        return (env->daif & PSTATE_F) != 0;
+        return env->v7m.faultmask;
     default:
         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                       " register %d\n", reg);
@@ -8743,10 +8751,10 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
         uint32_t apsrmask = 0;
 
         if (mask & 8) {
-            apsrmask |= 0xf8000000; /* APSR NZCVQ */
+            apsrmask |= XPSR_NZCV | XPSR_Q;
         }
         if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
-            apsrmask |= 0x000f0000; /* APSR GE[3:0] */
+            apsrmask |= XPSR_GE;
         }
         xpsr_write(env, val, apsrmask);
     }
@@ -8766,11 +8774,7 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
         }
         break;
     case 16: /* PRIMASK */
-        if (val & 1) {
-            env->daif |= PSTATE_I;
-        } else {
-            env->daif &= ~PSTATE_I;
-        }
+        env->v7m.primask = val & 1;
         break;
     case 17: /* BASEPRI */
         env->v7m.basepri = val & 0xff;
@@ -8781,11 +8785,7 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
             env->v7m.basepri = val;
         break;
     case 19: /* FAULTMASK */
-        if (val & 1) {
-            env->daif |= PSTATE_F;
-        } else {
-            env->daif &= ~PSTATE_F;
-        }
+        env->v7m.faultmask = val & 1;
         break;
     case 20: /* CONTROL */
         /* Writing to the SPSEL bit only has an effect if we are in
@@ -8793,7 +8793,7 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
          * switch_v7m_sp() deals with updating the SPSEL bit in
          * env->v7m.control, so we only need update the others.
          */
-        if (env->v7m.exception == 0) {
+        if (!arm_v7m_is_handler_mode(env)) {
             switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
         }
         env->v7m.control &= ~R_V7M_CONTROL_NPRIV_MASK;
@@ -448,16 +448,19 @@ void arm_handle_psci_call(ARMCPU *cpu);
  * @s2addr: Address that caused a fault at stage 2
  * @stage2: True if we faulted at stage 2
  * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
+ * @ea: True if we should set the EA (external abort type) bit in syndrome
  */
 typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
 struct ARMMMUFaultInfo {
     target_ulong s2addr;
     bool stage2;
     bool s1ptw;
+    bool ea;
 };
 
 /* Do a page table walk and add page to TLB if possible */
-bool arm_tlb_fill(CPUState *cpu, vaddr address, int rw, int mmu_idx,
+bool arm_tlb_fill(CPUState *cpu, vaddr address,
+                  MMUAccessType access_type, int mmu_idx,
                   uint32_t *fsr, ARMMMUFaultInfo *fi);
 
 /* Return true if the stage 1 translation regime is using LPAE format page
@@ -567,7 +567,11 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
             switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
         }
 
-        /* XXX PMU IRQ is missing */
+        if (switched_level & KVM_ARM_DEV_PMU) {
+            qemu_set_irq(cpu->pmu_interrupt,
+                         !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
+            switched_level &= ~KVM_ARM_DEV_PMU;
+        }
 
         if (switched_level) {
             qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
@@ -522,8 +522,12 @@ bool kvm_arm_hw_debug_active(CPUState *cs)
     return false;
 }
 
-int kvm_arm_pmu_create(CPUState *cs, int irq)
+void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
+{
+    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+}
+
+void kvm_arm_pmu_init(CPUState *cs)
 {
     qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
-    return 0;
 }
@@ -381,46 +381,56 @@ static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
     return NULL;
 }
 
-static bool kvm_arm_pmu_support_ctrl(CPUState *cs, struct kvm_device_attr *attr)
-{
-    return kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr) == 0;
-}
-
-int kvm_arm_pmu_create(CPUState *cs, int irq)
+static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
 {
     int err;
 
+    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
+    if (err != 0) {
+        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
+        return false;
+    }
+
+    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
+    if (err != 0) {
+        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
+        return false;
+    }
+
+    return true;
+}
+
+void kvm_arm_pmu_init(CPUState *cs)
+{
+    struct kvm_device_attr attr = {
+        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
+    };
+
+    if (!ARM_CPU(cs)->has_pmu) {
+        return;
+    }
+    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
+        error_report("failed to init PMU");
+        abort();
+    }
+}
+
+void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
+{
     struct kvm_device_attr attr = {
         .group = KVM_ARM_VCPU_PMU_V3_CTRL,
         .addr = (intptr_t)&irq,
         .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
-        .flags = 0,
     };
 
-    if (!kvm_arm_pmu_support_ctrl(cs, &attr)) {
-        return 0;
+    if (!ARM_CPU(cs)->has_pmu) {
+        return;
     }
-
-    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
-    if (err < 0) {
-        fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
-                strerror(-err));
+    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
+        error_report("failed to set irq for PMU");
         abort();
     }
-
-    attr.group = KVM_ARM_VCPU_PMU_V3_CTRL;
-    attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
-    attr.addr = 0;
-    attr.flags = 0;
-
-    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
-    if (err < 0) {
-        fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
-                strerror(-err));
-        abort();
-    }
-
-    return 1;
 }
 
 static inline void set_feature(uint64_t *features, int feature)
@@ -508,8 +518,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
     if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
         cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
     }
-    if (!kvm_irqchip_in_kernel() ||
-        !kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
+    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
         cpu->has_pmu = false;
     }
     if (cpu->has_pmu) {
@@ -195,7 +195,8 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu);
 
 int kvm_arm_vgic_probe(void);
 
-int kvm_arm_pmu_create(CPUState *cs, int irq);
+void kvm_arm_pmu_set_irq(CPUState *cs, int irq);
+void kvm_arm_pmu_init(CPUState *cs);
 
 #else
 
@@ -204,10 +205,8 @@ static inline int kvm_arm_vgic_probe(void)
     return 0;
 }
 
-static inline int kvm_arm_pmu_create(CPUState *cs, int irq)
-{
-    return 0;
-}
+static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) {}
+static inline void kvm_arm_pmu_init(CPUState *cs) {}
 
 #endif
 
@@ -97,6 +97,17 @@ static bool m_needed(void *opaque)
     return arm_feature(env, ARM_FEATURE_M);
 }
 
+static const VMStateDescription vmstate_m_faultmask_primask = {
+    .name = "cpu/m/faultmask-primask",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(env.v7m.faultmask, ARMCPU),
+        VMSTATE_UINT32(env.v7m.primask, ARMCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_m = {
     .name = "cpu/m",
     .version_id = 4,
@@ -115,6 +126,10 @@ static const VMStateDescription vmstate_m = {
         VMSTATE_UINT32(env.v7m.mpu_ctrl, ARMCPU),
         VMSTATE_INT32(env.v7m.exception, ARMCPU),
         VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription*[]) {
+        &vmstate_m_faultmask_primask,
+        NULL
     }
 };
 
@@ -201,6 +216,40 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
     CPUARMState *env = &cpu->env;
     uint32_t val = qemu_get_be32(f);
 
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        if (val & XPSR_EXCP) {
+            /* This is a CPSR format value from an older QEMU. (We can tell
+             * because values transferred in XPSR format always have zero
+             * for the EXCP field, and CPSR format will always have bit 4
+             * set in CPSR_M.) Rearrange it into XPSR format. The significant
+             * differences are that the T bit is not in the same place, the
+             * primask/faultmask info may be in the CPSR I and F bits, and
+             * we do not want the mode bits.
+             */
+            uint32_t newval = val;
+
+            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
+            if (val & CPSR_T) {
+                newval |= XPSR_T;
+            }
+            /* If the I or F bits are set then this is a migration from
+             * an old QEMU which still stored the M profile FAULTMASK
+             * and PRIMASK in env->daif. For a new QEMU, the data is
+             * transferred using the vmstate_m_faultmask_primask subsection.
+             */
+            if (val & CPSR_F) {
+                env->v7m.faultmask = 1;
+            }
+            if (val & CPSR_I) {
+                env->v7m.primask = 1;
+            }
+            val = newval;
+        }
+        /* Ignore the low bits, they are handled by vmstate_m. */
+        xpsr_write(env, val, ~XPSR_EXCP);
+        return 0;
+    }
+
     env->aarch64 = ((val & PSTATE_nRW) == 0);
 
     if (is_a64(env)) {
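For illustration, the CPSR-to-XPSR rearrangement that the new get_cpsr() code performs for inbound migration can be exercised standalone as below. This is a sketch only, not part of the patch; it assumes the usual ARM bit positions for the CPSR and XPSR fields it mirrors.

    /* sketch: the same rearrangement as the M profile branch of get_cpsr() */
    #include <stdint.h>
    #include <stdio.h>

    #define CPSR_T    (1U << 5)
    #define CPSR_F    (1U << 6)
    #define CPSR_I    (1U << 7)
    #define CPSR_GE   (0xfU << 16)
    #define CPSR_IT   ((3U << 25) | (0xfcU << 8))
    #define CPSR_Q    (1U << 27)
    #define CPSR_NZCV 0xf0000000U
    #define XPSR_T    (1U << 24)

    struct m_state { uint32_t primask, faultmask; };

    static uint32_t cpsr_to_xpsr(uint32_t val, struct m_state *m)
    {
        /* keep only the flag, Q, IT and GE bits; drop the mode bits */
        uint32_t newval = val & (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);

        if (val & CPSR_T) {
            newval |= XPSR_T;                 /* T moves from bit 5 to bit 24 */
        }
        m->faultmask = (val & CPSR_F) != 0;   /* old QEMU kept these in daif */
        m->primask = (val & CPSR_I) != 0;
        return newval;
    }

    int main(void)
    {
        struct m_state m = {0};
        /* example old-format value: Z and C set, T, I, F and mode bits set */
        uint32_t xpsr = cpsr_to_xpsr(0x600000f3, &m);
        printf("xpsr=0x%08x primask=%u faultmask=%u\n",
               (unsigned)xpsr, (unsigned)m.primask, (unsigned)m.faultmask);
        return 0;
    }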
@@ -219,7 +268,10 @@ static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
     CPUARMState *env = &cpu->env;
     uint32_t val;
 
-    if (is_a64(env)) {
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
+        val = xpsr_read(env) & ~XPSR_EXCP;
+    } else if (is_a64(env)) {
         val = pstate_read(env);
     } else {
         val = cpsr_read(env);
@@ -80,7 +80,7 @@ uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
 
 static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                             unsigned int target_el,
-                                            bool same_el,
+                                            bool same_el, bool ea,
                                             bool s1ptw, bool is_write,
                                             int fsc)
 {
@@ -99,7 +99,7 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
      */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
         syn = syn_data_abort_no_iss(same_el,
-                                    0, 0, s1ptw, is_write, fsc);
+                                    ea, 0, s1ptw, is_write, fsc);
     } else {
         /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
          * syndrome created at translation time.
@@ -107,7 +107,7 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
          */
         syn = syn_data_abort_with_iss(same_el,
                                       0, 0, 0, 0, 0,
-                                      0, 0, s1ptw, is_write, fsc,
+                                      ea, 0, s1ptw, is_write, fsc,
                                       false);
         /* Merge the runtime syndrome with the template syndrome. */
         syn |= template_syn;
|||||||
return syn;
|
return syn;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
|
||||||
|
uint32_t fsr, uint32_t fsc, ARMMMUFaultInfo *fi)
|
||||||
|
{
|
||||||
|
CPUARMState *env = &cpu->env;
|
||||||
|
int target_el;
|
||||||
|
bool same_el;
|
||||||
|
uint32_t syn, exc;
|
||||||
|
|
||||||
|
target_el = exception_target_el(env);
|
||||||
|
if (fi->stage2) {
|
||||||
|
target_el = 2;
|
||||||
|
env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
|
||||||
|
}
|
||||||
|
same_el = (arm_current_el(env) == target_el);
|
||||||
|
|
||||||
|
if (fsc == 0x3f) {
|
||||||
|
/* Caller doesn't have a long-format fault status code. This
|
||||||
|
* should only happen if this fault will never actually be reported
|
||||||
|
* to an EL that uses a syndrome register. Check that here.
|
||||||
|
* 0x3f is a (currently) reserved FSC code, in case the constructed
|
||||||
|
* syndrome does leak into the guest somehow.
|
||||||
|
*/
|
||||||
|
assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (access_type == MMU_INST_FETCH) {
|
||||||
|
syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
|
||||||
|
exc = EXCP_PREFETCH_ABORT;
|
||||||
|
} else {
|
||||||
|
syn = merge_syn_data_abort(env->exception.syndrome, target_el,
|
||||||
|
same_el, fi->ea, fi->s1ptw,
|
||||||
|
access_type == MMU_DATA_STORE,
|
||||||
|
fsc);
|
||||||
|
if (access_type == MMU_DATA_STORE
|
||||||
|
&& arm_feature(env, ARM_FEATURE_V6)) {
|
||||||
|
fsr |= (1 << 11);
|
||||||
|
}
|
||||||
|
exc = EXCP_DATA_ABORT;
|
||||||
|
}
|
||||||
|
|
||||||
|
env->exception.vaddress = addr;
|
||||||
|
env->exception.fsr = fsr;
|
||||||
|
raise_exception(env, exc, syn, target_el);
|
||||||
|
}
|
||||||
|
|
||||||
/* try to fill the TLB and return an exception if error. If retaddr is
|
/* try to fill the TLB and return an exception if error. If retaddr is
|
||||||
* NULL, it means that the function was called in C code (i.e. not
|
* NULL, it means that the function was called in C code (i.e. not
|
||||||
* from generated code or from helper.c)
|
* from generated code or from helper.c)
|
||||||
@ -129,23 +174,13 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
|
|||||||
ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
|
ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
|
||||||
if (unlikely(ret)) {
|
if (unlikely(ret)) {
|
||||||
ARMCPU *cpu = ARM_CPU(cs);
|
ARMCPU *cpu = ARM_CPU(cs);
|
||||||
CPUARMState *env = &cpu->env;
|
uint32_t fsc;
|
||||||
uint32_t syn, exc, fsc;
|
|
||||||
unsigned int target_el;
|
|
||||||
bool same_el;
|
|
||||||
|
|
||||||
if (retaddr) {
|
if (retaddr) {
|
||||||
/* now we have a real cpu fault */
|
/* now we have a real cpu fault */
|
||||||
cpu_restore_state(cs, retaddr);
|
cpu_restore_state(cs, retaddr);
|
||||||
}
|
}
|
||||||
|
|
||||||
target_el = exception_target_el(env);
|
|
||||||
if (fi.stage2) {
|
|
||||||
target_el = 2;
|
|
||||||
env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
|
|
||||||
}
|
|
||||||
same_el = arm_current_el(env) == target_el;
|
|
||||||
|
|
||||||
if (fsr & (1 << 9)) {
|
if (fsr & (1 << 9)) {
|
||||||
/* LPAE format fault status register : bottom 6 bits are
|
/* LPAE format fault status register : bottom 6 bits are
|
||||||
* status code in the same form as needed for syndrome
|
* status code in the same form as needed for syndrome
|
||||||
@ -153,34 +188,15 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
|
|||||||
fsc = extract32(fsr, 0, 6);
|
fsc = extract32(fsr, 0, 6);
|
||||||
} else {
|
} else {
|
||||||
/* Short format FSR : this fault will never actually be reported
|
/* Short format FSR : this fault will never actually be reported
|
||||||
* to an EL that uses a syndrome register. Check that here,
|
* to an EL that uses a syndrome register. Use a (currently)
|
||||||
* and use a (currently) reserved FSR code in case the constructed
|
* reserved FSR code in case the constructed syndrome does leak
|
||||||
* syndrome does leak into the guest somehow.
|
* into the guest somehow. deliver_fault will assert that
|
||||||
|
* we don't target an EL using the syndrome.
|
||||||
*/
|
*/
|
||||||
assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
|
|
||||||
fsc = 0x3f;
|
fsc = 0x3f;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* For insn and data aborts we assume there is no instruction syndrome
|
deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
|
||||||
* information; this is always true for exceptions reported to EL1.
|
|
||||||
*/
|
|
||||||
if (access_type == MMU_INST_FETCH) {
|
|
||||||
syn = syn_insn_abort(same_el, 0, fi.s1ptw, fsc);
|
|
||||||
exc = EXCP_PREFETCH_ABORT;
|
|
||||||
} else {
|
|
||||||
syn = merge_syn_data_abort(env->exception.syndrome, target_el,
|
|
||||||
same_el, fi.s1ptw,
|
|
||||||
access_type == MMU_DATA_STORE, fsc);
|
|
||||||
if (access_type == MMU_DATA_STORE
|
|
||||||
&& arm_feature(env, ARM_FEATURE_V6)) {
|
|
||||||
fsr |= (1 << 11);
|
|
||||||
}
|
|
||||||
exc = EXCP_DATA_ABORT;
|
|
||||||
}
|
|
||||||
|
|
||||||
env->exception.vaddress = addr;
|
|
||||||
env->exception.fsr = fsr;
|
|
||||||
raise_exception(env, exc, syn, target_el);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -191,9 +207,8 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
-    int target_el;
-    bool same_el;
-    uint32_t syn;
+    uint32_t fsr, fsc;
+    ARMMMUFaultInfo fi = {};
     ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
 
     if (retaddr) {
@@ -201,28 +216,17 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
         cpu_restore_state(cs, retaddr);
     }
 
-    target_el = exception_target_el(env);
-    same_el = (arm_current_el(env) == target_el);
-
-    env->exception.vaddress = vaddr;
-
     /* the DFSR for an alignment fault depends on whether we're using
      * the LPAE long descriptor format, or the short descriptor format
      */
     if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
-        env->exception.fsr = (1 << 9) | 0x21;
+        fsr = (1 << 9) | 0x21;
     } else {
-        env->exception.fsr = 0x1;
+        fsr = 0x1;
     }
+    fsc = 0x21;
 
-    if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) {
-        env->exception.fsr |= (1 << 11);
-    }
-
-    syn = merge_syn_data_abort(env->exception.syndrome, target_el,
-                               same_el, 0, access_type == MMU_DATA_STORE,
-                               0x21);
-    raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
+    deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
 }
 
 #endif /* !defined(CONFIG_USER_ONLY) */
@@ -370,6 +374,11 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
     int cur_el = arm_current_el(env);
     uint64_t mask;
 
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        /* M profile cores can never trap WFI/WFE. */
+        return 0;
+    }
+
     /* If we are currently in EL0 then we need to check if SCTLR is set up for
      * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
      */
@@ -2217,29 +2217,34 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
         } else {
             do_fp_st(s, rt, tcg_addr, size);
         }
-    } else {
-        TCGv_i64 tcg_rt = cpu_reg(s, rt);
-        if (is_load) {
-            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
-                      false, 0, false, false);
-        } else {
-            do_gpr_st(s, tcg_rt, tcg_addr, size,
-                      false, 0, false, false);
-        }
-    }
-    tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
-    if (is_vector) {
+        tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
         if (is_load) {
             do_fp_ld(s, rt2, tcg_addr, size);
         } else {
             do_fp_st(s, rt2, tcg_addr, size);
         }
     } else {
+        TCGv_i64 tcg_rt = cpu_reg(s, rt);
         TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
+
         if (is_load) {
+            TCGv_i64 tmp = tcg_temp_new_i64();
+
+            /* Do not modify tcg_rt before recognizing any exception
+             * from the second load.
+             */
+            do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
+                      false, 0, false, false);
+            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
             do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
                       false, 0, false, false);
+
+            tcg_gen_mov_i64(tcg_rt, tmp);
+            tcg_temp_free_i64(tmp);
         } else {
+            do_gpr_st(s, tcg_rt, tcg_addr, size,
+                      false, 0, false, false);
+            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
             do_gpr_st(s, tcg_rt2, tcg_addr, size,
                       false, 0, false, false);
         }
@@ -9735,10 +9735,23 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
             abort();
         case 4:
             if (insn & (1 << 22)) {
-                /* Other load/store, table branch. */
+                /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
+                 * - load/store doubleword, load/store exclusive, ldacq/strel,
+                 *   table branch.
+                 */
                 if (insn & 0x01200000) {
-                    /* Load/store doubleword. */
+                    /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
+                     *  - load/store dual (post-indexed)
+                     * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
+                     *  - load/store dual (literal and immediate)
+                     * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
+                     *  - load/store dual (pre-indexed)
+                     */
                     if (rn == 15) {
+                        if (insn & (1 << 21)) {
+                            /* UNPREDICTABLE */
+                            goto illegal_op;
+                        }
                         addr = tcg_temp_new_i32();
                         tcg_gen_movi_i32(addr, s->pc & ~3);
                     } else {
@@ -9772,15 +9785,18 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                     }
                     if (insn & (1 << 21)) {
                         /* Base writeback. */
-                        if (rn == 15)
-                            goto illegal_op;
                         tcg_gen_addi_i32(addr, addr, offset - 4);
                         store_reg(s, rn, addr);
                     } else {
                         tcg_temp_free_i32(addr);
                     }
                 } else if ((insn & (1 << 23)) == 0) {
-                    /* Load/store exclusive word. */
+                    /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
+                     *  - load/store exclusive word
+                     */
+                    if (rs == 15) {
+                        goto illegal_op;
+                    }
                     addr = tcg_temp_local_new_i32();
                     load_reg_var(s, addr, rn);
                     tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
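The bit-pattern comments above line up with the existing mask tests in disas_thumb2_insn(). The standalone sketch below shows that correspondence for illustration only: the encodings in main() are made-up 32-bit Thumb words chosen to hit each branch, and the classification is deliberately simplified relative to the full decoder.

    #include <stdint.h>
    #include <stdio.h>

    static const char *classify(uint32_t insn)
    {
        if (insn & (1u << 22)) {                /* 0b1110_100x_x1xx_... space */
            if (insn & 0x01200000) {
                return "load/store dual";
            } else if ((insn & (1u << 23)) == 0) {
                return "load/store exclusive word";
            }
            return "table branch / other";
        }
        return "outside this decode path";
    }

    int main(void)
    {
        printf("%s\n", classify(0xe9400000u));  /* bits 22 and 24 set */
        printf("%s\n", classify(0xe8500000u));  /* bit 22 set, bits 24/23/21 clear */
        return 0;
    }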
@@ -11137,7 +11153,9 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
             break;
         }
         if (insn & (1 << 10)) {
-            /* data processing extended or blx */
+            /* 0b0100_01xx_xxxx_xxxx
+             * - data processing extended, branch and exchange
+             */
             rd = (insn & 7) | ((insn >> 4) & 8);
             rm = (insn >> 3) & 0xf;
             op = (insn >> 8) & 3;
@@ -11160,10 +11178,21 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
             tmp = load_reg(s, rm);
             store_reg(s, rd, tmp);
             break;
-        case 3:/* branch [and link] exchange thumb register */
-            tmp = load_reg(s, rm);
-            if (insn & (1 << 7)) {
+        case 3:
+        {
+            /* 0b0100_0111_xxxx_xxxx
+             * - branch [and link] exchange thumb register
+             */
+            bool link = insn & (1 << 7);
+
+            if (insn & 7) {
+                goto undef;
+            }
+            if (link) {
                 ARCH(5);
+            }
+            tmp = load_reg(s, rm);
+            if (link) {
                 val = (uint32_t)s->pc | 1;
                 tmp2 = tcg_temp_new_i32();
                 tcg_gen_movi_i32(tmp2, val);
@@ -11175,6 +11204,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
             }
             break;
         }
+        }
         break;
     }
 
||||||
@ -12185,8 +12215,6 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
|
|||||||
ARMCPU *cpu = ARM_CPU(cs);
|
ARMCPU *cpu = ARM_CPU(cs);
|
||||||
CPUARMState *env = &cpu->env;
|
CPUARMState *env = &cpu->env;
|
||||||
int i;
|
int i;
|
||||||
uint32_t psr;
|
|
||||||
const char *ns_status;
|
|
||||||
|
|
||||||
if (is_a64(env)) {
|
if (is_a64(env)) {
|
||||||
aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
|
aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
|
||||||
@ -12200,24 +12228,48 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
|
|||||||
else
|
else
|
||||||
cpu_fprintf(f, " ");
|
cpu_fprintf(f, " ");
|
||||||
}
|
}
|
||||||
psr = cpsr_read(env);
|
|
||||||
|
|
||||||
if (arm_feature(env, ARM_FEATURE_EL3) &&
|
if (arm_feature(env, ARM_FEATURE_M)) {
|
||||||
(psr & CPSR_M) != ARM_CPU_MODE_MON) {
|
uint32_t xpsr = xpsr_read(env);
|
||||||
ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
|
const char *mode;
|
||||||
|
|
||||||
|
if (xpsr & XPSR_EXCP) {
|
||||||
|
mode = "handler";
|
||||||
|
} else {
|
||||||
|
if (env->v7m.control & R_V7M_CONTROL_NPRIV_MASK) {
|
||||||
|
mode = "unpriv-thread";
|
||||||
|
} else {
|
||||||
|
mode = "priv-thread";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s\n",
|
||||||
|
xpsr,
|
||||||
|
xpsr & XPSR_N ? 'N' : '-',
|
||||||
|
xpsr & XPSR_Z ? 'Z' : '-',
|
||||||
|
xpsr & XPSR_C ? 'C' : '-',
|
||||||
|
xpsr & XPSR_V ? 'V' : '-',
|
||||||
|
xpsr & XPSR_T ? 'T' : 'A',
|
||||||
|
mode);
|
||||||
} else {
|
} else {
|
||||||
ns_status = "";
|
uint32_t psr = cpsr_read(env);
|
||||||
}
|
const char *ns_status = "";
|
||||||
|
|
||||||
cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
|
if (arm_feature(env, ARM_FEATURE_EL3) &&
|
||||||
psr,
|
(psr & CPSR_M) != ARM_CPU_MODE_MON) {
|
||||||
psr & (1 << 31) ? 'N' : '-',
|
ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
|
||||||
psr & (1 << 30) ? 'Z' : '-',
|
}
|
||||||
psr & (1 << 29) ? 'C' : '-',
|
|
||||||
psr & (1 << 28) ? 'V' : '-',
|
cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
|
||||||
psr & CPSR_T ? 'T' : 'A',
|
psr,
|
||||||
ns_status,
|
psr & CPSR_N ? 'N' : '-',
|
||||||
cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
|
psr & CPSR_Z ? 'Z' : '-',
|
||||||
|
psr & CPSR_C ? 'C' : '-',
|
||||||
|
psr & CPSR_V ? 'V' : '-',
|
||||||
|
psr & CPSR_T ? 'T' : 'A',
|
||||||
|
ns_status,
|
||||||
|
cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
|
||||||
|
}
|
||||||
|
|
||||||
if (flags & CPU_DUMP_FPU) {
|
if (flags & CPU_DUMP_FPU) {
|
||||||
int numvfpregs = 0;
|
int numvfpregs = 0;
|
||||||
|