Merge tag 'pull-target-arm-20230227' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * Various code cleanups
 * More refactoring working towards allowing a build without CONFIG_TCG

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmP8ty0ZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3na0EACToAEGC4/iFigdKD7ZwG3F
# FvoDcMRRSdElcSo7BTDrFBBOH5/BYhorUq+mVpPvEYADXNaPOCmXWieSJpu68sJC
# VpVLPMhGS8lTsT16C2vB/4Lh4t8pJgs7aej90nqKk2rGgKw4ZNwMS+7Eg6n2lKf/
# V27+O+drJxgYzO6feveuKtIQXsHkx4//DNOCDPLLvrrOk+1NWnyPyT/UDxV/emyr
# KLBbeXqcNhPkn7xZtvM7WARSHZcqhEPBkIAJG2H9HE4imxNm8d8ADZjEMbfE9ZNE
# MDanpM6BYYDWw4y2A8J5QmbiLu3znH8RWmWHww1v6UQ7qyBCLx+HyEGKipGd3Eoe
# 48hi/ktsAJUb1lRrk9gOJ+NsokGINzI5urFOReUh1q6+5us0Q0VpwjyVvhi8REy3
# 5gOMDC7O2zH+bLN08kseDXfc7vR9wLrIHqMloMgJzpjG5KcL67nVCPHcOwxe0sfn
# 0SYWUY0UFNSYgEGBG6JfM6LiM1lRREzlw6YnnaJ+GUf/jdIUbMV6PKpL34TGLeQ3
# xEWrKV0+PMoWHwN0Pdo1tMXm7mc/9H27Mf7hB5k0Hp3dfQ7nIdkfnFA2YEUSxIQt
# OXYsKLTJmO/4XIAYCHhIOncPTmM6KWNQajDJMIuEdYYV67Xb88EIv5Hg8q6tS/mN
# uuQfun3Z2UbAtGvzN5Yx1w==
# =K0Vo
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 27 Feb 2023 13:59:09 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20230227' of https://git.linaro.org/people/pmaydell/qemu-arm: (25 commits)
  hw: Replace qemu_or_irq typedef by OrIRQState
  hw/or-irq: Declare QOM macros using OBJECT_DECLARE_SIMPLE_TYPE()
  hw/irq: Declare QOM macros using OBJECT_DECLARE_SIMPLE_TYPE()
  iothread: Remove unused IOThreadClass / IOTHREAD_CLASS
  hw/arm/musicpal: Remove unused dummy MemoryRegion
  hw/intc/armv7m_nvic: Use QOM cast CPU() macro
  hw/timer/cmsdk-apb-timer: Remove unused 'qdev-properties.h' header
  hw/char/cmsdk-apb-uart: Open-code cmsdk_apb_uart_create()
  hw/char/xilinx_uartlite: Open-code xilinx_uartlite_create()
  hw/char/xilinx_uartlite: Expose XILINX_UARTLITE QOM type
  hw/char/pl011: Open-code pl011_luminary_create()
  hw/char/pl011: Un-inline pl011_create()
  hw/gpio/max7310: Simplify max7310_realize()
  tests/avocado: add machine:none tag to version.py
  cpu-defs.h: Expose CPUTLBEntryFull to non-TCG code
  target/arm: Don't access TCG code when debugging with KVM
  target/arm: Move regime_using_lpae_format into internal.h
  target/arm: Move hflags code into the tcg directory
  target/arm: Wrap arm_rebuild_hflags calls with tcg_enabled
  target/arm: Move psci.c into the tcg directory
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
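
The recurring pattern in this series, visible in several hunks below, is to guard calls into TCG-only helpers such as arm_rebuild_hflags() with tcg_enabled(), so that KVM-only builds and runs never reach TCG code. A minimal sketch of that guard shape follows; the surrounding function and its name are illustrative only, not code from this series:

    #include "qemu/osdep.h"
    #include "sysemu/tcg.h"   /* tcg_enabled() */
    #include "cpu.h"          /* CPUARMState, arm_rebuild_hflags() */

    /* Illustrative only: rebuild the cached hflags after a system register
     * update, but only when TCG is the active accelerator. */
    static void example_after_sysreg_update(CPUARMState *env)
    {
        if (tcg_enabled()) {
            arm_rebuild_hflags(env);
        }
    }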
commit e1f9f73ba1
@@ -161,6 +161,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
 L: qemu-arm@nongnu.org
 S: Maintained
 F: target/arm/
+F: target/arm/tcg/
 F: tests/tcg/arm/
 F: tests/tcg/aarch64/
 F: tests/qtest/arm-cpu-features.c
@@ -18,6 +18,7 @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "qemu/module.h"
+#include "hw/char/serial.h"
 #include "hw/sysbus.h"
 #include "hw/arm/allwinner-a10.h"
 #include "hw/misc/unimp.h"
@@ -15,6 +15,7 @@
 #include "hw/arm/boot.h"
 #include "hw/arm/linux-boot-if.h"
 #include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
 #include "sysemu/sysemu.h"
 #include "sysemu/numa.h"
 #include "hw/boards.h"
@@ -827,7 +828,10 @@ static void do_cpu_reset(void *opaque)
 info->secondary_cpu_reset_hook(cpu, info);
 }
 }
-arm_rebuild_hflags(env);
+
+if (tcg_enabled()) {
+arm_rebuild_hflags(env);
+}
 }
 }
 
@@ -507,7 +507,7 @@ static uint64_t exynos4210_calc_affinity(int cpu)
 return (0x9 << ARM_AFF1_SHIFT) | cpu;
 }
 
-static DeviceState *pl330_create(uint32_t base, qemu_or_irq *orgate,
+static DeviceState *pl330_create(uint32_t base, OrIRQState *orgate,
 qemu_irq irq, int nreq, int nevents, int width)
 {
 SysBusDevice *busdev;
@@ -806,7 +806,7 @@ static void exynos4210_init(Object *obj)
 
 for (i = 0; i < ARRAY_SIZE(s->pl330_irq_orgate); i++) {
 char *name = g_strdup_printf("pl330-irq-orgate%d", i);
-qemu_or_irq *orgate = &s->pl330_irq_orgate[i];
+OrIRQState *orgate = &s->pl330_irq_orgate[i];
 
 object_initialize_child(obj, name, orgate, TYPE_OR_IRQ);
 g_free(name);
@@ -152,7 +152,7 @@ struct MPS2TZMachineState {
 TZMSC msc[4];
 CMSDKAPBUART uart[6];
 SplitIRQ sec_resp_splitter;
-qemu_or_irq uart_irq_orgate;
+OrIRQState uart_irq_orgate;
 DeviceState *lan9118;
 SplitIRQ cpu_irq_splitter[MPS2TZ_NUMIRQ_MAX];
 Clock *sysclk;
@@ -35,6 +35,7 @@
 #include "hw/boards.h"
 #include "exec/address-spaces.h"
 #include "sysemu/sysemu.h"
+#include "hw/qdev-properties.h"
 #include "hw/misc/unimp.h"
 #include "hw/char/cmsdk-apb-uart.h"
 #include "hw/timer/cmsdk-apb-timer.h"
@@ -282,6 +283,9 @@ static void mps2_common_init(MachineState *machine)
 qdev_connect_gpio_out(orgate_dev, 0, qdev_get_gpio_in(armv7m, 12));
 
 for (i = 0; i < 5; i++) {
+DeviceState *dev;
+SysBusDevice *s;
+
 static const hwaddr uartbase[] = {0x40004000, 0x40005000,
 0x40006000, 0x40007000,
 0x40009000};
@@ -294,12 +298,16 @@ static void mps2_common_init(MachineState *machine)
 rxovrint = qdev_get_gpio_in(orgate_dev, i * 2 + 1);
 }
 
-cmsdk_apb_uart_create(uartbase[i],
-qdev_get_gpio_in(armv7m, uartirq[i] + 1),
-qdev_get_gpio_in(armv7m, uartirq[i]),
-txovrint, rxovrint,
-NULL,
-serial_hd(i), SYSCLK_FRQ);
+dev = qdev_new(TYPE_CMSDK_APB_UART);
+s = SYS_BUS_DEVICE(dev);
+qdev_prop_set_chr(dev, "chardev", serial_hd(i));
+qdev_prop_set_uint32(dev, "pclk-frq", SYSCLK_FRQ);
+sysbus_realize_and_unref(s, &error_fatal);
+sysbus_mmio_map(s, 0, uartbase[i]);
+sysbus_connect_irq(s, 0, qdev_get_gpio_in(armv7m, uartirq[i] + 1));
+sysbus_connect_irq(s, 1, qdev_get_gpio_in(armv7m, uartirq[i]));
+sysbus_connect_irq(s, 2, txovrint);
+sysbus_connect_irq(s, 3, rxovrint);
 }
 break;
 }
@@ -324,7 +332,8 @@ static void mps2_common_init(MachineState *machine)
 0x4002c000, 0x4002d000,
 0x4002e000};
 Object *txrx_orgate;
-DeviceState *txrx_orgate_dev;
+DeviceState *txrx_orgate_dev, *dev;
+SysBusDevice *s;
 
 txrx_orgate = object_new(TYPE_OR_IRQ);
 object_property_set_int(txrx_orgate, "num-lines", 2, &error_fatal);
@@ -332,13 +341,17 @@ static void mps2_common_init(MachineState *machine)
 txrx_orgate_dev = DEVICE(txrx_orgate);
 qdev_connect_gpio_out(txrx_orgate_dev, 0,
 qdev_get_gpio_in(armv7m, uart_txrx_irqno[i]));
-cmsdk_apb_uart_create(uartbase[i],
-qdev_get_gpio_in(txrx_orgate_dev, 0),
-qdev_get_gpio_in(txrx_orgate_dev, 1),
-qdev_get_gpio_in(orgate_dev, i * 2),
-qdev_get_gpio_in(orgate_dev, i * 2 + 1),
-NULL,
-serial_hd(i), SYSCLK_FRQ);
+
+dev = qdev_new(TYPE_CMSDK_APB_UART);
+s = SYS_BUS_DEVICE(dev);
+qdev_prop_set_chr(dev, "chardev", serial_hd(i));
+qdev_prop_set_uint32(dev, "pclk-frq", SYSCLK_FRQ);
+sysbus_realize_and_unref(s, &error_fatal);
+sysbus_mmio_map(s, 0, uartbase[i]);
+sysbus_connect_irq(s, 0, qdev_get_gpio_in(txrx_orgate_dev, 0));
+sysbus_connect_irq(s, 1, qdev_get_gpio_in(txrx_orgate_dev, 1));
+sysbus_connect_irq(s, 2, qdev_get_gpio_in(orgate_dev, i * 2));
+sysbus_connect_irq(s, 3, qdev_get_gpio_in(orgate_dev, i * 2 + 1));
 }
 break;
 }
@@ -1072,7 +1072,6 @@ struct musicpal_key_state {
 SysBusDevice parent_obj;
 /*< public >*/
 
-MemoryRegion iomem;
 uint32_t kbd_extended;
 uint32_t pressed_keys;
 qemu_irq out[8];
@@ -1161,9 +1160,6 @@ static void musicpal_key_init(Object *obj)
 DeviceState *dev = DEVICE(sbd);
 musicpal_key_state *s = MUSICPAL_KEY(dev);
 
-memory_region_init(&s->iomem, obj, "dummy", 0);
-sysbus_init_mmio(sbd, &s->iomem);
-
 s->kbd_extended = 0;
 s->pressed_keys = 0;
 
@@ -1146,9 +1146,14 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
 
 for (i = 0; i < 4; i++) {
 if (board->dc2 & (1 << i)) {
-pl011_luminary_create(0x4000c000 + i * 0x1000,
-qdev_get_gpio_in(nvic, uart_irq[i]),
-serial_hd(i));
+SysBusDevice *sbd;
+
+dev = qdev_new("pl011_luminary");
+sbd = SYS_BUS_DEVICE(dev);
+qdev_prop_set_chr(dev, "chardev", serial_hd(i));
+sysbus_realize_and_unref(sbd, &error_fatal);
+sysbus_mmio_map(sbd, 0, 0x4000c000 + i * 0x1000);
+sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(nvic, uart_irq[i]));
 }
 }
 if (board->dc2 & (1 << 4)) {
@@ -19,10 +19,12 @@
 */
 
 #include "qemu/osdep.h"
+#include "qapi/error.h"
 #include "hw/char/pl011.h"
 #include "hw/irq.h"
 #include "hw/sysbus.h"
 #include "hw/qdev-clock.h"
+#include "hw/qdev-properties.h"
 #include "hw/qdev-properties-system.h"
 #include "migration/vmstate.h"
 #include "chardev/char-fe.h"
@@ -31,6 +33,21 @@
 #include "qemu/module.h"
 #include "trace.h"
 
+DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr)
+{
+DeviceState *dev;
+SysBusDevice *s;
+
+dev = qdev_new("pl011");
+s = SYS_BUS_DEVICE(dev);
+qdev_prop_set_chr(dev, "chardev", chr);
+sysbus_realize_and_unref(s, &error_fatal);
+sysbus_mmio_map(s, 0, addr);
+sysbus_connect_irq(s, 0, irq);
+
+return dev;
+}
+
 #define PL011_INT_TX 0x20
 #define PL011_INT_RX 0x10
 
@ -24,6 +24,7 @@
|
|||||||
|
|
||||||
#include "qemu/osdep.h"
|
#include "qemu/osdep.h"
|
||||||
#include "qemu/log.h"
|
#include "qemu/log.h"
|
||||||
|
#include "hw/char/xilinx_uartlite.h"
|
||||||
#include "hw/irq.h"
|
#include "hw/irq.h"
|
||||||
#include "hw/qdev-properties.h"
|
#include "hw/qdev-properties.h"
|
||||||
#include "hw/qdev-properties-system.h"
|
#include "hw/qdev-properties-system.h"
|
||||||
@ -53,9 +54,6 @@
|
|||||||
#define CONTROL_RST_RX 0x02
|
#define CONTROL_RST_RX 0x02
|
||||||
#define CONTROL_IE 0x10
|
#define CONTROL_IE 0x10
|
||||||
|
|
||||||
#define TYPE_XILINX_UARTLITE "xlnx.xps-uartlite"
|
|
||||||
OBJECT_DECLARE_SIMPLE_TYPE(XilinxUARTLite, XILINX_UARTLITE)
|
|
||||||
|
|
||||||
struct XilinxUARTLite {
|
struct XilinxUARTLite {
|
||||||
SysBusDevice parent_obj;
|
SysBusDevice parent_obj;
|
||||||
|
|
||||||
|
@ -26,8 +26,7 @@
|
|||||||
#include "hw/irq.h"
|
#include "hw/irq.h"
|
||||||
#include "qom/object.h"
|
#include "qom/object.h"
|
||||||
|
|
||||||
DECLARE_INSTANCE_CHECKER(struct IRQState, IRQ,
|
OBJECT_DECLARE_SIMPLE_TYPE(IRQState, IRQ)
|
||||||
TYPE_IRQ)
|
|
||||||
|
|
||||||
struct IRQState {
|
struct IRQState {
|
||||||
Object parent_obj;
|
Object parent_obj;
|
||||||
@ -68,7 +67,7 @@ qemu_irq *qemu_allocate_irqs(qemu_irq_handler handler, void *opaque, int n)
|
|||||||
|
|
||||||
qemu_irq qemu_allocate_irq(qemu_irq_handler handler, void *opaque, int n)
|
qemu_irq qemu_allocate_irq(qemu_irq_handler handler, void *opaque, int n)
|
||||||
{
|
{
|
||||||
struct IRQState *irq;
|
IRQState *irq;
|
||||||
|
|
||||||
irq = IRQ(object_new(TYPE_IRQ));
|
irq = IRQ(object_new(TYPE_IRQ));
|
||||||
irq->handler = handler;
|
irq->handler = handler;
|
||||||
@ -94,7 +93,7 @@ void qemu_free_irq(qemu_irq irq)
|
|||||||
|
|
||||||
static void qemu_notirq(void *opaque, int line, int level)
|
static void qemu_notirq(void *opaque, int line, int level)
|
||||||
{
|
{
|
||||||
struct IRQState *irq = opaque;
|
IRQState *irq = opaque;
|
||||||
|
|
||||||
irq->handler(irq->opaque, irq->n, !level);
|
irq->handler(irq->opaque, irq->n, !level);
|
||||||
}
|
}
|
||||||
@ -120,7 +119,7 @@ void qemu_irq_intercept_in(qemu_irq *gpio_in, qemu_irq_handler handler, int n)
|
|||||||
static const TypeInfo irq_type_info = {
|
static const TypeInfo irq_type_info = {
|
||||||
.name = TYPE_IRQ,
|
.name = TYPE_IRQ,
|
||||||
.parent = TYPE_OBJECT,
|
.parent = TYPE_OBJECT,
|
||||||
.instance_size = sizeof(struct IRQState),
|
.instance_size = sizeof(IRQState),
|
||||||
};
|
};
|
||||||
|
|
||||||
static void irq_register_types(void)
|
static void irq_register_types(void)
|
||||||
|
@ -31,7 +31,7 @@
|
|||||||
|
|
||||||
static void or_irq_handler(void *opaque, int n, int level)
|
static void or_irq_handler(void *opaque, int n, int level)
|
||||||
{
|
{
|
||||||
qemu_or_irq *s = OR_IRQ(opaque);
|
OrIRQState *s = OR_IRQ(opaque);
|
||||||
int or_level = 0;
|
int or_level = 0;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
@ -46,7 +46,7 @@ static void or_irq_handler(void *opaque, int n, int level)
|
|||||||
|
|
||||||
static void or_irq_reset(DeviceState *dev)
|
static void or_irq_reset(DeviceState *dev)
|
||||||
{
|
{
|
||||||
qemu_or_irq *s = OR_IRQ(dev);
|
OrIRQState *s = OR_IRQ(dev);
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < MAX_OR_LINES; i++) {
|
for (i = 0; i < MAX_OR_LINES; i++) {
|
||||||
@ -56,7 +56,7 @@ static void or_irq_reset(DeviceState *dev)
|
|||||||
|
|
||||||
static void or_irq_realize(DeviceState *dev, Error **errp)
|
static void or_irq_realize(DeviceState *dev, Error **errp)
|
||||||
{
|
{
|
||||||
qemu_or_irq *s = OR_IRQ(dev);
|
OrIRQState *s = OR_IRQ(dev);
|
||||||
|
|
||||||
assert(s->num_lines <= MAX_OR_LINES);
|
assert(s->num_lines <= MAX_OR_LINES);
|
||||||
|
|
||||||
@ -65,7 +65,7 @@ static void or_irq_realize(DeviceState *dev, Error **errp)
|
|||||||
|
|
||||||
static void or_irq_init(Object *obj)
|
static void or_irq_init(Object *obj)
|
||||||
{
|
{
|
||||||
qemu_or_irq *s = OR_IRQ(obj);
|
OrIRQState *s = OR_IRQ(obj);
|
||||||
|
|
||||||
qdev_init_gpio_out(DEVICE(obj), &s->out_irq, 1);
|
qdev_init_gpio_out(DEVICE(obj), &s->out_irq, 1);
|
||||||
}
|
}
|
||||||
@ -84,7 +84,7 @@ static void or_irq_init(Object *obj)
|
|||||||
|
|
||||||
static bool vmstate_extras_needed(void *opaque)
|
static bool vmstate_extras_needed(void *opaque)
|
||||||
{
|
{
|
||||||
qemu_or_irq *s = OR_IRQ(opaque);
|
OrIRQState *s = OR_IRQ(opaque);
|
||||||
|
|
||||||
return s->num_lines >= OLD_MAX_OR_LINES;
|
return s->num_lines >= OLD_MAX_OR_LINES;
|
||||||
}
|
}
|
||||||
@ -95,7 +95,7 @@ static const VMStateDescription vmstate_or_irq_extras = {
|
|||||||
.minimum_version_id = 1,
|
.minimum_version_id = 1,
|
||||||
.needed = vmstate_extras_needed,
|
.needed = vmstate_extras_needed,
|
||||||
.fields = (VMStateField[]) {
|
.fields = (VMStateField[]) {
|
||||||
VMSTATE_VARRAY_UINT16_UNSAFE(levels, qemu_or_irq, num_lines, 0,
|
VMSTATE_VARRAY_UINT16_UNSAFE(levels, OrIRQState, num_lines, 0,
|
||||||
vmstate_info_bool, bool),
|
vmstate_info_bool, bool),
|
||||||
VMSTATE_END_OF_LIST(),
|
VMSTATE_END_OF_LIST(),
|
||||||
},
|
},
|
||||||
@ -106,7 +106,7 @@ static const VMStateDescription vmstate_or_irq = {
|
|||||||
.version_id = 1,
|
.version_id = 1,
|
||||||
.minimum_version_id = 1,
|
.minimum_version_id = 1,
|
||||||
.fields = (VMStateField[]) {
|
.fields = (VMStateField[]) {
|
||||||
VMSTATE_BOOL_SUB_ARRAY(levels, qemu_or_irq, 0, OLD_MAX_OR_LINES),
|
VMSTATE_BOOL_SUB_ARRAY(levels, OrIRQState, 0, OLD_MAX_OR_LINES),
|
||||||
VMSTATE_END_OF_LIST(),
|
VMSTATE_END_OF_LIST(),
|
||||||
},
|
},
|
||||||
.subsections = (const VMStateDescription*[]) {
|
.subsections = (const VMStateDescription*[]) {
|
||||||
@ -116,7 +116,7 @@ static const VMStateDescription vmstate_or_irq = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static Property or_irq_properties[] = {
|
static Property or_irq_properties[] = {
|
||||||
DEFINE_PROP_UINT16("num-lines", qemu_or_irq, num_lines, 1),
|
DEFINE_PROP_UINT16("num-lines", OrIRQState, num_lines, 1),
|
||||||
DEFINE_PROP_END_OF_LIST(),
|
DEFINE_PROP_END_OF_LIST(),
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -136,7 +136,7 @@ static void or_irq_class_init(ObjectClass *klass, void *data)
|
|||||||
static const TypeInfo or_irq_type_info = {
|
static const TypeInfo or_irq_type_info = {
|
||||||
.name = TYPE_OR_IRQ,
|
.name = TYPE_OR_IRQ,
|
||||||
.parent = TYPE_DEVICE,
|
.parent = TYPE_DEVICE,
|
||||||
.instance_size = sizeof(qemu_or_irq),
|
.instance_size = sizeof(OrIRQState),
|
||||||
.instance_init = or_irq_init,
|
.instance_init = or_irq_init,
|
||||||
.class_init = or_irq_class_init,
|
.class_init = or_irq_class_init,
|
||||||
};
|
};
|
||||||
|
@ -183,11 +183,10 @@ static void max7310_gpio_set(void *opaque, int line, int level)
|
|||||||
* but also accepts sequences that are not SMBus so return an I2C device. */
|
* but also accepts sequences that are not SMBus so return an I2C device. */
|
||||||
static void max7310_realize(DeviceState *dev, Error **errp)
|
static void max7310_realize(DeviceState *dev, Error **errp)
|
||||||
{
|
{
|
||||||
I2CSlave *i2c = I2C_SLAVE(dev);
|
|
||||||
MAX7310State *s = MAX7310(dev);
|
MAX7310State *s = MAX7310(dev);
|
||||||
|
|
||||||
qdev_init_gpio_in(&i2c->qdev, max7310_gpio_set, 8);
|
qdev_init_gpio_in(dev, max7310_gpio_set, ARRAY_SIZE(s->handler));
|
||||||
qdev_init_gpio_out(&i2c->qdev, s->handler, 8);
|
qdev_init_gpio_out(dev, s->handler, ARRAY_SIZE(s->handler));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void max7310_class_init(ObjectClass *klass, void *data)
|
static void max7310_class_init(ObjectClass *klass, void *data)
|
||||||
|
@ -18,6 +18,7 @@
|
|||||||
#include "hw/intc/armv7m_nvic.h"
|
#include "hw/intc/armv7m_nvic.h"
|
||||||
#include "hw/irq.h"
|
#include "hw/irq.h"
|
||||||
#include "hw/qdev-properties.h"
|
#include "hw/qdev-properties.h"
|
||||||
|
#include "sysemu/tcg.h"
|
||||||
#include "sysemu/runstate.h"
|
#include "sysemu/runstate.h"
|
||||||
#include "target/arm/cpu.h"
|
#include "target/arm/cpu.h"
|
||||||
#include "exec/exec-all.h"
|
#include "exec/exec-all.h"
|
||||||
@ -577,7 +578,7 @@ static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
|
|||||||
* which saves having to have an extra argument is_terminal
|
* which saves having to have an extra argument is_terminal
|
||||||
* that we'd only use in one place.
|
* that we'd only use in one place.
|
||||||
*/
|
*/
|
||||||
cpu_abort(&s->cpu->parent_obj,
|
cpu_abort(CPU(s->cpu),
|
||||||
"Lockup: can't take terminal derived exception "
|
"Lockup: can't take terminal derived exception "
|
||||||
"(original exception priority %d)\n",
|
"(original exception priority %d)\n",
|
||||||
s->vectpending_prio);
|
s->vectpending_prio);
|
||||||
@ -643,7 +644,7 @@ static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
|
|||||||
* Lockup condition due to a guest bug. We don't model
|
* Lockup condition due to a guest bug. We don't model
|
||||||
* Lockup, so report via cpu_abort() instead.
|
* Lockup, so report via cpu_abort() instead.
|
||||||
*/
|
*/
|
||||||
cpu_abort(&s->cpu->parent_obj,
|
cpu_abort(CPU(s->cpu),
|
||||||
"Lockup: can't escalate %d to HardFault "
|
"Lockup: can't escalate %d to HardFault "
|
||||||
"(current priority %d)\n", irq, running);
|
"(current priority %d)\n", irq, running);
|
||||||
}
|
}
|
||||||
@ -741,7 +742,7 @@ void armv7m_nvic_set_pending_lazyfp(NVICState *s, int irq, bool secure)
|
|||||||
* We want to escalate to HardFault but the context the
|
* We want to escalate to HardFault but the context the
|
||||||
* FP state belongs to prevents the exception pre-empting.
|
* FP state belongs to prevents the exception pre-empting.
|
||||||
*/
|
*/
|
||||||
cpu_abort(&s->cpu->parent_obj,
|
cpu_abort(CPU(s->cpu),
|
||||||
"Lockup: can't escalate to HardFault during "
|
"Lockup: can't escalate to HardFault during "
|
||||||
"lazy FP register stacking\n");
|
"lazy FP register stacking\n");
|
||||||
}
|
}
|
||||||
@ -2454,8 +2455,10 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
|
|||||||
/* This is UNPREDICTABLE; treat as RAZ/WI */
|
/* This is UNPREDICTABLE; treat as RAZ/WI */
|
||||||
|
|
||||||
exit_ok:
|
exit_ok:
|
||||||
/* Ensure any changes made are reflected in the cached hflags. */
|
if (tcg_enabled()) {
|
||||||
arm_rebuild_hflags(&s->cpu->env);
|
/* Ensure any changes made are reflected in the cached hflags. */
|
||||||
|
arm_rebuild_hflags(&s->cpu->env);
|
||||||
|
}
|
||||||
return MEMTX_OK;
|
return MEMTX_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2636,11 +2639,14 @@ static void armv7m_nvic_reset(DeviceState *dev)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
if (tcg_enabled()) {
|
||||||
* We updated state that affects the CPU's MMUidx and thus its hflags;
|
/*
|
||||||
* and we can't guarantee that we run before the CPU reset function.
|
* We updated state that affects the CPU's MMUidx and thus its
|
||||||
*/
|
* hflags; and we can't guarantee that we run before the CPU
|
||||||
arm_rebuild_hflags(&s->cpu->env);
|
* reset function.
|
||||||
|
*/
|
||||||
|
arm_rebuild_hflags(&s->cpu->env);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nvic_systick_trigger(void *opaque, int n, int level)
|
static void nvic_systick_trigger(void *opaque, int n, int level)
|
||||||
|
@ -100,8 +100,11 @@ petalogix_s3adsp1800_init(MachineState *machine)
|
|||||||
irq[i] = qdev_get_gpio_in(dev, i);
|
irq[i] = qdev_get_gpio_in(dev, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
xilinx_uartlite_create(UARTLITE_BASEADDR, irq[UARTLITE_IRQ],
|
dev = qdev_new(TYPE_XILINX_UARTLITE);
|
||||||
serial_hd(0));
|
qdev_prop_set_chr(dev, "chardev", serial_hd(0));
|
||||||
|
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
|
||||||
|
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, UARTLITE_BASEADDR);
|
||||||
|
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[UARTLITE_IRQ]);
|
||||||
|
|
||||||
/* 2 timers at irq 2 @ 62 Mhz. */
|
/* 2 timers at irq 2 @ 62 Mhz. */
|
||||||
dev = qdev_new("xlnx.xps-timer");
|
dev = qdev_new("xlnx.xps-timer");
|
||||||
|
@ -60,7 +60,7 @@ DECLARE_INSTANCE_CHECKER(PREPPCIState, RAVEN_PCI_HOST_BRIDGE,
|
|||||||
struct PRePPCIState {
|
struct PRePPCIState {
|
||||||
PCIHostState parent_obj;
|
PCIHostState parent_obj;
|
||||||
|
|
||||||
qemu_or_irq *or_irq;
|
OrIRQState *or_irq;
|
||||||
qemu_irq pci_irqs[PCI_NUM_PINS];
|
qemu_irq pci_irqs[PCI_NUM_PINS];
|
||||||
PCIBus pci_bus;
|
PCIBus pci_bus;
|
||||||
AddressSpace pci_io_as;
|
AddressSpace pci_io_as;
|
||||||
|
@ -135,6 +135,10 @@ typedef struct CPUTLBEntry {
|
|||||||
|
|
||||||
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
|
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
|
||||||
|
|
||||||
|
|
||||||
|
#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
|
||||||
|
|
||||||
|
#if !defined(CONFIG_USER_ONLY)
|
||||||
/*
|
/*
|
||||||
* The full TLB entry, which is not accessed by generated TCG code,
|
* The full TLB entry, which is not accessed by generated TCG code,
|
||||||
* so the layout is not as critical as that of CPUTLBEntry. This is
|
* so the layout is not as critical as that of CPUTLBEntry. This is
|
||||||
@ -176,7 +180,9 @@ typedef struct CPUTLBEntryFull {
|
|||||||
TARGET_PAGE_ENTRY_EXTRA
|
TARGET_PAGE_ENTRY_EXTRA
|
||||||
#endif
|
#endif
|
||||||
} CPUTLBEntryFull;
|
} CPUTLBEntryFull;
|
||||||
|
#endif /* !CONFIG_USER_ONLY */
|
||||||
|
|
||||||
|
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
|
||||||
/*
|
/*
|
||||||
* Data elements that are per MMU mode, minus the bits accessed by
|
* Data elements that are per MMU mode, minus the bits accessed by
|
||||||
* the TCG fast path.
|
* the TCG fast path.
|
||||||
|
@ -1,9 +1,7 @@
|
|||||||
#ifndef HW_ARM_ALLWINNER_A10_H
|
#ifndef HW_ARM_ALLWINNER_A10_H
|
||||||
#define HW_ARM_ALLWINNER_A10_H
|
#define HW_ARM_ALLWINNER_A10_H
|
||||||
|
|
||||||
#include "hw/char/serial.h"
|
|
||||||
#include "hw/arm/boot.h"
|
#include "hw/arm/boot.h"
|
||||||
#include "hw/pci/pci_device.h"
|
|
||||||
#include "hw/timer/allwinner-a10-pit.h"
|
#include "hw/timer/allwinner-a10-pit.h"
|
||||||
#include "hw/intc/allwinner-a10-pic.h"
|
#include "hw/intc/allwinner-a10-pic.h"
|
||||||
#include "hw/net/allwinner_emac.h"
|
#include "hw/net/allwinner_emac.h"
|
||||||
|
@ -155,12 +155,12 @@ struct ARMSSE {
|
|||||||
TZPPC apb_ppc[NUM_INTERNAL_PPCS];
|
TZPPC apb_ppc[NUM_INTERNAL_PPCS];
|
||||||
TZMPC mpc[IOTS_NUM_MPC];
|
TZMPC mpc[IOTS_NUM_MPC];
|
||||||
CMSDKAPBTimer timer[3];
|
CMSDKAPBTimer timer[3];
|
||||||
qemu_or_irq ppc_irq_orgate;
|
OrIRQState ppc_irq_orgate;
|
||||||
SplitIRQ sec_resp_splitter;
|
SplitIRQ sec_resp_splitter;
|
||||||
SplitIRQ ppc_irq_splitter[NUM_PPCS];
|
SplitIRQ ppc_irq_splitter[NUM_PPCS];
|
||||||
SplitIRQ mpc_irq_splitter[IOTS_NUM_EXP_MPC + IOTS_NUM_MPC];
|
SplitIRQ mpc_irq_splitter[IOTS_NUM_EXP_MPC + IOTS_NUM_MPC];
|
||||||
qemu_or_irq mpc_irq_orgate;
|
OrIRQState mpc_irq_orgate;
|
||||||
qemu_or_irq nmi_orgate;
|
OrIRQState nmi_orgate;
|
||||||
|
|
||||||
SplitIRQ cpu_irq_splitter[NUM_SSE_IRQS];
|
SplitIRQ cpu_irq_splitter[NUM_SSE_IRQS];
|
||||||
|
|
||||||
|
@ -56,7 +56,7 @@ struct BCM2835PeripheralState {
|
|||||||
BCM2835AuxState aux;
|
BCM2835AuxState aux;
|
||||||
BCM2835FBState fb;
|
BCM2835FBState fb;
|
||||||
BCM2835DMAState dma;
|
BCM2835DMAState dma;
|
||||||
qemu_or_irq orgated_dma_irq;
|
OrIRQState orgated_dma_irq;
|
||||||
BCM2835ICState ic;
|
BCM2835ICState ic;
|
||||||
BCM2835PropertyState property;
|
BCM2835PropertyState property;
|
||||||
BCM2835RngState rng;
|
BCM2835RngState rng;
|
||||||
|
@ -96,8 +96,8 @@ struct Exynos4210State {
|
|||||||
MemoryRegion boot_secondary;
|
MemoryRegion boot_secondary;
|
||||||
MemoryRegion bootreg_mem;
|
MemoryRegion bootreg_mem;
|
||||||
I2CBus *i2c_if[EXYNOS4210_I2C_NUMBER];
|
I2CBus *i2c_if[EXYNOS4210_I2C_NUMBER];
|
||||||
qemu_or_irq pl330_irq_orgate[EXYNOS4210_NUM_DMA];
|
OrIRQState pl330_irq_orgate[EXYNOS4210_NUM_DMA];
|
||||||
qemu_or_irq cpu_irq_orgate[EXYNOS4210_NCPUS];
|
OrIRQState cpu_irq_orgate[EXYNOS4210_NCPUS];
|
||||||
A9MPPrivState a9mpcore;
|
A9MPPrivState a9mpcore;
|
||||||
Exynos4210GicState ext_gic;
|
Exynos4210GicState ext_gic;
|
||||||
Exynos4210CombinerState int_combiner;
|
Exynos4210CombinerState int_combiner;
|
||||||
|
@ -63,7 +63,7 @@ struct STM32F205State {
|
|||||||
STM32F2XXADCState adc[STM_NUM_ADCS];
|
STM32F2XXADCState adc[STM_NUM_ADCS];
|
||||||
STM32F2XXSPIState spi[STM_NUM_SPIS];
|
STM32F2XXSPIState spi[STM_NUM_SPIS];
|
||||||
|
|
||||||
qemu_or_irq *adc_irqs;
|
OrIRQState *adc_irqs;
|
||||||
|
|
||||||
MemoryRegion sram;
|
MemoryRegion sram;
|
||||||
MemoryRegion flash;
|
MemoryRegion flash;
|
||||||
|
@ -63,7 +63,7 @@ struct STM32F405State {
|
|||||||
STM32F4xxExtiState exti;
|
STM32F4xxExtiState exti;
|
||||||
STM32F2XXUsartState usart[STM_NUM_USARTS];
|
STM32F2XXUsartState usart[STM_NUM_USARTS];
|
||||||
STM32F2XXTimerState timer[STM_NUM_TIMERS];
|
STM32F2XXTimerState timer[STM_NUM_TIMERS];
|
||||||
qemu_or_irq adc_irqs;
|
OrIRQState adc_irqs;
|
||||||
STM32F2XXADCState adc[STM_NUM_ADCS];
|
STM32F2XXADCState adc[STM_NUM_ADCS];
|
||||||
STM32F2XXSPIState spi[STM_NUM_SPIS];
|
STM32F2XXSPIState spi[STM_NUM_SPIS];
|
||||||
|
|
||||||
|
@ -85,7 +85,7 @@ struct Versal {
|
|||||||
} rpu;
|
} rpu;
|
||||||
|
|
||||||
struct {
|
struct {
|
||||||
qemu_or_irq irq_orgate;
|
OrIRQState irq_orgate;
|
||||||
XlnxXramCtrl ctrl[XLNX_VERSAL_NR_XRAM];
|
XlnxXramCtrl ctrl[XLNX_VERSAL_NR_XRAM];
|
||||||
} xram;
|
} xram;
|
||||||
|
|
||||||
@ -103,7 +103,7 @@ struct Versal {
|
|||||||
XlnxCSUDMA dma_src;
|
XlnxCSUDMA dma_src;
|
||||||
XlnxCSUDMA dma_dst;
|
XlnxCSUDMA dma_dst;
|
||||||
MemoryRegion linear_mr;
|
MemoryRegion linear_mr;
|
||||||
qemu_or_irq irq_orgate;
|
OrIRQState irq_orgate;
|
||||||
} ospi;
|
} ospi;
|
||||||
} iou;
|
} iou;
|
||||||
|
|
||||||
@ -113,7 +113,7 @@ struct Versal {
|
|||||||
XlnxVersalEFuseCtrl efuse_ctrl;
|
XlnxVersalEFuseCtrl efuse_ctrl;
|
||||||
XlnxVersalEFuseCache efuse_cache;
|
XlnxVersalEFuseCache efuse_cache;
|
||||||
|
|
||||||
qemu_or_irq apb_irq_orgate;
|
OrIRQState apb_irq_orgate;
|
||||||
} pmc;
|
} pmc;
|
||||||
|
|
||||||
struct {
|
struct {
|
||||||
|
@ -130,7 +130,7 @@ struct XlnxZynqMPState {
|
|||||||
XlnxZDMA gdma[XLNX_ZYNQMP_NUM_GDMA_CH];
|
XlnxZDMA gdma[XLNX_ZYNQMP_NUM_GDMA_CH];
|
||||||
XlnxZDMA adma[XLNX_ZYNQMP_NUM_ADMA_CH];
|
XlnxZDMA adma[XLNX_ZYNQMP_NUM_ADMA_CH];
|
||||||
XlnxCSUDMA qspi_dma;
|
XlnxCSUDMA qspi_dma;
|
||||||
qemu_or_irq qspi_irq_orgate;
|
OrIRQState qspi_irq_orgate;
|
||||||
XlnxZynqMPAPUCtrl apu_ctrl;
|
XlnxZynqMPAPUCtrl apu_ctrl;
|
||||||
XlnxZynqMPCRF crf;
|
XlnxZynqMPCRF crf;
|
||||||
CadenceTTCState ttc[XLNX_ZYNQMP_NUM_TTC];
|
CadenceTTCState ttc[XLNX_ZYNQMP_NUM_TTC];
|
||||||
|
@ -12,10 +12,8 @@
|
|||||||
#ifndef CMSDK_APB_UART_H
|
#ifndef CMSDK_APB_UART_H
|
||||||
#define CMSDK_APB_UART_H
|
#define CMSDK_APB_UART_H
|
||||||
|
|
||||||
#include "hw/qdev-properties.h"
|
|
||||||
#include "hw/sysbus.h"
|
#include "hw/sysbus.h"
|
||||||
#include "chardev/char-fe.h"
|
#include "chardev/char-fe.h"
|
||||||
#include "qapi/error.h"
|
|
||||||
#include "qom/object.h"
|
#include "qom/object.h"
|
||||||
|
|
||||||
#define TYPE_CMSDK_APB_UART "cmsdk-apb-uart"
|
#define TYPE_CMSDK_APB_UART "cmsdk-apb-uart"
|
||||||
@ -45,36 +43,4 @@ struct CMSDKAPBUART {
|
|||||||
uint8_t rxbuf;
|
uint8_t rxbuf;
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
|
||||||
* cmsdk_apb_uart_create - convenience function to create TYPE_CMSDK_APB_UART
|
|
||||||
* @addr: location in system memory to map registers
|
|
||||||
* @chr: Chardev backend to connect UART to, or NULL if no backend
|
|
||||||
* @pclk_frq: frequency in Hz of the PCLK clock (used for calculating baud rate)
|
|
||||||
*/
|
|
||||||
static inline DeviceState *cmsdk_apb_uart_create(hwaddr addr,
|
|
||||||
qemu_irq txint,
|
|
||||||
qemu_irq rxint,
|
|
||||||
qemu_irq txovrint,
|
|
||||||
qemu_irq rxovrint,
|
|
||||||
qemu_irq uartint,
|
|
||||||
Chardev *chr,
|
|
||||||
uint32_t pclk_frq)
|
|
||||||
{
|
|
||||||
DeviceState *dev;
|
|
||||||
SysBusDevice *s;
|
|
||||||
|
|
||||||
dev = qdev_new(TYPE_CMSDK_APB_UART);
|
|
||||||
s = SYS_BUS_DEVICE(dev);
|
|
||||||
qdev_prop_set_chr(dev, "chardev", chr);
|
|
||||||
qdev_prop_set_uint32(dev, "pclk-frq", pclk_frq);
|
|
||||||
sysbus_realize_and_unref(s, &error_fatal);
|
|
||||||
sysbus_mmio_map(s, 0, addr);
|
|
||||||
sysbus_connect_irq(s, 0, txint);
|
|
||||||
sysbus_connect_irq(s, 1, rxint);
|
|
||||||
sysbus_connect_irq(s, 2, txovrint);
|
|
||||||
sysbus_connect_irq(s, 3, rxovrint);
|
|
||||||
sysbus_connect_irq(s, 4, uartint);
|
|
||||||
return dev;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -15,10 +15,8 @@
|
|||||||
#ifndef HW_PL011_H
|
#ifndef HW_PL011_H
|
||||||
#define HW_PL011_H
|
#define HW_PL011_H
|
||||||
|
|
||||||
#include "hw/qdev-properties.h"
|
|
||||||
#include "hw/sysbus.h"
|
#include "hw/sysbus.h"
|
||||||
#include "chardev/char-fe.h"
|
#include "chardev/char-fe.h"
|
||||||
#include "qapi/error.h"
|
|
||||||
#include "qom/object.h"
|
#include "qom/object.h"
|
||||||
|
|
||||||
#define TYPE_PL011 "pl011"
|
#define TYPE_PL011 "pl011"
|
||||||
@ -57,38 +55,6 @@ struct PL011State {
|
|||||||
const unsigned char *id;
|
const unsigned char *id;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline DeviceState *pl011_create(hwaddr addr,
|
DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr);
|
||||||
qemu_irq irq,
|
|
||||||
Chardev *chr)
|
|
||||||
{
|
|
||||||
DeviceState *dev;
|
|
||||||
SysBusDevice *s;
|
|
||||||
|
|
||||||
dev = qdev_new("pl011");
|
|
||||||
s = SYS_BUS_DEVICE(dev);
|
|
||||||
qdev_prop_set_chr(dev, "chardev", chr);
|
|
||||||
sysbus_realize_and_unref(s, &error_fatal);
|
|
||||||
sysbus_mmio_map(s, 0, addr);
|
|
||||||
sysbus_connect_irq(s, 0, irq);
|
|
||||||
|
|
||||||
return dev;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline DeviceState *pl011_luminary_create(hwaddr addr,
|
|
||||||
qemu_irq irq,
|
|
||||||
Chardev *chr)
|
|
||||||
{
|
|
||||||
DeviceState *dev;
|
|
||||||
SysBusDevice *s;
|
|
||||||
|
|
||||||
dev = qdev_new("pl011_luminary");
|
|
||||||
s = SYS_BUS_DEVICE(dev);
|
|
||||||
qdev_prop_set_chr(dev, "chardev", chr);
|
|
||||||
sysbus_realize_and_unref(s, &error_fatal);
|
|
||||||
sysbus_mmio_map(s, 0, addr);
|
|
||||||
sysbus_connect_irq(s, 0, irq);
|
|
||||||
|
|
||||||
return dev;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -15,25 +15,9 @@
|
|||||||
#ifndef XILINX_UARTLITE_H
|
#ifndef XILINX_UARTLITE_H
|
||||||
#define XILINX_UARTLITE_H
|
#define XILINX_UARTLITE_H
|
||||||
|
|
||||||
#include "hw/qdev-properties.h"
|
#include "qom/object.h"
|
||||||
#include "hw/sysbus.h"
|
|
||||||
#include "qapi/error.h"
|
|
||||||
|
|
||||||
static inline DeviceState *xilinx_uartlite_create(hwaddr addr,
|
#define TYPE_XILINX_UARTLITE "xlnx.xps-uartlite"
|
||||||
qemu_irq irq,
|
OBJECT_DECLARE_SIMPLE_TYPE(XilinxUARTLite, XILINX_UARTLITE)
|
||||||
Chardev *chr)
|
|
||||||
{
|
|
||||||
DeviceState *dev;
|
|
||||||
SysBusDevice *s;
|
|
||||||
|
|
||||||
dev = qdev_new("xlnx.xps-uartlite");
|
|
||||||
s = SYS_BUS_DEVICE(dev);
|
|
||||||
qdev_prop_set_chr(dev, "chardev", chr);
|
|
||||||
sysbus_realize_and_unref(s, &error_fatal);
|
|
||||||
sysbus_mmio_map(s, 0, addr);
|
|
||||||
sysbus_connect_irq(s, 0, irq);
|
|
||||||
|
|
||||||
return dev;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -35,10 +35,7 @@
|
|||||||
*/
|
*/
|
||||||
#define MAX_OR_LINES 48
|
#define MAX_OR_LINES 48
|
||||||
|
|
||||||
typedef struct OrIRQState qemu_or_irq;
|
OBJECT_DECLARE_SIMPLE_TYPE(OrIRQState, OR_IRQ)
|
||||||
|
|
||||||
DECLARE_INSTANCE_CHECKER(qemu_or_irq, OR_IRQ,
|
|
||||||
TYPE_OR_IRQ)
|
|
||||||
|
|
||||||
struct OrIRQState {
|
struct OrIRQState {
|
||||||
DeviceState parent_obj;
|
DeviceState parent_obj;
|
||||||
|
@ -12,7 +12,6 @@
|
|||||||
#ifndef CMSDK_APB_TIMER_H
|
#ifndef CMSDK_APB_TIMER_H
|
||||||
#define CMSDK_APB_TIMER_H
|
#define CMSDK_APB_TIMER_H
|
||||||
|
|
||||||
#include "hw/qdev-properties.h"
|
|
||||||
#include "hw/sysbus.h"
|
#include "hw/sysbus.h"
|
||||||
#include "hw/ptimer.h"
|
#include "hw/ptimer.h"
|
||||||
#include "hw/clock.h"
|
#include "hw/clock.h"
|
||||||
|
@ -25,10 +25,6 @@
|
|||||||
#include "qemu/rcu.h"
|
#include "qemu/rcu.h"
|
||||||
#include "qemu/main-loop.h"
|
#include "qemu/main-loop.h"
|
||||||
|
|
||||||
typedef ObjectClass IOThreadClass;
|
|
||||||
|
|
||||||
DECLARE_CLASS_CHECKERS(IOThreadClass, IOTHREAD,
|
|
||||||
TYPE_IOTHREAD)
|
|
||||||
|
|
||||||
#ifdef CONFIG_POSIX
|
#ifdef CONFIG_POSIX
|
||||||
/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
|
/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
|
||||||
|
@ -15,6 +15,7 @@
|
|||||||
#include "arm-powerctl.h"
|
#include "arm-powerctl.h"
|
||||||
#include "qemu/log.h"
|
#include "qemu/log.h"
|
||||||
#include "qemu/main-loop.h"
|
#include "qemu/main-loop.h"
|
||||||
|
#include "sysemu/tcg.h"
|
||||||
|
|
||||||
#ifndef DEBUG_ARM_POWERCTL
|
#ifndef DEBUG_ARM_POWERCTL
|
||||||
#define DEBUG_ARM_POWERCTL 0
|
#define DEBUG_ARM_POWERCTL 0
|
||||||
@ -127,8 +128,10 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
|
|||||||
target_cpu->env.regs[0] = info->context_id;
|
target_cpu->env.regs[0] = info->context_id;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* CP15 update requires rebuilding hflags */
|
if (tcg_enabled()) {
|
||||||
arm_rebuild_hflags(&target_cpu->env);
|
/* CP15 update requires rebuilding hflags */
|
||||||
|
arm_rebuild_hflags(&target_cpu->env);
|
||||||
|
}
|
||||||
|
|
||||||
/* Start the new CPU at the requested address */
|
/* Start the new CPU at the requested address */
|
||||||
cpu_set_pc(target_cpu_state, info->entry);
|
cpu_set_pc(target_cpu_state, info->entry);
|
||||||
|
@ -539,9 +539,12 @@ static void arm_cpu_reset_hold(Object *obj)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
hw_breakpoint_update_all(cpu);
|
if (tcg_enabled()) {
|
||||||
hw_watchpoint_update_all(cpu);
|
hw_breakpoint_update_all(cpu);
|
||||||
arm_rebuild_hflags(env);
|
hw_watchpoint_update_all(cpu);
|
||||||
|
|
||||||
|
arm_rebuild_hflags(env);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
|
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
|
||||||
|
@ -12,8 +12,9 @@
|
|||||||
#include "cpregs.h"
|
#include "cpregs.h"
|
||||||
#include "exec/exec-all.h"
|
#include "exec/exec-all.h"
|
||||||
#include "exec/helper-proto.h"
|
#include "exec/helper-proto.h"
|
||||||
|
#include "sysemu/tcg.h"
|
||||||
|
|
||||||
|
#ifdef CONFIG_TCG
|
||||||
/* Return the Exception Level targeted by debug exceptions. */
|
/* Return the Exception Level targeted by debug exceptions. */
|
||||||
static int arm_debug_target_el(CPUARMState *env)
|
static int arm_debug_target_el(CPUARMState *env)
|
||||||
{
|
{
|
||||||
@ -536,6 +537,243 @@ void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
|
|||||||
raise_exception_debug(env, EXCP_UDEF, syndrome);
|
raise_exception_debug(env, EXCP_UDEF, syndrome);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void hw_watchpoint_update(ARMCPU *cpu, int n)
|
||||||
|
{
|
||||||
|
CPUARMState *env = &cpu->env;
|
||||||
|
vaddr len = 0;
|
||||||
|
vaddr wvr = env->cp15.dbgwvr[n];
|
||||||
|
uint64_t wcr = env->cp15.dbgwcr[n];
|
||||||
|
int mask;
|
||||||
|
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
|
||||||
|
|
||||||
|
if (env->cpu_watchpoint[n]) {
|
||||||
|
cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
|
||||||
|
env->cpu_watchpoint[n] = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!FIELD_EX64(wcr, DBGWCR, E)) {
|
||||||
|
/* E bit clear : watchpoint disabled */
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
|
||||||
|
case 0:
|
||||||
|
/* LSC 00 is reserved and must behave as if the wp is disabled */
|
||||||
|
return;
|
||||||
|
case 1:
|
||||||
|
flags |= BP_MEM_READ;
|
||||||
|
break;
|
||||||
|
case 2:
|
||||||
|
flags |= BP_MEM_WRITE;
|
||||||
|
break;
|
||||||
|
case 3:
|
||||||
|
flags |= BP_MEM_ACCESS;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Attempts to use both MASK and BAS fields simultaneously are
|
||||||
|
* CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
|
||||||
|
* thus generating a watchpoint for every byte in the masked region.
|
||||||
|
*/
|
||||||
|
mask = FIELD_EX64(wcr, DBGWCR, MASK);
|
||||||
|
if (mask == 1 || mask == 2) {
|
||||||
|
/*
|
||||||
|
* Reserved values of MASK; we must act as if the mask value was
|
||||||
|
* some non-reserved value, or as if the watchpoint were disabled.
|
||||||
|
* We choose the latter.
|
||||||
|
*/
|
||||||
|
return;
|
||||||
|
} else if (mask) {
|
||||||
|
/* Watchpoint covers an aligned area up to 2GB in size */
|
||||||
|
len = 1ULL << mask;
|
||||||
|
/*
|
||||||
|
* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
|
||||||
|
* whether the watchpoint fires when the unmasked bits match; we opt
|
||||||
|
* to generate the exceptions.
|
||||||
|
*/
|
||||||
|
wvr &= ~(len - 1);
|
||||||
|
} else {
|
||||||
|
/* Watchpoint covers bytes defined by the byte address select bits */
|
||||||
|
int bas = FIELD_EX64(wcr, DBGWCR, BAS);
|
||||||
|
int basstart;
|
||||||
|
|
||||||
|
if (extract64(wvr, 2, 1)) {
|
||||||
|
/*
|
||||||
|
* Deprecated case of an only 4-aligned address. BAS[7:4] are
|
||||||
|
* ignored, and BAS[3:0] define which bytes to watch.
|
||||||
|
*/
|
||||||
|
bas &= 0xf;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (bas == 0) {
|
||||||
|
/* This must act as if the watchpoint is disabled */
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The BAS bits are supposed to be programmed to indicate a contiguous
|
||||||
|
* range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
|
||||||
|
* we fire for each byte in the word/doubleword addressed by the WVR.
|
||||||
|
* We choose to ignore any non-zero bits after the first range of 1s.
|
||||||
|
*/
|
||||||
|
basstart = ctz32(bas);
|
||||||
|
len = cto32(bas >> basstart);
|
||||||
|
wvr += basstart;
|
||||||
|
}
|
||||||
|
|
||||||
|
cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
|
||||||
|
&env->cpu_watchpoint[n]);
|
||||||
|
}
|
||||||
|
|
||||||
|
void hw_watchpoint_update_all(ARMCPU *cpu)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
CPUARMState *env = &cpu->env;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Completely clear out existing QEMU watchpoints and our array, to
|
||||||
|
* avoid possible stale entries following migration load.
|
||||||
|
*/
|
||||||
|
cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
|
||||||
|
memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
|
||||||
|
|
||||||
|
for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
|
||||||
|
hw_watchpoint_update(cpu, i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void hw_breakpoint_update(ARMCPU *cpu, int n)
|
||||||
|
{
|
||||||
|
CPUARMState *env = &cpu->env;
|
||||||
|
uint64_t bvr = env->cp15.dbgbvr[n];
|
||||||
|
uint64_t bcr = env->cp15.dbgbcr[n];
|
||||||
|
vaddr addr;
|
||||||
|
int bt;
|
||||||
|
int flags = BP_CPU;
|
||||||
|
|
||||||
|
if (env->cpu_breakpoint[n]) {
|
||||||
|
cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
|
||||||
|
env->cpu_breakpoint[n] = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!extract64(bcr, 0, 1)) {
|
||||||
|
/* E bit clear : watchpoint disabled */
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
bt = extract64(bcr, 20, 4);
|
||||||
|
|
||||||
|
switch (bt) {
|
||||||
|
case 4: /* unlinked address mismatch (reserved if AArch64) */
|
||||||
|
case 5: /* linked address mismatch (reserved if AArch64) */
|
||||||
|
qemu_log_mask(LOG_UNIMP,
|
||||||
|
"arm: address mismatch breakpoint types not implemented\n");
|
||||||
|
return;
|
||||||
|
case 0: /* unlinked address match */
|
||||||
|
case 1: /* linked address match */
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Bits [1:0] are RES0.
|
||||||
|
*
|
||||||
|
* It is IMPLEMENTATION DEFINED whether bits [63:49]
|
||||||
|
* ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
|
||||||
|
* of the VA field ([48] or [52] for FEAT_LVA), or whether the
|
||||||
|
* value is read as written. It is CONSTRAINED UNPREDICTABLE
|
||||||
|
* whether the RESS bits are ignored when comparing an address.
|
||||||
|
* Therefore we are allowed to compare the entire register, which
|
||||||
|
* lets us avoid considering whether FEAT_LVA is actually enabled.
|
||||||
|
*
|
||||||
|
* The BAS field is used to allow setting breakpoints on 16-bit
|
||||||
|
* wide instructions; it is CONSTRAINED UNPREDICTABLE whether
|
||||||
|
* a bp will fire if the addresses covered by the bp and the addresses
|
||||||
|
* covered by the insn overlap but the insn doesn't start at the
|
||||||
|
* start of the bp address range. We choose to require the insn and
|
||||||
|
* the bp to have the same address. The constraints on writing to
|
||||||
|
* BAS enforced in dbgbcr_write mean we have only four cases:
|
||||||
|
* 0b0000 => no breakpoint
|
||||||
|
* 0b0011 => breakpoint on addr
|
||||||
|
* 0b1100 => breakpoint on addr + 2
|
||||||
|
* 0b1111 => breakpoint on addr
|
||||||
|
* See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
|
||||||
|
*/
|
||||||
|
int bas = extract64(bcr, 5, 4);
|
||||||
|
addr = bvr & ~3ULL;
|
||||||
|
if (bas == 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (bas == 0xc) {
|
||||||
|
addr += 2;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case 2: /* unlinked context ID match */
|
||||||
|
case 8: /* unlinked VMID match (reserved if no EL2) */
|
||||||
|
case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
|
||||||
|
qemu_log_mask(LOG_UNIMP,
|
||||||
|
"arm: unlinked context breakpoint types not implemented\n");
|
||||||
|
return;
|
||||||
|
case 9: /* linked VMID match (reserved if no EL2) */
|
||||||
|
case 11: /* linked context ID and VMID match (reserved if no EL2) */
|
||||||
|
case 3: /* linked context ID match */
|
||||||
|
default:
|
||||||
|
/*
|
||||||
|
* We must generate no events for Linked context matches (unless
|
||||||
|
* they are linked to by some other bp/wp, which is handled in
|
||||||
|
* updates for the linking bp/wp). We choose to also generate no events
|
||||||
|
* for reserved values.
|
||||||
|
*/
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
|
||||||
|
}
|
||||||
|
|
||||||
|
void hw_breakpoint_update_all(ARMCPU *cpu)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
CPUARMState *env = &cpu->env;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Completely clear out existing QEMU breakpoints and our array, to
|
||||||
|
* avoid possible stale entries following migration load.
|
||||||
|
*/
|
||||||
|
cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
|
||||||
|
memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
|
||||||
|
|
||||||
|
for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
|
||||||
|
hw_breakpoint_update(cpu, i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#if !defined(CONFIG_USER_ONLY)
|
||||||
|
|
||||||
|
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
|
||||||
|
{
|
||||||
|
ARMCPU *cpu = ARM_CPU(cs);
|
||||||
|
CPUARMState *env = &cpu->env;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* In BE32 system mode, target memory is stored byteswapped (on a
|
||||||
|
* little-endian host system), and by the time we reach here (via an
|
||||||
|
* opcode helper) the addresses of subword accesses have been adjusted
|
||||||
|
* to account for that, which means that watchpoints will not match.
|
||||||
|
* Undo the adjustment here.
|
||||||
|
*/
|
||||||
|
if (arm_sctlr_b(env)) {
|
||||||
|
if (len == 1) {
|
||||||
|
addr ^= 3;
|
||||||
|
} else if (len == 2) {
|
||||||
|
addr ^= 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* !CONFIG_USER_ONLY */
|
||||||
|
#endif /* CONFIG_TCG */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check for traps to "powerdown debug" registers, which are controlled
|
* Check for traps to "powerdown debug" registers, which are controlled
|
||||||
* by MDCR.TDOSA
|
* by MDCR.TDOSA
|
||||||
@ -813,112 +1051,6 @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
|
|||||||
.access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
|
.access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
|
||||||
};
|
};
|
||||||
|
|
||||||
void hw_watchpoint_update(ARMCPU *cpu, int n)
|
|
||||||
{
|
|
||||||
CPUARMState *env = &cpu->env;
|
|
||||||
vaddr len = 0;
|
|
||||||
vaddr wvr = env->cp15.dbgwvr[n];
|
|
||||||
uint64_t wcr = env->cp15.dbgwcr[n];
|
|
||||||
int mask;
|
|
||||||
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
|
|
||||||
|
|
||||||
if (env->cpu_watchpoint[n]) {
|
|
||||||
cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
|
|
||||||
env->cpu_watchpoint[n] = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!FIELD_EX64(wcr, DBGWCR, E)) {
|
|
||||||
/* E bit clear : watchpoint disabled */
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
|
|
||||||
case 0:
|
|
||||||
/* LSC 00 is reserved and must behave as if the wp is disabled */
|
|
||||||
return;
|
|
||||||
case 1:
|
|
||||||
flags |= BP_MEM_READ;
|
|
||||||
break;
|
|
||||||
case 2:
|
|
||||||
flags |= BP_MEM_WRITE;
|
|
||||||
break;
|
|
||||||
case 3:
|
|
||||||
flags |= BP_MEM_ACCESS;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Attempts to use both MASK and BAS fields simultaneously are
|
|
||||||
* CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
|
|
||||||
* thus generating a watchpoint for every byte in the masked region.
|
|
||||||
*/
|
|
||||||
mask = FIELD_EX64(wcr, DBGWCR, MASK);
|
|
||||||
if (mask == 1 || mask == 2) {
|
|
||||||
/*
|
|
||||||
* Reserved values of MASK; we must act as if the mask value was
|
|
||||||
* some non-reserved value, or as if the watchpoint were disabled.
|
|
||||||
* We choose the latter.
|
|
||||||
*/
|
|
||||||
return;
|
|
||||||
} else if (mask) {
|
|
||||||
/* Watchpoint covers an aligned area up to 2GB in size */
|
|
||||||
len = 1ULL << mask;
|
|
||||||
/*
|
|
||||||
* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
|
|
||||||
* whether the watchpoint fires when the unmasked bits match; we opt
|
|
||||||
* to generate the exceptions.
|
|
||||||
*/
|
|
||||||
wvr &= ~(len - 1);
|
|
||||||
} else {
|
|
||||||
/* Watchpoint covers bytes defined by the byte address select bits */
|
|
||||||
int bas = FIELD_EX64(wcr, DBGWCR, BAS);
|
|
||||||
int basstart;
|
|
||||||
|
|
||||||
if (extract64(wvr, 2, 1)) {
|
|
||||||
/*
|
|
||||||
* Deprecated case of an only 4-aligned address. BAS[7:4] are
|
|
||||||
* ignored, and BAS[3:0] define which bytes to watch.
|
|
||||||
*/
|
|
||||||
bas &= 0xf;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (bas == 0) {
|
|
||||||
/* This must act as if the watchpoint is disabled */
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The BAS bits are supposed to be programmed to indicate a contiguous
|
|
||||||
* range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
|
|
||||||
* we fire for each byte in the word/doubleword addressed by the WVR.
|
|
||||||
* We choose to ignore any non-zero bits after the first range of 1s.
|
|
||||||
*/
|
|
||||||
basstart = ctz32(bas);
|
|
||||||
len = cto32(bas >> basstart);
|
|
||||||
wvr += basstart;
|
|
||||||
}
|
|
||||||
|
|
||||||
cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
|
|
||||||
&env->cpu_watchpoint[n]);
|
|
||||||
}
|
|
||||||
|
|
||||||
void hw_watchpoint_update_all(ARMCPU *cpu)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
CPUARMState *env = &cpu->env;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Completely clear out existing QEMU watchpoints and our array, to
|
|
||||||
* avoid possible stale entries following migration load.
|
|
||||||
*/
|
|
||||||
cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
|
|
||||||
memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
|
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
|
|
||||||
hw_watchpoint_update(cpu, i);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||||
uint64_t value)
|
uint64_t value)
|
||||||
{
|
{
|
||||||
@ -939,7 +1071,9 @@ static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||||||
value &= ~3ULL;
|
value &= ~3ULL;
|
||||||
|
|
||||||
raw_write(env, ri, value);
|
raw_write(env, ri, value);
|
||||||
hw_watchpoint_update(cpu, i);
|
if (tcg_enabled()) {
|
||||||
|
hw_watchpoint_update(cpu, i);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||||
@ -949,109 +1083,8 @@ static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||||||
int i = ri->crm;
|
int i = ri->crm;
|
||||||
|
|
||||||
raw_write(env, ri, value);
|
raw_write(env, ri, value);
|
||||||
hw_watchpoint_update(cpu, i);
|
if (tcg_enabled()) {
|
||||||
}
|
hw_watchpoint_update(cpu, i);
|
||||||
|
|
||||||
void hw_breakpoint_update(ARMCPU *cpu, int n)
|
|
||||||
{
|
|
||||||
CPUARMState *env = &cpu->env;
|
|
||||||
uint64_t bvr = env->cp15.dbgbvr[n];
|
|
||||||
uint64_t bcr = env->cp15.dbgbcr[n];
|
|
||||||
vaddr addr;
|
|
||||||
int bt;
|
|
||||||
int flags = BP_CPU;
|
|
||||||
|
|
||||||
if (env->cpu_breakpoint[n]) {
|
|
||||||
cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
|
|
||||||
env->cpu_breakpoint[n] = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!extract64(bcr, 0, 1)) {
|
|
||||||
/* E bit clear : watchpoint disabled */
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
bt = extract64(bcr, 20, 4);
|
|
||||||
|
|
||||||
switch (bt) {
|
|
||||||
case 4: /* unlinked address mismatch (reserved if AArch64) */
|
|
||||||
case 5: /* linked address mismatch (reserved if AArch64) */
|
|
||||||
qemu_log_mask(LOG_UNIMP,
|
|
||||||
"arm: address mismatch breakpoint types not implemented\n");
|
|
||||||
return;
|
|
||||||
case 0: /* unlinked address match */
|
|
||||||
case 1: /* linked address match */
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* Bits [1:0] are RES0.
|
|
||||||
*
|
|
||||||
* It is IMPLEMENTATION DEFINED whether bits [63:49]
|
|
||||||
* ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
|
|
||||||
* of the VA field ([48] or [52] for FEAT_LVA), or whether the
|
|
||||||
* value is read as written. It is CONSTRAINED UNPREDICTABLE
|
|
||||||
* whether the RESS bits are ignored when comparing an address.
|
|
||||||
* Therefore we are allowed to compare the entire register, which
|
|
||||||
* lets us avoid considering whether FEAT_LVA is actually enabled.
|
|
||||||
*
|
|
||||||
* The BAS field is used to allow setting breakpoints on 16-bit
|
|
||||||
* wide instructions; it is CONSTRAINED UNPREDICTABLE whether
|
|
||||||
* a bp will fire if the addresses covered by the bp and the addresses
|
|
||||||
* covered by the insn overlap but the insn doesn't start at the
|
|
||||||
* start of the bp address range. We choose to require the insn and
|
|
||||||
* the bp to have the same address. The constraints on writing to
|
|
||||||
* BAS enforced in dbgbcr_write mean we have only four cases:
|
|
||||||
* 0b0000 => no breakpoint
|
|
||||||
* 0b0011 => breakpoint on addr
|
|
||||||
* 0b1100 => breakpoint on addr + 2
|
|
||||||
* 0b1111 => breakpoint on addr
|
|
||||||
* See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
|
|
||||||
*/
|
|
||||||
int bas = extract64(bcr, 5, 4);
|
|
||||||
addr = bvr & ~3ULL;
|
|
||||||
if (bas == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (bas == 0xc) {
|
|
||||||
addr += 2;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case 2: /* unlinked context ID match */
|
|
||||||
case 8: /* unlinked VMID match (reserved if no EL2) */
|
|
||||||
case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
|
|
||||||
qemu_log_mask(LOG_UNIMP,
|
|
||||||
"arm: unlinked context breakpoint types not implemented\n");
|
|
||||||
return;
|
|
||||||
case 9: /* linked VMID match (reserved if no EL2) */
|
|
||||||
case 11: /* linked context ID and VMID match (reserved if no EL2) */
|
|
||||||
case 3: /* linked context ID match */
|
|
||||||
default:
|
|
||||||
/*
|
|
||||||
* We must generate no events for Linked context matches (unless
|
|
||||||
* they are linked to by some other bp/wp, which is handled in
|
|
||||||
* updates for the linking bp/wp). We choose to also generate no events
|
|
||||||
* for reserved values.
|
|
||||||
*/
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
|
|
||||||
}
|
|
||||||
|
|
||||||
void hw_breakpoint_update_all(ARMCPU *cpu)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
CPUARMState *env = &cpu->env;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Completely clear out existing QEMU breakpoints and our array, to
|
|
||||||
* avoid possible stale entries following migration load.
|
|
||||||
*/
|
|
||||||
cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
|
|
||||||
memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
|
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
|
|
||||||
hw_breakpoint_update(cpu, i);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1062,7 +1095,9 @@ static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||||||
int i = ri->crm;
|
int i = ri->crm;
|
||||||
|
|
||||||
raw_write(env, ri, value);
|
raw_write(env, ri, value);
|
||||||
hw_breakpoint_update(cpu, i);
|
if (tcg_enabled()) {
|
||||||
|
hw_breakpoint_update(cpu, i);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||||
@ -1079,7 +1114,9 @@ static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||||||
value = deposit64(value, 8, 1, extract64(value, 7, 1));
|
value = deposit64(value, 8, 1, extract64(value, 7, 1));
|
||||||
|
|
||||||
raw_write(env, ri, value);
|
raw_write(env, ri, value);
|
||||||
hw_breakpoint_update(cpu, i);
|
if (tcg_enabled()) {
|
||||||
|
hw_breakpoint_update(cpu, i);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void define_debug_regs(ARMCPU *cpu)
|
void define_debug_regs(ARMCPU *cpu)
|
||||||
@ -1202,30 +1239,3 @@ void define_debug_regs(ARMCPU *cpu)
|
|||||||
g_free(dbgwcr_el1_name);
|
g_free(dbgwcr_el1_name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(CONFIG_USER_ONLY)
|
|
||||||
|
|
||||||
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
|
|
||||||
{
|
|
||||||
ARMCPU *cpu = ARM_CPU(cs);
|
|
||||||
CPUARMState *env = &cpu->env;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* In BE32 system mode, target memory is stored byteswapped (on a
|
|
||||||
* little-endian host system), and by the time we reach here (via an
|
|
||||||
* opcode helper) the addresses of subword accesses have been adjusted
|
|
||||||
* to account for that, which means that watchpoints will not match.
|
|
||||||
* Undo the adjustment here.
|
|
||||||
*/
|
|
||||||
if (arm_sctlr_b(env)) {
|
|
||||||
if (len == 1) {
|
|
||||||
addr ^= 3;
|
|
||||||
} else if (len == 2) {
|
|
||||||
addr ^= 2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
@ -5173,7 +5173,7 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||||||
/* This may enable/disable the MMU, so do a TLB flush. */
|
/* This may enable/disable the MMU, so do a TLB flush. */
|
||||||
tlb_flush(CPU(cpu));
|
tlb_flush(CPU(cpu));
|
||||||
|
|
||||||
if (ri->type & ARM_CP_SUPPRESS_TB_END) {
|
if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
|
||||||
/*
|
/*
|
||||||
* Normally we would always end the TB on an SCTLR write; see the
|
* Normally we would always end the TB on an SCTLR write; see the
|
||||||
* comment in ARMCPRegInfo sctlr initialization below for why Xscale
|
* comment in ARMCPRegInfo sctlr initialization below for why Xscale
|
||||||
@ -6669,32 +6669,6 @@ int sme_exception_el(CPUARMState *env, int el)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
|
|
||||||
static bool sme_fa64(CPUARMState *env, int el)
|
|
||||||
{
|
|
||||||
if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (el <= 1 && !el_is_in_host(env, el)) {
|
|
||||||
if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (el <= 2 && arm_is_el2_enabled(env)) {
|
|
||||||
if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (arm_feature(env, ARM_FEATURE_EL3)) {
|
|
||||||
if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Given that SVE is enabled, return the vector length for EL.
|
* Given that SVE is enabled, return the vector length for EL.
|
||||||
*/
|
*/
|
||||||
@ -6841,7 +6815,9 @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
|
|||||||
memset(env->zarray, 0, sizeof(env->zarray));
|
memset(env->zarray, 0, sizeof(env->zarray));
|
||||||
}
|
}
|
||||||
|
|
||||||
arm_rebuild_hflags(env);
|
if (tcg_enabled()) {
|
||||||
|
arm_rebuild_hflags(env);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||||
@ -9886,7 +9862,7 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
|
|||||||
}
|
}
|
||||||
mask &= ~CACHED_CPSR_BITS;
|
mask &= ~CACHED_CPSR_BITS;
|
||||||
env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
|
env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
|
||||||
if (rebuild_hflags) {
|
if (tcg_enabled() && rebuild_hflags) {
|
||||||
arm_rebuild_hflags(env);
|
arm_rebuild_hflags(env);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -10445,7 +10421,10 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
|
|||||||
env->regs[14] = env->regs[15] + offset;
|
env->regs[14] = env->regs[15] + offset;
|
||||||
}
|
}
|
||||||
env->regs[15] = newpc;
|
env->regs[15] = newpc;
|
||||||
arm_rebuild_hflags(env);
|
|
||||||
|
if (tcg_enabled()) {
|
||||||
|
arm_rebuild_hflags(env);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
|
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
|
||||||
@ -11001,7 +10980,10 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
|
|||||||
pstate_write(env, PSTATE_DAIF | new_mode);
|
pstate_write(env, PSTATE_DAIF | new_mode);
|
||||||
env->aarch64 = true;
|
env->aarch64 = true;
|
||||||
aarch64_restore_sp(env, new_el);
|
aarch64_restore_sp(env, new_el);
|
||||||
helper_rebuild_hflags_a64(env, new_el);
|
|
||||||
|
if (tcg_enabled()) {
|
||||||
|
helper_rebuild_hflags_a64(env, new_el);
|
||||||
|
}
|
||||||
|
|
||||||
env->pc = addr;
|
env->pc = addr;
|
||||||
|
|
||||||
@ -11142,7 +11124,7 @@ int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
|
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
|
||||||
{
|
{
|
||||||
if (regime_has_2_ranges(mmu_idx)) {
|
if (regime_has_2_ranges(mmu_idx)) {
|
||||||
return extract64(tcr, 57, 2);
|
return extract64(tcr, 57, 2);
|
||||||
@ -11853,371 +11835,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
|
|||||||
return arm_mmu_idx_el(env, arm_current_el(env));
|
return arm_mmu_idx_el(env, arm_current_el(env));
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool fgt_svc(CPUARMState *env, int el)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* Assuming fine-grained-traps are active, return true if we
|
|
||||||
* should be trapping on SVC instructions. Only AArch64 can
|
|
||||||
* trap on an SVC at EL1, but we don't need to special-case this
|
|
||||||
* because if this is AArch32 EL1 then arm_fgt_active() is false.
|
|
||||||
* We also know el is 0 or 1.
|
|
||||||
*/
|
|
||||||
return el == 0 ?
|
|
||||||
FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
|
|
||||||
FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
|
|
||||||
}
|
|
||||||
|
|
||||||
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
|
|
||||||
ARMMMUIdx mmu_idx,
|
|
||||||
CPUARMTBFlags flags)
|
|
||||||
{
|
|
||||||
DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
|
|
||||||
DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
|
|
||||||
|
|
||||||
if (arm_singlestep_active(env)) {
|
|
||||||
DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
return flags;
|
|
||||||
}
|
|
||||||
|
|
||||||
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
|
|
||||||
ARMMMUIdx mmu_idx,
|
|
||||||
CPUARMTBFlags flags)
|
|
||||||
{
|
|
||||||
bool sctlr_b = arm_sctlr_b(env);
|
|
||||||
|
|
||||||
if (sctlr_b) {
|
|
||||||
DP_TBFLAG_A32(flags, SCTLR__B, 1);
|
|
||||||
}
|
|
||||||
if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
|
|
||||||
DP_TBFLAG_ANY(flags, BE_DATA, 1);
|
|
||||||
}
|
|
||||||
DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
|
|
||||||
|
|
||||||
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
|
|
||||||
ARMMMUIdx mmu_idx)
|
|
||||||
{
|
|
||||||
CPUARMTBFlags flags = {};
|
|
||||||
uint32_t ccr = env->v7m.ccr[env->v7m.secure];
|
|
||||||
|
|
||||||
/* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
|
|
||||||
if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
|
|
||||||
DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (arm_v7m_is_handler_mode(env)) {
|
|
||||||
DP_TBFLAG_M32(flags, HANDLER, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
|
|
||||||
* is suppressing them because the requested execution priority
|
|
||||||
* is less than 0.
|
|
||||||
*/
|
|
||||||
if (arm_feature(env, ARM_FEATURE_V8) &&
|
|
||||||
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
|
|
||||||
(ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
|
|
||||||
DP_TBFLAG_M32(flags, STACKCHECK, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
|
|
||||||
DP_TBFLAG_M32(flags, SECURE, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
|
|
||||||
ARMMMUIdx mmu_idx)
|
|
||||||
{
|
|
||||||
CPUARMTBFlags flags = {};
|
|
||||||
int el = arm_current_el(env);
|
|
||||||
|
|
||||||
if (arm_sctlr(env, el) & SCTLR_A) {
|
|
||||||
DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (arm_el_is_aa64(env, 1)) {
|
|
||||||
DP_TBFLAG_A32(flags, VFPEN, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
|
|
||||||
(arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
|
|
||||||
DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (arm_fgt_active(env, el)) {
|
|
||||||
DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
|
|
||||||
if (fgt_svc(env, el)) {
|
|
||||||
DP_TBFLAG_ANY(flags, FGT_SVC, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (env->uncached_cpsr & CPSR_IL) {
|
|
||||||
DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The SME exception we are testing for is raised via
|
|
||||||
* AArch64.CheckFPAdvSIMDEnabled(), as called from
|
|
||||||
* AArch32.CheckAdvSIMDOrFPEnabled().
|
|
||||||
*/
|
|
||||||
if (el == 0
|
|
||||||
&& FIELD_EX64(env->svcr, SVCR, SM)
|
|
||||||
&& (!arm_is_el2_enabled(env)
|
|
||||||
|| (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
|
|
||||||
&& arm_el_is_aa64(env, 1)
|
|
||||||
&& !sme_fa64(env, el)) {
|
|
||||||
DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
|
|
||||||
ARMMMUIdx mmu_idx)
|
|
||||||
{
|
|
||||||
CPUARMTBFlags flags = {};
|
|
||||||
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
|
|
||||||
uint64_t tcr = regime_tcr(env, mmu_idx);
|
|
||||||
uint64_t sctlr;
|
|
||||||
int tbii, tbid;
|
|
||||||
|
|
||||||
DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
|
|
||||||
|
|
||||||
/* Get control bits for tagged addresses. */
|
|
||||||
tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
|
|
||||||
tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
|
|
||||||
|
|
||||||
DP_TBFLAG_A64(flags, TBII, tbii);
|
|
||||||
DP_TBFLAG_A64(flags, TBID, tbid);
|
|
||||||
|
|
||||||
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
|
|
||||||
int sve_el = sve_exception_el(env, el);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If either FP or SVE are disabled, translator does not need len.
|
|
||||||
* If SVE EL > FP EL, FP exception has precedence, and translator
|
|
||||||
* does not need SVE EL. Save potential re-translations by forcing
|
|
||||||
* the unneeded data to zero.
|
|
||||||
*/
|
|
||||||
if (fp_el != 0) {
|
|
||||||
if (sve_el > fp_el) {
|
|
||||||
sve_el = 0;
|
|
||||||
}
|
|
||||||
} else if (sve_el == 0) {
|
|
||||||
DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
|
|
||||||
}
|
|
||||||
DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
|
|
||||||
}
|
|
||||||
if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
|
|
||||||
int sme_el = sme_exception_el(env, el);
|
|
||||||
bool sm = FIELD_EX64(env->svcr, SVCR, SM);
|
|
||||||
|
|
||||||
DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
|
|
||||||
if (sme_el == 0) {
|
|
||||||
/* Similarly, do not compute SVL if SME is disabled. */
|
|
||||||
int svl = sve_vqm1_for_el_sm(env, el, true);
|
|
||||||
DP_TBFLAG_A64(flags, SVL, svl);
|
|
||||||
if (sm) {
|
|
||||||
/* If SVE is disabled, we will not have set VL above. */
|
|
||||||
DP_TBFLAG_A64(flags, VL, svl);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (sm) {
|
|
||||||
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
|
|
||||||
DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
|
|
||||||
}
|
|
||||||
DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
|
|
||||||
}
|
|
||||||
|
|
||||||
sctlr = regime_sctlr(env, stage1);
|
|
||||||
|
|
||||||
if (sctlr & SCTLR_A) {
|
|
||||||
DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
|
|
||||||
DP_TBFLAG_ANY(flags, BE_DATA, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
|
|
||||||
/*
|
|
||||||
* In order to save space in flags, we record only whether
|
|
||||||
* pauth is "inactive", meaning all insns are implemented as
|
|
||||||
* a nop, or "active" when some action must be performed.
|
|
||||||
* The decision of which action to take is left to a helper.
|
|
||||||
*/
|
|
||||||
if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
|
|
||||||
DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
|
|
||||||
/* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
|
|
||||||
if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
|
|
||||||
DP_TBFLAG_A64(flags, BT, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Compute the condition for using AccType_UNPRIV for LDTR et al. */
|
|
||||||
if (!(env->pstate & PSTATE_UAO)) {
|
|
||||||
switch (mmu_idx) {
|
|
||||||
case ARMMMUIdx_E10_1:
|
|
||||||
case ARMMMUIdx_E10_1_PAN:
|
|
||||||
/* TODO: ARMv8.3-NV */
|
|
||||||
DP_TBFLAG_A64(flags, UNPRIV, 1);
|
|
||||||
break;
|
|
||||||
case ARMMMUIdx_E20_2:
|
|
||||||
case ARMMMUIdx_E20_2_PAN:
|
|
||||||
/*
|
|
||||||
* Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
|
|
||||||
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
|
|
||||||
*/
|
|
||||||
if (env->cp15.hcr_el2 & HCR_TGE) {
|
|
||||||
DP_TBFLAG_A64(flags, UNPRIV, 1);
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (env->pstate & PSTATE_IL) {
|
|
||||||
DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (arm_fgt_active(env, el)) {
|
|
||||||
DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
|
|
||||||
if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
|
|
||||||
DP_TBFLAG_A64(flags, FGT_ERET, 1);
|
|
||||||
}
|
|
||||||
if (fgt_svc(env, el)) {
|
|
||||||
DP_TBFLAG_ANY(flags, FGT_SVC, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
|
|
||||||
/*
|
|
||||||
* Set MTE_ACTIVE if any access may be Checked, and leave clear
|
|
||||||
* if all accesses must be Unchecked:
|
|
||||||
* 1) If no TBI, then there are no tags in the address to check,
|
|
||||||
* 2) If Tag Check Override, then all accesses are Unchecked,
|
|
||||||
* 3) If Tag Check Fail == 0, then Checked access have no effect,
|
|
||||||
* 4) If no Allocation Tag Access, then all accesses are Unchecked.
|
|
||||||
*/
|
|
||||||
if (allocation_tag_access_enabled(env, el, sctlr)) {
|
|
||||||
DP_TBFLAG_A64(flags, ATA, 1);
|
|
||||||
if (tbid
|
|
||||||
&& !(env->pstate & PSTATE_TCO)
|
|
||||||
&& (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
|
|
||||||
DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/* And again for unprivileged accesses, if required. */
|
|
||||||
if (EX_TBFLAG_A64(flags, UNPRIV)
|
|
||||||
&& tbid
|
|
||||||
&& !(env->pstate & PSTATE_TCO)
|
|
||||||
&& (sctlr & SCTLR_TCF0)
|
|
||||||
&& allocation_tag_access_enabled(env, 0, sctlr)) {
|
|
||||||
DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
|
|
||||||
}
|
|
||||||
/* Cache TCMA as well as TBI. */
|
|
||||||
DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
|
|
||||||
}
|
|
||||||
|
|
||||||
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
|
|
||||||
{
|
|
||||||
int el = arm_current_el(env);
|
|
||||||
int fp_el = fp_exception_el(env, el);
|
|
||||||
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
|
||||||
|
|
||||||
if (is_a64(env)) {
|
|
||||||
return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
|
|
||||||
} else if (arm_feature(env, ARM_FEATURE_M)) {
|
|
||||||
return rebuild_hflags_m32(env, fp_el, mmu_idx);
|
|
||||||
} else {
|
|
||||||
return rebuild_hflags_a32(env, fp_el, mmu_idx);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void arm_rebuild_hflags(CPUARMState *env)
|
|
||||||
{
|
|
||||||
env->hflags = rebuild_hflags_internal(env);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If we have triggered a EL state change we can't rely on the
|
|
||||||
* translator having passed it to us, we need to recompute.
|
|
||||||
*/
|
|
||||||
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
|
|
||||||
{
|
|
||||||
int el = arm_current_el(env);
|
|
||||||
int fp_el = fp_exception_el(env, el);
|
|
||||||
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
|
||||||
|
|
||||||
env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
|
|
||||||
}
|
|
||||||
|
|
||||||
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
|
|
||||||
{
|
|
||||||
int fp_el = fp_exception_el(env, el);
|
|
||||||
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
|
||||||
|
|
||||||
env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If we have triggered a EL state change we can't rely on the
|
|
||||||
* translator having passed it to us, we need to recompute.
|
|
||||||
*/
|
|
||||||
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
|
|
||||||
{
|
|
||||||
int el = arm_current_el(env);
|
|
||||||
int fp_el = fp_exception_el(env, el);
|
|
||||||
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
|
||||||
env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
|
|
||||||
}
|
|
||||||
|
|
||||||
void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
|
|
||||||
{
|
|
||||||
int fp_el = fp_exception_el(env, el);
|
|
||||||
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
|
||||||
|
|
||||||
env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
|
|
||||||
}
|
|
||||||
|
|
||||||
void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
|
|
||||||
{
|
|
||||||
int fp_el = fp_exception_el(env, el);
|
|
||||||
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
|
||||||
|
|
||||||
env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
|
|
||||||
{
|
|
||||||
#ifdef CONFIG_DEBUG_TCG
|
|
||||||
CPUARMTBFlags c = env->hflags;
|
|
||||||
CPUARMTBFlags r = rebuild_hflags_internal(env);
|
|
||||||
|
|
||||||
if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
|
|
||||||
fprintf(stderr, "TCG hflags mismatch "
|
|
||||||
"(current:(0x%08x,0x" TARGET_FMT_lx ")"
|
|
||||||
" rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
|
|
||||||
c.flags, c.flags2, r.flags, r.flags2);
|
|
||||||
abort();
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool mve_no_pred(CPUARMState *env)
|
static bool mve_no_pred(CPUARMState *env)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
|
@ -600,9 +600,6 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
|
|||||||
/* Return the MMU index for a v7M CPU in the specified security state */
|
/* Return the MMU index for a v7M CPU in the specified security state */
|
||||||
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
|
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
|
||||||
|
|
||||||
/* Return true if the translation regime is using LPAE format page tables */
|
|
||||||
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return true if the stage 1 translation regime is using LPAE
|
* Return true if the stage 1 translation regime is using LPAE
|
||||||
* format page tables
|
* format page tables
|
||||||
@ -767,6 +764,24 @@ static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
|
|||||||
return env->cp15.tcr_el[regime_el(env, mmu_idx)];
|
return env->cp15.tcr_el[regime_el(env, mmu_idx)];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Return true if the translation regime is using LPAE format page tables */
|
||||||
|
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
|
||||||
|
{
|
||||||
|
int el = regime_el(env, mmu_idx);
|
||||||
|
if (el == 2 || arm_el_is_aa64(env, el)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (arm_feature(env, ARM_FEATURE_PMSA) &&
|
||||||
|
arm_feature(env, ARM_FEATURE_V8)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (arm_feature(env, ARM_FEATURE_LPAE)
|
||||||
|
&& (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* arm_num_brps: Return number of implemented breakpoints.
|
* arm_num_brps: Return number of implemented breakpoints.
|
||||||
* Note that the ID register BRPS field is "number of bps - 1",
|
* Note that the ID register BRPS field is "number of bps - 1",
|
||||||
@ -1073,6 +1088,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
|
|||||||
|
|
||||||
int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
|
int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
|
||||||
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
|
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
|
||||||
|
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
|
||||||
|
|
||||||
/* Determine if allocation tags are available. */
|
/* Determine if allocation tags are available. */
|
||||||
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
|
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
|
||||||
@ -1383,4 +1399,5 @@ static inline bool arm_fgt_active(CPUARMState *env, int el)
|
|||||||
(!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
|
(!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void assert_hflags_rebuild_correctly(CPUARMState *env);
|
||||||
#endif
|
#endif
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
#include "cpu.h"
|
#include "cpu.h"
|
||||||
#include "qemu/error-report.h"
|
#include "qemu/error-report.h"
|
||||||
#include "sysemu/kvm.h"
|
#include "sysemu/kvm.h"
|
||||||
|
#include "sysemu/tcg.h"
|
||||||
#include "kvm_arm.h"
|
#include "kvm_arm.h"
|
||||||
#include "internals.h"
|
#include "internals.h"
|
||||||
#include "migration/cpu.h"
|
#include "migration/cpu.h"
|
||||||
@ -848,8 +849,10 @@ static int cpu_post_load(void *opaque, int version_id)
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
hw_breakpoint_update_all(cpu);
|
if (tcg_enabled()) {
|
||||||
hw_watchpoint_update_all(cpu);
|
hw_breakpoint_update_all(cpu);
|
||||||
|
hw_watchpoint_update_all(cpu);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* TCG gen_update_fp_context() relies on the invariant that
|
* TCG gen_update_fp_context() relies on the invariant that
|
||||||
@ -868,7 +871,10 @@ static int cpu_post_load(void *opaque, int version_id)
|
|||||||
if (!kvm_enabled()) {
|
if (!kvm_enabled()) {
|
||||||
pmu_op_finish(&cpu->env);
|
pmu_op_finish(&cpu->env);
|
||||||
}
|
}
|
||||||
arm_rebuild_hflags(&cpu->env);
|
|
||||||
|
if (tcg_enabled()) {
|
||||||
|
arm_rebuild_hflags(&cpu->env);
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -1,40 +1,9 @@
|
|||||||
gen = [
|
|
||||||
decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
|
|
||||||
decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
|
|
||||||
decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
|
|
||||||
decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
|
|
||||||
decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
|
|
||||||
decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
|
|
||||||
decodetree.process('vfp.decode', extra_args: '--decode=disas_vfp'),
|
|
||||||
decodetree.process('vfp-uncond.decode', extra_args: '--decode=disas_vfp_uncond'),
|
|
||||||
decodetree.process('m-nocp.decode', extra_args: '--decode=disas_m_nocp'),
|
|
||||||
decodetree.process('mve.decode', extra_args: '--decode=disas_mve'),
|
|
||||||
decodetree.process('a32.decode', extra_args: '--static-decode=disas_a32'),
|
|
||||||
decodetree.process('a32-uncond.decode', extra_args: '--static-decode=disas_a32_uncond'),
|
|
||||||
decodetree.process('t32.decode', extra_args: '--static-decode=disas_t32'),
|
|
||||||
decodetree.process('t16.decode', extra_args: ['-w', '16', '--static-decode=disas_t16']),
|
|
||||||
]
|
|
||||||
|
|
||||||
arm_ss = ss.source_set()
|
arm_ss = ss.source_set()
|
||||||
arm_ss.add(gen)
|
|
||||||
arm_ss.add(files(
|
arm_ss.add(files(
|
||||||
'cpu.c',
|
'cpu.c',
|
||||||
'crypto_helper.c',
|
|
||||||
'debug_helper.c',
|
'debug_helper.c',
|
||||||
'gdbstub.c',
|
'gdbstub.c',
|
||||||
'helper.c',
|
'helper.c',
|
||||||
'iwmmxt_helper.c',
|
|
||||||
'm_helper.c',
|
|
||||||
'mve_helper.c',
|
|
||||||
'neon_helper.c',
|
|
||||||
'op_helper.c',
|
|
||||||
'tlb_helper.c',
|
|
||||||
'translate.c',
|
|
||||||
'translate-m-nocp.c',
|
|
||||||
'translate-mve.c',
|
|
||||||
'translate-neon.c',
|
|
||||||
'translate-vfp.c',
|
|
||||||
'vec_helper.c',
|
|
||||||
'vfp_helper.c',
|
'vfp_helper.c',
|
||||||
'cpu_tcg.c',
|
'cpu_tcg.c',
|
||||||
))
|
))
|
||||||
@ -45,14 +14,6 @@ arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c'), if_false: fil
|
|||||||
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
|
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
|
||||||
'cpu64.c',
|
'cpu64.c',
|
||||||
'gdbstub64.c',
|
'gdbstub64.c',
|
||||||
'helper-a64.c',
|
|
||||||
'mte_helper.c',
|
|
||||||
'pauth_helper.c',
|
|
||||||
'sve_helper.c',
|
|
||||||
'sme_helper.c',
|
|
||||||
'translate-a64.c',
|
|
||||||
'translate-sve.c',
|
|
||||||
'translate-sme.c',
|
|
||||||
))
|
))
|
||||||
|
|
||||||
arm_softmmu_ss = ss.source_set()
|
arm_softmmu_ss = ss.source_set()
|
||||||
@ -61,11 +22,16 @@ arm_softmmu_ss.add(files(
|
|||||||
'arm-powerctl.c',
|
'arm-powerctl.c',
|
||||||
'machine.c',
|
'machine.c',
|
||||||
'monitor.c',
|
'monitor.c',
|
||||||
'psci.c',
|
|
||||||
'ptw.c',
|
'ptw.c',
|
||||||
))
|
))
|
||||||
|
|
||||||
subdir('hvf')
|
subdir('hvf')
|
||||||
|
|
||||||
|
if 'CONFIG_TCG' in config_all
|
||||||
|
subdir('tcg')
|
||||||
|
else
|
||||||
|
arm_ss.add(files('tcg-stubs.c'))
|
||||||
|
endif
|
||||||
|
|
||||||
target_arch += {'arm': arm_ss}
|
target_arch += {'arm': arm_ss}
|
||||||
target_softmmu_arch += {'arm': arm_softmmu_ss}
|
target_softmmu_arch += {'arm': arm_softmmu_ss}
|
||||||
|
@ -254,6 +254,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
|
|||||||
ptw->out_host = NULL;
|
ptw->out_host = NULL;
|
||||||
ptw->out_rw = false;
|
ptw->out_rw = false;
|
||||||
} else {
|
} else {
|
||||||
|
#ifdef CONFIG_TCG
|
||||||
CPUTLBEntryFull *full;
|
CPUTLBEntryFull *full;
|
||||||
int flags;
|
int flags;
|
||||||
|
|
||||||
@ -270,6 +271,9 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
|
|||||||
ptw->out_rw = full->prot & PAGE_WRITE;
|
ptw->out_rw = full->prot & PAGE_WRITE;
|
||||||
pte_attrs = full->pte_attrs;
|
pte_attrs = full->pte_attrs;
|
||||||
pte_secure = full->attrs.secure;
|
pte_secure = full->attrs.secure;
|
||||||
|
#else
|
||||||
|
g_assert_not_reached();
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
if (regime_is_stage2(s2_mmu_idx)) {
|
if (regime_is_stage2(s2_mmu_idx)) {
|
||||||
|
27
target/arm/tcg-stubs.c
Normal file
27
target/arm/tcg-stubs.c
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
/*
|
||||||
|
* QEMU ARM stubs for some TCG helper functions
|
||||||
|
*
|
||||||
|
* Copyright 2021 SUSE LLC
|
||||||
|
*
|
||||||
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||||
|
* See the COPYING file in the top-level directory.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "qemu/osdep.h"
|
||||||
|
#include "cpu.h"
|
||||||
|
#include "internals.h"
|
||||||
|
|
||||||
|
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
|
||||||
|
{
|
||||||
|
g_assert_not_reached();
|
||||||
|
}
|
||||||
|
|
||||||
|
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
|
||||||
|
uint32_t target_el, uintptr_t ra)
|
||||||
|
{
|
||||||
|
g_assert_not_reached();
|
||||||
|
}
|
||||||
|
/* Temporarily while cpu_get_tb_cpu_state() is still in common code */
|
||||||
|
void assert_hflags_rebuild_correctly(CPUARMState *env)
|
||||||
|
{
|
||||||
|
}
|
403
target/arm/tcg/hflags.c
Normal file
403
target/arm/tcg/hflags.c
Normal file
@ -0,0 +1,403 @@
|
|||||||
|
/*
|
||||||
|
* ARM hflags
|
||||||
|
*
|
||||||
|
* This code is licensed under the GNU GPL v2 or later.
|
||||||
|
*
|
||||||
|
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||||
|
*/
|
||||||
|
#include "qemu/osdep.h"
|
||||||
|
#include "cpu.h"
|
||||||
|
#include "internals.h"
|
||||||
|
#include "exec/helper-proto.h"
|
||||||
|
#include "cpregs.h"
|
||||||
|
|
||||||
|
static inline bool fgt_svc(CPUARMState *env, int el)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Assuming fine-grained-traps are active, return true if we
|
||||||
|
* should be trapping on SVC instructions. Only AArch64 can
|
||||||
|
* trap on an SVC at EL1, but we don't need to special-case this
|
||||||
|
* because if this is AArch32 EL1 then arm_fgt_active() is false.
|
||||||
|
* We also know el is 0 or 1.
|
||||||
|
*/
|
||||||
|
return el == 0 ?
|
||||||
|
FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
|
||||||
|
FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
|
||||||
|
}
|
||||||
|
|
||||||
|
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
|
||||||
|
ARMMMUIdx mmu_idx,
|
||||||
|
CPUARMTBFlags flags)
|
||||||
|
{
|
||||||
|
DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
|
||||||
|
DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
|
||||||
|
|
||||||
|
if (arm_singlestep_active(env)) {
|
||||||
|
DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
return flags;
|
||||||
|
}
|
||||||
|
|
||||||
|
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
|
||||||
|
ARMMMUIdx mmu_idx,
|
||||||
|
CPUARMTBFlags flags)
|
||||||
|
{
|
||||||
|
bool sctlr_b = arm_sctlr_b(env);
|
||||||
|
|
||||||
|
if (sctlr_b) {
|
||||||
|
DP_TBFLAG_A32(flags, SCTLR__B, 1);
|
||||||
|
}
|
||||||
|
if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
|
||||||
|
DP_TBFLAG_ANY(flags, BE_DATA, 1);
|
||||||
|
}
|
||||||
|
DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
|
||||||
|
|
||||||
|
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
|
||||||
|
ARMMMUIdx mmu_idx)
|
||||||
|
{
|
||||||
|
CPUARMTBFlags flags = {};
|
||||||
|
uint32_t ccr = env->v7m.ccr[env->v7m.secure];
|
||||||
|
|
||||||
|
/* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
|
||||||
|
if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
|
||||||
|
DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arm_v7m_is_handler_mode(env)) {
|
||||||
|
DP_TBFLAG_M32(flags, HANDLER, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
|
||||||
|
* is suppressing them because the requested execution priority
|
||||||
|
* is less than 0.
|
||||||
|
*/
|
||||||
|
if (arm_feature(env, ARM_FEATURE_V8) &&
|
||||||
|
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
|
||||||
|
(ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
|
||||||
|
DP_TBFLAG_M32(flags, STACKCHECK, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
|
||||||
|
DP_TBFLAG_M32(flags, SECURE, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
|
||||||
|
static bool sme_fa64(CPUARMState *env, int el)
|
||||||
|
{
|
||||||
|
if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (el <= 1 && !el_is_in_host(env, el)) {
|
||||||
|
if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (el <= 2 && arm_is_el2_enabled(env)) {
|
||||||
|
if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (arm_feature(env, ARM_FEATURE_EL3)) {
|
||||||
|
if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
|
||||||
|
ARMMMUIdx mmu_idx)
|
||||||
|
{
|
||||||
|
CPUARMTBFlags flags = {};
|
||||||
|
int el = arm_current_el(env);
|
||||||
|
|
||||||
|
if (arm_sctlr(env, el) & SCTLR_A) {
|
||||||
|
DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arm_el_is_aa64(env, 1)) {
|
||||||
|
DP_TBFLAG_A32(flags, VFPEN, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
|
||||||
|
(arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
|
||||||
|
DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arm_fgt_active(env, el)) {
|
||||||
|
DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
|
||||||
|
if (fgt_svc(env, el)) {
|
||||||
|
DP_TBFLAG_ANY(flags, FGT_SVC, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (env->uncached_cpsr & CPSR_IL) {
|
||||||
|
DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The SME exception we are testing for is raised via
|
||||||
|
* AArch64.CheckFPAdvSIMDEnabled(), as called from
|
||||||
|
* AArch32.CheckAdvSIMDOrFPEnabled().
|
||||||
|
*/
|
||||||
|
if (el == 0
|
||||||
|
&& FIELD_EX64(env->svcr, SVCR, SM)
|
||||||
|
&& (!arm_is_el2_enabled(env)
|
||||||
|
|| (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
|
||||||
|
&& arm_el_is_aa64(env, 1)
|
||||||
|
&& !sme_fa64(env, el)) {
|
||||||
|
DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
|
||||||
|
ARMMMUIdx mmu_idx)
|
||||||
|
{
|
||||||
|
CPUARMTBFlags flags = {};
|
||||||
|
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
|
||||||
|
uint64_t tcr = regime_tcr(env, mmu_idx);
|
||||||
|
uint64_t sctlr;
|
||||||
|
int tbii, tbid;
|
||||||
|
|
||||||
|
DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
|
||||||
|
|
||||||
|
/* Get control bits for tagged addresses. */
|
||||||
|
tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
|
||||||
|
tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
|
||||||
|
|
||||||
|
DP_TBFLAG_A64(flags, TBII, tbii);
|
||||||
|
DP_TBFLAG_A64(flags, TBID, tbid);
|
||||||
|
|
||||||
|
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
|
||||||
|
int sve_el = sve_exception_el(env, el);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If either FP or SVE are disabled, translator does not need len.
|
||||||
|
* If SVE EL > FP EL, FP exception has precedence, and translator
|
||||||
|
* does not need SVE EL. Save potential re-translations by forcing
|
||||||
|
* the unneeded data to zero.
|
||||||
|
*/
|
||||||
|
if (fp_el != 0) {
|
||||||
|
if (sve_el > fp_el) {
|
||||||
|
sve_el = 0;
|
||||||
|
}
|
||||||
|
} else if (sve_el == 0) {
|
||||||
|
DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
|
||||||
|
}
|
||||||
|
DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
|
||||||
|
}
|
||||||
|
if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
|
||||||
|
int sme_el = sme_exception_el(env, el);
|
||||||
|
bool sm = FIELD_EX64(env->svcr, SVCR, SM);
|
||||||
|
|
||||||
|
DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
|
||||||
|
if (sme_el == 0) {
|
||||||
|
/* Similarly, do not compute SVL if SME is disabled. */
|
||||||
|
int svl = sve_vqm1_for_el_sm(env, el, true);
|
||||||
|
DP_TBFLAG_A64(flags, SVL, svl);
|
||||||
|
if (sm) {
|
||||||
|
/* If SVE is disabled, we will not have set VL above. */
|
||||||
|
DP_TBFLAG_A64(flags, VL, svl);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (sm) {
|
||||||
|
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
|
||||||
|
DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
|
||||||
|
}
|
||||||
|
DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
|
||||||
|
}
|
||||||
|
|
||||||
|
sctlr = regime_sctlr(env, stage1);
|
||||||
|
|
||||||
|
if (sctlr & SCTLR_A) {
|
||||||
|
DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
|
||||||
|
DP_TBFLAG_ANY(flags, BE_DATA, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
|
||||||
|
/*
|
||||||
|
* In order to save space in flags, we record only whether
|
||||||
|
* pauth is "inactive", meaning all insns are implemented as
|
||||||
|
* a nop, or "active" when some action must be performed.
|
||||||
|
* The decision of which action to take is left to a helper.
|
||||||
|
*/
|
||||||
|
if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
|
||||||
|
DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
|
||||||
|
/* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
|
||||||
|
if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
|
||||||
|
DP_TBFLAG_A64(flags, BT, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Compute the condition for using AccType_UNPRIV for LDTR et al. */
|
||||||
|
if (!(env->pstate & PSTATE_UAO)) {
|
||||||
|
switch (mmu_idx) {
|
||||||
|
case ARMMMUIdx_E10_1:
|
||||||
|
case ARMMMUIdx_E10_1_PAN:
|
||||||
|
/* TODO: ARMv8.3-NV */
|
||||||
|
DP_TBFLAG_A64(flags, UNPRIV, 1);
|
||||||
|
break;
|
||||||
|
case ARMMMUIdx_E20_2:
|
||||||
|
case ARMMMUIdx_E20_2_PAN:
|
||||||
|
/*
|
||||||
|
* Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
|
||||||
|
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
|
||||||
|
*/
|
||||||
|
if (env->cp15.hcr_el2 & HCR_TGE) {
|
||||||
|
DP_TBFLAG_A64(flags, UNPRIV, 1);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (env->pstate & PSTATE_IL) {
|
||||||
|
DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arm_fgt_active(env, el)) {
|
||||||
|
DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
|
||||||
|
if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
|
||||||
|
DP_TBFLAG_A64(flags, FGT_ERET, 1);
|
||||||
|
}
|
||||||
|
if (fgt_svc(env, el)) {
|
||||||
|
DP_TBFLAG_ANY(flags, FGT_SVC, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
|
||||||
|
/*
|
||||||
|
* Set MTE_ACTIVE if any access may be Checked, and leave clear
|
||||||
|
* if all accesses must be Unchecked:
|
||||||
|
* 1) If no TBI, then there are no tags in the address to check,
|
||||||
|
* 2) If Tag Check Override, then all accesses are Unchecked,
|
||||||
|
* 3) If Tag Check Fail == 0, then Checked access have no effect,
|
||||||
|
* 4) If no Allocation Tag Access, then all accesses are Unchecked.
|
||||||
|
*/
|
||||||
|
if (allocation_tag_access_enabled(env, el, sctlr)) {
|
||||||
|
DP_TBFLAG_A64(flags, ATA, 1);
|
||||||
|
if (tbid
|
||||||
|
&& !(env->pstate & PSTATE_TCO)
|
||||||
|
&& (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
|
||||||
|
DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/* And again for unprivileged accesses, if required. */
|
||||||
|
if (EX_TBFLAG_A64(flags, UNPRIV)
|
||||||
|
&& tbid
|
||||||
|
&& !(env->pstate & PSTATE_TCO)
|
||||||
|
&& (sctlr & SCTLR_TCF0)
|
||||||
|
&& allocation_tag_access_enabled(env, 0, sctlr)) {
|
||||||
|
DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
|
||||||
|
}
|
||||||
|
/* Cache TCMA as well as TBI. */
|
||||||
|
DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
|
||||||
|
}
|
||||||
|
|
||||||
|
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
|
||||||
|
{
|
||||||
|
int el = arm_current_el(env);
|
||||||
|
int fp_el = fp_exception_el(env, el);
|
||||||
|
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
||||||
|
|
||||||
|
if (is_a64(env)) {
|
||||||
|
return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
|
||||||
|
} else if (arm_feature(env, ARM_FEATURE_M)) {
|
||||||
|
return rebuild_hflags_m32(env, fp_el, mmu_idx);
|
||||||
|
} else {
|
||||||
|
return rebuild_hflags_a32(env, fp_el, mmu_idx);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void arm_rebuild_hflags(CPUARMState *env)
|
||||||
|
{
|
||||||
|
env->hflags = rebuild_hflags_internal(env);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If we have triggered a EL state change we can't rely on the
|
||||||
|
* translator having passed it to us, we need to recompute.
|
||||||
|
*/
|
||||||
|
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
|
||||||
|
{
|
||||||
|
int el = arm_current_el(env);
|
||||||
|
int fp_el = fp_exception_el(env, el);
|
||||||
|
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
||||||
|
|
||||||
|
env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
|
||||||
|
{
|
||||||
|
int fp_el = fp_exception_el(env, el);
|
||||||
|
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
||||||
|
|
||||||
|
env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If we have triggered a EL state change we can't rely on the
|
||||||
|
* translator having passed it to us, we need to recompute.
|
||||||
|
*/
|
||||||
|
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
|
||||||
|
{
|
||||||
|
int el = arm_current_el(env);
|
||||||
|
int fp_el = fp_exception_el(env, el);
|
||||||
|
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
||||||
|
env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
|
||||||
|
{
|
||||||
|
int fp_el = fp_exception_el(env, el);
|
||||||
|
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
||||||
|
|
||||||
|
env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
|
||||||
|
{
|
||||||
|
int fp_el = fp_exception_el(env, el);
|
||||||
|
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
|
||||||
|
|
||||||
|
env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
void assert_hflags_rebuild_correctly(CPUARMState *env)
|
||||||
|
{
|
||||||
|
#ifdef CONFIG_DEBUG_TCG
|
||||||
|
CPUARMTBFlags c = env->hflags;
|
||||||
|
CPUARMTBFlags r = rebuild_hflags_internal(env);
|
||||||
|
|
||||||
|
if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
|
||||||
|
fprintf(stderr, "TCG hflags mismatch "
|
||||||
|
"(current:(0x%08x,0x" TARGET_FMT_lx ")"
|
||||||
|
" rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
|
||||||
|
c.flags, c.flags2, r.flags, r.flags2);
|
||||||
|
abort();
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
}
|
50
target/arm/tcg/meson.build
Normal file
50
target/arm/tcg/meson.build
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
gen = [
|
||||||
|
decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
|
||||||
|
decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
|
||||||
|
decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
|
||||||
|
decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
|
||||||
|
decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
|
||||||
|
decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
|
||||||
|
decodetree.process('vfp.decode', extra_args: '--decode=disas_vfp'),
|
||||||
|
decodetree.process('vfp-uncond.decode', extra_args: '--decode=disas_vfp_uncond'),
|
||||||
|
decodetree.process('m-nocp.decode', extra_args: '--decode=disas_m_nocp'),
|
||||||
|
decodetree.process('mve.decode', extra_args: '--decode=disas_mve'),
|
||||||
|
decodetree.process('a32.decode', extra_args: '--static-decode=disas_a32'),
|
||||||
|
decodetree.process('a32-uncond.decode', extra_args: '--static-decode=disas_a32_uncond'),
|
||||||
|
decodetree.process('t32.decode', extra_args: '--static-decode=disas_t32'),
|
||||||
|
decodetree.process('t16.decode', extra_args: ['-w', '16', '--static-decode=disas_t16']),
|
||||||
|
]
|
||||||
|
|
||||||
|
arm_ss.add(gen)
|
||||||
|
|
||||||
|
arm_ss.add(files(
|
||||||
|
'translate.c',
|
||||||
|
'translate-m-nocp.c',
|
||||||
|
'translate-mve.c',
|
||||||
|
'translate-neon.c',
|
||||||
|
'translate-vfp.c',
|
||||||
|
'crypto_helper.c',
|
||||||
|
'hflags.c',
|
||||||
|
'iwmmxt_helper.c',
|
||||||
|
'm_helper.c',
|
||||||
|
'mve_helper.c',
|
||||||
|
'neon_helper.c',
|
||||||
|
'op_helper.c',
|
||||||
|
'tlb_helper.c',
|
||||||
|
'vec_helper.c',
|
||||||
|
))
|
||||||
|
|
||||||
|
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
|
||||||
|
'translate-a64.c',
|
||||||
|
'translate-sve.c',
|
||||||
|
'translate-sme.c',
|
||||||
|
'helper-a64.c',
|
||||||
|
'mte_helper.c',
|
||||||
|
'pauth_helper.c',
|
||||||
|
'sme_helper.c',
|
||||||
|
'sve_helper.c',
|
||||||
|
))
|
||||||
|
|
||||||
|
arm_softmmu_ss.add(files(
|
||||||
|
'psci.c',
|
||||||
|
))
|
@ -12,24 +12,6 @@
|
|||||||
#include "exec/helper-proto.h"
|
#include "exec/helper-proto.h"
|
||||||
|
|
||||||
|
|
||||||
/* Return true if the translation regime is using LPAE format page tables */
|
|
||||||
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
|
|
||||||
{
|
|
||||||
int el = regime_el(env, mmu_idx);
|
|
||||||
if (el == 2 || arm_el_is_aa64(env, el)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
if (arm_feature(env, ARM_FEATURE_PMSA) &&
|
|
||||||
arm_feature(env, ARM_FEATURE_V8)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
if (arm_feature(env, ARM_FEATURE_LPAE)
|
|
||||||
&& (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Returns true if the stage 1 translation regime is using LPAE format page
|
* Returns true if the stage 1 translation regime is using LPAE format page
|
||||||
* tables. Used when raising alignment exceptions, whose FSR changes depending
|
* tables. Used when raising alignment exceptions, whose FSR changes depending
|
@ -15,6 +15,7 @@ from avocado_qemu import QemuSystemTest
|
|||||||
class Version(QemuSystemTest):
|
class Version(QemuSystemTest):
|
||||||
"""
|
"""
|
||||||
:avocado: tags=quick
|
:avocado: tags=quick
|
||||||
|
:avocado: tags=machine:none
|
||||||
"""
|
"""
|
||||||
def test_qmp_human_info_version(self):
|
def test_qmp_human_info_version(self):
|
||||||
self.vm.add_args('-nodefaults')
|
self.vm.add_args('-nodefaults')
|
||||||
|
Loading…
Reference in New Issue
Block a user