target-arm queue:
 * target/arm: generate a custom MIDR for -cpu max
 * hw/misc/zynq_slcr: refactor to use standard register definition
 * Set ENET_BD_BDU in I.MX FEC controller
 * target/arm: Fix routing of singlestep exceptions
 * refactor a32/t32 decoder handling of PC
 * minor optimisations/cleanups of some a32/t32 codegen
 * target/arm/cpu64: Ensure kvm really supports aarch64=off
 * target/arm/cpu: Ensure we can use the pmu with kvm
 * target/arm: Minor cleanups preparatory to KVM SVE support

-----BEGIN PGP SIGNATURE-----

iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAl1WrIsZHHBldGVyLm1h
eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3tJ/D/9I0ccyciHwuekySUHs+Wq6
2grX8t6RFzlhA1ULoAaEO4x8uWWGnbiGTeSGM819T3nj1a7neQV12Xe5RRGG0j7n
aeVseYnZF96oshKPkDSVTcGQisVfmmHIJ0oqx2k1aUGrmyFJlTuLWQBZCCiZKhxA
zA6YzUbOA2apfi9nun6SbbjysiRD2lp2i9vI79nVlo+ca77v/1sdFUwzg0hRE//X
IondHeWtCZScmc/GwABv4EdNzQ4Aerfe10v/pOKXEC59rPwEiaiSGBPu6SRUaGWH
qHlwjVU2+BFGkz9Oy/7+tDTBk6saPi4taZF8SxxiC/QTyNV2ijyKV5iy9KOYAFw7
E41fhv4+Kch569/SX7fiyAxL0gAS2HGFtegByuQEgjjioOCRugFcX275NXvuW06j
jfOP/zSD9P39WA0jCJaNj5FdJTcLmIuFxKjBUEX3Cdb+3igIq1BW0ZFd/OOBoo1W
GHcEmO6tLyx35kigOb3TkayQpkqCoaGCcgzJ0g2Oy06rKwlcci+BfCfc3aG+uSSY
+TuGjRhpQxQJJt880d7tBqeH9R5FABvQ0TEwGuACylDEZM5bN7BpZxCxCVN/bFG+
pzvzs/QtOq0FN7LK4L4rbuJui4nBhAyalbiIXQ8ihWQgmMqaYQSK8mXFgSZgizFl
qATcYIr/q2gL4wHRos3XdA==
=8BAF
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20190816' into staging

target-arm queue:
 * target/arm: generate a custom MIDR for -cpu max
 * hw/misc/zynq_slcr: refactor to use standard register definition
 * Set ENET_BD_BDU in I.MX FEC controller
 * target/arm: Fix routing of singlestep exceptions
 * refactor a32/t32 decoder handling of PC
 * minor optimisations/cleanups of some a32/t32 codegen
 * target/arm/cpu64: Ensure kvm really supports aarch64=off
 * target/arm/cpu: Ensure we can use the pmu with kvm
 * target/arm: Minor cleanups preparatory to KVM SVE support

# gpg: Signature made Fri 16 Aug 2019 14:15:55 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20190816: (29 commits)
  target/arm: Use tcg_gen_extrh_i64_i32 to extract the high word
  target/arm: Simplify SMMLA, SMMLAR, SMMLS, SMMLSR
  target/arm: Use tcg_gen_rotri_i32 for gen_swap_half
  target/arm: Use ror32 instead of open-coding the operation
  target/arm: Remove redundant shift tests
  target/arm: Use tcg_gen_deposit_i32 for PKHBT, PKHTB
  target/arm: Use tcg_gen_extract_i32 for shifter_out_im
  target/arm/kvm64: Move the get/put of fpsimd registers out
  target/arm/kvm64: Fix error returns
  target/arm/cpu: Use div-round-up to determine predicate register array size
  target/arm/helper: zcr: Add build bug next to value range assumption
  target/arm/cpu: Ensure we can use the pmu with kvm
  target/arm/cpu64: Ensure kvm really supports aarch64=off
  target/arm: Remove helper_double_saturate
  target/arm: Use unallocated_encoding for aarch32
  target/arm: Remove offset argument to gen_exception_bkpt_insn
  target/arm: Replace offset with pc in gen_exception_internal_insn
  target/arm: Replace offset with pc in gen_exception_insn
  target/arm: Replace s->pc with s->base.pc_next
  target/arm: Remove redundant s->pc & ~1
  ...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit afd7605393
@ -21,6 +21,7 @@
|
||||
#include "migration/vmstate.h"
|
||||
#include "qemu/log.h"
|
||||
#include "qemu/module.h"
|
||||
#include "hw/registerfields.h"
|
||||
|
||||
#ifndef ZYNQ_SLCR_ERR_DEBUG
|
||||
#define ZYNQ_SLCR_ERR_DEBUG 0
|
||||
@ -36,138 +37,135 @@
|
||||
#define XILINX_LOCK_KEY 0x767b
|
||||
#define XILINX_UNLOCK_KEY 0xdf0d
|
||||
|
||||
#define R_PSS_RST_CTRL_SOFT_RST 0x1
|
||||
REG32(SCL, 0x000)
|
||||
REG32(LOCK, 0x004)
|
||||
REG32(UNLOCK, 0x008)
|
||||
REG32(LOCKSTA, 0x00c)
|
||||
|
||||
enum {
|
||||
SCL = 0x000 / 4,
|
||||
LOCK,
|
||||
UNLOCK,
|
||||
LOCKSTA,
|
||||
REG32(ARM_PLL_CTRL, 0x100)
|
||||
REG32(DDR_PLL_CTRL, 0x104)
|
||||
REG32(IO_PLL_CTRL, 0x108)
|
||||
REG32(PLL_STATUS, 0x10c)
|
||||
REG32(ARM_PLL_CFG, 0x110)
|
||||
REG32(DDR_PLL_CFG, 0x114)
|
||||
REG32(IO_PLL_CFG, 0x118)
|
||||
|
||||
ARM_PLL_CTRL = 0x100 / 4,
|
||||
DDR_PLL_CTRL,
|
||||
IO_PLL_CTRL,
|
||||
PLL_STATUS,
|
||||
ARM_PLL_CFG,
|
||||
DDR_PLL_CFG,
|
||||
IO_PLL_CFG,
|
||||
|
||||
ARM_CLK_CTRL = 0x120 / 4,
|
||||
DDR_CLK_CTRL,
|
||||
DCI_CLK_CTRL,
|
||||
APER_CLK_CTRL,
|
||||
USB0_CLK_CTRL,
|
||||
USB1_CLK_CTRL,
|
||||
GEM0_RCLK_CTRL,
|
||||
GEM1_RCLK_CTRL,
|
||||
GEM0_CLK_CTRL,
|
||||
GEM1_CLK_CTRL,
|
||||
SMC_CLK_CTRL,
|
||||
LQSPI_CLK_CTRL,
|
||||
SDIO_CLK_CTRL,
|
||||
UART_CLK_CTRL,
|
||||
SPI_CLK_CTRL,
|
||||
CAN_CLK_CTRL,
|
||||
CAN_MIOCLK_CTRL,
|
||||
DBG_CLK_CTRL,
|
||||
PCAP_CLK_CTRL,
|
||||
TOPSW_CLK_CTRL,
|
||||
REG32(ARM_CLK_CTRL, 0x120)
|
||||
REG32(DDR_CLK_CTRL, 0x124)
|
||||
REG32(DCI_CLK_CTRL, 0x128)
|
||||
REG32(APER_CLK_CTRL, 0x12c)
|
||||
REG32(USB0_CLK_CTRL, 0x130)
|
||||
REG32(USB1_CLK_CTRL, 0x134)
|
||||
REG32(GEM0_RCLK_CTRL, 0x138)
|
||||
REG32(GEM1_RCLK_CTRL, 0x13c)
|
||||
REG32(GEM0_CLK_CTRL, 0x140)
|
||||
REG32(GEM1_CLK_CTRL, 0x144)
|
||||
REG32(SMC_CLK_CTRL, 0x148)
|
||||
REG32(LQSPI_CLK_CTRL, 0x14c)
|
||||
REG32(SDIO_CLK_CTRL, 0x150)
|
||||
REG32(UART_CLK_CTRL, 0x154)
|
||||
REG32(SPI_CLK_CTRL, 0x158)
|
||||
REG32(CAN_CLK_CTRL, 0x15c)
|
||||
REG32(CAN_MIOCLK_CTRL, 0x160)
|
||||
REG32(DBG_CLK_CTRL, 0x164)
|
||||
REG32(PCAP_CLK_CTRL, 0x168)
|
||||
REG32(TOPSW_CLK_CTRL, 0x16c)
|
||||
|
||||
#define FPGA_CTRL_REGS(n, start) \
|
||||
FPGA ## n ## _CLK_CTRL = (start) / 4, \
|
||||
FPGA ## n ## _THR_CTRL, \
|
||||
FPGA ## n ## _THR_CNT, \
|
||||
FPGA ## n ## _THR_STA,
|
||||
FPGA_CTRL_REGS(0, 0x170)
|
||||
FPGA_CTRL_REGS(1, 0x180)
|
||||
FPGA_CTRL_REGS(2, 0x190)
|
||||
FPGA_CTRL_REGS(3, 0x1a0)
|
||||
REG32(FPGA ## n ## _CLK_CTRL, (start)) \
|
||||
REG32(FPGA ## n ## _THR_CTRL, (start) + 0x4)\
|
||||
REG32(FPGA ## n ## _THR_CNT, (start) + 0x8)\
|
||||
REG32(FPGA ## n ## _THR_STA, (start) + 0xc)
|
||||
FPGA_CTRL_REGS(0, 0x170)
|
||||
FPGA_CTRL_REGS(1, 0x180)
|
||||
FPGA_CTRL_REGS(2, 0x190)
|
||||
FPGA_CTRL_REGS(3, 0x1a0)
|
||||
|
||||
BANDGAP_TRIP = 0x1b8 / 4,
|
||||
PLL_PREDIVISOR = 0x1c0 / 4,
|
||||
CLK_621_TRUE,
|
||||
REG32(BANDGAP_TRIP, 0x1b8)
|
||||
REG32(PLL_PREDIVISOR, 0x1c0)
|
||||
REG32(CLK_621_TRUE, 0x1c4)
|
||||
|
||||
PSS_RST_CTRL = 0x200 / 4,
|
||||
DDR_RST_CTRL,
|
||||
TOPSW_RESET_CTRL,
|
||||
DMAC_RST_CTRL,
|
||||
USB_RST_CTRL,
|
||||
GEM_RST_CTRL,
|
||||
SDIO_RST_CTRL,
|
||||
SPI_RST_CTRL,
|
||||
CAN_RST_CTRL,
|
||||
I2C_RST_CTRL,
|
||||
UART_RST_CTRL,
|
||||
GPIO_RST_CTRL,
|
||||
LQSPI_RST_CTRL,
|
||||
SMC_RST_CTRL,
|
||||
OCM_RST_CTRL,
|
||||
FPGA_RST_CTRL = 0x240 / 4,
|
||||
A9_CPU_RST_CTRL,
|
||||
REG32(PSS_RST_CTRL, 0x200)
|
||||
FIELD(PSS_RST_CTRL, SOFT_RST, 0, 1)
|
||||
REG32(DDR_RST_CTRL, 0x204)
|
||||
REG32(TOPSW_RESET_CTRL, 0x208)
|
||||
REG32(DMAC_RST_CTRL, 0x20c)
|
||||
REG32(USB_RST_CTRL, 0x210)
|
||||
REG32(GEM_RST_CTRL, 0x214)
|
||||
REG32(SDIO_RST_CTRL, 0x218)
|
||||
REG32(SPI_RST_CTRL, 0x21c)
|
||||
REG32(CAN_RST_CTRL, 0x220)
|
||||
REG32(I2C_RST_CTRL, 0x224)
|
||||
REG32(UART_RST_CTRL, 0x228)
|
||||
REG32(GPIO_RST_CTRL, 0x22c)
|
||||
REG32(LQSPI_RST_CTRL, 0x230)
|
||||
REG32(SMC_RST_CTRL, 0x234)
|
||||
REG32(OCM_RST_CTRL, 0x238)
|
||||
REG32(FPGA_RST_CTRL, 0x240)
|
||||
REG32(A9_CPU_RST_CTRL, 0x244)
|
||||
|
||||
RS_AWDT_CTRL = 0x24c / 4,
|
||||
RST_REASON,
|
||||
REG32(RS_AWDT_CTRL, 0x24c)
|
||||
REG32(RST_REASON, 0x250)
|
||||
|
||||
REBOOT_STATUS = 0x258 / 4,
|
||||
BOOT_MODE,
|
||||
REG32(REBOOT_STATUS, 0x258)
|
||||
REG32(BOOT_MODE, 0x25c)
|
||||
|
||||
APU_CTRL = 0x300 / 4,
|
||||
WDT_CLK_SEL,
|
||||
REG32(APU_CTRL, 0x300)
|
||||
REG32(WDT_CLK_SEL, 0x304)
|
||||
|
||||
TZ_DMA_NS = 0x440 / 4,
|
||||
TZ_DMA_IRQ_NS,
|
||||
TZ_DMA_PERIPH_NS,
|
||||
REG32(TZ_DMA_NS, 0x440)
|
||||
REG32(TZ_DMA_IRQ_NS, 0x444)
|
||||
REG32(TZ_DMA_PERIPH_NS, 0x448)
|
||||
|
||||
PSS_IDCODE = 0x530 / 4,
|
||||
REG32(PSS_IDCODE, 0x530)
|
||||
|
||||
DDR_URGENT = 0x600 / 4,
|
||||
DDR_CAL_START = 0x60c / 4,
|
||||
DDR_REF_START = 0x614 / 4,
|
||||
DDR_CMD_STA,
|
||||
DDR_URGENT_SEL,
|
||||
DDR_DFI_STATUS,
|
||||
REG32(DDR_URGENT, 0x600)
|
||||
REG32(DDR_CAL_START, 0x60c)
|
||||
REG32(DDR_REF_START, 0x614)
|
||||
REG32(DDR_CMD_STA, 0x618)
|
||||
REG32(DDR_URGENT_SEL, 0x61c)
|
||||
REG32(DDR_DFI_STATUS, 0x620)
|
||||
|
||||
MIO = 0x700 / 4,
|
||||
REG32(MIO, 0x700)
|
||||
#define MIO_LENGTH 54
|
||||
|
||||
MIO_LOOPBACK = 0x804 / 4,
|
||||
MIO_MST_TRI0,
|
||||
MIO_MST_TRI1,
|
||||
REG32(MIO_LOOPBACK, 0x804)
|
||||
REG32(MIO_MST_TRI0, 0x808)
|
||||
REG32(MIO_MST_TRI1, 0x80c)
|
||||
|
||||
SD0_WP_CD_SEL = 0x830 / 4,
|
||||
SD1_WP_CD_SEL,
|
||||
REG32(SD0_WP_CD_SEL, 0x830)
|
||||
REG32(SD1_WP_CD_SEL, 0x834)
|
||||
|
||||
LVL_SHFTR_EN = 0x900 / 4,
|
||||
OCM_CFG = 0x910 / 4,
|
||||
REG32(LVL_SHFTR_EN, 0x900)
|
||||
REG32(OCM_CFG, 0x910)
|
||||
|
||||
CPU_RAM = 0xa00 / 4,
|
||||
REG32(CPU_RAM, 0xa00)
|
||||
|
||||
IOU = 0xa30 / 4,
|
||||
REG32(IOU, 0xa30)
|
||||
|
||||
DMAC_RAM = 0xa50 / 4,
|
||||
REG32(DMAC_RAM, 0xa50)
|
||||
|
||||
AFI0 = 0xa60 / 4,
|
||||
AFI1 = AFI0 + 3,
|
||||
AFI2 = AFI1 + 3,
|
||||
AFI3 = AFI2 + 3,
|
||||
REG32(AFI0, 0xa60)
|
||||
REG32(AFI1, 0xa6c)
|
||||
REG32(AFI2, 0xa78)
|
||||
REG32(AFI3, 0xa84)
|
||||
#define AFI_LENGTH 3
|
||||
|
||||
OCM = 0xa90 / 4,
|
||||
REG32(OCM, 0xa90)
|
||||
|
||||
DEVCI_RAM = 0xaa0 / 4,
|
||||
REG32(DEVCI_RAM, 0xaa0)
|
||||
|
||||
CSG_RAM = 0xab0 / 4,
|
||||
REG32(CSG_RAM, 0xab0)
|
||||
|
||||
GPIOB_CTRL = 0xb00 / 4,
|
||||
GPIOB_CFG_CMOS18,
|
||||
GPIOB_CFG_CMOS25,
|
||||
GPIOB_CFG_CMOS33,
|
||||
GPIOB_CFG_HSTL = 0xb14 / 4,
|
||||
GPIOB_DRVR_BIAS_CTRL,
|
||||
REG32(GPIOB_CTRL, 0xb00)
|
||||
REG32(GPIOB_CFG_CMOS18, 0xb04)
|
||||
REG32(GPIOB_CFG_CMOS25, 0xb08)
|
||||
REG32(GPIOB_CFG_CMOS33, 0xb0c)
|
||||
REG32(GPIOB_CFG_HSTL, 0xb14)
|
||||
REG32(GPIOB_DRVR_BIAS_CTRL, 0xb18)
|
||||
|
||||
DDRIOB = 0xb40 / 4,
|
||||
REG32(DDRIOB, 0xb40)
|
||||
#define DDRIOB_LENGTH 14
|
||||
};
|
||||
|
||||
#define ZYNQ_SLCR_MMIO_SIZE 0x1000
|
||||
#define ZYNQ_SLCR_NUM_REGS (ZYNQ_SLCR_MMIO_SIZE / 4)
|
||||
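The refactor above replaces the device's hand-rolled enum of word indices with the REG32()/FIELD() macros from "hw/registerfields.h". As a reminder of what those macros provide: REG32(NAME, addr) defines A_NAME (the byte offset) and R_NAME (the word index, addr / 4), and FIELD(NAME, FLD, shift, len) defines the shift/length/mask constants consumed by FIELD_EX32()/FIELD_DP32(). A minimal sketch; the CTRL register and its fields below are illustrative, not part of the real SLCR map:

    #include "qemu/osdep.h"            /* in QEMU sources, always included first */
    #include "hw/registerfields.h"

    /* Illustrative register, not one of the SLCR registers. */
    REG32(CTRL, 0x100)
    FIELD(CTRL, ENABLE, 0, 1)          /* bit 0 */
    FIELD(CTRL, PRESCALE, 8, 4)        /* bits 11:8 */

    static void ctrl_write_sketch(uint32_t *regs, uint64_t val)
    {
        /* Extract and deposit fields without open-coded masks and shifts. */
        if (FIELD_EX32(val, CTRL, ENABLE)) {
            regs[R_CTRL] = FIELD_DP32(regs[R_CTRL], CTRL, PRESCALE, 3);
        }
    }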
@ -190,150 +188,152 @@ static void zynq_slcr_reset(DeviceState *d)
|
||||
|
||||
DB_PRINT("RESET\n");
|
||||
|
||||
s->regs[LOCKSTA] = 1;
|
||||
s->regs[R_LOCKSTA] = 1;
|
||||
/* 0x100 - 0x11C */
|
||||
s->regs[ARM_PLL_CTRL] = 0x0001A008;
|
||||
s->regs[DDR_PLL_CTRL] = 0x0001A008;
|
||||
s->regs[IO_PLL_CTRL] = 0x0001A008;
|
||||
s->regs[PLL_STATUS] = 0x0000003F;
|
||||
s->regs[ARM_PLL_CFG] = 0x00014000;
|
||||
s->regs[DDR_PLL_CFG] = 0x00014000;
|
||||
s->regs[IO_PLL_CFG] = 0x00014000;
|
||||
s->regs[R_ARM_PLL_CTRL] = 0x0001A008;
|
||||
s->regs[R_DDR_PLL_CTRL] = 0x0001A008;
|
||||
s->regs[R_IO_PLL_CTRL] = 0x0001A008;
|
||||
s->regs[R_PLL_STATUS] = 0x0000003F;
|
||||
s->regs[R_ARM_PLL_CFG] = 0x00014000;
|
||||
s->regs[R_DDR_PLL_CFG] = 0x00014000;
|
||||
s->regs[R_IO_PLL_CFG] = 0x00014000;
|
||||
|
||||
/* 0x120 - 0x16C */
|
||||
s->regs[ARM_CLK_CTRL] = 0x1F000400;
|
||||
s->regs[DDR_CLK_CTRL] = 0x18400003;
|
||||
s->regs[DCI_CLK_CTRL] = 0x01E03201;
|
||||
s->regs[APER_CLK_CTRL] = 0x01FFCCCD;
|
||||
s->regs[USB0_CLK_CTRL] = s->regs[USB1_CLK_CTRL] = 0x00101941;
|
||||
s->regs[GEM0_RCLK_CTRL] = s->regs[GEM1_RCLK_CTRL] = 0x00000001;
|
||||
s->regs[GEM0_CLK_CTRL] = s->regs[GEM1_CLK_CTRL] = 0x00003C01;
|
||||
s->regs[SMC_CLK_CTRL] = 0x00003C01;
|
||||
s->regs[LQSPI_CLK_CTRL] = 0x00002821;
|
||||
s->regs[SDIO_CLK_CTRL] = 0x00001E03;
|
||||
s->regs[UART_CLK_CTRL] = 0x00003F03;
|
||||
s->regs[SPI_CLK_CTRL] = 0x00003F03;
|
||||
s->regs[CAN_CLK_CTRL] = 0x00501903;
|
||||
s->regs[DBG_CLK_CTRL] = 0x00000F03;
|
||||
s->regs[PCAP_CLK_CTRL] = 0x00000F01;
|
||||
s->regs[R_ARM_CLK_CTRL] = 0x1F000400;
|
||||
s->regs[R_DDR_CLK_CTRL] = 0x18400003;
|
||||
s->regs[R_DCI_CLK_CTRL] = 0x01E03201;
|
||||
s->regs[R_APER_CLK_CTRL] = 0x01FFCCCD;
|
||||
s->regs[R_USB0_CLK_CTRL] = s->regs[R_USB1_CLK_CTRL] = 0x00101941;
|
||||
s->regs[R_GEM0_RCLK_CTRL] = s->regs[R_GEM1_RCLK_CTRL] = 0x00000001;
|
||||
s->regs[R_GEM0_CLK_CTRL] = s->regs[R_GEM1_CLK_CTRL] = 0x00003C01;
|
||||
s->regs[R_SMC_CLK_CTRL] = 0x00003C01;
|
||||
s->regs[R_LQSPI_CLK_CTRL] = 0x00002821;
|
||||
s->regs[R_SDIO_CLK_CTRL] = 0x00001E03;
|
||||
s->regs[R_UART_CLK_CTRL] = 0x00003F03;
|
||||
s->regs[R_SPI_CLK_CTRL] = 0x00003F03;
|
||||
s->regs[R_CAN_CLK_CTRL] = 0x00501903;
|
||||
s->regs[R_DBG_CLK_CTRL] = 0x00000F03;
|
||||
s->regs[R_PCAP_CLK_CTRL] = 0x00000F01;
|
||||
|
||||
/* 0x170 - 0x1AC */
|
||||
s->regs[FPGA0_CLK_CTRL] = s->regs[FPGA1_CLK_CTRL] = s->regs[FPGA2_CLK_CTRL]
|
||||
= s->regs[FPGA3_CLK_CTRL] = 0x00101800;
|
||||
s->regs[FPGA0_THR_STA] = s->regs[FPGA1_THR_STA] = s->regs[FPGA2_THR_STA]
|
||||
= s->regs[FPGA3_THR_STA] = 0x00010000;
|
||||
s->regs[R_FPGA0_CLK_CTRL] = s->regs[R_FPGA1_CLK_CTRL]
|
||||
= s->regs[R_FPGA2_CLK_CTRL]
|
||||
= s->regs[R_FPGA3_CLK_CTRL] = 0x00101800;
|
||||
s->regs[R_FPGA0_THR_STA] = s->regs[R_FPGA1_THR_STA]
|
||||
= s->regs[R_FPGA2_THR_STA]
|
||||
= s->regs[R_FPGA3_THR_STA] = 0x00010000;
|
||||
|
||||
/* 0x1B0 - 0x1D8 */
|
||||
s->regs[BANDGAP_TRIP] = 0x0000001F;
|
||||
s->regs[PLL_PREDIVISOR] = 0x00000001;
|
||||
s->regs[CLK_621_TRUE] = 0x00000001;
|
||||
s->regs[R_BANDGAP_TRIP] = 0x0000001F;
|
||||
s->regs[R_PLL_PREDIVISOR] = 0x00000001;
|
||||
s->regs[R_CLK_621_TRUE] = 0x00000001;
|
||||
|
||||
/* 0x200 - 0x25C */
|
||||
s->regs[FPGA_RST_CTRL] = 0x01F33F0F;
|
||||
s->regs[RST_REASON] = 0x00000040;
|
||||
s->regs[R_FPGA_RST_CTRL] = 0x01F33F0F;
|
||||
s->regs[R_RST_REASON] = 0x00000040;
|
||||
|
||||
s->regs[BOOT_MODE] = 0x00000001;
|
||||
s->regs[R_BOOT_MODE] = 0x00000001;
|
||||
|
||||
/* 0x700 - 0x7D4 */
|
||||
for (i = 0; i < 54; i++) {
|
||||
s->regs[MIO + i] = 0x00001601;
|
||||
s->regs[R_MIO + i] = 0x00001601;
|
||||
}
|
||||
for (i = 2; i <= 8; i++) {
|
||||
s->regs[MIO + i] = 0x00000601;
|
||||
s->regs[R_MIO + i] = 0x00000601;
|
||||
}
|
||||
|
||||
s->regs[MIO_MST_TRI0] = s->regs[MIO_MST_TRI1] = 0xFFFFFFFF;
|
||||
s->regs[R_MIO_MST_TRI0] = s->regs[R_MIO_MST_TRI1] = 0xFFFFFFFF;
|
||||
|
||||
s->regs[CPU_RAM + 0] = s->regs[CPU_RAM + 1] = s->regs[CPU_RAM + 3]
|
||||
= s->regs[CPU_RAM + 4] = s->regs[CPU_RAM + 7]
|
||||
= 0x00010101;
|
||||
s->regs[CPU_RAM + 2] = s->regs[CPU_RAM + 5] = 0x01010101;
|
||||
s->regs[CPU_RAM + 6] = 0x00000001;
|
||||
s->regs[R_CPU_RAM + 0] = s->regs[R_CPU_RAM + 1] = s->regs[R_CPU_RAM + 3]
|
||||
= s->regs[R_CPU_RAM + 4] = s->regs[R_CPU_RAM + 7]
|
||||
= 0x00010101;
|
||||
s->regs[R_CPU_RAM + 2] = s->regs[R_CPU_RAM + 5] = 0x01010101;
|
||||
s->regs[R_CPU_RAM + 6] = 0x00000001;
|
||||
|
||||
s->regs[IOU + 0] = s->regs[IOU + 1] = s->regs[IOU + 2] = s->regs[IOU + 3]
|
||||
= 0x09090909;
|
||||
s->regs[IOU + 4] = s->regs[IOU + 5] = 0x00090909;
|
||||
s->regs[IOU + 6] = 0x00000909;
|
||||
s->regs[R_IOU + 0] = s->regs[R_IOU + 1] = s->regs[R_IOU + 2]
|
||||
= s->regs[R_IOU + 3] = 0x09090909;
|
||||
s->regs[R_IOU + 4] = s->regs[R_IOU + 5] = 0x00090909;
|
||||
s->regs[R_IOU + 6] = 0x00000909;
|
||||
|
||||
s->regs[DMAC_RAM] = 0x00000009;
|
||||
s->regs[R_DMAC_RAM] = 0x00000009;
|
||||
|
||||
s->regs[AFI0 + 0] = s->regs[AFI0 + 1] = 0x09090909;
|
||||
s->regs[AFI1 + 0] = s->regs[AFI1 + 1] = 0x09090909;
|
||||
s->regs[AFI2 + 0] = s->regs[AFI2 + 1] = 0x09090909;
|
||||
s->regs[AFI3 + 0] = s->regs[AFI3 + 1] = 0x09090909;
|
||||
s->regs[AFI0 + 2] = s->regs[AFI1 + 2] = s->regs[AFI2 + 2]
|
||||
= s->regs[AFI3 + 2] = 0x00000909;
|
||||
s->regs[R_AFI0 + 0] = s->regs[R_AFI0 + 1] = 0x09090909;
|
||||
s->regs[R_AFI1 + 0] = s->regs[R_AFI1 + 1] = 0x09090909;
|
||||
s->regs[R_AFI2 + 0] = s->regs[R_AFI2 + 1] = 0x09090909;
|
||||
s->regs[R_AFI3 + 0] = s->regs[R_AFI3 + 1] = 0x09090909;
|
||||
s->regs[R_AFI0 + 2] = s->regs[R_AFI1 + 2] = s->regs[R_AFI2 + 2]
|
||||
= s->regs[R_AFI3 + 2] = 0x00000909;
|
||||
|
||||
s->regs[OCM + 0] = 0x01010101;
|
||||
s->regs[OCM + 1] = s->regs[OCM + 2] = 0x09090909;
|
||||
s->regs[R_OCM + 0] = 0x01010101;
|
||||
s->regs[R_OCM + 1] = s->regs[R_OCM + 2] = 0x09090909;
|
||||
|
||||
s->regs[DEVCI_RAM] = 0x00000909;
|
||||
s->regs[CSG_RAM] = 0x00000001;
|
||||
s->regs[R_DEVCI_RAM] = 0x00000909;
|
||||
s->regs[R_CSG_RAM] = 0x00000001;
|
||||
|
||||
s->regs[DDRIOB + 0] = s->regs[DDRIOB + 1] = s->regs[DDRIOB + 2]
|
||||
= s->regs[DDRIOB + 3] = 0x00000e00;
|
||||
s->regs[DDRIOB + 4] = s->regs[DDRIOB + 5] = s->regs[DDRIOB + 6]
|
||||
= 0x00000e00;
|
||||
s->regs[DDRIOB + 12] = 0x00000021;
|
||||
s->regs[R_DDRIOB + 0] = s->regs[R_DDRIOB + 1] = s->regs[R_DDRIOB + 2]
|
||||
= s->regs[R_DDRIOB + 3] = 0x00000e00;
|
||||
s->regs[R_DDRIOB + 4] = s->regs[R_DDRIOB + 5] = s->regs[R_DDRIOB + 6]
|
||||
= 0x00000e00;
|
||||
s->regs[R_DDRIOB + 12] = 0x00000021;
|
||||
}
|
||||
|
||||
|
||||
static bool zynq_slcr_check_offset(hwaddr offset, bool rnw)
|
||||
{
|
||||
switch (offset) {
|
||||
case LOCK:
|
||||
case UNLOCK:
|
||||
case DDR_CAL_START:
|
||||
case DDR_REF_START:
|
||||
case R_LOCK:
|
||||
case R_UNLOCK:
|
||||
case R_DDR_CAL_START:
|
||||
case R_DDR_REF_START:
|
||||
return !rnw; /* Write only */
|
||||
case LOCKSTA:
|
||||
case FPGA0_THR_STA:
|
||||
case FPGA1_THR_STA:
|
||||
case FPGA2_THR_STA:
|
||||
case FPGA3_THR_STA:
|
||||
case BOOT_MODE:
|
||||
case PSS_IDCODE:
|
||||
case DDR_CMD_STA:
|
||||
case DDR_DFI_STATUS:
|
||||
case PLL_STATUS:
|
||||
case R_LOCKSTA:
|
||||
case R_FPGA0_THR_STA:
|
||||
case R_FPGA1_THR_STA:
|
||||
case R_FPGA2_THR_STA:
|
||||
case R_FPGA3_THR_STA:
|
||||
case R_BOOT_MODE:
|
||||
case R_PSS_IDCODE:
|
||||
case R_DDR_CMD_STA:
|
||||
case R_DDR_DFI_STATUS:
|
||||
case R_PLL_STATUS:
|
||||
return rnw;/* read only */
|
||||
case SCL:
|
||||
case ARM_PLL_CTRL ... IO_PLL_CTRL:
|
||||
case ARM_PLL_CFG ... IO_PLL_CFG:
|
||||
case ARM_CLK_CTRL ... TOPSW_CLK_CTRL:
|
||||
case FPGA0_CLK_CTRL ... FPGA0_THR_CNT:
|
||||
case FPGA1_CLK_CTRL ... FPGA1_THR_CNT:
|
||||
case FPGA2_CLK_CTRL ... FPGA2_THR_CNT:
|
||||
case FPGA3_CLK_CTRL ... FPGA3_THR_CNT:
|
||||
case BANDGAP_TRIP:
|
||||
case PLL_PREDIVISOR:
|
||||
case CLK_621_TRUE:
|
||||
case PSS_RST_CTRL ... A9_CPU_RST_CTRL:
|
||||
case RS_AWDT_CTRL:
|
||||
case RST_REASON:
|
||||
case REBOOT_STATUS:
|
||||
case APU_CTRL:
|
||||
case WDT_CLK_SEL:
|
||||
case TZ_DMA_NS ... TZ_DMA_PERIPH_NS:
|
||||
case DDR_URGENT:
|
||||
case DDR_URGENT_SEL:
|
||||
case MIO ... MIO + MIO_LENGTH - 1:
|
||||
case MIO_LOOPBACK ... MIO_MST_TRI1:
|
||||
case SD0_WP_CD_SEL:
|
||||
case SD1_WP_CD_SEL:
|
||||
case LVL_SHFTR_EN:
|
||||
case OCM_CFG:
|
||||
case CPU_RAM:
|
||||
case IOU:
|
||||
case DMAC_RAM:
|
||||
case AFI0 ... AFI3 + AFI_LENGTH - 1:
|
||||
case OCM:
|
||||
case DEVCI_RAM:
|
||||
case CSG_RAM:
|
||||
case GPIOB_CTRL ... GPIOB_CFG_CMOS33:
|
||||
case GPIOB_CFG_HSTL:
|
||||
case GPIOB_DRVR_BIAS_CTRL:
|
||||
case DDRIOB ... DDRIOB + DDRIOB_LENGTH - 1:
|
||||
case R_SCL:
|
||||
case R_ARM_PLL_CTRL ... R_IO_PLL_CTRL:
|
||||
case R_ARM_PLL_CFG ... R_IO_PLL_CFG:
|
||||
case R_ARM_CLK_CTRL ... R_TOPSW_CLK_CTRL:
|
||||
case R_FPGA0_CLK_CTRL ... R_FPGA0_THR_CNT:
|
||||
case R_FPGA1_CLK_CTRL ... R_FPGA1_THR_CNT:
|
||||
case R_FPGA2_CLK_CTRL ... R_FPGA2_THR_CNT:
|
||||
case R_FPGA3_CLK_CTRL ... R_FPGA3_THR_CNT:
|
||||
case R_BANDGAP_TRIP:
|
||||
case R_PLL_PREDIVISOR:
|
||||
case R_CLK_621_TRUE:
|
||||
case R_PSS_RST_CTRL ... R_A9_CPU_RST_CTRL:
|
||||
case R_RS_AWDT_CTRL:
|
||||
case R_RST_REASON:
|
||||
case R_REBOOT_STATUS:
|
||||
case R_APU_CTRL:
|
||||
case R_WDT_CLK_SEL:
|
||||
case R_TZ_DMA_NS ... R_TZ_DMA_PERIPH_NS:
|
||||
case R_DDR_URGENT:
|
||||
case R_DDR_URGENT_SEL:
|
||||
case R_MIO ... R_MIO + MIO_LENGTH - 1:
|
||||
case R_MIO_LOOPBACK ... R_MIO_MST_TRI1:
|
||||
case R_SD0_WP_CD_SEL:
|
||||
case R_SD1_WP_CD_SEL:
|
||||
case R_LVL_SHFTR_EN:
|
||||
case R_OCM_CFG:
|
||||
case R_CPU_RAM:
|
||||
case R_IOU:
|
||||
case R_DMAC_RAM:
|
||||
case R_AFI0 ... R_AFI3 + AFI_LENGTH - 1:
|
||||
case R_OCM:
|
||||
case R_DEVCI_RAM:
|
||||
case R_CSG_RAM:
|
||||
case R_GPIOB_CTRL ... R_GPIOB_CFG_CMOS33:
|
||||
case R_GPIOB_CFG_HSTL:
|
||||
case R_GPIOB_DRVR_BIAS_CTRL:
|
||||
case R_DDRIOB ... R_DDRIOB + DDRIOB_LENGTH - 1:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@ -371,24 +371,24 @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
|
||||
}
|
||||
|
||||
switch (offset) {
|
||||
case SCL:
|
||||
s->regs[SCL] = val & 0x1;
|
||||
case R_SCL:
|
||||
s->regs[R_SCL] = val & 0x1;
|
||||
return;
|
||||
case LOCK:
|
||||
case R_LOCK:
|
||||
if ((val & 0xFFFF) == XILINX_LOCK_KEY) {
|
||||
DB_PRINT("XILINX LOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
|
||||
(unsigned)val & 0xFFFF);
|
||||
s->regs[LOCKSTA] = 1;
|
||||
s->regs[R_LOCKSTA] = 1;
|
||||
} else {
|
||||
DB_PRINT("WRONG XILINX LOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
|
||||
(int)offset, (unsigned)val & 0xFFFF);
|
||||
}
|
||||
return;
|
||||
case UNLOCK:
|
||||
case R_UNLOCK:
|
||||
if ((val & 0xFFFF) == XILINX_UNLOCK_KEY) {
|
||||
DB_PRINT("XILINX UNLOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
|
||||
(unsigned)val & 0xFFFF);
|
||||
s->regs[LOCKSTA] = 0;
|
||||
s->regs[R_LOCKSTA] = 0;
|
||||
} else {
|
||||
DB_PRINT("WRONG XILINX UNLOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
|
||||
(int)offset, (unsigned)val & 0xFFFF);
|
||||
@ -396,7 +396,7 @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
|
||||
return;
|
||||
}
|
||||
|
||||
if (s->regs[LOCKSTA]) {
|
||||
if (s->regs[R_LOCKSTA]) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
"SCLR registers are locked. Unlock them first\n");
|
||||
return;
|
||||
@ -404,8 +404,8 @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
|
||||
s->regs[offset] = val;
|
||||
|
||||
switch (offset) {
|
||||
case PSS_RST_CTRL:
|
||||
if (val & R_PSS_RST_CTRL_SOFT_RST) {
|
||||
case R_PSS_RST_CTRL:
|
||||
if (FIELD_EX32(val, PSS_RST_CTRL, SOFT_RST)) {
|
||||
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
|
||||
}
|
||||
break;
|
||||
|
@@ -593,6 +593,8 @@ static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
if (bd.option & ENET_BD_TX_INT) {
s->regs[ENET_EIR] |= int_txf;
}
/* Indicate that we've updated the last buffer descriptor. */
bd.last_buffer = ENET_BD_BDU;
}
if (bd.option & ENET_BD_TX_INT) {
s->regs[ENET_EIR] |= int_txb;
@@ -1242,6 +1244,8 @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
/* Last buffer in frame.  */
bd.flags |= flags | ENET_BD_L;
FEC_PRINTF("rx frame flags %04x\n", bd.flags);
/* Indicate that we've updated the last buffer descriptor. */
bd.last_buffer = ENET_BD_BDU;
if (bd.option & ENET_BD_RX_INT) {
s->regs[ENET_EIR] |= ENET_INT_RXF;
}
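The two hunks above make the emulated FEC/ENET controller set the BDU ("buffer descriptor updated") bit when it finishes with the last descriptor of a transmitted or received frame, which is what guest drivers poll to know the descriptor has been handed back. A rough sketch of the idea, using a hypothetical descriptor type and an assumed bit position rather than the real imx_fec.c definitions:

    #include <stdint.h>

    #define ENET_BD_BDU (1u << 31)        /* assumed bit position, for illustration */

    typedef struct {
        uint16_t flags;
        uint32_t last_buffer;             /* enhanced-descriptor status word holding BDU */
    } EnetBufDescSketch;                  /* hypothetical, not the imx_fec.c layout */

    static void complete_descriptor(EnetBufDescSketch *bd)
    {
        /* Indicate that the device has updated (is done with) this descriptor;
         * the descriptor is then written back to guest memory. */
        bd->last_buffer = ENET_BD_BDU;
    }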
@@ -994,10 +994,6 @@ static Property arm_cpu_has_el3_property =
static Property arm_cpu_cfgend_property =
DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

/* use property name "pmu" to match other archs and virt tools */
static Property arm_cpu_has_pmu_property =
DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);

static Property arm_cpu_has_vfp_property =
DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);

@@ -1020,6 +1016,29 @@ static Property arm_cpu_pmsav7_dregion_property =
pmsav7_dregion,
qdev_prop_uint32, uint32_t);

static bool arm_get_pmu(Object *obj, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);

return cpu->has_pmu;
}

static void arm_set_pmu(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);

if (value) {
if (kvm_enabled() && !kvm_arm_pmu_supported(CPU(cpu))) {
error_setg(errp, "'pmu' feature not supported by KVM on this host");
return;
}
set_feature(&cpu->env, ARM_FEATURE_PMU);
} else {
unset_feature(&cpu->env, ARM_FEATURE_PMU);
}
cpu->has_pmu = value;
}

static void arm_get_init_svtor(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@@ -1094,7 +1113,8 @@ void arm_cpu_post_init(Object *obj)
}

if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
cpu->has_pmu = true;
object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu,
&error_abort);
}

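With "pmu" now an object property backed by arm_get_pmu()/arm_set_pmu(), the KVM capability check runs whenever the property is set rather than the flag being a plain struct field. A hedged sketch of how other code could drive it; the helper name is made up, and the same property is reachable from the command line as e.g. "-cpu host,pmu=off":

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qom/object.h"

    /* Hypothetical helper: request a guest PMU on an ARMCPU object before
     * realize.  Under KVM, arm_set_pmu() rejects this if the host kernel
     * has no vPMU support and reports the failure through errp.
     * (Argument order follows object_property_set_bool() as of this series.)
     */
    static void request_guest_pmu(Object *cpuobj, Error **errp)
    {
        object_property_set_bool(cpuobj, true, "pmu", errp);
    }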
@@ -195,7 +195,7 @@ typedef struct ARMVectorReg {
#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all.  */
typedef struct ARMPredicateReg {
uint64_t p[2 * ARM_MAX_VQ / 8] QEMU_ALIGNED(16);
uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
} ARMPredicateReg;

/* In AArch32 mode, PAC keys do not exist at all.  */
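An SVE predicate register holds one bit per vector byte, so its size in bytes is 2 * VQ and the number of uint64_t words needed is that divided by 8, rounded up. Plain division happens to work while ARM_MAX_VQ is 16, but would silently under-allocate for a maximum that is not a multiple of 4, which is why the array bound switches to DIV_ROUND_UP. A quick standalone illustration; DIV_ROUND_UP is spelled here the way QEMU's osdep.h defines it:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* ARM_MAX_VQ = 16: both forms give 4 words. */
        printf("%d vs %d\n", 2 * 16 / 8, DIV_ROUND_UP(2 * 16, 8));   /* 4 vs 4 */

        /* Hypothetical ARM_MAX_VQ = 3: plain division gives 0 words,
         * but 6 predicate bytes still need 1 word. */
        printf("%d vs %d\n", 2 * 3 / 8, DIV_ROUND_UP(2 * 3, 8));     /* 0 vs 1 */
        return 0;
    }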
@@ -1611,6 +1611,12 @@ FIELD(V7M_FPCCR, ASPEN, 31, 1)
/*
* System register ID fields.
*/
FIELD(MIDR_EL1, REVISION, 0, 4)
FIELD(MIDR_EL1, PARTNUM, 4, 12)
FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
FIELD(MIDR_EL1, VARIANT, 20, 4)
FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)

FIELD(ID_ISAR0, SWAP, 0, 4)
FIELD(ID_ISAR0, BITCOUNT, 4, 4)
FIELD(ID_ISAR0, BITFIELD, 8, 4)
@@ -3142,6 +3148,11 @@ FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)
/* Target EL if we take a floating-point-disabled exception */
FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
/*
* For A-profile only, target EL for debug exceptions.
* Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
*/
FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)

/* Bit usage when in AArch32 state: */
FIELD(TBFLAG_A32, THUMB, 0, 1)
@@ -295,6 +295,25 @@ static void aarch64_max_initfn(Object *obj)
uint32_t u;
aarch64_a57_initfn(obj);

/*
* Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
* one and try to apply errata workarounds or use impdef features we
* don't provide.
* An IMPLEMENTER field of 0 means "reserved for software use";
* ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
* to see which features are present";
* the VARIANT, PARTNUM and REVISION fields are all implementation
* defined and we choose to define PARTNUM just in case guest
* code needs to distinguish this QEMU CPU from other software
* implementations, though this shouldn't be needed.
*/
t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
cpu->midr = t;

t = cpu->isar.id_aa64isar0;
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
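The FIELD_DP64() calls above simply deposit each MIDR_EL1 field at the bit positions declared in cpu.h (REVISION [3:0], PARTNUM [15:4], ARCHITECTURE [19:16], VARIANT [23:20], IMPLEMENTER [31:24]). For reference, the same value computed with plain shifts; this is just a cross-check, not QEMU code:

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        uint64_t midr = 0;

        midr |= (uint64_t)0x00 << 24;   /* IMPLEMENTER: reserved for software use */
        midr |= (uint64_t)0x0  << 20;   /* VARIANT */
        midr |= (uint64_t)0xf  << 16;   /* ARCHITECTURE: "check the ID registers" */
        midr |= (uint64_t)'Q'  << 4;    /* PARTNUM = 0x51 */
        midr |= (uint64_t)0x0  << 0;    /* REVISION */

        /* Prints 0x000f0510 */
        printf("MIDR_EL1 for -cpu max: 0x%08" PRIx64 "\n", midr);
        return 0;
    }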
@@ -406,13 +425,13 @@ static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
* restriction allows us to avoid fixing up functionality that assumes a
* uniform execution state like do_interrupt.
*/
if (!kvm_enabled()) {
error_setg(errp, "'aarch64' feature cannot be disabled "
"unless KVM is enabled");
return;
}

if (value == false) {
if (!kvm_enabled() || !kvm_arm_aarch32_supported(CPU(cpu))) {
error_setg(errp, "'aarch64' feature cannot be disabled "
"unless KVM is enabled and 32-bit EL1 "
"is supported");
return;
}
unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
} else {
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
@@ -5302,6 +5302,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
int new_len;

/* Bits other than [3:0] are RAZ/WI.  */
QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
raw_write(env, ri, value & 0xf);

/*
@@ -11172,6 +11173,12 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
}

if (!arm_feature(env, ARM_FEATURE_M)) {
int target_el = arm_debug_target_el(env);

flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, target_el);
}

*pflags = flags;
*cs_base = 0;
}
@@ -6,7 +6,6 @@ DEF_HELPER_3(add_saturate, i32, env, i32, i32)
DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
DEF_HELPER_2(double_saturate, i32, env, s32)
DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32)
DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
@@ -164,6 +164,13 @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
env->features = arm_host_cpu_features.features;
}

bool kvm_arm_pmu_supported(CPUState *cpu)
{
KVMState *s = KVM_STATE(current_machine->accelerator);

return kvm_check_extension(s, KVM_CAP_ARM_PMU_V3);
}

int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
{
KVMState *s = KVM_STATE(ms->accelerator);
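kvm_check_extension() is QEMU's wrapper around the KVM_CHECK_EXTENSION ioctl, so kvm_arm_pmu_supported() boils down to asking the kernel whether KVM_CAP_ARM_PMU_V3 is available. A QEMU-independent sketch of the same probe against /dev/kvm (an Arm Linux host with KVM headers is assumed; error handling is kept minimal):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }
        /* KVM_CHECK_EXTENSION returns 0 if the capability is absent, > 0 if present. */
        int has_pmu = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PMU_V3);
        printf("KVM_CAP_ARM_PMU_V3: %s\n", has_pmu > 0 ? "available" : "not available");
        return 0;
    }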
@ -24,7 +24,9 @@
|
||||
#include "qemu/main-loop.h"
|
||||
#include "exec/gdbstub.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sysemu/kvm_int.h"
|
||||
#include "kvm_arm.h"
|
||||
#include "hw/boards.h"
|
||||
#include "internals.h"
|
||||
|
||||
static bool have_guest_debug;
|
||||
@ -593,6 +595,13 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool kvm_arm_aarch32_supported(CPUState *cpu)
|
||||
{
|
||||
KVMState *s = KVM_STATE(current_machine->accelerator);
|
||||
|
||||
return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
|
||||
}
|
||||
|
||||
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
|
||||
|
||||
int kvm_arch_init_vcpu(CPUState *cs)
|
||||
@ -710,13 +719,53 @@ int kvm_arm_cpreg_level(uint64_t regidx)
|
||||
#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
|
||||
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
|
||||
|
||||
static int kvm_arch_put_fpsimd(CPUState *cs)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
struct kvm_one_reg reg;
|
||||
uint32_t fpr;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *q = aa64_vfp_qreg(env, i);
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
uint64_t fp_val[2] = { q[1], q[0] };
|
||||
reg.addr = (uintptr_t)fp_val;
|
||||
#else
|
||||
reg.addr = (uintptr_t)q;
|
||||
#endif
|
||||
reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
fpr = vfp_get_fpsr(env);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
fpr = vfp_get_fpcr(env);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
{
|
||||
struct kvm_one_reg reg;
|
||||
uint32_t fpr;
|
||||
uint64_t val;
|
||||
int i;
|
||||
int ret;
|
||||
int i, ret;
|
||||
unsigned int el;
|
||||
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
@ -806,33 +855,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
}
|
||||
}
|
||||
|
||||
/* Advanced SIMD and FP registers. */
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *q = aa64_vfp_qreg(env, i);
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
uint64_t fp_val[2] = { q[1], q[0] };
|
||||
reg.addr = (uintptr_t)fp_val;
|
||||
#else
|
||||
reg.addr = (uintptr_t)q;
|
||||
#endif
|
||||
reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
fpr = vfp_get_fpsr(env);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
fpr = vfp_get_fpcr(env);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
|
||||
ret = kvm_arch_put_fpsimd(cs);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -845,7 +868,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
write_cpustate_to_list(cpu, true);
|
||||
|
||||
if (!write_list_to_kvmstate(cpu, level)) {
|
||||
return EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
kvm_arm_sync_mpstate_to_kvm(cpu);
|
||||
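The EINVAL to -EINVAL fixes above matter because QEMU follows the kernel convention of returning negative errno values on failure: callers of kvm_arch_put_registers()/kvm_arch_get_registers() check for ret < 0, so a positive EINVAL would be treated as success. A tiny sketch of the convention being relied on; the function names are illustrative:

    #include <errno.h>

    static int put_registers_sketch(int fd)
    {
        if (fd < 0) {
            return -EINVAL;   /* not EINVAL: callers only test for ret < 0 */
        }
        return 0;
    }

    static int caller_sketch(int fd)
    {
        int ret = put_registers_sketch(fd);
        if (ret < 0) {        /* a positive EINVAL would slip past this check */
            return ret;
        }
        return 0;
    }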
@ -853,14 +876,54 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int kvm_arch_get_fpsimd(CPUState *cs)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
struct kvm_one_reg reg;
|
||||
uint32_t fpr;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *q = aa64_vfp_qreg(env, i);
|
||||
reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
|
||||
reg.addr = (uintptr_t)q;
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
} else {
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
uint64_t t;
|
||||
t = q[0], q[0] = q[1], q[1] = t;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
vfp_set_fpsr(env, fpr);
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
vfp_set_fpcr(env, fpr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arch_get_registers(CPUState *cs)
|
||||
{
|
||||
struct kvm_one_reg reg;
|
||||
uint64_t val;
|
||||
uint32_t fpr;
|
||||
unsigned int el;
|
||||
int i;
|
||||
int ret;
|
||||
int i, ret;
|
||||
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
@ -949,36 +1012,10 @@ int kvm_arch_get_registers(CPUState *cs)
|
||||
env->spsr = env->banked_spsr[i];
|
||||
}
|
||||
|
||||
/* Advanced SIMD and FP registers */
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *q = aa64_vfp_qreg(env, i);
|
||||
reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
|
||||
reg.addr = (uintptr_t)q;
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
} else {
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
uint64_t t;
|
||||
t = q[0], q[0] = q[1], q[1] = t;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
reg.addr = (uintptr_t)(&fpr);
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
ret = kvm_arch_get_fpsimd(cs);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
vfp_set_fpsr(env, fpr);
|
||||
|
||||
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
|
||||
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
vfp_set_fpcr(env, fpr);
|
||||
|
||||
ret = kvm_get_vcpu_events(cpu);
|
||||
if (ret) {
|
||||
@ -986,7 +1023,7 @@ int kvm_arch_get_registers(CPUState *cs)
|
||||
}
|
||||
|
||||
if (!write_kvmstate_to_list(cpu)) {
|
||||
return EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Note that it's OK to have registers which aren't in CPUState,
|
||||
* so we can ignore a failure return here.
|
||||
|
@@ -207,6 +207,24 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
*/
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);

/**
* kvm_arm_aarch32_supported:
* @cs: CPUState
*
* Returns: true if the KVM VCPU can enable AArch32 mode
* and false otherwise.
*/
bool kvm_arm_aarch32_supported(CPUState *cs);

/**
* bool kvm_arm_pmu_supported:
* @cs: CPUState
*
* Returns: true if the KVM VCPU can enable its PMU
* and false otherwise.
*/
bool kvm_arm_pmu_supported(CPUState *cs);

/**
* kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
* IPA address space supported by KVM
@@ -247,6 +265,16 @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
cpu->host_cpu_probe_failed = true;
}

static inline bool kvm_arm_aarch32_supported(CPUState *cs)
{
return false;
}

static inline bool kvm_arm_pmu_supported(CPUState *cs)
{
return false;
}

static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
{
return -ENOENT;
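The header keeps the real prototypes in the CONFIG_KVM half and supplies static inline stubs that return false (or -ENOENT) in the !CONFIG_KVM half, so callers such as arm_set_pmu() and aarch64_cpu_set_aarch64() never need #ifdefs. A minimal sketch of that pattern, with a made-up capability name rather than one of the real declarations:

    /* some_kvm_stubs.h -- illustrative only, not a real QEMU header */
    #ifndef SOME_KVM_STUBS_H
    #define SOME_KVM_STUBS_H

    #include <stdbool.h>

    #ifdef CONFIG_KVM
    /* Real implementation, compiled only when KVM support is built in. */
    bool kvm_some_feature_supported(void);
    #else
    /* Stub: keeps call sites free of #ifdefs and always reports "unsupported". */
    static inline bool kvm_some_feature_supported(void)
    {
        return false;
    }
    #endif

    #endif /* SOME_KVM_STUBS_H */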
@@ -135,21 +135,6 @@ uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
uint32_t res;
if (val >= 0x40000000) {
res = ~SIGNBIT;
env->QF = 1;
} else if (val <= (int32_t)0xc0000000) {
res = SIGNBIT;
env->QF = 1;
} else {
res = val << 1;
}
return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
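helper_double_saturate() can go away because saturating doubling is just a saturating addition of a value to itself, so the translator can emit the existing add_saturate helper with both operands equal. A standalone cross-check modelled on the add_saturate helper in this file, with the QF plumbing reduced to a local flag:

    #include <stdio.h>
    #include <stdint.h>

    #define SIGNBIT 0x80000000u

    static uint32_t add_saturate(uint32_t a, uint32_t b, int *qf)
    {
        uint32_t res = a + b;
        if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
            *qf = 1;
            res = ~(((int32_t)a >> 31) ^ SIGNBIT);
        }
        return res;
    }

    int main(void)
    {
        const uint32_t tests[] = { 5, (uint32_t)-7, 0x3fffffff,
                                   0x40000000, 0xbfffffff };
        for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
            int qf = 0;
            uint32_t doubled = add_saturate(tests[i], tests[i], &qf);
            /* Small values just double; 0x40000000 saturates to 0x7fffffff
             * and 0xbfffffff to 0x80000000, both with Q set. */
            printf("0x%08x -> 0x%08x (Q=%d)\n", tests[i], doubled, qf);
        }
        return 0;
    }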
@ -253,40 +253,26 @@ static void gen_exception_internal(int excp)
|
||||
tcg_temp_free_i32(tcg_excp);
|
||||
}
|
||||
|
||||
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
|
||||
static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
|
||||
{
|
||||
TCGv_i32 tcg_excp = tcg_const_i32(excp);
|
||||
TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
|
||||
TCGv_i32 tcg_el = tcg_const_i32(target_el);
|
||||
|
||||
gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
|
||||
tcg_syn, tcg_el);
|
||||
tcg_temp_free_i32(tcg_el);
|
||||
tcg_temp_free_i32(tcg_syn);
|
||||
tcg_temp_free_i32(tcg_excp);
|
||||
}
|
||||
|
||||
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
|
||||
{
|
||||
gen_a64_set_pc_im(s->pc - offset);
|
||||
gen_a64_set_pc_im(pc);
|
||||
gen_exception_internal(excp);
|
||||
s->base.is_jmp = DISAS_NORETURN;
|
||||
}
|
||||
|
||||
static void gen_exception_insn(DisasContext *s, int offset, int excp,
|
||||
static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
|
||||
uint32_t syndrome, uint32_t target_el)
|
||||
{
|
||||
gen_a64_set_pc_im(s->pc - offset);
|
||||
gen_a64_set_pc_im(pc);
|
||||
gen_exception(excp, syndrome, target_el);
|
||||
s->base.is_jmp = DISAS_NORETURN;
|
||||
}
|
||||
|
||||
static void gen_exception_bkpt_insn(DisasContext *s, int offset,
|
||||
uint32_t syndrome)
|
||||
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
|
||||
{
|
||||
TCGv_i32 tcg_syn;
|
||||
|
||||
gen_a64_set_pc_im(s->pc - offset);
|
||||
gen_a64_set_pc_im(s->pc_curr);
|
||||
tcg_syn = tcg_const_i32(syndrome);
|
||||
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
|
||||
tcg_temp_free_i32(tcg_syn);
|
||||
@ -305,8 +291,7 @@ static void gen_step_complete_exception(DisasContext *s)
|
||||
* of the exception, and our syndrome information is always correct.
|
||||
*/
|
||||
gen_ss_advance(s);
|
||||
gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
|
||||
default_exception_el(s));
|
||||
gen_swstep_exception(s, 1, s->is_ldex);
|
||||
s->base.is_jmp = DISAS_NORETURN;
|
||||
}
|
||||
|
||||
@ -353,13 +338,6 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
|
||||
}
|
||||
}
|
||||
|
||||
void unallocated_encoding(DisasContext *s)
|
||||
{
|
||||
/* Unallocated and reserved encodings are uncategorized */
|
||||
gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
|
||||
default_exception_el(s));
|
||||
}
|
||||
|
||||
static void init_tmp_a64_array(DisasContext *s)
|
||||
{
|
||||
#ifdef CONFIG_DEBUG_TCG
|
||||
@ -1128,8 +1106,8 @@ static inline bool fp_access_check(DisasContext *s)
|
||||
return true;
|
||||
}
|
||||
|
||||
gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
|
||||
s->fp_excp_el);
|
||||
gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
|
||||
syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1139,7 +1117,7 @@ static inline bool fp_access_check(DisasContext *s)
|
||||
bool sve_access_check(DisasContext *s)
|
||||
{
|
||||
if (s->sve_excp_el) {
|
||||
gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
|
||||
gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(),
|
||||
s->sve_excp_el);
|
||||
return false;
|
||||
}
|
||||
@ -1248,11 +1226,11 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
|
||||
*/
|
||||
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
|
||||
{
|
||||
uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
|
||||
uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
|
||||
|
||||
if (insn & (1U << 31)) {
|
||||
/* BL Branch with link */
|
||||
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
|
||||
tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
|
||||
}
|
||||
|
||||
/* B Branch / BL Branch with link */
|
||||
@ -1276,7 +1254,7 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
|
||||
sf = extract32(insn, 31, 1);
|
||||
op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
|
||||
rt = extract32(insn, 0, 5);
|
||||
addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
|
||||
addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
|
||||
|
||||
tcg_cmp = read_cpu_reg(s, rt, sf);
|
||||
label_match = gen_new_label();
|
||||
@ -1285,7 +1263,7 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
|
||||
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
|
||||
tcg_cmp, 0, label_match);
|
||||
|
||||
gen_goto_tb(s, 0, s->pc);
|
||||
gen_goto_tb(s, 0, s->base.pc_next);
|
||||
gen_set_label(label_match);
|
||||
gen_goto_tb(s, 1, addr);
|
||||
}
|
||||
@ -1305,7 +1283,7 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
|
||||
|
||||
bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
|
||||
op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
|
||||
addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
|
||||
addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
|
||||
rt = extract32(insn, 0, 5);
|
||||
|
||||
tcg_cmp = tcg_temp_new_i64();
|
||||
@ -1316,7 +1294,7 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
|
||||
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
|
||||
tcg_cmp, 0, label_match);
|
||||
tcg_temp_free_i64(tcg_cmp);
|
||||
gen_goto_tb(s, 0, s->pc);
|
||||
gen_goto_tb(s, 0, s->base.pc_next);
|
||||
gen_set_label(label_match);
|
||||
gen_goto_tb(s, 1, addr);
|
||||
}
|
||||
@ -1336,7 +1314,7 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
|
||||
unallocated_encoding(s);
|
||||
return;
|
||||
}
|
||||
addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
|
||||
addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
|
||||
cond = extract32(insn, 0, 4);
|
||||
|
||||
reset_btype(s);
|
||||
@ -1344,7 +1322,7 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
|
||||
/* genuinely conditional branches */
|
||||
TCGLabel *label_match = gen_new_label();
|
||||
arm_gen_test_cc(cond, label_match);
|
||||
gen_goto_tb(s, 0, s->pc);
|
||||
gen_goto_tb(s, 0, s->base.pc_next);
|
||||
gen_set_label(label_match);
|
||||
gen_goto_tb(s, 1, addr);
|
||||
} else {
|
||||
@ -1505,7 +1483,7 @@ static void handle_sync(DisasContext *s, uint32_t insn,
|
||||
* any pending interrupts immediately.
|
||||
*/
|
||||
reset_btype(s);
|
||||
gen_goto_tb(s, 0, s->pc);
|
||||
gen_goto_tb(s, 0, s->base.pc_next);
|
||||
return;
|
||||
|
||||
case 7: /* SB */
|
||||
@ -1517,7 +1495,7 @@ static void handle_sync(DisasContext *s, uint32_t insn,
|
||||
* MB and end the TB instead.
|
||||
*/
|
||||
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
|
||||
gen_goto_tb(s, 0, s->pc);
|
||||
gen_goto_tb(s, 0, s->base.pc_next);
|
||||
return;
|
||||
|
||||
default:
|
||||
@ -1720,7 +1698,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
|
||||
TCGv_i32 tcg_syn, tcg_isread;
|
||||
uint32_t syndrome;
|
||||
|
||||
gen_a64_set_pc_im(s->pc - 4);
|
||||
gen_a64_set_pc_im(s->pc_curr);
|
||||
tmpptr = tcg_const_ptr(ri);
|
||||
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
|
||||
tcg_syn = tcg_const_i32(syndrome);
|
||||
@ -1873,8 +1851,8 @@ static void disas_exc(DisasContext *s, uint32_t insn)
|
||||
switch (op2_ll) {
|
||||
case 1: /* SVC */
|
||||
gen_ss_advance(s);
|
||||
gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
|
||||
default_exception_el(s));
|
||||
gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
|
||||
syn_aa64_svc(imm16), default_exception_el(s));
|
||||
break;
|
||||
case 2: /* HVC */
|
||||
if (s->current_el == 0) {
|
||||
@ -1884,22 +1862,24 @@ static void disas_exc(DisasContext *s, uint32_t insn)
|
||||
/* The pre HVC helper handles cases when HVC gets trapped
|
||||
* as an undefined insn by runtime configuration.
|
||||
*/
|
||||
gen_a64_set_pc_im(s->pc - 4);
|
||||
gen_a64_set_pc_im(s->pc_curr);
|
||||
gen_helper_pre_hvc(cpu_env);
|
||||
gen_ss_advance(s);
|
||||
gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
|
||||
gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
|
||||
syn_aa64_hvc(imm16), 2);
|
||||
break;
|
||||
case 3: /* SMC */
|
||||
if (s->current_el == 0) {
|
||||
unallocated_encoding(s);
|
||||
break;
|
||||
}
|
||||
gen_a64_set_pc_im(s->pc - 4);
|
||||
gen_a64_set_pc_im(s->pc_curr);
|
||||
tmp = tcg_const_i32(syn_aa64_smc(imm16));
|
||||
gen_helper_pre_smc(cpu_env, tmp);
|
||||
tcg_temp_free_i32(tmp);
|
||||
gen_ss_advance(s);
|
||||
gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
|
||||
gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
|
||||
syn_aa64_smc(imm16), 3);
|
||||
break;
|
||||
default:
|
||||
unallocated_encoding(s);
|
||||
@ -1912,7 +1892,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
|
||||
break;
|
||||
}
|
||||
/* BRK */
|
||||
gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
|
||||
gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
|
||||
break;
|
||||
case 2:
|
||||
if (op2_ll != 0) {
|
||||
@ -1936,7 +1916,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
|
||||
gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
|
||||
} else {
|
||||
unsupported_encoding(s, insn);
|
||||
}
|
||||
@ -2029,7 +2009,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
|
||||
gen_a64_set_pc(s, dst);
|
||||
/* BLR also needs to load return address */
|
||||
if (opc == 1) {
|
||||
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
|
||||
tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
|
||||
}
|
||||
break;
|
||||
|
||||
@ -2056,7 +2036,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
|
||||
gen_a64_set_pc(s, dst);
|
||||
/* BLRAA also needs to load return address */
|
||||
if (opc == 9) {
|
||||
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
|
||||
tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
|
||||
}
|
||||
break;
|
||||
|
||||
@ -2615,7 +2595,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
|
||||
|
||||
tcg_rt = cpu_reg(s, rt);
|
||||
|
||||
clean_addr = tcg_const_i64((s->pc - 4) + imm);
|
||||
clean_addr = tcg_const_i64(s->pc_curr + imm);
|
||||
if (is_vector) {
|
||||
do_fp_ld(s, rt, clean_addr, size);
|
||||
} else {
|
||||
@ -3594,7 +3574,7 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
|
||||
offset = sextract64(insn, 5, 19);
|
||||
offset = offset << 2 | extract32(insn, 29, 2);
|
||||
rd = extract32(insn, 0, 5);
|
||||
base = s->pc - 4;
|
||||
base = s->pc_curr;
|
||||
|
||||
if (page) {
|
||||
/* ADRP (page based) */
|
||||
@ -11533,7 +11513,7 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
|
||||
__func__, insn, fpopcode, s->pc);
|
||||
__func__, insn, fpopcode, s->pc_curr);
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
@ -14044,9 +14024,10 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
|
||||
{
|
||||
uint32_t insn;
|
||||
|
||||
insn = arm_ldl_code(env, s->pc, s->sctlr_b);
|
||||
s->pc_curr = s->base.pc_next;
|
||||
insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
|
||||
s->insn = insn;
|
||||
s->pc += 4;
|
||||
s->base.pc_next += 4;
|
||||
|
||||
s->fp_access_checked = false;
|
||||
|
||||
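The hunk above is the core of the s->pc cleanup: before decoding, the translator records the address of the current instruction in s->pc_curr and then advances s->base.pc_next to the following instruction. PC-relative operands are computed from pc_curr and link/return addresses from base.pc_next, which removes the old "s->pc - 4" and "s->pc - offset" arithmetic. A stripped-down sketch of the bookkeeping; the types and the fetch helper are simplified stand-ins, not the real DisasContext:

    #include <stdint.h>

    typedef struct {
        uint64_t pc_next;                 /* stands in for DisasContextBase.pc_next */
    } BaseSketch;

    typedef struct {
        BaseSketch base;
        uint64_t pc_curr;                 /* address of the insn being translated */
        uint32_t insn;
    } CtxSketch;

    static uint32_t fetch_insn(uint64_t addr)
    {
        (void)addr;
        return 0;                         /* stand-in for arm_ldl_code() */
    }

    static void translate_one(CtxSketch *s)
    {
        s->pc_curr = s->base.pc_next;     /* this instruction */
        s->insn = fetch_insn(s->base.pc_next);
        s->base.pc_next += 4;             /* next instruction */

        /* A branch-and-link at this point would use:
         *   target = s->pc_curr + offset;     PC-relative operand
         *   link   = s->base.pc_next;         return address
         */
    }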
@ -14077,7 +14058,8 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
|
||||
if (s->btype != 0
|
||||
&& s->guarded_page
|
||||
&& !btype_destination_ok(insn, s->bt, s->btype)) {
|
||||
gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype),
|
||||
gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
|
||||
syn_btitrap(s->btype),
|
||||
default_exception_el(s));
|
||||
return;
|
||||
}
|
||||
@ -14143,7 +14125,6 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
|
||||
int bound, core_mmu_idx;
|
||||
|
||||
dc->isar = &arm_cpu->isar;
|
||||
dc->pc = dc->base.pc_first;
|
||||
dc->condjmp = 0;
|
||||
|
||||
dc->aarch64 = 1;
|
||||
@ -14194,7 +14175,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
|
||||
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
|
||||
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
|
||||
dc->is_ldex = false;
|
||||
dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
|
||||
dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
|
||||
|
||||
/* Bound the number of insns to execute to those left on the page. */
|
||||
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
|
||||
@ -14216,7 +14197,7 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
|
||||
{
|
||||
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||
|
||||
tcg_gen_insn_start(dc->pc, 0, 0);
|
||||
tcg_gen_insn_start(dc->base.pc_next, 0, 0);
|
||||
dc->insn_start = tcg_last_op();
|
||||
}
|
||||
|
||||
@ -14226,18 +14207,18 @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
|
||||
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
||||
|
||||
if (bp->flags & BP_CPU) {
|
||||
gen_a64_set_pc_im(dc->pc);
|
||||
gen_a64_set_pc_im(dc->base.pc_next);
|
||||
gen_helper_check_breakpoints(cpu_env);
|
||||
/* End the TB early; it likely won't be executed */
|
||||
dc->base.is_jmp = DISAS_TOO_MANY;
|
||||
} else {
|
||||
gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
|
||||
gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
|
||||
/* The address covered by the breakpoint must be
|
||||
included in [tb->pc, tb->pc + tb->size) in order
|
||||
to for it to be properly cleared -- thus we
|
||||
increment the PC here so that the logic setting
|
||||
tb->size below does the right thing. */
|
||||
dc->pc += 4;
|
||||
dc->base.pc_next += 4;
|
||||
dc->base.is_jmp = DISAS_NORETURN;
|
||||
}
|
||||
|
||||
@ -14261,14 +14242,12 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
|
||||
* bits should be zero.
|
||||
*/
|
||||
assert(dc->base.num_insns == 1);
|
||||
gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
|
||||
default_exception_el(dc));
|
||||
gen_swstep_exception(dc, 0, 0);
|
||||
dc->base.is_jmp = DISAS_NORETURN;
|
||||
} else {
|
||||
disas_a64_insn(env, dc);
|
||||
}
|
||||
|
||||
dc->base.pc_next = dc->pc;
|
||||
translator_loop_temp_check(&dc->base);
|
||||
}
|
||||
|
||||
@ -14284,7 +14263,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
|
||||
*/
|
||||
switch (dc->base.is_jmp) {
|
||||
default:
|
||||
gen_a64_set_pc_im(dc->pc);
|
||||
gen_a64_set_pc_im(dc->base.pc_next);
|
||||
/* fall through */
|
||||
case DISAS_EXIT:
|
||||
case DISAS_JUMP:
|
||||
@ -14301,11 +14280,11 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
|
||||
switch (dc->base.is_jmp) {
|
||||
case DISAS_NEXT:
|
||||
case DISAS_TOO_MANY:
|
||||
gen_goto_tb(dc, 1, dc->pc);
|
||||
gen_goto_tb(dc, 1, dc->base.pc_next);
|
||||
break;
|
||||
default:
|
||||
case DISAS_UPDATE:
|
||||
gen_a64_set_pc_im(dc->pc);
|
||||
gen_a64_set_pc_im(dc->base.pc_next);
|
||||
/* fall through */
|
||||
case DISAS_EXIT:
|
||||
tcg_gen_exit_tb(NULL, 0);
|
||||
@ -14317,11 +14296,11 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
|
||||
case DISAS_SWI:
|
||||
break;
|
||||
case DISAS_WFE:
|
||||
gen_a64_set_pc_im(dc->pc);
|
||||
gen_a64_set_pc_im(dc->base.pc_next);
|
||||
gen_helper_wfe(cpu_env);
|
||||
break;
|
||||
case DISAS_YIELD:
|
||||
gen_a64_set_pc_im(dc->pc);
|
||||
gen_a64_set_pc_im(dc->base.pc_next);
|
||||
gen_helper_yield(cpu_env);
|
||||
break;
|
||||
case DISAS_WFI:
|
||||
@ -14331,7 +14310,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
|
||||
*/
|
||||
TCGv_i32 tmp = tcg_const_i32(4);
|
||||
|
||||
gen_a64_set_pc_im(dc->pc);
|
||||
gen_a64_set_pc_im(dc->base.pc_next);
|
||||
gen_helper_wfi(cpu_env, tmp);
|
||||
tcg_temp_free_i32(tmp);
|
||||
/* The helper doesn't necessarily throw an exception, but we
|
||||
@ -14342,9 +14321,6 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Functions above can change dc->pc, so re-align db->pc_next */
|
||||
dc->base.pc_next = dc->pc;
|
||||
}
|
||||
|
||||
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
|
||||
|
@@ -18,14 +18,12 @@
#ifndef TARGET_ARM_TRANSLATE_A64_H
#define TARGET_ARM_TRANSLATE_A64_H

void unallocated_encoding(DisasContext *s);

#define unsupported_encoding(s, insn) \
do { \
qemu_log_mask(LOG_UNIMP, \
"%s:%d: unsupported instruction encoding 0x%08x " \
"at pc=%016" PRIx64 "\n", \
__FILE__, __LINE__, insn, s->pc - 4); \
__FILE__, __LINE__, insn, s->pc_curr); \
unallocated_encoding(s); \
} while (0)

@ -96,10 +96,10 @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
|
||||
{
|
||||
if (s->fp_excp_el) {
|
||||
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
||||
gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
|
||||
gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
|
||||
s->fp_excp_el);
|
||||
} else {
|
||||
gen_exception_insn(s, 4, EXCP_UDEF,
|
||||
gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
|
||||
syn_fp_access_trap(1, 0xe, false),
|
||||
s->fp_excp_el);
|
||||
}
|
||||
@ -108,8 +108,7 @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
|
||||
|
||||
if (!s->vfp_enabled && !ignore_vfp_enabled) {
|
||||
assert(!arm_dc_feature(s, ARM_FEATURE_M));
|
||||
gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
|
||||
default_exception_el(s));
|
||||
unallocated_encoding(s);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -941,14 +940,8 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
|
||||
offset = -offset;
|
||||
}
|
||||
|
||||
if (s->thumb && a->rn == 15) {
|
||||
/* This is actually UNPREDICTABLE */
|
||||
addr = tcg_temp_new_i32();
|
||||
tcg_gen_movi_i32(addr, s->pc & ~2);
|
||||
} else {
|
||||
addr = load_reg(s, a->rn);
|
||||
}
|
||||
tcg_gen_addi_i32(addr, addr, offset);
|
||||
/* For thumb, use of PC is UNPREDICTABLE. */
|
||||
addr = add_reg_for_lit(s, a->rn, offset);
|
||||
tmp = tcg_temp_new_i32();
|
||||
if (a->l) {
|
||||
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
|
||||
@ -983,14 +976,8 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
|
||||
offset = -offset;
|
||||
}
|
||||
|
||||
if (s->thumb && a->rn == 15) {
|
||||
/* This is actually UNPREDICTABLE */
|
||||
addr = tcg_temp_new_i32();
|
||||
tcg_gen_movi_i32(addr, s->pc & ~2);
|
||||
} else {
|
||||
addr = load_reg(s, a->rn);
|
||||
}
|
||||
tcg_gen_addi_i32(addr, addr, offset);
|
||||
/* For thumb, use of PC is UNPREDICTABLE. */
|
||||
addr = add_reg_for_lit(s, a->rn, offset);
|
||||
tmp = tcg_temp_new_i64();
|
||||
if (a->l) {
|
||||
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
|
||||
@ -1029,13 +1016,8 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (s->thumb && a->rn == 15) {
|
||||
/* This is actually UNPREDICTABLE */
|
||||
addr = tcg_temp_new_i32();
|
||||
tcg_gen_movi_i32(addr, s->pc & ~2);
|
||||
} else {
|
||||
addr = load_reg(s, a->rn);
|
||||
}
|
||||
/* For thumb, use of PC is UNPREDICTABLE. */
|
||||
addr = add_reg_for_lit(s, a->rn, 0);
|
||||
if (a->p) {
|
||||
/* pre-decrement */
|
||||
tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
|
||||
@ -1112,13 +1094,8 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (s->thumb && a->rn == 15) {
|
||||
/* This is actually UNPREDICTABLE */
|
||||
addr = tcg_temp_new_i32();
|
||||
tcg_gen_movi_i32(addr, s->pc & ~2);
|
||||
} else {
|
||||
addr = load_reg(s, a->rn);
|
||||
}
|
||||
/* For thumb, use of PC is UNPREDICTABLE. */
|
||||
addr = add_reg_for_lit(s, a->rn, 0);
|
||||
if (a->p) {
|
||||
/* pre-decrement */
|
||||
tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
|
||||
|
(File diff suppressed because it is too large.)
@@ -2,6 +2,7 @@
#define TARGET_ARM_TRANSLATE_H

#include "exec/translator.h"
#include "internals.h"


/* internal defines */
@@ -9,7 +10,8 @@ typedef struct DisasContext {
DisasContextBase base;
const ARMISARegisters *isar;

target_ulong pc;
/* The address of the current instruction being translated. */
target_ulong pc_curr;
target_ulong page_start;
uint32_t insn;
/* Nonzero if this instruction has been conditionally skipped. */
@@ -49,6 +51,8 @@ typedef struct DisasContext {
uint32_t svc_imm;
int aarch64;
int current_el;
/* Debug target exception level for single-step exceptions */
int debug_target_el;
GHashTable *cp_regs;
uint64_t features; /* CPU features bits */
/* Because unallocated encodings generate different exception syndrome
@@ -69,8 +73,6 @@ typedef struct DisasContext {
* ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
*/
bool is_ldex;
/* True if a single-step exception will be taken to the current EL */
bool ss_same_el;
/* True if v8.3-PAuth is active.  */
bool pauth_active;
/* True with v8.5-BTI and SCTLR_ELx.BT* set.  */
@@ -97,6 +99,8 @@ typedef struct DisasCompare {
bool value_global;
} DisasCompare;

void unallocated_encoding(DisasContext *s);

/* Share the TCG temporaries common between 32 and 64 bit modes.  */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
@@ -232,6 +236,35 @@ static inline void gen_ss_advance(DisasContext *s)
}
}

static inline void gen_exception(int excp, uint32_t syndrome,
uint32_t target_el)
{
TCGv_i32 tcg_excp = tcg_const_i32(excp);
TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
TCGv_i32 tcg_el = tcg_const_i32(target_el);

gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
tcg_syn, tcg_el);

tcg_temp_free_i32(tcg_el);
tcg_temp_free_i32(tcg_syn);
tcg_temp_free_i32(tcg_excp);
}

/* Generate an architectural singlestep exception */
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
{
bool same_el = (s->debug_target_el == s->current_el);

/*
* If singlestep is targeting a lower EL than the current one,
* then s->ss_active must be false and we can never get here.
*/
assert(s->debug_target_el >= s->current_el);

gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
}

/*
* Given a VFP floating point constant encoded into an 8 bit immediate in an
* instruction, expand it to the actual constant value of the specified