target-arm:
 * Support ACPI for ARMv8 systems using the 'virt' board
   (and a UEFI boot image, typically)
 * avoid buffer overrun in some UNPREDICTABLE ldrd/strd cases
 * further work preparing for 64-bit EL2/EL3 support

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJVaEn+AAoJEDwlJe0UNgzeqE0P/i3stCseXlDPVXRYBHH32YxJ
mduKnzvJ0uWYODSicmFivA8hyOtu/Q0t1H2V7XvDEy2UZ1jHZ0bZBzm9DieubStH
q5RrbbRVTEBVvO8uBXnu6fMbD/p0TbtyvsA8siTCorkWANq3MVsMbxTnN2jc2QGj
ytB+nUF/lDm7rLBPJyBqblvBZOHMSaH5IzMAjeqwi6zPG4MfGDya48J82MZ+3k9W
OGIRbV4rhFFyqwbu0zIMUYgVA0xyvK/hKQ1CVai/GEu7L7lZlEb6UnmoNQGmTjH/
XtyHYsiVauA4M7POJ5G2c6Q7UsZHEKZJmnGVIkEyC8ZERhflWJ6MT2bNr1TsViUi
8Lannl0dPZpPld0GZt2QVF6uyvR/RtbuX41kDCPW1+nCB1kBXWO6JvuNjb+JQ6Dm
co2+LlIYK5avxUkPxzVkKdqS9HVaQ8yZ+wwKvX+gOthM6pa6uDRZsrhJWX2pFzD/
hK3c1PQPIPWOnZO4adpeOiaRuctqjZlihfh5saXxidfVYm6ZfNxza2hi3j75mKlR
mCo7dlCsFrH4IZniRSe5rCr+QGol3J2ViThmZnBL5OrFBFe2T/GtPCBJrY+1OW2o
99nBmsGd5xbuH3qdZxIjq0/nxtkSmlFJSpH75yi+TmRS9fKcNdK8RWaU52wmFi1/
sIt5E6qKnt03IaZyxJLt
=dFxn
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20150529' into staging

target-arm:
 * Support ACPI for ARMv8 systems using the 'virt' board
   (and a UEFI boot image, typically)
 * avoid buffer overrun in some UNPREDICTABLE ldrd/strd cases
 * further work preparing for 64-bit EL2/EL3 support

# gpg: Signature made Fri May 29 12:14:06 2015 BST using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"

* remotes/pmaydell/tags/pull-target-arm-20150529: (39 commits)
  target-arm: Avoid buffer overrun on UNPREDICTABLE ldrd/strd
  hw/arm/virt: Enable dynamic generation of ACPI v5.1 tables
  ACPI: split CONFIG_ACPI into 4 pieces
  hw/arm/virt-acpi-build: Add PCIe controller in ACPI DSDT table
  hw/acpi/aml-build: Add Unicode macro
  hw/acpi/aml-build: Add aml_dword_io() term
  hw/acpi/aml-build: Add aml_create_dword_field() term
  hw/acpi/aml-build: Add aml_else() term
  hw/acpi/aml-build: Add aml_lnot() term
  hw/acpi/aml-build: Add aml_or() term
  hw/acpi/aml-build: Add ToUUID macro
  hw/acpi/aml-build: Make aml_buffer() definition consistent with the spec
  hw/arm/virt-acpi-build: Generate MCFG table
  hw/arm/virt-acpi-build: Generate RSDP table
  hw/arm/virt-acpi-build: Generate RSDT table
  hw/arm/virt-acpi-build: Generate GTDT table
  hw/arm/virt-acpi-build: Generate MADT table
  hw/arm/virt-acpi-build: Generate FADT table and update ACPI headers
  hw/arm/virt-acpi-build: Generation of DSDT table for virt devices
  hw/acpi/aml-build: Add aml_interrupt() term
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 97af820f53
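Quick illustrative note (not part of the commit): with this series the 'virt' board can hand its generated ACPI v5.1 tables to a guest booted through UEFI. A command along the following lines exercises that path; the firmware and kernel image names are placeholders, not files shipped by these patches:

    qemu-system-aarch64 -M virt -cpu cortex-a57 -m 1024 -nographic \
        -bios QEMU_EFI.fd \
        -kernel Image -append "console=ttyAMA0 acpi=force"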
@@ -101,3 +101,4 @@ CONFIG_ALLWINNER_A10=y
CONFIG_XIO3130=y
CONFIG_IOH3420=y
CONFIG_I82801B11=y
CONFIG_ACPI=y
@@ -15,6 +15,9 @@ CONFIG_PCSPK=y
CONFIG_PCKBD=y
CONFIG_FDC=y
CONFIG_ACPI=y
CONFIG_ACPI_X86=y
CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_APM=y
CONFIG_I8257=y
CONFIG_IDE_ISA=y
@@ -15,6 +15,9 @@ CONFIG_PCSPK=y
CONFIG_PCKBD=y
CONFIG_FDC=y
CONFIG_ACPI=y
CONFIG_ACPI_X86=y
CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_APM=y
CONFIG_I8257=y
CONFIG_PIIX4=y
@@ -15,6 +15,9 @@ CONFIG_PCSPK=y
CONFIG_PCKBD=y
CONFIG_FDC=y
CONFIG_ACPI=y
CONFIG_ACPI_X86=y
CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_APM=y
CONFIG_I8257=y
CONFIG_PIIX4=y
@@ -15,6 +15,9 @@ CONFIG_PCSPK=y
CONFIG_PCKBD=y
CONFIG_FDC=y
CONFIG_ACPI=y
CONFIG_ACPI_X86=y
CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_APM=y
CONFIG_I8257=y
CONFIG_PIIX4=y
@@ -15,6 +15,9 @@ CONFIG_PCSPK=y
CONFIG_PCKBD=y
CONFIG_FDC=y
CONFIG_ACPI=y
CONFIG_ACPI_X86=y
CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_APM=y
CONFIG_I8257=y
CONFIG_PIIX4=y
@@ -15,6 +15,9 @@ CONFIG_PCSPK=y
CONFIG_PCKBD=y
CONFIG_FDC=y
CONFIG_ACPI=y
CONFIG_ACPI_X86=y
CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_APM=y
CONFIG_I8257=y
CONFIG_IDE_ISA=y
@@ -1,5 +1,6 @@
common-obj-$(CONFIG_ACPI) += core.o piix4.o ich9.o pcihp.o cpu_hotplug.o
common-obj-$(CONFIG_ACPI) += memory_hotplug.o
common-obj-$(CONFIG_ACPI_X86) += core.o piix4.o ich9.o pcihp.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o
common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
common-obj-$(CONFIG_ACPI) += aml-build.o
@@ -26,6 +26,7 @@
#include <string.h>
#include "hw/acpi/aml-build.h"
#include "qemu/bswap.h"
#include "qemu/bitops.h"
#include "hw/acpi/bios-linker-loader.h"

static GArray *build_alloc_array(void)
@@ -454,6 +455,16 @@ Aml *aml_and(Aml *arg1, Aml *arg2)
    return var;
}

/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefOr */
Aml *aml_or(Aml *arg1, Aml *arg2)
{
    Aml *var = aml_opcode(0x7D /* OrOp */);
    aml_append(var, arg1);
    aml_append(var, arg2);
    build_append_byte(var->buf, 0x00 /* NullNameOp */);
    return var;
}

/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefNotify */
Aml *aml_notify(Aml *arg1, Aml *arg2)
{
@@ -505,6 +516,60 @@ Aml *aml_call4(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4)
    return var;
}

/*
 * ACPI 1.0b: 6.4.3.4 32-Bit Fixed Location Memory Range Descriptor
 * (Type 1, Large Item Name 0x6)
 */
Aml *aml_memory32_fixed(uint32_t addr, uint32_t size,
                        AmlReadAndWrite read_and_write)
{
    Aml *var = aml_alloc();
    build_append_byte(var->buf, 0x86); /* Memory32Fixed Resource Descriptor */
    build_append_byte(var->buf, 9); /* Length, bits[7:0] value = 9 */
    build_append_byte(var->buf, 0); /* Length, bits[15:8] value = 0 */
    build_append_byte(var->buf, read_and_write); /* Write status, 1 rw 0 ro */

    /* Range base address */
    build_append_byte(var->buf, extract32(addr, 0, 8)); /* bits[7:0] */
    build_append_byte(var->buf, extract32(addr, 8, 8)); /* bits[15:8] */
    build_append_byte(var->buf, extract32(addr, 16, 8)); /* bits[23:16] */
    build_append_byte(var->buf, extract32(addr, 24, 8)); /* bits[31:24] */

    /* Range length */
    build_append_byte(var->buf, extract32(size, 0, 8)); /* bits[7:0] */
    build_append_byte(var->buf, extract32(size, 8, 8)); /* bits[15:8] */
    build_append_byte(var->buf, extract32(size, 16, 8)); /* bits[23:16] */
    build_append_byte(var->buf, extract32(size, 24, 8)); /* bits[31:24] */
    return var;
}

/*
 * ACPI 5.0: 6.4.3.6 Extended Interrupt Descriptor
 * Type 1, Large Item Name 0x9
 */
Aml *aml_interrupt(AmlConsumerAndProducer con_and_pro,
                   AmlLevelAndEdge level_and_edge,
                   AmlActiveHighAndLow high_and_low, AmlShared shared,
                   uint32_t irq)
{
    Aml *var = aml_alloc();
    uint8_t irq_flags = con_and_pro | (level_and_edge << 1)
                        | (high_and_low << 2) | (shared << 3);

    build_append_byte(var->buf, 0x89); /* Extended irq descriptor */
    build_append_byte(var->buf, 6); /* Length, bits[7:0] minimum value = 6 */
    build_append_byte(var->buf, 0); /* Length, bits[15:8] minimum value = 0 */
    build_append_byte(var->buf, irq_flags); /* Interrupt Vector Information. */
    build_append_byte(var->buf, 0x01); /* Interrupt table length = 1 */

    /* Interrupt Number */
    build_append_byte(var->buf, extract32(irq, 0, 8)); /* bits[7:0] */
    build_append_byte(var->buf, extract32(irq, 8, 8)); /* bits[15:8] */
    build_append_byte(var->buf, extract32(irq, 16, 8)); /* bits[23:16] */
    build_append_byte(var->buf, extract32(irq, 24, 8)); /* bits[31:24] */
    return var;
}

/* ACPI 1.0b: 6.4.2.5 I/O Port Descriptor */
Aml *aml_io(AmlIODecode dec, uint16_t min_base, uint16_t max_base,
            uint8_t aln, uint8_t len)
@@ -542,6 +607,14 @@ Aml *aml_irq_no_flags(uint8_t irq)
    return var;
}

/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLNot */
Aml *aml_lnot(Aml *arg)
{
    Aml *var = aml_opcode(0x92 /* LNotOp */);
    aml_append(var, arg);
    return var;
}

/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLEqual */
Aml *aml_equal(Aml *arg1, Aml *arg2)
{
@@ -559,6 +632,13 @@ Aml *aml_if(Aml *predicate)
    return var;
}

/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefElse */
Aml *aml_else(void)
{
    Aml *var = aml_bundle(0xA1 /* ElseOp */, AML_PACKAGE);
    return var;
}

/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefMethod */
Aml *aml_method(const char *name, int arg_count)
{
@@ -587,10 +667,22 @@ Aml *aml_resource_template(void)
    return var;
}

/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefBuffer */
Aml *aml_buffer(void)
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefBuffer
 * Pass byte_list as NULL to request uninitialized buffer to reserve space.
 */
Aml *aml_buffer(int buffer_size, uint8_t *byte_list)
{
    int i;
    Aml *var = aml_bundle(0x11 /* BufferOp */, AML_BUFFER);

    for (i = 0; i < buffer_size; i++) {
        if (byte_list == NULL) {
            build_append_byte(var->buf, 0x0);
        } else {
            build_append_byte(var->buf, byte_list[i]);
        }
    }

    return var;
}
@@ -646,6 +738,17 @@ Aml *aml_field(const char *name, AmlAccessType type, AmlUpdateRule rule)
    return var;
}

/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateDWordField */
Aml *aml_create_dword_field(Aml *srcbuf, Aml *index, const char *name)
{
    Aml *var = aml_alloc();
    build_append_byte(var->buf, 0x8A); /* CreateDWordFieldOp */
    aml_append(var, srcbuf);
    aml_append(var, index);
    build_append_namestring(var->buf, "%s", name);
    return var;
}

/* ACPI 1.0b: 16.2.3 Data Objects Encoding: String */
Aml *aml_string(const char *name_format, ...)
{
@@ -833,7 +936,7 @@ Aml *aml_word_bus_number(AmlMinFixed min_fixed, AmlMaxFixed max_fixed,
                         uint16_t addr_trans, uint16_t len)

{
    return aml_word_as_desc(aml_bus_number_range, min_fixed, max_fixed, dec,
    return aml_word_as_desc(AML_BUS_NUMBER_RANGE, min_fixed, max_fixed, dec,
                            addr_gran, addr_min, addr_max, addr_trans, len, 0);
}

@@ -850,7 +953,25 @@ Aml *aml_word_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed,
                 uint16_t len)

{
    return aml_word_as_desc(aml_io_range, min_fixed, max_fixed, dec,
    return aml_word_as_desc(AML_IO_RANGE, min_fixed, max_fixed, dec,
                            addr_gran, addr_min, addr_max, addr_trans, len,
                            isa_ranges);
}

/*
 * ACPI 1.0b: 6.4.3.5.4 ASL Macros for DWORD Address Descriptor
 *
 * More verbose description at:
 * ACPI 5.0: 19.5.33 DWordIO (DWord IO Resource Descriptor Macro)
 */
Aml *aml_dword_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed,
                  AmlDecode dec, AmlISARanges isa_ranges,
                  uint32_t addr_gran, uint32_t addr_min,
                  uint32_t addr_max, uint32_t addr_trans,
                  uint32_t len)

{
    return aml_dword_as_desc(AML_IO_RANGE, min_fixed, max_fixed, dec,
                             addr_gran, addr_min, addr_max, addr_trans, len,
                             isa_ranges);
}
@@ -862,7 +983,7 @@ Aml *aml_word_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed,
 * ACPI 5.0: 19.5.34 DWordMemory (DWord Memory Resource Descriptor Macro)
 */
Aml *aml_dword_memory(AmlDecode dec, AmlMinFixed min_fixed,
                      AmlMaxFixed max_fixed, AmlCacheble cacheable,
                      AmlMaxFixed max_fixed, AmlCacheable cacheable,
                      AmlReadAndWrite read_and_write,
                      uint32_t addr_gran, uint32_t addr_min,
                      uint32_t addr_max, uint32_t addr_trans,
@@ -870,7 +991,7 @@ Aml *aml_dword_memory(AmlDecode dec, AmlMinFixed min_fixed,
{
    uint8_t flags = read_and_write | (cacheable << 1);

    return aml_dword_as_desc(aml_memory_range, min_fixed, max_fixed,
    return aml_dword_as_desc(AML_MEMORY_RANGE, min_fixed, max_fixed,
                             dec, addr_gran, addr_min, addr_max,
                             addr_trans, len, flags);
}
@@ -882,7 +1003,7 @@ Aml *aml_dword_memory(AmlDecode dec, AmlMinFixed min_fixed,
 * ACPI 5.0: 19.5.102 QWordMemory (QWord Memory Resource Descriptor Macro)
 */
Aml *aml_qword_memory(AmlDecode dec, AmlMinFixed min_fixed,
                      AmlMaxFixed max_fixed, AmlCacheble cacheable,
                      AmlMaxFixed max_fixed, AmlCacheable cacheable,
                      AmlReadAndWrite read_and_write,
                      uint64_t addr_gran, uint64_t addr_min,
                      uint64_t addr_max, uint64_t addr_trans,
@@ -890,11 +1011,81 @@ Aml *aml_qword_memory(AmlDecode dec, AmlMinFixed min_fixed,
{
    uint8_t flags = read_and_write | (cacheable << 1);

    return aml_qword_as_desc(aml_memory_range, min_fixed, max_fixed,
    return aml_qword_as_desc(AML_MEMORY_RANGE, min_fixed, max_fixed,
                             dec, addr_gran, addr_min, addr_max,
                             addr_trans, len, flags);
}

static uint8_t Hex2Byte(const char *src)
{
    int hi, lo;

    hi = Hex2Digit(src[0]);
    assert(hi >= 0);
    assert(hi <= 15);

    lo = Hex2Digit(src[1]);
    assert(lo >= 0);
    assert(lo <= 15);
    return (hi << 4) | lo;
}

/*
 * ACPI 3.0: 17.5.124 ToUUID (Convert String to UUID Macro)
 * e.g. UUID: aabbccdd-eeff-gghh-iijj-kkllmmnnoopp
 * call aml_touuid("aabbccdd-eeff-gghh-iijj-kkllmmnnoopp");
 */
Aml *aml_touuid(const char *uuid)
{
    Aml *var = aml_bundle(0x11 /* BufferOp */, AML_BUFFER);

    assert(strlen(uuid) == 36);
    assert(uuid[8] == '-');
    assert(uuid[13] == '-');
    assert(uuid[18] == '-');
    assert(uuid[23] == '-');

    build_append_byte(var->buf, Hex2Byte(uuid + 6));  /* dd - at offset 00 */
    build_append_byte(var->buf, Hex2Byte(uuid + 4));  /* cc - at offset 01 */
    build_append_byte(var->buf, Hex2Byte(uuid + 2));  /* bb - at offset 02 */
    build_append_byte(var->buf, Hex2Byte(uuid + 0));  /* aa - at offset 03 */

    build_append_byte(var->buf, Hex2Byte(uuid + 11)); /* ff - at offset 04 */
    build_append_byte(var->buf, Hex2Byte(uuid + 9));  /* ee - at offset 05 */

    build_append_byte(var->buf, Hex2Byte(uuid + 16)); /* hh - at offset 06 */
    build_append_byte(var->buf, Hex2Byte(uuid + 14)); /* gg - at offset 07 */

    build_append_byte(var->buf, Hex2Byte(uuid + 19)); /* ii - at offset 08 */
    build_append_byte(var->buf, Hex2Byte(uuid + 21)); /* jj - at offset 09 */

    build_append_byte(var->buf, Hex2Byte(uuid + 24)); /* kk - at offset 10 */
    build_append_byte(var->buf, Hex2Byte(uuid + 26)); /* ll - at offset 11 */
    build_append_byte(var->buf, Hex2Byte(uuid + 28)); /* mm - at offset 12 */
    build_append_byte(var->buf, Hex2Byte(uuid + 30)); /* nn - at offset 13 */
    build_append_byte(var->buf, Hex2Byte(uuid + 32)); /* oo - at offset 14 */
    build_append_byte(var->buf, Hex2Byte(uuid + 34)); /* pp - at offset 15 */

    return var;
}

/*
 * ACPI 2.0b: 16.2.3.6.4.3 Unicode Macro (Convert Ascii String To Unicode)
 */
Aml *aml_unicode(const char *str)
{
    int i = 0;
    Aml *var = aml_bundle(0x11 /* BufferOp */, AML_BUFFER);

    do {
        build_append_byte(var->buf, str[i]);
        build_append_byte(var->buf, 0);
        i++;
    } while (i <= strlen(str));

    return var;
}

void
build_header(GArray *linker, GArray *table_data,
             AcpiTableHeader *h, const char *sig, int len, uint8_t rev)
@@ -951,3 +1142,27 @@ void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre)
    g_array_free(tables->table_data, true);
    g_array_free(tables->tcpalog, mfre);
}

/* Build rsdt table */
void
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
{
    AcpiRsdtDescriptorRev1 *rsdt;
    size_t rsdt_len;
    int i;
    const int table_data_len = (sizeof(uint32_t) * table_offsets->len);

    rsdt_len = sizeof(*rsdt) + table_data_len;
    rsdt = acpi_data_push(table_data, rsdt_len);
    memcpy(rsdt->table_offset_entry, table_offsets->data, table_data_len);
    for (i = 0; i < table_offsets->len; ++i) {
        /* rsdt->table_offset_entry to be filled by Guest linker */
        bios_linker_loader_add_pointer(linker,
                                       ACPI_BUILD_TABLE_FILE,
                                       ACPI_BUILD_TABLE_FILE,
                                       table_data, &rsdt->table_offset_entry[i],
                                       sizeof(uint32_t));
    }
    build_header(linker, table_data,
                 (void *)rsdt, "RSDT", rsdt_len, 1);
}
@@ -3,6 +3,7 @@ obj-$(CONFIG_DIGIC) += digic_boards.o
obj-y += integratorcp.o kzm.o mainstone.o musicpal.o nseries.o
obj-y += omap_sx1.o palm.o realview.o spitz.o stellaris.o
obj-y += tosa.o versatilepb.o vexpress.o virt.o xilinx_zynq.o z2.o
obj-$(CONFIG_ACPI) += virt-acpi-build.o
obj-y += netduino2.o

obj-y += armv7m.o exynos4210.o pxa2xx.o pxa2xx_gpio.o pxa2xx_pic.o

hw/arm/virt-acpi-build.c (new file, mode 644)
@@ -0,0 +1,644 @@
/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010 Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu-common.h"
#include "hw/arm/virt-acpi-build.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "qom/cpu.h"
#include "target-arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/hw.h"
#include "hw/acpi/aml-build.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"

#define ARM_SPI_BASE 32

typedef struct VirtAcpiCpuInfo {
    DECLARE_BITMAP(found_cpus, VIRT_ACPI_CPU_ID_LIMIT);
} VirtAcpiCpuInfo;

static void virt_acpi_get_cpu_info(VirtAcpiCpuInfo *cpuinfo)
{
    CPUState *cpu;

    memset(cpuinfo->found_cpus, 0, sizeof cpuinfo->found_cpus);
    CPU_FOREACH(cpu) {
        set_bit(cpu->cpu_index, cpuinfo->found_cpus);
    }
}

static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
    uint16_t i;

    for (i = 0; i < smp_cpus; i++) {
        Aml *dev = aml_device("C%03x", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}

static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                               int uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, uart_irq));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

static void acpi_dsdt_add_rtc(Aml *scope, const MemMapEntry *rtc_memmap,
                              int rtc_irq)
{
    Aml *dev = aml_device("RTC0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0013")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(rtc_memmap->base,
                                       rtc_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, rtc_irq));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 int mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int irq = mmio_irq;
    int i;

    for (i = 0; i < num; i++) {
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, irq + i));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, int irq)
{
    Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
    int i, bus_no;
    hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam = memmap[VIRT_PCIE_ECAM].base;
    hwaddr size_ecam = memmap[VIRT_PCIE_ECAM].size;
    int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;

    Aml *dev = aml_device("%s", "PCI0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08")));
    aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03")));
    aml_append(dev, aml_name_decl("_SEG", aml_int(0)));
    aml_append(dev, aml_name_decl("_BBN", aml_int(0)));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_string("PCI0")));
    aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device")));

    /* Declare the PCI Routing Table. */
    Aml *rt_pkg = aml_package(nr_pcie_buses * PCI_NUM_PINS);
    for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) {
        for (i = 0; i < PCI_NUM_PINS; i++) {
            int gsi = (i + bus_no) % PCI_NUM_PINS;
            Aml *pkg = aml_package(4);
            aml_append(pkg, aml_int((bus_no << 16) | 0xFFFF));
            aml_append(pkg, aml_int(i));
            aml_append(pkg, aml_name("GSI%d", gsi));
            aml_append(pkg, aml_int(0));
            aml_append(rt_pkg, pkg);
        }
    }
    aml_append(dev, aml_name_decl("_PRT", rt_pkg));

    /* Create GSI link device */
    for (i = 0; i < PCI_NUM_PINS; i++) {
        Aml *dev_gsi = aml_device("GSI%d", i);
        aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F")));
        aml_append(dev_gsi, aml_name_decl("_UID", aml_int(0)));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, irq + i));
        aml_append(dev_gsi, aml_name_decl("_PRS", crs));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, irq + i));
        aml_append(dev_gsi, aml_name_decl("_CRS", crs));
        method = aml_method("_SRS", 1);
        aml_append(dev_gsi, method);
        aml_append(dev, dev_gsi);
    }

    method = aml_method("_CBA", 0);
    aml_append(method, aml_return(aml_int(base_ecam)));
    aml_append(dev, method);

    method = aml_method("_CRS", 0);
    Aml *rbuf = aml_resource_template();
    aml_append(rbuf,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0x0000, 0x0000, nr_pcie_buses - 1, 0x0000,
                            nr_pcie_buses));
    aml_append(rbuf,
        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio,
                         base_mmio + size_mmio - 1, 0x0000, size_mmio));
    aml_append(rbuf,
        aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                     AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio,
                     size_pio));

    aml_append(method, aml_name_decl("RBUF", rbuf));
    aml_append(method, aml_return(rbuf));
    aml_append(dev, method);

    /* Declare an _OSC (OS Control Handoff) method */
    aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
    aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
    method = aml_method("_OSC", 4);
    aml_append(method,
               aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));

    /* PCI Firmware Specification 3.0
     * 4.5.1. _OSC Interface for PCI Host Bridge Devices
     * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is
     * identified by the Universal Unique IDentifier (UUID)
     * 33DB4D5B-1FF7-401C-9657-7441C03DD766
     */
    UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    aml_append(ifctx,
               aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
    aml_append(ifctx,
               aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
    aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
    aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));
    aml_append(ifctx, aml_store(aml_and(aml_name("CTRL"), aml_int(0x1D)),
                                aml_name("CTRL")));

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x08)),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x10)),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3")));
    aml_append(ifctx, aml_return(aml_arg(3)));
    aml_append(method, ifctx);

    elsectx = aml_else();
    aml_append(elsectx, aml_store(aml_or(aml_name("CDW1"), aml_int(4)),
                                  aml_name("CDW1")));
    aml_append(elsectx, aml_return(aml_arg(3)));
    aml_append(method, elsectx);
    aml_append(dev, method);

    method = aml_method("_DSM", 4);

    /* PCI Firmware Specification 3.0
     * 4.6.1. _DSM for PCI Express Slot Information
     * The UUID in _DSM in this context is
     * {E5C937D0-3553-4D7A-9117-EA4D19C3434D}
     */
    UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0)));
    uint8_t byte_list[1] = {1};
    buf = aml_buffer(1, byte_list);
    aml_append(ifctx1, aml_return(buf));
    aml_append(ifctx, ifctx1);
    aml_append(method, ifctx);

    byte_list[0] = 0;
    buf = aml_buffer(1, byte_list);
    aml_append(method, aml_return(buf));
    aml_append(dev, method);

    Aml *dev_rp0 = aml_device("%s", "RP0");
    aml_append(dev_rp0, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, dev_rp0);
    aml_append(scope, dev);
}
/* RSDP */
static GArray *
build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
{
    AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);

    bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, 16,
                             true /* fseg memory */);

    memcpy(&rsdp->signature, "RSD PTR ", sizeof(rsdp->signature));
    memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, sizeof(rsdp->oem_id));
    rsdp->length = cpu_to_le32(sizeof(*rsdp));
    rsdp->revision = 0x02;

    /* Point to RSDT */
    rsdp->rsdt_physical_address = cpu_to_le32(rsdt);
    /* Address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_RSDP_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   rsdp_table, &rsdp->rsdt_physical_address,
                                   sizeof rsdp->rsdt_physical_address);
    rsdp->checksum = 0;
    /* Checksum to be filled by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
                                    rsdp, rsdp, sizeof *rsdp, &rsdp->checksum);

    return rsdp_table;
}

static void
build_mcfg(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
{
    AcpiTableMcfg *mcfg;
    const MemMapEntry *memmap = guest_info->memmap;
    int len = sizeof(*mcfg) + sizeof(mcfg->allocation[0]);

    mcfg = acpi_data_push(table_data, len);
    mcfg->allocation[0].address = cpu_to_le64(memmap[VIRT_PCIE_ECAM].base);

    /* Only a single allocation so no need to play with segments */
    mcfg->allocation[0].pci_segment = cpu_to_le16(0);
    mcfg->allocation[0].start_bus_number = 0;
    mcfg->allocation[0].end_bus_number = (memmap[VIRT_PCIE_ECAM].size
                                          / PCIE_MMCFG_SIZE_MIN) - 1;

    build_header(linker, table_data, (void *)mcfg, "MCFG", len, 5);
}

/* GTDT */
static void
build_gtdt(GArray *table_data, GArray *linker)
{
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* The interrupt values are the same with the device tree when adding 16 */
    gtdt->secure_el1_interrupt = ARCH_TIMER_S_EL1_IRQ + 16;
    gtdt->secure_el1_flags = ACPI_EDGE_SENSITIVE;

    gtdt->non_secure_el1_interrupt = ARCH_TIMER_NS_EL1_IRQ + 16;
    gtdt->non_secure_el1_flags = ACPI_EDGE_SENSITIVE;

    gtdt->virtual_timer_interrupt = ARCH_TIMER_VIRT_IRQ + 16;
    gtdt->virtual_timer_flags = ACPI_EDGE_SENSITIVE;

    gtdt->non_secure_el2_interrupt = ARCH_TIMER_NS_EL2_IRQ + 16;
    gtdt->non_secure_el2_flags = ACPI_EDGE_SENSITIVE;

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 5);
}

/* MADT */
static void
build_madt(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info,
           VirtAcpiCpuInfo *cpuinfo)
{
    int madt_start = table_data->len;
    const MemMapEntry *memmap = guest_info->memmap;
    AcpiMultipleApicTable *madt;
    AcpiMadtGenericDistributor *gicd;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);

    for (i = 0; i < guest_info->smp_cpus; i++) {
        AcpiMadtGenericInterrupt *gicc = acpi_data_push(table_data,
                                                        sizeof *gicc);
        gicc->type = ACPI_APIC_GENERIC_INTERRUPT;
        gicc->length = sizeof(*gicc);
        gicc->base_address = memmap[VIRT_GIC_CPU].base;
        gicc->cpu_interface_number = i;
        gicc->arm_mpidr = i;
        gicc->uid = i;
        if (test_bit(i, cpuinfo->found_cpus)) {
            gicc->flags = cpu_to_le32(ACPI_GICC_ENABLED);
        }
    }

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = memmap[VIRT_GIC_DIST].base;

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 5);
}

/* FADT */
static void
build_fadt(GArray *table_data, GArray *linker, unsigned dsdt)
{
    AcpiFadtDescriptorRev5_1 *fadt = acpi_data_push(table_data, sizeof(*fadt));

    /* Hardware Reduced = 1 and use PSCI 0.2+ and with HVC */
    fadt->flags = cpu_to_le32(1 << ACPI_FADT_F_HW_REDUCED_ACPI);
    fadt->arm_boot_flags = cpu_to_le16((1 << ACPI_FADT_ARM_USE_PSCI_G_0_2) |
                                       (1 << ACPI_FADT_ARM_PSCI_USE_HVC));

    /* ACPI v5.1 (fadt->revision.fadt->minor_revision) */
    fadt->minor_revision = 0x1;

    fadt->dsdt = cpu_to_le32(dsdt);
    /* DSDT address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->dsdt,
                                   sizeof fadt->dsdt);

    build_header(linker, table_data,
                 (void *)fadt, "FACP", sizeof(*fadt), 5);
}

/* DSDT */
static void
build_dsdt(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
{
    Aml *scope, *dsdt;
    const MemMapEntry *memmap = guest_info->memmap;
    const int *irqmap = guest_info->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, guest_info->smp_cpus);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    acpi_dsdt_add_rtc(scope, &memmap[VIRT_RTC],
                      (irqmap[VIRT_RTC] + ARM_SPI_BASE));
    acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                         (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE));

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
                 (void *)(table_data->data + table_data->len - dsdt->buf->len),
                 "DSDT", dsdt->buf->len, 5);
    free_aml_allocator();
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
    VirtGuestInfo *guest_info;
} AcpiBuildState;
static
void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
{
    GArray *table_offsets;
    unsigned dsdt, rsdt;
    VirtAcpiCpuInfo cpuinfo;
    GArray *tables_blob = tables->table_data;

    virt_acpi_get_cpu_info(&cpuinfo);

    table_offsets = g_array_new(false, true /* clear */,
                                sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE,
                             64, false /* high memory */);

    /*
     * The ACPI v5.1 tables for Hardware-reduced ACPI platform are:
     * RSDP
     * RSDT
     * FADT
     * GTDT
     * MADT
     * DSDT
     */

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, guest_info);

    /* FADT MADT GTDT pointed to by RSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt(tables_blob, tables->linker, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, guest_info, &cpuinfo);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker);

    acpi_add_table(table_offsets, tables_blob);
    build_mcfg(tables_blob, tables->linker, guest_info);

    /* RSDT is pointed to by RSDP */
    rsdt = tables_blob->len;
    build_rsdt(tables_blob, tables->linker, table_offsets);

    /* RSDP is in FSEG memory, so allocate it separately */
    build_rsdp(tables->rsdp, tables->linker, rsdt);

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}

static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}

static void virt_acpi_build_update(void *build_opaque, uint32_t offset)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(build_state->guest_info, &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker);

    acpi_build_tables_cleanup(&tables, true);
}

static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}

static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
                                       GArray *blob, const char *name,
                                       uint64_t max_size)
{
    return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
                        name, virt_acpi_build_update, build_state);
}

static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

void virt_acpi_setup(VirtGuestInfo *guest_info)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!guest_info->fw_cfg) {
        trace_virt_acpi_setup();
        return;
    }

    if (!acpi_enabled) {
        trace_virt_acpi_setup();
        return;
    }

    build_state = g_malloc0(sizeof *build_state);
    build_state->guest_info = guest_info;

    acpi_build_tables_init(&tables);
    virt_acpi_build(build_state->guest_info, &tables);

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data,
                                              ACPI_BUILD_TABLE_FILE,
                                              ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(build_state, tables.linker, "etc/table-loader", 0);

    fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
                    tables.tcpalog->data, acpi_data_len(tables.tcpalog));

    build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp,
                                             ACPI_BUILD_RSDP_FILE, 0);

    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}
@@ -31,6 +31,7 @@
#include "hw/sysbus.h"
#include "hw/arm/arm.h"
#include "hw/arm/primecell.h"
#include "hw/arm/virt.h"
#include "hw/devices.h"
#include "net/net.h"
#include "sysemu/block-backend.h"
@@ -43,8 +44,7 @@
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "hw/pci-host/gpex.h"

#define NUM_VIRTIO_TRANSPORTS 32
#include "hw/arm/virt-acpi-build.h"

/* Number of external interrupt lines to configure the GIC with */
#define NUM_IRQS 128
@@ -60,24 +60,6 @@
#define GIC_FDT_IRQ_PPI_CPU_START 8
#define GIC_FDT_IRQ_PPI_CPU_WIDTH 8

enum {
    VIRT_FLASH,
    VIRT_MEM,
    VIRT_CPUPERIPHS,
    VIRT_GIC_DIST,
    VIRT_GIC_CPU,
    VIRT_UART,
    VIRT_MMIO,
    VIRT_RTC,
    VIRT_FW_CFG,
    VIRT_PCIE,
};

typedef struct MemMapEntry {
    hwaddr base;
    hwaddr size;
} MemMapEntry;

typedef struct VirtBoardInfo {
    struct arm_boot_info bootinfo;
    const char *cpu_model;
@@ -131,14 +113,9 @@ static const MemMapEntry a15memmap[] = {
    [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
    [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
    /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
    /*
     * PCIE verbose map:
     *
     * MMIO window { 0x10000000, 0x2eff0000 },
     * PIO window { 0x3eff0000, 0x00010000 },
     * ECAM { 0x3f000000, 0x01000000 },
     */
    [VIRT_PCIE] = { 0x10000000, 0x30000000 },
    [VIRT_PCIE_MMIO] = { 0x10000000, 0x2eff0000 },
    [VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 },
    [VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
    [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
};

@@ -289,10 +266,10 @@ static void fdt_add_timer_nodes(const VirtBoardInfo *vbi)
                                "arm,armv7-timer");
    }
    qemu_fdt_setprop_cells(vbi->fdt, "/timer", "interrupts",
                           GIC_FDT_IRQ_TYPE_PPI, 13, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, 14, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, 11, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, 10, irqflags);
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_S_EL1_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL1_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_VIRT_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags);
}

static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
@@ -644,16 +621,14 @@ static void create_pcie_irq_map(const VirtBoardInfo *vbi, uint32_t gic_phandle,
static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
                        uint32_t gic_phandle)
{
    hwaddr base = vbi->memmap[VIRT_PCIE].base;
    hwaddr size = vbi->memmap[VIRT_PCIE].size;
    hwaddr end = base + size;
    hwaddr size_mmio;
    hwaddr size_ioport = 64 * 1024;
    int nr_pcie_buses = 16;
    hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN * nr_pcie_buses;
    hwaddr base_mmio = base;
    hwaddr base_ioport;
    hwaddr base_ecam;
    hwaddr base_mmio = vbi->memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = vbi->memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_pio = vbi->memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = vbi->memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam = vbi->memmap[VIRT_PCIE_ECAM].base;
    hwaddr size_ecam = vbi->memmap[VIRT_PCIE_ECAM].size;
    hwaddr base = base_mmio;
    int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
    int irq = vbi->irqmap[VIRT_PCIE];
    MemoryRegion *mmio_alias;
    MemoryRegion *mmio_reg;
@@ -663,10 +638,6 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
    char *nodename;
    int i;

    base_ecam = QEMU_ALIGN_DOWN(end - size_ecam, size_ecam);
    base_ioport = QEMU_ALIGN_DOWN(base_ecam - size_ioport, size_ioport);
    size_mmio = base_ioport - base;

    dev = qdev_create(NULL, TYPE_GPEX_HOST);
    qdev_init_nofail(dev);

@@ -689,7 +660,7 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
    memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);

    /* Map IO port space */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio);

    for (i = 0; i < GPEX_NUM_IRQS; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
@@ -709,7 +680,7 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
                                 2, base_ecam, 2, size_ecam);
    qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
                                 1, FDT_PCI_RANGE_IOPORT, 2, 0,
                                 2, base_ioport, 2, size_ioport,
                                 2, base_pio, 2, size_pio,
                                 1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
                                 2, base_mmio, 2, size_mmio);

@@ -727,6 +698,14 @@ static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
    return board->fdt;
}

static
void virt_guest_info_machine_done(Notifier *notifier, void *data)
{
    VirtGuestInfoState *guest_info_state = container_of(notifier,
                                           VirtGuestInfoState, machine_done);
    virt_acpi_setup(&guest_info_state->info);
}

static void machvirt_init(MachineState *machine)
{
    VirtMachineState *vms = VIRT_MACHINE(machine);
@@ -736,6 +715,8 @@ static void machvirt_init(MachineState *machine)
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    const char *cpu_model = machine->cpu_model;
    VirtBoardInfo *vbi;
    VirtGuestInfoState *guest_info_state = g_malloc0(sizeof *guest_info_state);
    VirtGuestInfo *guest_info = &guest_info_state->info;
    uint32_t gic_phandle;
    char **cpustr;

@@ -828,6 +809,14 @@ static void machvirt_init(MachineState *machine)
    create_virtio_devices(vbi, pic);

    create_fw_cfg(vbi);
    rom_set_fw(fw_cfg_find());

    guest_info->smp_cpus = smp_cpus;
    guest_info->fw_cfg = fw_cfg_find();
    guest_info->memmap = vbi->memmap;
    guest_info->irqmap = vbi->irqmap;
    guest_info_state->machine_done.notify = virt_guest_info_machine_done;
    qemu_add_machine_init_done_notifier(&guest_info_state->machine_done);

    vbi->bootinfo.ram_size = machine->ram_size;
    vbi->bootinfo.kernel_filename = machine->kernel_filename;
@@ -1,6 +1,6 @@
common-obj-y += core.o smbus.o smbus_eeprom.o
common-obj-$(CONFIG_VERSATILE_I2C) += versatile_i2c.o
common-obj-$(CONFIG_ACPI) += smbus_ich9.o
common-obj-$(CONFIG_ACPI_X86) += smbus_ich9.o
common-obj-$(CONFIG_APM) += pm_smbus.o
common-obj-$(CONFIG_BITBANG_I2C) += bitbang_i2c.o
common-obj-$(CONFIG_EXYNOS4) += exynos4210_i2c.o
@ -620,31 +620,31 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
/* build PCI0._CRS */
|
||||
crs = aml_resource_template();
|
||||
aml_append(crs,
|
||||
aml_word_bus_number(aml_min_fixed, aml_max_fixed, aml_pos_decode,
|
||||
aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
|
||||
0x0000, 0x0000, 0x00FF, 0x0000, 0x0100));
|
||||
aml_append(crs, aml_io(aml_decode16, 0x0CF8, 0x0CF8, 0x01, 0x08));
|
||||
aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08));
|
||||
|
||||
aml_append(crs,
|
||||
aml_word_io(aml_min_fixed, aml_max_fixed,
|
||||
aml_pos_decode, aml_entire_range,
|
||||
aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
|
||||
AML_POS_DECODE, AML_ENTIRE_RANGE,
|
||||
0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8));
|
||||
aml_append(crs,
|
||||
aml_word_io(aml_min_fixed, aml_max_fixed,
|
||||
aml_pos_decode, aml_entire_range,
|
||||
aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
|
||||
AML_POS_DECODE, AML_ENTIRE_RANGE,
|
||||
0x0000, 0x0D00, 0xFFFF, 0x0000, 0xF300));
|
||||
aml_append(crs,
|
||||
aml_dword_memory(aml_pos_decode, aml_min_fixed, aml_max_fixed,
|
||||
aml_cacheable, aml_ReadWrite,
|
||||
aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
|
||||
AML_CACHEABLE, AML_READ_WRITE,
|
||||
0, 0x000A0000, 0x000BFFFF, 0, 0x00020000));
|
||||
aml_append(crs,
|
||||
aml_dword_memory(aml_pos_decode, aml_min_fixed, aml_max_fixed,
|
||||
aml_non_cacheable, aml_ReadWrite,
|
||||
aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
|
||||
AML_NON_CACHEABLE, AML_READ_WRITE,
|
||||
0, pci->w32.begin, pci->w32.end - 1, 0,
|
||||
pci->w32.end - pci->w32.begin));
|
||||
if (pci->w64.begin) {
|
||||
aml_append(crs,
|
||||
aml_qword_memory(aml_pos_decode, aml_min_fixed, aml_max_fixed,
|
||||
aml_cacheable, aml_ReadWrite,
|
||||
aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
|
||||
AML_CACHEABLE, AML_READ_WRITE,
|
||||
0, pci->w64.begin, pci->w64.end - 1, 0,
|
||||
pci->w64.end - pci->w64.begin));
|
||||
}
|
||||
@ -658,7 +658,7 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
|
||||
crs = aml_resource_template();
|
||||
aml_append(crs,
|
||||
aml_io(aml_decode16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len)
|
||||
aml_io(AML_DECODE16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len)
|
||||
);
|
||||
aml_append(dev, aml_name_decl("_CRS", crs));
|
||||
aml_append(scope, dev);
|
||||
@ -673,7 +673,7 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
|
||||
crs = aml_resource_template();
|
||||
aml_append(crs,
|
||||
aml_io(aml_decode16, pm->pcihp_io_base, pm->pcihp_io_base, 1,
|
||||
aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1,
|
||||
pm->pcihp_io_len)
|
||||
);
|
||||
aml_append(dev, aml_name_decl("_CRS", crs));
|
||||
@ -720,7 +720,7 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
|
||||
crs = aml_resource_template();
|
||||
aml_append(crs,
|
||||
aml_io(aml_decode16, misc->applesmc_io_base, misc->applesmc_io_base,
|
||||
aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base,
|
||||
0x01, APPLESMC_MAX_DATA_LENGTH)
|
||||
);
|
||||
aml_append(crs, aml_irq_no_flags(6));
|
||||
@ -738,13 +738,13 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
|
||||
crs = aml_resource_template();
|
||||
aml_append(crs,
|
||||
aml_io(aml_decode16, misc->pvpanic_port, misc->pvpanic_port, 1, 1)
|
||||
aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1)
|
||||
);
|
||||
aml_append(dev, aml_name_decl("_CRS", crs));
|
||||
|
||||
aml_append(dev, aml_operation_region("PEOR", aml_system_io,
|
||||
aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO,
|
||||
misc->pvpanic_port, 1));
|
||||
field = aml_field("PEOR", aml_byte_acc, aml_preserve);
|
||||
field = aml_field("PEOR", AML_BYTE_ACC, AML_PRESERVE);
|
||||
aml_append(field, aml_named_field("PEPT", 8));
|
||||
aml_append(dev, field);
|
||||
|
||||
@ -773,15 +773,15 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
|
||||
crs = aml_resource_template();
|
||||
aml_append(crs,
|
||||
aml_io(aml_decode16, pm->cpu_hp_io_base, pm->cpu_hp_io_base, 1,
|
||||
aml_io(AML_DECODE16, pm->cpu_hp_io_base, pm->cpu_hp_io_base, 1,
|
||||
pm->cpu_hp_io_len)
|
||||
);
|
||||
aml_append(dev, aml_name_decl("_CRS", crs));
|
||||
aml_append(sb_scope, dev);
|
||||
/* declare CPU hotplug MMIO region and PRS field to access it */
|
||||
aml_append(sb_scope, aml_operation_region(
|
||||
"PRST", aml_system_io, pm->cpu_hp_io_base, pm->cpu_hp_io_len));
|
||||
field = aml_field("PRST", aml_byte_acc, aml_preserve);
|
||||
"PRST", AML_SYSTEM_IO, pm->cpu_hp_io_base, pm->cpu_hp_io_len));
|
||||
field = aml_field("PRST", AML_BYTE_ACC, AML_PRESERVE);
|
||||
aml_append(field, aml_named_field("PRS", 256));
|
||||
aml_append(sb_scope, field);
|
||||
|
||||
@ -845,18 +845,18 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
|
||||
crs = aml_resource_template();
|
||||
aml_append(crs,
|
||||
aml_io(aml_decode16, pm->mem_hp_io_base, pm->mem_hp_io_base, 0,
|
||||
aml_io(AML_DECODE16, pm->mem_hp_io_base, pm->mem_hp_io_base, 0,
|
||||
pm->mem_hp_io_len)
|
||||
);
|
||||
aml_append(scope, aml_name_decl("_CRS", crs));
|
||||
|
||||
aml_append(scope, aml_operation_region(
|
||||
stringify(MEMORY_HOTPLUG_IO_REGION), aml_system_io,
|
||||
stringify(MEMORY_HOTPLUG_IO_REGION), AML_SYSTEM_IO,
|
||||
pm->mem_hp_io_base, pm->mem_hp_io_len)
|
||||
);
|
||||
|
||||
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), aml_dword_acc,
|
||||
aml_preserve);
|
||||
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_DWORD_ACC,
|
||||
AML_PRESERVE);
|
||||
aml_append(field, /* read only */
|
||||
aml_named_field(stringify(MEMORY_SLOT_ADDR_LOW), 32));
|
||||
aml_append(field, /* read only */
|
||||
@ -869,8 +869,8 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
aml_named_field(stringify(MEMORY_SLOT_PROXIMITY), 32));
|
||||
aml_append(scope, field);
|
||||
|
||||
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), aml_byte_acc,
|
||||
aml_write_as_zeros);
|
||||
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_BYTE_ACC,
|
||||
AML_WRITE_AS_ZEROS);
|
||||
aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */));
|
||||
aml_append(field, /* 1 if enabled, read only */
|
||||
aml_named_field(stringify(MEMORY_SLOT_ENABLED), 1));
|
||||
@ -885,8 +885,8 @@ build_ssdt(GArray *table_data, GArray *linker,
|
||||
aml_named_field(stringify(MEMORY_SLOT_EJECT), 1));
|
||||
aml_append(scope, field);
|
||||
|
||||
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), aml_dword_acc,
|
||||
aml_preserve);
|
||||
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_DWORD_ACC,
|
||||
AML_PRESERVE);
|
||||
aml_append(field, /* DIMM selector, write only */
|
||||
aml_named_field(stringify(MEMORY_SLOT_SLECTOR), 32));
|
||||
aml_append(field, /* _OST event code, write only */
|
||||
@ -1208,30 +1208,6 @@ build_dsdt(GArray *table_data, GArray *linker, AcpiMiscInfo *misc)
|
||||
misc->dsdt_size, 1);
}

/* Build final rsdt table */
static void
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
{
AcpiRsdtDescriptorRev1 *rsdt;
size_t rsdt_len;
int i;

rsdt_len = sizeof(*rsdt) + sizeof(uint32_t) * table_offsets->len;
rsdt = acpi_data_push(table_data, rsdt_len);
memcpy(rsdt->table_offset_entry, table_offsets->data,
sizeof(uint32_t) * table_offsets->len);
for (i = 0; i < table_offsets->len; ++i) {
/* rsdt->table_offset_entry to be filled by Guest linker */
bios_linker_loader_add_pointer(linker,
ACPI_BUILD_TABLE_FILE,
ACPI_BUILD_TABLE_FILE,
table_data, &rsdt->table_offset_entry[i],
sizeof(uint32_t));
}
build_header(linker, table_data,
(void *)rsdt, "RSDT", rsdt_len, 1);
}

static GArray *
build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
{
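The SSDT hunks above only rename the access/update/region constants to their upper-case forms; the aml_* call pattern is unchanged. As a rough sketch of that pattern (the "STAT"/"STS0" names and the io_base/io_len arguments are placeholders, not values from this series), declaring an I/O operation region with one named field looks like:

    /* sketch only, assuming the hw/acpi/aml-build.h API shown in this series */
    static void add_status_region(Aml *scope, uint32_t io_base, uint32_t io_len)
    {
        Aml *field;

        aml_append(scope, aml_operation_region("STAT", AML_SYSTEM_IO,
                                               io_base, io_len));
        field = aml_field("STAT", AML_DWORD_ACC, AML_PRESERVE);
        aml_append(field, aml_named_field("STS0", 32)); /* 32-bit status word */
        aml_append(field, aml_reserved_field(32));      /* skip the next 32 bits */
        aml_append(scope, field);
    }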
@@ -88,46 +88,54 @@ struct AcpiTableHeader /* ACPI common table header */
|
||||
typedef struct AcpiTableHeader AcpiTableHeader;
|
||||
|
||||
/*
|
||||
* ACPI 1.0 Fixed ACPI Description Table (FADT)
|
||||
* ACPI Fixed ACPI Description Table (FADT)
|
||||
*/
|
||||
#define ACPI_FADT_COMMON_DEF /* FADT common definition */ \
|
||||
ACPI_TABLE_HEADER_DEF /* ACPI common table header */ \
|
||||
uint32_t firmware_ctrl; /* Physical address of FACS */ \
|
||||
uint32_t dsdt; /* Physical address of DSDT */ \
|
||||
uint8_t model; /* System Interrupt Model */ \
|
||||
uint8_t reserved1; /* Reserved */ \
|
||||
uint16_t sci_int; /* System vector of SCI interrupt */ \
|
||||
uint32_t smi_cmd; /* Port address of SMI command port */ \
|
||||
uint8_t acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \
|
||||
uint8_t acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \
|
||||
/* Value to write to SMI CMD to enter S4BIOS state */ \
|
||||
uint8_t S4bios_req; \
|
||||
uint8_t reserved2; /* Reserved - must be zero */ \
|
||||
/* Port address of Power Mgt 1a acpi_event Reg Blk */ \
|
||||
uint32_t pm1a_evt_blk; \
|
||||
/* Port address of Power Mgt 1b acpi_event Reg Blk */ \
|
||||
uint32_t pm1b_evt_blk; \
|
||||
uint32_t pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \
|
||||
uint32_t pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \
|
||||
uint32_t pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \
|
||||
uint32_t pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \
|
||||
/* Port addr of General Purpose acpi_event 0 Reg Blk */ \
|
||||
uint32_t gpe0_blk; \
|
||||
/* Port addr of General Purpose acpi_event 1 Reg Blk */ \
|
||||
uint32_t gpe1_blk; \
|
||||
uint8_t pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */ \
|
||||
uint8_t pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */ \
|
||||
uint8_t pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \
|
||||
uint8_t pm_tmr_len; /* Byte Length of ports at pm_tm_blk */ \
|
||||
uint8_t gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \
|
||||
uint8_t gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \
|
||||
uint8_t gpe1_base; /* Offset in gpe model where gpe1 events start */ \
|
||||
uint8_t reserved3; /* Reserved */ \
|
||||
uint16_t plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \
|
||||
uint16_t plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \
|
||||
uint16_t flush_size; /* Size of area read to flush caches */ \
|
||||
uint16_t flush_stride; /* Stride used in flushing caches */ \
|
||||
uint8_t duty_offset; /* Bit location of duty cycle field in p_cnt reg */ \
|
||||
uint8_t duty_width; /* Bit width of duty cycle field in p_cnt reg */ \
|
||||
uint8_t day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \
|
||||
uint8_t mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \
|
||||
uint8_t century; /* Index to century in RTC CMOS RAM */
|
||||
|
||||
struct AcpiFadtDescriptorRev1
|
||||
{
|
||||
ACPI_TABLE_HEADER_DEF /* ACPI common table header */
|
||||
uint32_t firmware_ctrl; /* Physical address of FACS */
|
||||
uint32_t dsdt; /* Physical address of DSDT */
|
||||
uint8_t model; /* System Interrupt Model */
|
||||
uint8_t reserved1; /* Reserved */
|
||||
uint16_t sci_int; /* System vector of SCI interrupt */
|
||||
uint32_t smi_cmd; /* Port address of SMI command port */
|
||||
uint8_t acpi_enable; /* Value to write to smi_cmd to enable ACPI */
|
||||
uint8_t acpi_disable; /* Value to write to smi_cmd to disable ACPI */
|
||||
uint8_t S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
|
||||
uint8_t reserved2; /* Reserved - must be zero */
|
||||
uint32_t pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */
|
||||
uint32_t pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */
|
||||
uint32_t pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
|
||||
uint32_t pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
|
||||
uint32_t pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
|
||||
uint32_t pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
|
||||
uint32_t gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */
|
||||
uint32_t gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */
|
||||
uint8_t pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */
|
||||
uint8_t pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */
|
||||
uint8_t pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
|
||||
uint8_t pm_tmr_len; /* Byte Length of ports at pm_tm_blk */
|
||||
uint8_t gpe0_blk_len; /* Byte Length of ports at gpe0_blk */
|
||||
uint8_t gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
|
||||
uint8_t gpe1_base; /* Offset in gpe model where gpe1 events start */
|
||||
uint8_t reserved3; /* Reserved */
|
||||
uint16_t plvl2_lat; /* Worst case HW latency to enter/exit C2 state */
|
||||
uint16_t plvl3_lat; /* Worst case HW latency to enter/exit C3 state */
|
||||
uint16_t flush_size; /* Size of area read to flush caches */
|
||||
uint16_t flush_stride; /* Stride used in flushing caches */
|
||||
uint8_t duty_offset; /* Bit location of duty cycle field in p_cnt reg */
|
||||
uint8_t duty_width; /* Bit width of duty cycle field in p_cnt reg */
|
||||
uint8_t day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */
|
||||
uint8_t mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */
|
||||
uint8_t century; /* Index to century in RTC CMOS RAM */
|
||||
ACPI_FADT_COMMON_DEF
|
||||
uint8_t reserved4; /* Reserved */
|
||||
uint8_t reserved4a; /* Reserved */
|
||||
uint8_t reserved4b; /* Reserved */
|
||||
@ -135,6 +143,59 @@ struct AcpiFadtDescriptorRev1
|
||||
} QEMU_PACKED;
|
||||
typedef struct AcpiFadtDescriptorRev1 AcpiFadtDescriptorRev1;
|
||||
|
||||
struct AcpiGenericAddress {
|
||||
uint8_t space_id; /* Address space where struct or register exists */
|
||||
uint8_t bit_width; /* Size in bits of given register */
|
||||
uint8_t bit_offset; /* Bit offset within the register */
|
||||
uint8_t access_width; /* Minimum Access size (ACPI 3.0) */
|
||||
uint64_t address; /* 64-bit address of struct or register */
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct AcpiFadtDescriptorRev5_1 {
|
||||
ACPI_FADT_COMMON_DEF
|
||||
/* IA-PC Boot Architecture Flags (see below for individual flags) */
|
||||
uint16_t boot_flags;
|
||||
uint8_t reserved; /* Reserved, must be zero */
|
||||
/* Miscellaneous flag bits (see below for individual flags) */
|
||||
uint32_t flags;
|
||||
/* 64-bit address of the Reset register */
|
||||
struct AcpiGenericAddress reset_register;
|
||||
/* Value to write to the reset_register port to reset the system */
|
||||
uint8_t reset_value;
|
||||
/* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */
|
||||
uint16_t arm_boot_flags;
|
||||
uint8_t minor_revision; /* FADT Minor Revision (ACPI 5.1) */
|
||||
uint64_t Xfacs; /* 64-bit physical address of FACS */
|
||||
uint64_t Xdsdt; /* 64-bit physical address of DSDT */
|
||||
/* 64-bit Extended Power Mgt 1a Event Reg Blk address */
|
||||
struct AcpiGenericAddress xpm1a_event_block;
|
||||
/* 64-bit Extended Power Mgt 1b Event Reg Blk address */
|
||||
struct AcpiGenericAddress xpm1b_event_block;
|
||||
/* 64-bit Extended Power Mgt 1a Control Reg Blk address */
|
||||
struct AcpiGenericAddress xpm1a_control_block;
|
||||
/* 64-bit Extended Power Mgt 1b Control Reg Blk address */
|
||||
struct AcpiGenericAddress xpm1b_control_block;
|
||||
/* 64-bit Extended Power Mgt 2 Control Reg Blk address */
|
||||
struct AcpiGenericAddress xpm2_control_block;
|
||||
/* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
|
||||
struct AcpiGenericAddress xpm_timer_block;
|
||||
/* 64-bit Extended General Purpose Event 0 Reg Blk address */
|
||||
struct AcpiGenericAddress xgpe0_block;
|
||||
/* 64-bit Extended General Purpose Event 1 Reg Blk address */
|
||||
struct AcpiGenericAddress xgpe1_block;
|
||||
/* 64-bit Sleep Control register (ACPI 5.0) */
|
||||
struct AcpiGenericAddress sleep_control;
|
||||
/* 64-bit Sleep Status register (ACPI 5.0) */
|
||||
struct AcpiGenericAddress sleep_status;
|
||||
} QEMU_PACKED;
|
||||
|
||||
typedef struct AcpiFadtDescriptorRev5_1 AcpiFadtDescriptorRev5_1;

enum {
ACPI_FADT_ARM_USE_PSCI_G_0_2 = 0,
ACPI_FADT_ARM_PSCI_USE_HVC = 1,
};

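These ARM boot-flag values are bit positions in the rev-5.1 FADT's arm_boot_flags word. A hypothetical sketch of filling them in for a PSCI 0.2 guest that uses HVC (table_data and linker are assumed to be in scope, following the acpi_data_push()/build_header() helpers used elsewhere in this series; the minor-revision value is an assumption):

    AcpiFadtDescriptorRev5_1 *fadt = acpi_data_push(table_data, sizeof(*fadt));

    fadt->arm_boot_flags = cpu_to_le16((1 << ACPI_FADT_ARM_USE_PSCI_G_0_2) |
                                       (1 << ACPI_FADT_ARM_PSCI_USE_HVC));
    fadt->minor_revision = 0x1;   /* assumed: FADT minor revision for ACPI 5.1 */
    build_header(linker, table_data, (void *)fadt, "FACP", sizeof(*fadt), 5);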
/*
|
||||
* ACPI 1.0 Root System Description Table (RSDT)
|
||||
*/
|
||||
@ -195,7 +256,13 @@ typedef struct AcpiMultipleApicTable AcpiMultipleApicTable;
|
||||
#define ACPI_APIC_IO_SAPIC 6
|
||||
#define ACPI_APIC_LOCAL_SAPIC 7
|
||||
#define ACPI_APIC_XRUPT_SOURCE 8
|
||||
#define ACPI_APIC_RESERVED 9 /* 9 and greater are reserved */
|
||||
#define ACPI_APIC_LOCAL_X2APIC 9
|
||||
#define ACPI_APIC_LOCAL_X2APIC_NMI 10
|
||||
#define ACPI_APIC_GENERIC_INTERRUPT 11
|
||||
#define ACPI_APIC_GENERIC_DISTRIBUTOR 12
|
||||
#define ACPI_APIC_GENERIC_MSI_FRAME 13
|
||||
#define ACPI_APIC_GENERIC_REDISTRIBUTOR 14
|
||||
#define ACPI_APIC_RESERVED 15 /* 15 and greater are reserved */
|
||||
|
||||
/*
|
||||
* MADT sub-structures (Follow MULTIPLE_APIC_DESCRIPTION_TABLE)
|
||||
@ -243,6 +310,73 @@ struct AcpiMadtLocalNmi {
|
||||
} QEMU_PACKED;
|
||||
typedef struct AcpiMadtLocalNmi AcpiMadtLocalNmi;
|
||||
|
||||
struct AcpiMadtGenericInterrupt {
|
||||
ACPI_SUB_HEADER_DEF
|
||||
uint16_t reserved;
|
||||
uint32_t cpu_interface_number;
|
||||
uint32_t uid;
|
||||
uint32_t flags;
|
||||
uint32_t parking_version;
|
||||
uint32_t performance_interrupt;
|
||||
uint64_t parked_address;
|
||||
uint64_t base_address;
|
||||
uint64_t gicv_base_address;
|
||||
uint64_t gich_base_address;
|
||||
uint32_t vgic_interrupt;
|
||||
uint64_t gicr_base_address;
|
||||
uint64_t arm_mpidr;
|
||||
} QEMU_PACKED;
|
||||
|
||||
typedef struct AcpiMadtGenericInterrupt AcpiMadtGenericInterrupt;

struct AcpiMadtGenericDistributor {
ACPI_SUB_HEADER_DEF
uint16_t reserved;
uint32_t gic_id;
uint64_t base_address;
uint32_t global_irq_base;
uint32_t reserved2;
} QEMU_PACKED;

typedef struct AcpiMadtGenericDistributor AcpiMadtGenericDistributor;

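The GICC and GICD sub-structures above are the MADT entries the virt ACPI builder has to emit. A rough sketch of appending one GICC entry per vCPU (guest_info and table_data are assumed in scope; ACPI_SUB_HEADER_DEF is assumed to contribute the usual type/length bytes; using the CPU index as the MPIDR is an illustration, not this series' code):

    int i;

    for (i = 0; i < guest_info->smp_cpus; i++) {
        AcpiMadtGenericInterrupt *gicc = acpi_data_push(table_data,
                                                        sizeof(*gicc));
        gicc->type = ACPI_APIC_GENERIC_INTERRUPT;
        gicc->length = sizeof(*gicc);
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_GICC_ENABLED);
        gicc->arm_mpidr = cpu_to_le64(i);  /* illustrative MPIDR value */
    }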
/*
|
||||
* Generic Timer Description Table (GTDT)
|
||||
*/
|
||||
|
||||
#define ACPI_GTDT_INTERRUPT_MODE (1 << 0)
|
||||
#define ACPI_GTDT_INTERRUPT_POLARITY (1 << 1)
|
||||
#define ACPI_GTDT_ALWAYS_ON (1 << 2)
|
||||
|
||||
/* Triggering */
|
||||
|
||||
#define ACPI_LEVEL_SENSITIVE ((uint8_t) 0x00)
|
||||
#define ACPI_EDGE_SENSITIVE ((uint8_t) 0x01)
|
||||
|
||||
/* Polarity */
|
||||
|
||||
#define ACPI_ACTIVE_HIGH ((uint8_t) 0x00)
|
||||
#define ACPI_ACTIVE_LOW ((uint8_t) 0x01)
|
||||
#define ACPI_ACTIVE_BOTH ((uint8_t) 0x02)
|
||||
|
||||
struct AcpiGenericTimerTable {
|
||||
ACPI_TABLE_HEADER_DEF
|
||||
uint64_t counter_block_addresss;
|
||||
uint32_t reserved;
|
||||
uint32_t secure_el1_interrupt;
|
||||
uint32_t secure_el1_flags;
|
||||
uint32_t non_secure_el1_interrupt;
|
||||
uint32_t non_secure_el1_flags;
|
||||
uint32_t virtual_timer_interrupt;
|
||||
uint32_t virtual_timer_flags;
|
||||
uint32_t non_secure_el2_interrupt;
|
||||
uint32_t non_secure_el2_flags;
|
||||
uint64_t counter_read_block_address;
|
||||
uint32_t platform_timer_count;
|
||||
uint32_t platform_timer_offset;
|
||||
} QEMU_PACKED;
|
||||
typedef struct AcpiGenericTimerTable AcpiGenericTimerTable;
|
||||
|
||||
/*
|
||||
* HPET Description Table
|
||||
*/
|
||||
|
@ -36,49 +36,49 @@ struct Aml {
|
||||
typedef struct Aml Aml;
|
||||
|
||||
typedef enum {
|
||||
aml_decode10 = 0,
|
||||
aml_decode16 = 1,
|
||||
AML_DECODE10 = 0,
|
||||
AML_DECODE16 = 1,
|
||||
} AmlIODecode;
|
||||
|
||||
typedef enum {
|
||||
aml_any_acc = 0,
|
||||
aml_byte_acc = 1,
|
||||
aml_word_acc = 2,
|
||||
aml_dword_acc = 3,
|
||||
aml_qword_acc = 4,
|
||||
aml_buffer_acc = 5,
|
||||
AML_ANY_ACC = 0,
|
||||
AML_BYTE_ACC = 1,
|
||||
AML_WORD_ACC = 2,
|
||||
AML_DWORD_ACC = 3,
|
||||
AML_QWORD_ACC = 4,
|
||||
AML_BUFFER_ACC = 5,
|
||||
} AmlAccessType;
|
||||
|
||||
typedef enum {
|
||||
aml_preserve = 0,
|
||||
aml_write_as_ones = 1,
|
||||
aml_write_as_zeros = 2,
|
||||
AML_PRESERVE = 0,
|
||||
AML_WRITE_AS_ONES = 1,
|
||||
AML_WRITE_AS_ZEROS = 2,
|
||||
} AmlUpdateRule;
|
||||
|
||||
typedef enum {
|
||||
aml_system_memory = 0x00,
|
||||
aml_system_io = 0x01,
|
||||
AML_SYSTEM_MEMORY = 0X00,
|
||||
AML_SYSTEM_IO = 0X01,
|
||||
} AmlRegionSpace;
|
||||
|
||||
typedef enum {
|
||||
aml_memory_range = 0,
|
||||
aml_io_range = 1,
|
||||
aml_bus_number_range = 2,
|
||||
AML_MEMORY_RANGE = 0,
|
||||
AML_IO_RANGE = 1,
|
||||
AML_BUS_NUMBER_RANGE = 2,
|
||||
} AmlResourceType;
|
||||
|
||||
typedef enum {
|
||||
aml_sub_decode = 1 << 1,
|
||||
aml_pos_decode = 0
|
||||
AML_SUB_DECODE = 1 << 1,
|
||||
AML_POS_DECODE = 0
|
||||
} AmlDecode;
|
||||
|
||||
typedef enum {
|
||||
aml_max_fixed = 1 << 3,
|
||||
aml_max_not_fixed = 0,
|
||||
AML_MAX_FIXED = 1 << 3,
|
||||
AML_MAX_NOT_FIXED = 0,
|
||||
} AmlMaxFixed;
|
||||
|
||||
typedef enum {
|
||||
aml_min_fixed = 1 << 2,
|
||||
aml_min_not_fixed = 0
|
||||
AML_MIN_FIXED = 1 << 2,
|
||||
AML_MIN_NOT_FIXED = 0
|
||||
} AmlMinFixed;
|
||||
|
||||
/*
|
||||
@ -86,9 +86,9 @@ typedef enum {
|
||||
* _RNG field definition
|
||||
*/
|
||||
typedef enum {
|
||||
aml_isa_only = 1,
|
||||
aml_non_isa_only = 2,
|
||||
aml_entire_range = 3,
|
||||
AML_ISA_ONLY = 1,
|
||||
AML_NON_ISA_ONLY = 2,
|
||||
AML_ENTIRE_RANGE = 3,
|
||||
} AmlISARanges;
|
||||
|
||||
/*
|
||||
@ -96,21 +96,59 @@ typedef enum {
|
||||
* _MEM field definition
|
||||
*/
|
||||
typedef enum {
|
||||
aml_non_cacheable = 0,
|
||||
aml_cacheable = 1,
|
||||
aml_write_combining = 2,
|
||||
aml_prefetchable = 3,
|
||||
} AmlCacheble;
|
||||
AML_NON_CACHEABLE = 0,
|
||||
AML_CACHEABLE = 1,
|
||||
AML_WRITE_COMBINING = 2,
|
||||
AML_PREFETCHABLE = 3,
|
||||
} AmlCacheable;
|
||||
|
||||
/*
|
||||
* ACPI 1.0b: Table 6-25 Memory Resource Flag (Resource Type = 0) Definitions
|
||||
* _RW field definition
|
||||
*/
|
||||
typedef enum {
|
||||
aml_ReadOnly = 0,
|
||||
aml_ReadWrite = 1,
|
||||
AML_READ_ONLY = 0,
|
||||
AML_READ_WRITE = 1,
|
||||
} AmlReadAndWrite;
|
||||
|
||||
/*
|
||||
* ACPI 5.0: Table 6-187 Extended Interrupt Descriptor Definition
|
||||
* Interrupt Vector Flags Bits[0] Consumer/Producer
|
||||
*/
|
||||
typedef enum {
|
||||
AML_CONSUMER_PRODUCER = 0,
|
||||
AML_CONSUMER = 1,
|
||||
} AmlConsumerAndProducer;
|
||||
|
||||
/*
|
||||
* ACPI 5.0: Table 6-187 Extended Interrupt Descriptor Definition
|
||||
* _HE field definition
|
||||
*/
|
||||
typedef enum {
|
||||
AML_LEVEL = 0,
|
||||
AML_EDGE = 1,
|
||||
} AmlLevelAndEdge;
|
||||
|
||||
/*
|
||||
* ACPI 5.0: Table 6-187 Extended Interrupt Descriptor Definition
|
||||
* _LL field definition
|
||||
*/
|
||||
typedef enum {
|
||||
AML_ACTIVE_HIGH = 0,
|
||||
AML_ACTIVE_LOW = 1,
|
||||
} AmlActiveHighAndLow;
|
||||
|
||||
/*
|
||||
* ACPI 5.0: Table 6-187 Extended Interrupt Descriptor Definition
|
||||
* _SHR field definition
|
||||
*/
|
||||
typedef enum {
|
||||
AML_EXCLUSIVE = 0,
|
||||
AML_SHARED = 1,
|
||||
AML_EXCLUSIVE_AND_WAKE = 2,
|
||||
AML_SHARED_AND_WAKE = 3,
|
||||
} AmlShared;
|
||||
|
||||
typedef
|
||||
struct AcpiBuildTables {
|
||||
GArray *table_data;
|
||||
@ -163,11 +201,18 @@ Aml *aml_int(const uint64_t val);
|
||||
Aml *aml_arg(int pos);
|
||||
Aml *aml_store(Aml *val, Aml *target);
|
||||
Aml *aml_and(Aml *arg1, Aml *arg2);
|
||||
Aml *aml_or(Aml *arg1, Aml *arg2);
|
||||
Aml *aml_notify(Aml *arg1, Aml *arg2);
|
||||
Aml *aml_call1(const char *method, Aml *arg1);
|
||||
Aml *aml_call2(const char *method, Aml *arg1, Aml *arg2);
|
||||
Aml *aml_call3(const char *method, Aml *arg1, Aml *arg2, Aml *arg3);
|
||||
Aml *aml_call4(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4);
|
||||
Aml *aml_memory32_fixed(uint32_t addr, uint32_t size,
|
||||
AmlReadAndWrite read_and_write);
|
||||
Aml *aml_interrupt(AmlConsumerAndProducer con_and_pro,
|
||||
AmlLevelAndEdge level_and_edge,
|
||||
AmlActiveHighAndLow high_and_low, AmlShared shared,
|
||||
uint32_t irq);
|
||||
Aml *aml_io(AmlIODecode dec, uint16_t min_base, uint16_t max_base,
|
||||
uint8_t aln, uint8_t len);
|
||||
Aml *aml_operation_region(const char *name, AmlRegionSpace rs,
|
||||
@ -177,6 +222,7 @@ Aml *aml_named_field(const char *name, unsigned length);
|
||||
Aml *aml_reserved_field(unsigned length);
|
||||
Aml *aml_local(int num);
|
||||
Aml *aml_string(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
|
||||
Aml *aml_lnot(Aml *arg);
|
||||
Aml *aml_equal(Aml *arg1, Aml *arg2);
|
||||
Aml *aml_processor(uint8_t proc_id, uint32_t pblk_addr, uint8_t pblk_len,
|
||||
const char *name_format, ...) GCC_FMT_ATTR(4, 5);
|
||||
@ -190,14 +236,19 @@ Aml *aml_word_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed,
|
||||
uint16_t addr_gran, uint16_t addr_min,
|
||||
uint16_t addr_max, uint16_t addr_trans,
|
||||
uint16_t len);
|
||||
Aml *aml_dword_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed,
|
||||
AmlDecode dec, AmlISARanges isa_ranges,
|
||||
uint32_t addr_gran, uint32_t addr_min,
|
||||
uint32_t addr_max, uint32_t addr_trans,
|
||||
uint32_t len);
|
||||
Aml *aml_dword_memory(AmlDecode dec, AmlMinFixed min_fixed,
|
||||
AmlMaxFixed max_fixed, AmlCacheble cacheable,
|
||||
AmlMaxFixed max_fixed, AmlCacheable cacheable,
|
||||
AmlReadAndWrite read_and_write,
|
||||
uint32_t addr_gran, uint32_t addr_min,
|
||||
uint32_t addr_max, uint32_t addr_trans,
|
||||
uint32_t len);
|
||||
Aml *aml_qword_memory(AmlDecode dec, AmlMinFixed min_fixed,
|
||||
AmlMaxFixed max_fixed, AmlCacheble cacheable,
|
||||
AmlMaxFixed max_fixed, AmlCacheable cacheable,
|
||||
AmlReadAndWrite read_and_write,
|
||||
uint64_t addr_gran, uint64_t addr_min,
|
||||
uint64_t addr_max, uint64_t addr_trans,
|
||||
@ -208,11 +259,15 @@ Aml *aml_scope(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
|
||||
Aml *aml_device(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
|
||||
Aml *aml_method(const char *name, int arg_count);
|
||||
Aml *aml_if(Aml *predicate);
|
||||
Aml *aml_else(void);
|
||||
Aml *aml_package(uint8_t num_elements);
|
||||
Aml *aml_buffer(void);
|
||||
Aml *aml_buffer(int buffer_size, uint8_t *byte_list);
|
||||
Aml *aml_resource_template(void);
|
||||
Aml *aml_field(const char *name, AmlAccessType type, AmlUpdateRule rule);
|
||||
Aml *aml_create_dword_field(Aml *srcbuf, Aml *index, const char *name);
|
||||
Aml *aml_varpackage(uint32_t num_elements);
|
||||
Aml *aml_touuid(const char *uuid);
|
||||
Aml *aml_unicode(const char *str);
|
||||
|
||||
void
|
||||
build_header(GArray *linker, GArray *table_data,
|
||||
@ -222,5 +277,7 @@ unsigned acpi_data_len(GArray *table);
|
||||
void acpi_add_table(GArray *table_offsets, GArray *table_data);
|
||||
void acpi_build_tables_init(AcpiBuildTables *tables);
|
||||
void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre);
|
||||
void
|
||||
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets);
|
||||
|
||||
#endif
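Together, the interrupt-descriptor enums and the prototypes above are enough to describe a simple memory-mapped device in AML. A hedged usage sketch (the device name, _HID string, base address, size and SPI number are placeholders; scope is assumed to be an existing Aml node):

    Aml *dev = aml_device("VR00");
    Aml *crs = aml_resource_template();

    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
    aml_append(crs, aml_memory32_fixed(0x0a000000, 0x200, AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, 48 /* placeholder SPI */));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);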
|
||||
|
44 include/hw/arm/virt-acpi-build.h Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
*
|
||||
* Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
|
||||
*
|
||||
* Author: Shannon Zhao <zhaoshenglong@huawei.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2 or later, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef QEMU_VIRT_ACPI_BUILD_H
|
||||
#define QEMU_VIRT_ACPI_BUILD_H
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "hw/arm/virt.h"
|
||||
|
||||
#define VIRT_ACPI_CPU_ID_LIMIT 8
|
||||
#define ACPI_GICC_ENABLED 1
|
||||
|
||||
typedef struct VirtGuestInfo {
int smp_cpus;
FWCfgState *fw_cfg;
const MemMapEntry *memmap;
const int *irqmap;
} VirtGuestInfo;

typedef struct VirtGuestInfoState {
VirtGuestInfo info;
Notifier machine_done;
} VirtGuestInfoState;

void virt_acpi_setup(VirtGuestInfo *guest_info);

#endif
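VirtGuestInfoState pairs the guest description with a machine-init-done Notifier, which suggests the usual QEMU pattern of deferring table generation until the machine is fully built. A hypothetical sketch of wiring it up (the callback name is illustrative, and guest_info_state is assumed to have been allocated and filled by the board code):

    static void virt_guest_info_machine_done(Notifier *notifier, void *data)
    {
        VirtGuestInfoState *state =
            container_of(notifier, VirtGuestInfoState, machine_done);

        virt_acpi_setup(&state->info);
    }

    /* during board init */
    guest_info_state->machine_done.notify = virt_guest_info_machine_done;
    qemu_add_machine_init_done_notifier(&guest_info_state->machine_done);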
|
64 include/hw/arm/virt.h Normal file
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
*
|
||||
* Copyright (c) 2015 Linaro Limited
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2 or later, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
* Emulate a virtual board which works by passing Linux all the information
|
||||
* it needs about what devices are present via the device tree.
|
||||
* There are some restrictions about what we can do here:
|
||||
* + we can only present devices whose Linux drivers will work based
|
||||
* purely on the device tree with no platform data at all
|
||||
* + we want to present a very stripped-down minimalist platform,
|
||||
* both because this reduces the security attack surface from the guest
|
||||
* and also because it reduces our exposure to being broken when
|
||||
* the kernel updates its device tree bindings and requires further
|
||||
* information in a device binding that we aren't providing.
|
||||
* This is essentially the same approach kvmtool uses.
|
||||
*/
|
||||
|
||||
#ifndef QEMU_ARM_VIRT_H
|
||||
#define QEMU_ARM_VIRT_H
|
||||
|
||||
#include "qemu-common.h"
|
||||
|
||||
#define NUM_VIRTIO_TRANSPORTS 32

#define ARCH_TIMER_VIRT_IRQ 11
#define ARCH_TIMER_S_EL1_IRQ 13
#define ARCH_TIMER_NS_EL1_IRQ 14
#define ARCH_TIMER_NS_EL2_IRQ 10

enum {
VIRT_FLASH,
VIRT_MEM,
VIRT_CPUPERIPHS,
VIRT_GIC_DIST,
VIRT_GIC_CPU,
VIRT_UART,
VIRT_MMIO,
VIRT_RTC,
VIRT_FW_CFG,
VIRT_PCIE,
VIRT_PCIE_MMIO,
VIRT_PCIE_PIO,
VIRT_PCIE_ECAM,
};

typedef struct MemMapEntry {
hwaddr base;
hwaddr size;
} MemMapEntry;

#endif
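virt.h only publishes the region indices and the MemMapEntry shape; the board code supplies the actual tables. A hypothetical example of what such tables could look like (all addresses and IRQ numbers here are illustrative, not taken from this series):

    static const MemMapEntry memmap[] = {
        /* index                base        size  -- illustrative values */
        [VIRT_GIC_DIST] =   { 0x08000000, 0x00010000 },
        [VIRT_GIC_CPU] =    { 0x08010000, 0x00010000 },
        [VIRT_UART] =       { 0x09000000, 0x00001000 },
        [VIRT_RTC] =        { 0x09010000, 0x00001000 },
    };

    static const int irqmap[] = {
        [VIRT_UART] = 1,   /* SPI number; the GIC sees it as interrupt 32 + 1 */
        [VIRT_RTC] = 2,
    };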
|
@@ -1352,7 +1352,7 @@ be needed to boot from old floppy disks.
ETEXI

DEF("no-acpi", 0, QEMU_OPTION_no_acpi,
"-no-acpi disable ACPI\n", QEMU_ARCH_I386)
"-no-acpi disable ACPI\n", QEMU_ARCH_I386 | QEMU_ARCH_ARM)
STEXI
@item -no-acpi
@findex -no-acpi
@@ -206,32 +206,53 @@ static void arm_cpu_reset(CPUState *s)
|
||||
bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cs);
|
||||
CPUARMState *env = cs->env_ptr;
|
||||
uint32_t cur_el = arm_current_el(env);
|
||||
bool secure = arm_is_secure(env);
|
||||
uint32_t target_el;
|
||||
uint32_t excp_idx;
|
||||
bool ret = false;
|
||||
|
||||
if (interrupt_request & CPU_INTERRUPT_FIQ
|
||||
&& arm_excp_unmasked(cs, EXCP_FIQ)) {
|
||||
cs->exception_index = EXCP_FIQ;
|
||||
if (interrupt_request & CPU_INTERRUPT_FIQ) {
|
||||
excp_idx = EXCP_FIQ;
|
||||
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
|
||||
if (arm_excp_unmasked(cs, excp_idx, target_el)) {
|
||||
cs->exception_index = excp_idx;
|
||||
env->exception.target_el = target_el;
|
||||
cc->do_interrupt(cs);
|
||||
ret = true;
|
||||
}
|
||||
if (interrupt_request & CPU_INTERRUPT_HARD
|
||||
&& arm_excp_unmasked(cs, EXCP_IRQ)) {
|
||||
cs->exception_index = EXCP_IRQ;
|
||||
}
|
||||
if (interrupt_request & CPU_INTERRUPT_HARD) {
|
||||
excp_idx = EXCP_IRQ;
|
||||
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
|
||||
if (arm_excp_unmasked(cs, excp_idx, target_el)) {
|
||||
cs->exception_index = excp_idx;
|
||||
env->exception.target_el = target_el;
|
||||
cc->do_interrupt(cs);
|
||||
ret = true;
|
||||
}
|
||||
if (interrupt_request & CPU_INTERRUPT_VIRQ
|
||||
&& arm_excp_unmasked(cs, EXCP_VIRQ)) {
|
||||
cs->exception_index = EXCP_VIRQ;
|
||||
}
|
||||
if (interrupt_request & CPU_INTERRUPT_VIRQ) {
|
||||
excp_idx = EXCP_VIRQ;
|
||||
target_el = 1;
|
||||
if (arm_excp_unmasked(cs, excp_idx, target_el)) {
|
||||
cs->exception_index = excp_idx;
|
||||
env->exception.target_el = target_el;
|
||||
cc->do_interrupt(cs);
|
||||
ret = true;
|
||||
}
|
||||
if (interrupt_request & CPU_INTERRUPT_VFIQ
|
||||
&& arm_excp_unmasked(cs, EXCP_VFIQ)) {
|
||||
cs->exception_index = EXCP_VFIQ;
|
||||
}
|
||||
if (interrupt_request & CPU_INTERRUPT_VFIQ) {
|
||||
excp_idx = EXCP_VFIQ;
|
||||
target_el = 1;
|
||||
if (arm_excp_unmasked(cs, excp_idx, target_el)) {
|
||||
cs->exception_index = excp_idx;
|
||||
env->exception.target_el = target_el;
|
||||
cc->do_interrupt(cs);
|
||||
ret = true;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
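All four interrupt kinds above now follow the same three-step sequence: compute the target EL, test the mask against that EL, then record target_el before delivering. A hedged sketch of how that shared pattern could be factored into one helper (this function is illustrative, not part of the patch):

    static bool arm_try_take_irq(CPUState *cs, uint32_t excp_idx,
                                 uint32_t target_el)
    {
        CPUClass *cc = CPU_GET_CLASS(cs);
        CPUARMState *env = cs->env_ptr;

        if (!arm_excp_unmasked(cs, excp_idx, target_el)) {
            return false;
        }
        cs->exception_index = excp_idx;
        env->exception.target_el = target_el;
        cc->do_interrupt(cs);
        return true;
    }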
|
||||
@ -1197,6 +1218,23 @@ static Property arm_cpu_properties[] = {
|
||||
DEFINE_PROP_END_OF_LIST()
|
||||
};
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
|
||||
int mmu_idx)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
|
||||
env->exception.vaddress = address;
|
||||
if (rw == 2) {
|
||||
cs->exception_index = EXCP_PREFETCH_ABORT;
|
||||
} else {
|
||||
cs->exception_index = EXCP_DATA_ABORT;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void arm_cpu_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
|
||||
|
170 target-arm/cpu.h
@@ -197,6 +197,7 @@ typedef struct CPUARMState {
|
||||
uint64_t sctlr_el[4];
|
||||
};
|
||||
uint64_t cpacr_el1; /* Architectural feature access control register */
|
||||
uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
|
||||
uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
|
||||
uint64_t sder; /* Secure debug enable register. */
|
||||
uint32_t nsacr; /* Non-secure access control register. */
|
||||
@ -396,6 +397,7 @@ typedef struct CPUARMState {
|
||||
uint32_t syndrome; /* AArch64 format syndrome register */
|
||||
uint32_t fsr; /* AArch32 format fault status register info */
|
||||
uint64_t vaddress; /* virtual addr associated with exception, if any */
|
||||
uint32_t target_el; /* EL the exception should be targeted for */
|
||||
/* If we implement EL2 we will also need to store information
|
||||
* about the intermediate physical address for stage 2 faults.
|
||||
*/
|
||||
@ -503,8 +505,6 @@ static inline bool is_a64(CPUARMState *env)
|
||||
is returned if the signal was handled by the virtual CPU. */
|
||||
int cpu_arm_signal_handler(int host_signum, void *pinfo,
|
||||
void *puc);
|
||||
int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
|
||||
int mmu_idx);
|
||||
|
||||
/**
|
||||
* pmccntr_sync
|
||||
@ -569,6 +569,10 @@ void pmccntr_sync(CPUARMState *env);
|
||||
#define SCTLR_AFE (1U << 29)
|
||||
#define SCTLR_TE (1U << 30)
|
||||
|
||||
#define CPTR_TCPAC (1U << 31)
|
||||
#define CPTR_TTA (1U << 20)
|
||||
#define CPTR_TFP (1U << 10)
|
||||
|
||||
#define CPSR_M (0x1fU)
|
||||
#define CPSR_T (1U << 5)
|
||||
#define CPSR_F (1U << 6)
|
||||
@ -1001,7 +1005,8 @@ static inline bool access_secure_reg(CPUARMState *env)
|
||||
(_val))
|
||||
|
||||
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
|
||||
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx);
|
||||
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
|
||||
uint32_t cur_el, bool secure);
|
||||
|
||||
/* Interface between CPU and Interrupt controller. */
|
||||
void armv7m_nvic_set_pending(void *opaque, int irq);
|
||||
@ -1252,7 +1257,8 @@ typedef enum CPAccessResult {
|
||||
/* Access fails due to a configurable trap or enable which would
|
||||
* result in a categorized exception syndrome giving information about
|
||||
* the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
|
||||
* 0xc or 0x18).
|
||||
* 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
|
||||
* PL1 if in EL0, otherwise to the current EL).
|
||||
*/
|
||||
CP_ACCESS_TRAP = 1,
|
||||
/* Access fails and results in an exception syndrome 0x0 ("uncategorized").
|
||||
@ -1260,6 +1266,9 @@ typedef enum CPAccessResult {
|
||||
* result in this failure is specifically defined by the architecture.
|
||||
*/
|
||||
CP_ACCESS_TRAP_UNCATEGORIZED = 2,
|
||||
/* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
|
||||
CP_ACCESS_TRAP_EL2 = 3,
|
||||
CP_ACCESS_TRAP_EL3 = 4,
|
||||
} CPAccessResult;
|
||||
|
||||
/* Access functions for coprocessor registers. These cannot fail and
|
||||
@ -1483,11 +1492,11 @@ bool write_cpustate_to_list(ARMCPU *cpu);
|
||||
# define TARGET_VIRT_ADDR_SPACE_BITS 32
|
||||
#endif
|
||||
|
||||
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
|
||||
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
|
||||
unsigned int target_el)
|
||||
{
|
||||
CPUARMState *env = cs->env_ptr;
|
||||
unsigned int cur_el = arm_current_el(env);
|
||||
unsigned int target_el = arm_excp_target_el(cs, excp_idx);
|
||||
bool secure = arm_is_secure(env);
|
||||
uint32_t scr;
|
||||
uint32_t hcr;
|
||||
@ -1728,6 +1737,13 @@ static inline bool arm_singlestep_active(CPUARMState *env)
|
||||
#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
|
||||
#define ARM_TBFLAG_MMUIDX_SHIFT 28
|
||||
#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
|
||||
#define ARM_TBFLAG_SS_ACTIVE_SHIFT 27
|
||||
#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
|
||||
#define ARM_TBFLAG_PSTATE_SS_SHIFT 26
|
||||
#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
|
||||
/* Target EL if we take a floating-point-disabled exception */
|
||||
#define ARM_TBFLAG_FPEXC_EL_SHIFT 24
|
||||
#define ARM_TBFLAG_FPEXC_EL_MASK (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT)
|
||||
|
||||
/* Bit usage when in AArch32 state: */
|
||||
#define ARM_TBFLAG_THUMB_SHIFT 0
|
||||
@ -1742,37 +1758,31 @@ static inline bool arm_singlestep_active(CPUARMState *env)
|
||||
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
|
||||
#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
|
||||
#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
|
||||
#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17
|
||||
#define ARM_TBFLAG_CPACR_FPEN_MASK (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT)
|
||||
#define ARM_TBFLAG_SS_ACTIVE_SHIFT 18
|
||||
#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
|
||||
#define ARM_TBFLAG_PSTATE_SS_SHIFT 19
|
||||
#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
|
||||
/* We store the bottom two bits of the CPAR as TB flags and handle
|
||||
* checks on the other bits at runtime
|
||||
*/
|
||||
#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20
|
||||
#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 17
|
||||
#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
|
||||
/* Indicates whether cp register reads and writes by guest code should access
|
||||
* the secure or nonsecure bank of banked registers; note that this is not
|
||||
* the same thing as the current security state of the processor!
|
||||
*/
|
||||
#define ARM_TBFLAG_NS_SHIFT 22
|
||||
#define ARM_TBFLAG_NS_SHIFT 19
|
||||
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
|
||||
|
||||
/* Bit usage when in AArch64 state */
|
||||
#define ARM_TBFLAG_AA64_FPEN_SHIFT 2
|
||||
#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
|
||||
#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3
|
||||
#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
|
||||
#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 4
|
||||
#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)
|
||||
/* Bit usage when in AArch64 state: currently we have no A64 specific bits */
|
||||
|
||||
/* some convenience accessor macros */
|
||||
#define ARM_TBFLAG_AARCH64_STATE(F) \
|
||||
(((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
|
||||
#define ARM_TBFLAG_MMUIDX(F) \
|
||||
(((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
|
||||
#define ARM_TBFLAG_SS_ACTIVE(F) \
|
||||
(((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
|
||||
#define ARM_TBFLAG_PSTATE_SS(F) \
|
||||
(((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
|
||||
#define ARM_TBFLAG_FPEXC_EL(F) \
|
||||
(((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT)
|
||||
#define ARM_TBFLAG_THUMB(F) \
|
||||
(((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
|
||||
#define ARM_TBFLAG_VECLEN(F) \
|
||||
@ -1785,54 +1795,82 @@ static inline bool arm_singlestep_active(CPUARMState *env)
|
||||
(((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
|
||||
#define ARM_TBFLAG_BSWAP_CODE(F) \
|
||||
(((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
|
||||
#define ARM_TBFLAG_CPACR_FPEN(F) \
|
||||
(((F) & ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT)
|
||||
#define ARM_TBFLAG_SS_ACTIVE(F) \
|
||||
(((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
|
||||
#define ARM_TBFLAG_PSTATE_SS(F) \
|
||||
(((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
|
||||
#define ARM_TBFLAG_XSCALE_CPAR(F) \
|
||||
(((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
|
||||
#define ARM_TBFLAG_AA64_FPEN(F) \
|
||||
(((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
|
||||
#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \
|
||||
(((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
|
||||
#define ARM_TBFLAG_AA64_PSTATE_SS(F) \
|
||||
(((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)
|
||||
#define ARM_TBFLAG_NS(F) \
|
||||
(((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
|
||||
|
||||
/* Return the exception level to which FP-disabled exceptions should
|
||||
* be taken, or 0 if FP is enabled.
|
||||
*/
|
||||
static inline int fp_exception_el(CPUARMState *env)
|
||||
{
|
||||
int fpen;
|
||||
int cur_el = arm_current_el(env);
|
||||
|
||||
/* CPACR and the CPTR registers don't exist before v6, so FP is
|
||||
* always accessible
|
||||
*/
|
||||
if (!arm_feature(env, ARM_FEATURE_V6)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
|
||||
* 0, 2 : trap EL0 and EL1/PL1 accesses
|
||||
* 1 : trap only EL0 accesses
|
||||
* 3 : trap no accesses
|
||||
*/
|
||||
fpen = extract32(env->cp15.cpacr_el1, 20, 2);
|
||||
switch (fpen) {
|
||||
case 0:
|
||||
case 2:
|
||||
if (cur_el == 0 || cur_el == 1) {
|
||||
/* Trap to PL1, which might be EL1 or EL3 */
|
||||
if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
|
||||
return 3;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
if (cur_el == 3 && !is_a64(env)) {
|
||||
/* Secure PL1 running at EL3 */
|
||||
return 3;
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
if (cur_el == 0) {
|
||||
return 1;
|
||||
}
|
||||
break;
|
||||
case 3:
|
||||
break;
|
||||
}
|
||||
|
||||
/* For the CPTR registers we don't need to guard with an ARM_FEATURE
|
||||
* check because zero bits in the registers mean "don't trap".
|
||||
*/
|
||||
|
||||
/* CPTR_EL2 : present in v7VE or v8 */
|
||||
if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
|
||||
&& !arm_is_secure_below_el3(env)) {
|
||||
/* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
|
||||
return 2;
|
||||
}
|
||||
|
||||
/* CPTR_EL3 : present in v8 */
|
||||
if (extract32(env->cp15.cptr_el[3], 10, 1)) {
|
||||
/* Trap all FP ops to EL3 */
|
||||
return 3;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
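fp_exception_el() feeds the FPEXC_EL TB-flag field that cpu_get_tb_cpu_state() packs below; the translator unpacks it with the accessor macro and routes FP/SIMD instructions to a trap when it is non-zero. A minimal consumer-side sketch (tb and dc are assumed translator state; fp_excp_el as a DisasContext field is an assumption here, not confirmed by these hunks):

    uint32_t flags = tb->flags;               /* captured at translation time */

    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(flags);
    if (dc->fp_excp_el != 0) {
        /* FP/SIMD instructions in this TB must trap to dc->fp_excp_el
         * instead of being translated as normal FP operations. */
    }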
|
||||
|
||||
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
|
||||
target_ulong *cs_base, int *flags)
|
||||
{
|
||||
int fpen;
|
||||
|
||||
if (arm_feature(env, ARM_FEATURE_V6)) {
|
||||
fpen = extract32(env->cp15.cpacr_el1, 20, 2);
|
||||
} else {
|
||||
/* CPACR doesn't exist before v6, so VFP is always accessible */
|
||||
fpen = 3;
|
||||
}
|
||||
|
||||
if (is_a64(env)) {
|
||||
*pc = env->pc;
|
||||
*flags = ARM_TBFLAG_AARCH64_STATE_MASK;
|
||||
if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
|
||||
*flags |= ARM_TBFLAG_AA64_FPEN_MASK;
|
||||
}
|
||||
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
|
||||
* states defined in the ARM ARM for software singlestep:
|
||||
* SS_ACTIVE PSTATE.SS State
|
||||
* 0 x Inactive (the TB flag for SS is always 0)
|
||||
* 1 0 Active-pending
|
||||
* 1 1 Active-not-pending
|
||||
*/
|
||||
if (arm_singlestep_active(env)) {
|
||||
*flags |= ARM_TBFLAG_AA64_SS_ACTIVE_MASK;
|
||||
if (env->pstate & PSTATE_SS) {
|
||||
*flags |= ARM_TBFLAG_AA64_PSTATE_SS_MASK;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
*pc = env->regs[15];
|
||||
*flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
|
||||
@ -1847,9 +1885,11 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
|
||||
|| arm_el_is_aa64(env, 1)) {
|
||||
*flags |= ARM_TBFLAG_VFPEN_MASK;
|
||||
}
|
||||
if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
|
||||
*flags |= ARM_TBFLAG_CPACR_FPEN_MASK;
|
||||
*flags |= (extract32(env->cp15.c15_cpar, 0, 2)
|
||||
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
|
||||
}
|
||||
|
||||
*flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
|
||||
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
|
||||
* states defined in the ARM ARM for software singlestep:
|
||||
* SS_ACTIVE PSTATE.SS State
|
||||
@ -1859,15 +1899,17 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
|
||||
*/
|
||||
if (arm_singlestep_active(env)) {
|
||||
*flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
|
||||
if (is_a64(env)) {
|
||||
if (env->pstate & PSTATE_SS) {
|
||||
*flags |= ARM_TBFLAG_PSTATE_SS_MASK;
|
||||
}
|
||||
} else {
|
||||
if (env->uncached_cpsr & PSTATE_SS) {
|
||||
*flags |= ARM_TBFLAG_PSTATE_SS_MASK;
|
||||
}
|
||||
}
|
||||
*flags |= (extract32(env->cp15.c15_cpar, 0, 2)
|
||||
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
|
||||
}
|
||||
|
||||
*flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
|
||||
*flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
|
||||
|
||||
*cs_base = 0;
|
||||
}
|
||||
|
@ -463,7 +463,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
unsigned int new_el = arm_excp_target_el(cs, cs->exception_index);
|
||||
unsigned int new_el = env->exception.target_el;
|
||||
target_ulong addr = env->cp15.vbar_el[new_el];
|
||||
unsigned int new_mode = aarch64_pstate_mode(new_el, true);
|
||||
|
||||
|
@ -592,6 +592,33 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
env->cp15.cpacr_el1 = value;
|
||||
}
|
||||
|
||||
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri)
|
||||
{
|
||||
if (arm_feature(env, ARM_FEATURE_V8)) {
|
||||
/* Check if CPACR accesses are to be trapped to EL2 */
|
||||
if (arm_current_el(env) == 1 &&
|
||||
(env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
|
||||
return CP_ACCESS_TRAP_EL2;
|
||||
/* Check if CPACR accesses are to be trapped to EL3 */
|
||||
} else if (arm_current_el(env) < 3 &&
|
||||
(env->cp15.cptr_el[3] & CPTR_TCPAC)) {
|
||||
return CP_ACCESS_TRAP_EL3;
|
||||
}
|
||||
}
|
||||
|
||||
return CP_ACCESS_OK;
|
||||
}
|
||||
|
||||
static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri)
|
||||
{
|
||||
/* Check if CPTR accesses are set to trap to EL3 */
|
||||
if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
|
||||
return CP_ACCESS_TRAP_EL3;
|
||||
}
|
||||
|
||||
return CP_ACCESS_OK;
|
||||
}
|
||||
|
||||
static const ARMCPRegInfo v6_cp_reginfo[] = {
|
||||
/* prefetch by MVA in v6, NOP in v7 */
|
||||
{ .name = "MVA_prefetch",
|
||||
@ -614,7 +641,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
|
||||
{ .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
|
||||
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
|
||||
{ .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
|
||||
.crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
|
||||
.crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
|
||||
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
|
||||
.resetvalue = 0, .writefn = cpacr_write },
|
||||
REGINFO_SENTINEL
|
||||
@ -2481,6 +2508,9 @@ static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
|
||||
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
|
||||
.access = PL2_RW,
|
||||
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
|
||||
{ .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
|
||||
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
|
||||
REGINFO_SENTINEL
|
||||
};
|
||||
|
||||
@ -2548,6 +2578,10 @@ static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
|
||||
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
|
||||
.access = PL3_RW, .type = ARM_CP_ALIAS,
|
||||
.fieldoffset = offsetof(CPUARMState, sp_el[2]) },
|
||||
{ .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
|
||||
.access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
|
||||
REGINFO_SENTINEL
|
||||
};
|
||||
|
||||
@ -2609,6 +2643,10 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
|
||||
.access = PL3_RW, .writefn = vbar_write,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
|
||||
.resetvalue = 0 },
|
||||
{ .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
|
||||
.access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
|
||||
REGINFO_SENTINEL
|
||||
};
|
||||
|
||||
@ -4047,21 +4085,6 @@ uint32_t HELPER(rbit)(uint32_t x)
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
|
||||
int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
|
||||
int mmu_idx)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
|
||||
env->exception.vaddress = address;
|
||||
if (rw == 2) {
|
||||
cs->exception_index = EXCP_PREFETCH_ABORT;
|
||||
} else {
|
||||
cs->exception_index = EXCP_DATA_ABORT;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* These should probably raise undefined insn exceptions. */
|
||||
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
|
||||
{
|
||||
@ -4102,7 +4125,8 @@ uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
|
||||
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
|
||||
uint32_t cur_el, bool secure)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
@ -4226,7 +4250,7 @@ const int8_t target_el_table[2][2][2][2][2][4] = {
|
||||
/*
|
||||
* Determine the target EL for physical exceptions
|
||||
*/
|
||||
static inline uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
|
||||
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
|
||||
uint32_t cur_el, bool secure)
|
||||
{
|
||||
CPUARMState *env = cs->env_ptr;
|
||||
@ -4262,40 +4286,6 @@ static inline uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
|
||||
return target_el;
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine the target EL for a given exception type.
|
||||
*/
|
||||
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
unsigned int cur_el = arm_current_el(env);
|
||||
unsigned int target_el;
|
||||
bool secure = arm_is_secure(env);
|
||||
|
||||
switch (excp_idx) {
|
||||
case EXCP_HVC:
|
||||
case EXCP_HYP_TRAP:
|
||||
target_el = 2;
|
||||
break;
|
||||
case EXCP_SMC:
|
||||
target_el = 3;
|
||||
break;
|
||||
case EXCP_FIQ:
|
||||
case EXCP_IRQ:
|
||||
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
|
||||
break;
|
||||
case EXCP_VIRQ:
|
||||
case EXCP_VFIQ:
|
||||
target_el = 1;
|
||||
break;
|
||||
default:
|
||||
target_el = MAX(cur_el, 1);
|
||||
break;
|
||||
}
|
||||
return target_el;
|
||||
}
|
||||
|
||||
static void v7m_push(CPUARMState *env, uint32_t val)
|
||||
{
|
||||
CPUState *cs = CPU(arm_env_get_cpu(env));
|
||||
@ -5826,7 +5816,11 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address,
|
||||
}
|
||||
}
|
||||
|
||||
int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
|
||||
/* Walk the page table and (if the mapping exists) add the page
|
||||
* to the TLB. Return 0 on success, or an ARM DFSR/IFSR fault
|
||||
* register format value on failure.
|
||||
*/
|
||||
int arm_tlb_fill(CPUState *cs, vaddr address,
|
||||
int access_type, int mmu_idx)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
@ -5835,8 +5829,6 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
|
||||
target_ulong page_size;
|
||||
int prot;
|
||||
int ret;
|
||||
uint32_t syn;
|
||||
bool same_el = (arm_current_el(env) != 0);
|
||||
MemTxAttrs attrs = {};
|
||||
|
||||
ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
|
||||
@ -5850,27 +5842,7 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* AArch64 syndrome does not have an LPAE bit */
|
||||
syn = ret & ~(1 << 9);
|
||||
|
||||
/* For insn and data aborts we assume there is no instruction syndrome
|
||||
* information; this is always true for exceptions reported to EL1.
|
||||
*/
|
||||
if (access_type == 2) {
|
||||
syn = syn_insn_abort(same_el, 0, 0, syn);
|
||||
cs->exception_index = EXCP_PREFETCH_ABORT;
|
||||
} else {
|
||||
syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn);
|
||||
if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
|
||||
ret |= (1 << 11);
|
||||
}
|
||||
cs->exception_index = EXCP_DATA_ABORT;
|
||||
}
|
||||
|
||||
env->exception.syndrome = syn;
|
||||
env->exception.vaddress = address;
|
||||
env->exception.fsr = ret;
|
||||
return 1;
|
||||
return ret;
|
||||
}
|
||||
|
||||
hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
|
||||
|
@@ -47,7 +47,7 @@ DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
i32, i32, i32, i32)
DEF_HELPER_2(exception_internal, void, env, i32)
DEF_HELPER_3(exception_with_syndrome, void, env, i32, i32)
DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wfe, void, env)
DEF_HELPER_1(pre_hvc, void, env)

@@ -387,4 +387,7 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/* Do a page table walk and add page to TLB if possible */
int arm_tlb_fill(CPUState *cpu, vaddr address, int rw, int mmu_idx);

#endif
@@ -24,15 +24,32 @@
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, int tt)
static void raise_exception(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUState *cs = CPU(arm_env_get_cpu(env));

cs->exception_index = tt;
assert(!excp_is_internal(excp));
cs->exception_index = excp;
env->exception.syndrome = syndrome;
env->exception.target_el = target_el;
cpu_loop_exit(cs);
}

static int exception_target_el(CPUARMState *env)
{
int target_el = MAX(1, arm_current_el(env));

/* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
* to EL3 in this case.
*/
if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
target_el = 3;
}

return target_el;
}

uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
uint32_t rn, uint32_t maxindex)
{
@@ -66,16 +83,38 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
{
|
||||
int ret;
|
||||
|
||||
ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
|
||||
ret = arm_tlb_fill(cs, addr, is_write, mmu_idx);
|
||||
if (unlikely(ret)) {
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
uint32_t syn, exc;
|
||||
bool same_el = (arm_current_el(env) != 0);
|
||||
|
||||
if (retaddr) {
|
||||
/* now we have a real cpu fault */
|
||||
cpu_restore_state(cs, retaddr);
|
||||
}
|
||||
raise_exception(env, cs->exception_index);
|
||||
|
||||
/* AArch64 syndrome does not have an LPAE bit */
|
||||
syn = ret & ~(1 << 9);
|
||||
|
||||
/* For insn and data aborts we assume there is no instruction syndrome
|
||||
* information; this is always true for exceptions reported to EL1.
|
||||
*/
|
||||
if (is_write == 2) {
|
||||
syn = syn_insn_abort(same_el, 0, 0, syn);
|
||||
exc = EXCP_PREFETCH_ABORT;
|
||||
} else {
|
||||
syn = syn_data_abort(same_el, 0, 0, 0, is_write == 1, syn);
|
||||
if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
|
||||
ret |= (1 << 11);
|
||||
}
|
||||
exc = EXCP_DATA_ABORT;
|
||||
}
|
||||
|
||||
env->exception.vaddress = addr;
|
||||
env->exception.fsr = ret;
|
||||
raise_exception(env, exc, syn, exception_target_el(env));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -209,9 +248,72 @@ uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
|
||||
return res;
|
||||
}
|
||||
|
||||
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
|
||||
* The function returns the target EL (1-3) if the instruction is to be trapped;
|
||||
* otherwise it returns 0 indicating it is not trapped.
|
||||
*/
|
||||
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
|
||||
{
|
||||
int cur_el = arm_current_el(env);
|
||||
uint64_t mask;
|
||||
|
||||
/* If we are currently in EL0 then we need to check if SCTLR is set up for
|
||||
* WFx instructions being trapped to EL1. These trap bits don't exist in v7.
|
||||
*/
|
||||
if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
|
||||
int target_el;
|
||||
|
||||
mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
|
||||
if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
|
||||
/* Secure EL0 and Secure PL1 is at EL3 */
|
||||
target_el = 3;
|
||||
} else {
|
||||
target_el = 1;
|
||||
}
|
||||
|
||||
if (!(env->cp15.sctlr_el[target_el] & mask)) {
|
||||
return target_el;
|
||||
}
|
||||
}
|
||||
|
||||
/* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
|
||||
* No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
|
||||
* bits will be zero indicating no trap.
|
||||
*/
|
||||
if (cur_el < 2 && !arm_is_secure(env)) {
|
||||
mask = (is_wfe) ? HCR_TWE : HCR_TWI;
|
||||
if (env->cp15.hcr_el2 & mask) {
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
|
||||
/* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
|
||||
if (cur_el < 3) {
|
||||
mask = (is_wfe) ? SCR_TWE : SCR_TWI;
|
||||
if (env->cp15.scr_el3 & mask) {
|
||||
return 3;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void HELPER(wfi)(CPUARMState *env)
|
||||
{
|
||||
CPUState *cs = CPU(arm_env_get_cpu(env));
|
||||
int target_el = check_wfx_trap(env, false);
|
||||
|
||||
if (cpu_has_work(cs)) {
|
||||
/* Don't bother to go into our "low power state" if
|
||||
* we would just wake up immediately.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
if (target_el) {
|
||||
env->pc -= 4;
|
||||
raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
|
||||
}
|
||||
|
||||
cs->exception_index = EXCP_HLT;
|
||||
cs->halted = 1;
|
||||
@ -223,7 +325,9 @@ void HELPER(wfe)(CPUARMState *env)
|
||||
CPUState *cs = CPU(arm_env_get_cpu(env));
|
||||
|
||||
/* Don't actually halt the CPU, just yield back to top
|
||||
* level loop
|
||||
* level loop. This is not going into a "low power state"
|
||||
* (ie halting until some event occurs), so we never take
|
||||
* a configurable trap to a different exception level.
|
||||
*/
|
||||
cs->exception_index = EXCP_YIELD;
|
||||
cpu_loop_exit(cs);
|
||||
@ -246,14 +350,9 @@ void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
|
||||
|
||||
/* Raise an exception with the specified syndrome register value */
|
||||
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
|
||||
uint32_t syndrome)
|
||||
uint32_t syndrome, uint32_t target_el)
|
||||
{
|
||||
CPUState *cs = CPU(arm_env_get_cpu(env));
|
||||
|
||||
assert(!excp_is_internal(excp));
|
||||
cs->exception_index = excp;
|
||||
env->exception.syndrome = syndrome;
|
||||
cpu_loop_exit(cs);
|
||||
raise_exception(env, excp, syndrome, target_el);
|
||||
}
|
||||
|
||||
uint32_t HELPER(cpsr_read)(CPUARMState *env)
|
||||
@ -301,11 +400,11 @@ void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
|
||||
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
|
||||
{
|
||||
const ARMCPRegInfo *ri = rip;
|
||||
int target_el;
|
||||
|
||||
if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
|
||||
&& extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
|
||||
env->exception.syndrome = syndrome;
|
||||
raise_exception(env, EXCP_UDEF);
|
||||
raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
|
||||
}
|
||||
|
||||
if (!ri->accessfn) {
|
||||
@ -316,15 +415,27 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
|
||||
case CP_ACCESS_OK:
|
||||
return;
|
||||
case CP_ACCESS_TRAP:
|
||||
env->exception.syndrome = syndrome;
|
||||
target_el = exception_target_el(env);
|
||||
break;
|
||||
case CP_ACCESS_TRAP_EL2:
|
||||
/* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
|
||||
* a bug in the access function.
|
||||
*/
|
||||
assert(!arm_is_secure(env) && !arm_current_el(env) == 3);
|
||||
target_el = 2;
|
||||
break;
|
||||
case CP_ACCESS_TRAP_EL3:
|
||||
target_el = 3;
|
||||
break;
|
||||
case CP_ACCESS_TRAP_UNCATEGORIZED:
|
||||
env->exception.syndrome = syn_uncategorized();
|
||||
target_el = exception_target_el(env);
|
||||
syndrome = syn_uncategorized();
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
raise_exception(env, EXCP_UDEF);
|
||||
|
||||
raise_exception(env, EXCP_UDEF, syndrome, target_el);
|
||||
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
@@ -362,7 +473,10 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        raise_exception(env, EXCP_UDEF);
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
@@ -420,8 +534,8 @@ void HELPER(pre_hvc)(CPUARMState *env)
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

@@ -450,13 +564,12 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_HYP_TRAP);
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

@@ -749,14 +862,15 @@ void arm_debug_excp_handler(CPUState *cs)
        bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
        bool same_el = arm_debug_target_el(env) == arm_current_el(env);

        env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        env->exception.vaddress = wp_hit->hitaddr;
        raise_exception(env, EXCP_DATA_ABORT);
        raise_exception(env, EXCP_DATA_ABORT,
                        syn_watchpoint(same_el, 0, wnr),
                        arm_debug_target_el(env));
    } else {
        cpu_resume_from_signal(cs, NULL);
    }
@@ -764,14 +878,15 @@ void arm_debug_excp_handler(CPUState *cs)
    } else {
        if (check_breakpoints(cpu)) {
            bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
            env->exception.syndrome = syn_breakpoint(same_el);
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            /* FAR is UNKNOWN, so doesn't need setting */
            raise_exception(env, EXCP_PREFETCH_ABORT);
            raise_exception(env, EXCP_PREFETCH_ABORT,
                            syn_breakpoint(same_el),
                            arm_debug_target_el(env));
        }
    }
}

@@ -197,12 +197,15 @@ static void gen_exception_internal(int excp)
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome)
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
@@ -215,10 +218,10 @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome)
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome);
    gen_exception(excp, syndrome, target_el);
    s->is_jmp = DISAS_EXC;
}

@@ -245,7 +248,8 @@ static void gen_step_complete_exception(DisasContext *s)
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}

@@ -292,7 +296,8 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

#define unsupported_encoding(s, insn) \
@@ -407,7 +412,7 @@ static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    if (unlikely(!s->fp_access_checked || !s->cpacr_fpen)) {
    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
@@ -967,11 +972,12 @@ static inline bool fp_access_check(DisasContext *s)
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (s->cpacr_fpen) {
    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false));
    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}
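Callers in the A64 decoder are expected to gate any FP/SIMD emission on this check. A hypothetical caller, not taken from the patch, would look roughly like this:

static void disas_hypothetical_fp_insn(DisasContext *s, uint32_t insn)
{
    if (!fp_access_check(s)) {
        /* fp_access_check() has already emitted the EXCP_UDEF targeting
         * s->fp_excp_el, so just stop decoding this instruction.
         */
        return;
    }
    /* ... emit the TCG ops for the FP instruction here ... */
}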

@@ -1498,7 +1504,8 @@ static void disas_exc(DisasContext *s, uint32_t insn)
        switch (op2_ll) {
        case 1:
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
                               default_exception_el(s));
            break;
        case 2:
            if (s->current_el == 0) {
@@ -1511,7 +1518,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
            gen_a64_set_pc_im(s->pc - 4);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16));
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3:
            if (s->current_el == 0) {
@@ -1523,7 +1530,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16));
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
@@ -1536,7 +1543,8 @@ static void disas_exc(DisasContext *s, uint32_t insn)
            break;
        }
        /* BRK */
        gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16));
        gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16),
                           default_exception_el(s));
        break;
    case 2:
        if (op2_ll != 0) {
@@ -10936,6 +10944,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
    dc->condjmp = 0;

    dc->aarch64 = 1;
    dc->el3_is_aa64 = arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->bswap_code = 0;
    dc->condexec_mask = 0;
@@ -10945,7 +10954,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = cpu->cp_regs;
@@ -10966,8 +10975,8 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
     * emit code to generate a software step exception
     * end the TB
     */
    dc->ss_active = ARM_TBFLAG_AA64_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_AA64_PSTATE_SS(tb->flags);
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

@@ -11031,7 +11040,8 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
             * bits should be zero.
             */
            assert(num_insns == 0);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            dc->is_jmp = DISAS_EXC;
            break;
        }
@@ -11103,6 +11113,10 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
             */
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        }
    }

@@ -217,12 +217,16 @@ static void gen_exception_internal(int excp)
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome)
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
@@ -250,7 +254,8 @@ static void gen_step_complete_exception(DisasContext *s)
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}

@@ -1013,11 +1018,12 @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}

@@ -3038,9 +3044,9 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

@@ -4356,9 +4362,9 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

@@ -5094,9 +5100,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

@@ -7960,7 +7966,8 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
                /* bkpt */
                ARCH(5);
                gen_exception_insn(s, 4, EXCP_BKPT,
                                   syn_aa32_bkpt(imm16, false));
                                   syn_aa32_bkpt(imm16, false),
                                   default_exception_el(s));
                break;
            case 2:
                /* Hypervisor call (v7) */
@@ -8423,18 +8430,53 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
            }
        } else {
            int address_offset;
            int load;
            bool load = insn & (1 << 20);
            bool doubleword = false;
            /* Misc load/store */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;

            if (!load && (sh & 2)) {
                /* doubleword */
                ARCH(5TE);
                if (rd & 1) {
                    /* UNPREDICTABLE; we choose to UNDEF */
                    goto illegal_op;
                }
                load = (sh & 1) == 0;
                doubleword = true;
            }

            addr = load_reg(s, rn);
            if (insn & (1 << 24))
                gen_add_datah_offset(s, insn, 0, addr);
            address_offset = 0;
            if (insn & (1 << 20)) {

            if (doubleword) {
                if (!load) {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd + 1);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                } else {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    switch(sh) {
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    store_reg(s, rd, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    rd++;
                }
                address_offset = -4;
            } else if (load) {
                /* load */
                tmp = tcg_temp_new_i32();
                switch (sh) {
                case 1:
                    gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                    break;
@@ -8446,38 +8488,11 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
                    gen_aa32_ld16s(tmp, addr, get_mem_index(s));
                    break;
                }
                load = 1;
            } else if (sh & 2) {
                ARCH(5TE);
                /* doubleword */
                if (sh & 1) {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd + 1);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                    load = 0;
                } else {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    store_reg(s, rd, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    rd++;
                    load = 1;
                }
                address_offset = -4;
            } else {
                /* store */
                tmp = load_reg(s, rd);
                gen_aa32_st16(tmp, addr, get_mem_index(s));
                tcg_temp_free_i32(tmp);
                load = 0;
            }
            /* Perform base writeback before the loaded value to
               ensure correct behavior with overlapping index registers.
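The reworked block above decides up front whether the encoding is one of the doubleword forms, so an odd Rt can be rejected before any TCG state is emitted; this is the "UNPREDICTABLE ldrd/strd" buffer-overrun fix from this pull request. Restated as a comment, under the assumption (not shown in this hunk) that sh holds instruction bits [6:5] and bit 20 is the L bit:

/* With L (bit 20) clear and sh = insn[6:5]:
 *   sh == 2  ->  LDRD   (load = true,  doubleword = true)
 *   sh == 3  ->  STRD   (load = false, doubleword = true)
 * For these forms an odd Rt is UNPREDICTABLE; the patch chooses to UNDEF,
 * which also prevents rd + 1 from stepping one slot past the register
 * array when Rt is 15.
 */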
@@ -9021,7 +9036,8 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
        break;
    default:
    illegal_op:
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        break;
    }
}
@@ -10858,7 +10874,8 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
                               default_exception_el(s));
            break;
        }

@@ -11013,11 +11030,13 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
@@ -11057,6 +11076,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
    dc->condjmp = 0;

    dc->aarch64 = 0;
    dc->el3_is_aa64 = arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
@@ -11067,7 +11087,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
@@ -11216,7 +11236,8 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
             * bits should be zero.
             */
            assert(num_insns == 0);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

@@ -11276,13 +11297,14 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
                gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                              default_exception_el(dc));
            } else if (dc->is_jmp == DISAS_HVC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
                gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            } else if (dc->is_jmp == DISAS_SMC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SMC, syn_aa32_smc());
                gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            } else if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
@@ -11297,13 +11319,14 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
        } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
        } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc());
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
        } else if (dc->ss_active) {
            gen_step_complete_exception(dc);
        } else {
@@ -11336,18 +11359,23 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc());
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {

@@ -22,7 +22,8 @@ typedef struct DisasContext {
#endif
    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
    bool ns;          /* Use non-secure CPREG bank on access */
    bool cpacr_fpen;  /* FP enabled via CPACR.FPEN */
    int fp_excp_el;   /* FP exception EL or 0 if enabled */
    bool el3_is_aa64; /* Flag indicating whether EL3 is AArch64 or not */
    bool vfp_enabled; /* FP enabled via FPSCR.EN */
    int vec_len;
    int vec_stride;
@@ -73,6 +74,20 @@ static inline int get_mem_index(DisasContext *s)
    return s->mmu_idx;
}

/* Function used to determine the target exception EL when otherwise not known
 * or default.
 */
static inline int default_exception_el(DisasContext *s)
{
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3. Otherwise,
     * exceptions can only be routed to ELs above 1, so we target the higher of
     * 1 or the current EL.
     */
    return (s->mmu_idx == ARMMMUIdx_S1SE0 && !s->el3_is_aa64)
            ? 3 : MAX(1, s->current_el);
}
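As a quick sanity check of the rule above, worked out from the function body rather than stated anywhere in the patch:

/*   NS EL0                          -> MAX(1, 0) == 1
 *   NS EL2                          -> MAX(1, 2) == 2
 *   Secure EL0 with a 32-bit EL3    -> 3 (there is no Secure EL1 to target)
 */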

/* target-specific extra values for is_jmp */
/* These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.

@@ -1594,3 +1594,6 @@ i8257_unregistered_dma(int nchan, int dma_pos, int dma_len) "unregistered DMA ch
cpu_set_state(int cpu_index, uint8_t state) "setting cpu %d state to %" PRIu8
cpu_halt(int cpu_index) "halting cpu %d"
cpu_unhalt(int cpu_index) "unhalting cpu %d"

# hw/arm/virt-acpi-build.c
virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out."