Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

vhost, virtio, pci, pc, acpi

nvdimm work
sparse cpu id rework
ipmi enhancements
fixes all over the place
pxb option to tweak chassis number

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Tue 15 Mar 2016 14:33:10 GMT using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream: (51 commits)
  hw/acpi: fix GSI links UID
  ipmi: add some local variables in ipmi_sdr_init
  ipmi: remove the need of an ending record in the SDR table
  ipmi: use a function to initialize the SDR table
  ipmi: add a realize function to the device class
  ipmi: add rsp_buffer_set_error() helper
  ipmi: remove IPMI_CHECK_RESERVATION() macro
  ipmi: replace IPMI_ADD_RSP_DATA() macro with inline helpers
  ipmi: remove IPMI_CHECK_CMD_LEN() macro
  MAINTAINERS: machine core
  MAINTAINERS: Add an entry for virtio header files
  pc: acpi: clarify why possible LAPIC entries must be present in MADT
  pc: acpi: drop cpu->found_cpus bitmap
  pc: acpi: create Processor and Notify objects only for valid lapics
  pc: acpi: create MADT.lapic entries only for valid lapics
  pc: acpi: SRAT: create only valid processor lapic entries
  pc: acpi: cleanup qdev_get_machine() calls
  machine: introduce MachineClass.possible_cpu_arch_ids() hook
  pc: init pcms->apic_id_limit once and use it throughout pc.c
  pc: acpi: remove NOP assignment
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit a58a4cb187 (Peter Maydell, 2016-03-15 16:43:48 +00:00)
44 changed files with 1113 additions and 618 deletions


@ -717,6 +717,12 @@ F: hw/timer/hpet*
F: hw/timer/i8254*
F: hw/timer/mc146818rtc*
Machine core
M: Eduardo Habkost <ehabkost@redhat.com>
M: Marcel Apfelbaum <marcel@redhat.com>
S: Supported
F: hw/core/machine.c
F: include/hw/boards.h
Xtensa Machines
---------------
@ -878,6 +884,7 @@ M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
F: hw/*/virtio*
F: net/vhost-user.c
F: include/hw/virtio/
virtio-9p
M: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>


@ -24,8 +24,8 @@ A detailed command line would be:
-object memory-backend-ram,size=1024M,policy=bind,host-nodes=0,id=ram-node0 -numa node,nodeid=0,cpus=0,memdev=ram-node0
-object memory-backend-ram,size=1024M,policy=bind,host-nodes=1,id=ram-node1 -numa node,nodeid=1,cpus=1,memdev=ram-node1
-device pxb,id=bridge1,bus=pci.0,numa_node=1,bus_nr=4 -netdev user,id=nd -device e1000,bus=bridge1,addr=0x4,netdev=nd
-device pxb,id=bridge2,bus=pci.0,numa_node=0,bus_nr=8, -device e1000,bus=bridge2,addr=0x3
-device pxb,id=bridge3,bus=pci.0,bus_nr=40, -drive if=none,id=drive0,file=[img] -device virtio-blk-pci,drive=drive0,scsi=off,bus=bridge3,addr=1
-device pxb,id=bridge2,bus=pci.0,numa_node=0,bus_nr=8 -device e1000,bus=bridge2,addr=0x3
-device pxb,id=bridge3,bus=pci.0,bus_nr=40 -drive if=none,id=drive0,file=[img] -device virtio-blk-pci,drive=drive0,scsi=off,bus=bridge3,addr=1
Here you have:
- 2 NUMA nodes for the guest, 0 and 1. (both mapped to the same NUMA node in host, but you can and should put it in different host NUMA nodes)
@ -43,7 +43,7 @@ Implementation
==============
The PXB is composed by:
- HostBridge (TYPE_PXB_HOST)
The host bridge allows to register and query the PXB's rPCI root bus in QEMU.
The host bridge allows to register and query the PXB's PCI root bus in QEMU.
- PXBDev(TYPE_PXB_DEVICE)
It is a regular PCI Device that resides on the piix host-bridge bus and its bus uses the same PCI domain.
However, the bus behind is exposed through ACPI as a primary PCI bus and starts a new PCI hierarchy.


@ -15,13 +15,23 @@ The 1000 -> 10ff device ID range is used as follows for virtio-pci devices.
Note that this allocation separate from the virtio device IDs, which are
maintained as part of the virtio specification.
1af4:1000 network device
1af4:1001 block device
1af4:1002 balloon device
1af4:1003 console device
1af4:1004 SCSI host bus adapter device
1af4:1005 entropy generator device
1af4:1009 9p filesystem device
1af4:1000 network device (legacy)
1af4:1001 block device (legacy)
1af4:1002 balloon device (legacy)
1af4:1003 console device (legacy)
1af4:1004 SCSI host bus adapter device (legacy)
1af4:1005 entropy generator device (legacy)
1af4:1009 9p filesystem device (legacy)
1af4:1041 network device (modern)
1af4:1042 block device (modern)
1af4:1043 console device (modern)
1af4:1044 entropy generator device (modern)
1af4:1045 balloon device (modern)
1af4:1048 SCSI host bus adapter device (modern)
1af4:1049 9p filesystem device (modern)
1af4:1050 virtio gpu device (modern)
1af4:1052 virtio input device (modern)
1af4:10f0 Available for experimental usage without registration. Must get
to official ID when the code leaves the test lab (i.e. when seeking
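
The modern IDs above follow the virtio 1.0 rule of adding 0x1040 to the virtio device ID (network = 1, block = 2, and so on), which is why 1af4:1041 is the modern network device and 1af4:1048 the modern SCSI HBA. A minimal standalone C sketch of that arithmetic; the name/ID pairs below are transcribed from this listing rather than taken from QEMU headers:

    #include <stdio.h>

    /* virtio device IDs as reflected in the table above (transcribed here,
     * not taken from QEMU or the virtio headers). */
    static const struct { const char *name; unsigned id; } devices[] = {
        { "network", 1 }, { "block", 2 },   { "console", 3 },
        { "entropy", 4 }, { "balloon", 5 }, { "scsi", 8 },
        { "9p", 9 },      { "gpu", 16 },    { "input", 18 },
    };

    int main(void)
    {
        /* Modern (virtio 1.0) devices use PCI device ID 0x1040 + virtio ID. */
        for (unsigned i = 0; i < sizeof(devices) / sizeof(devices[0]); i++) {
            printf("%-8s -> 1af4:%04x\n", devices[i].name,
                   0x1040 + devices[i].id);
        }
        return 0;
    }
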


@ -2,7 +2,7 @@ common-obj-$(CONFIG_ACPI_X86) += core.o piix4.o pcihp.o
common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o cpu_hotplug_acpi_table.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
common-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o
common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
common-obj-$(CONFIG_ACPI) += aml-build.o


@ -258,6 +258,34 @@ static void build_append_int(GArray *table, uint64_t value)
}
}
/*
* Build NAME(XXXX, 0x00000000) where 0x00000000 is encoded as a dword,
* and return the offset to 0x00000000 for runtime patching.
*
* Warning: runtime patching is best avoided. Only use this as
* a replacement for DataTableRegion (for guests that don't
* support it).
*/
int
build_append_named_dword(GArray *array, const char *name_format, ...)
{
int offset;
va_list ap;
build_append_byte(array, 0x08); /* NameOp */
va_start(ap, name_format);
build_append_namestringv(array, name_format, ap);
va_end(ap);
build_append_byte(array, 0x0C); /* DWordPrefix */
offset = array->len;
build_append_int_noprefix(array, 0x00000000, 4);
assert(array->len == offset + 4);
return offset;
}
static GPtrArray *alloc_list;
static Aml *aml_alloc(void)
@ -942,14 +970,14 @@ Aml *aml_package(uint8_t num_elements)
/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefOpRegion */
Aml *aml_operation_region(const char *name, AmlRegionSpace rs,
uint32_t offset, uint32_t len)
Aml *offset, uint32_t len)
{
Aml *var = aml_alloc();
build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */
build_append_byte(var->buf, 0x80); /* OpRegionOp */
build_append_namestring(var->buf, "%s", name);
build_append_byte(var->buf, rs);
build_append_int(var->buf, offset);
aml_append(var, offset);
build_append_int(var->buf, len);
return var;
}
@ -997,6 +1025,20 @@ Aml *create_field_common(int opcode, Aml *srcbuf, Aml *index, const char *name)
return var;
}
/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateField */
Aml *aml_create_field(Aml *srcbuf, Aml *bit_index, Aml *num_bits,
const char *name)
{
Aml *var = aml_alloc();
build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */
build_append_byte(var->buf, 0x13); /* CreateFieldOp */
aml_append(var, srcbuf);
aml_append(var, bit_index);
aml_append(var, num_bits);
build_append_namestring(var->buf, "%s", name);
return var;
}
/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateDWordField */
Aml *aml_create_dword_field(Aml *srcbuf, Aml *index, const char *name)
{
@ -1423,6 +1465,13 @@ Aml *aml_alias(const char *source_object, const char *alias_object)
return var;
}
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefConcat */
Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target)
{
return build_opcode_2arg_dst(0x73 /* ConcatOp */, source1, source2,
target);
}
void
build_header(GArray *linker, GArray *table_data,
AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
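
build_append_named_dword() above emits a NameOp (0x08), the encoded name, then a DWordPrefix (0x0C) followed by a four-byte placeholder, and returns the placeholder's offset so it can be patched later (the NVDIMM code further down uses this to patch in the DSM page address). A standalone sketch of that byte layout, assuming the name is a single four-character NameSeg; the real build_append_namestringv() also handles multi-segment and prefixed paths:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Append NAME(<seg>, 0x00000000) to buf and return the offset of the
     * dword placeholder, mirroring build_append_named_dword().  Simplified:
     * the name must be exactly one 4-character NameSeg. */
    static size_t append_named_dword(uint8_t *buf, size_t *len, const char *seg)
    {
        size_t offset;

        assert(strlen(seg) == 4);
        buf[(*len)++] = 0x08;                  /* NameOp */
        memcpy(buf + *len, seg, 4);            /* NameSeg */
        *len += 4;
        buf[(*len)++] = 0x0C;                  /* DWordPrefix */
        offset = *len;
        memset(buf + *len, 0, 4);              /* placeholder for later patching */
        *len += 4;
        return offset;
    }

    int main(void)
    {
        uint8_t table[32];
        size_t len = 0;
        size_t patch_offset = append_named_dword(table, &len, "MEMA");
        uint32_t addr = 0x12345678;            /* example value patched in later */

        memcpy(table + patch_offset, &addr, sizeof(addr)); /* assumes LE host */
        for (size_t i = 0; i < len; i++) {
            printf("%02x ", (unsigned)table[i]);
        }
        printf("  (placeholder at offset %zu)\n", patch_offset);
        return 0;
    }
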


@ -29,6 +29,8 @@
#include "qemu/osdep.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/mem/nvdimm.h"
static int nvdimm_plugged_device_list(Object *obj, void *opaque)
@ -370,15 +372,131 @@ static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
g_array_free(structures, true);
}
struct NvdimmDsmIn {
uint32_t handle;
uint32_t revision;
uint32_t function;
/* the remaining size in the page is used by arg3. */
union {
uint8_t arg3[0];
};
} QEMU_PACKED;
typedef struct NvdimmDsmIn NvdimmDsmIn;
struct NvdimmDsmOut {
/* the size of buffer filled by QEMU. */
uint32_t len;
uint8_t data[0];
} QEMU_PACKED;
typedef struct NvdimmDsmOut NvdimmDsmOut;
struct NvdimmDsmFunc0Out {
/* the size of buffer filled by QEMU. */
uint32_t len;
uint32_t supported_func;
} QEMU_PACKED;
typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out;
struct NvdimmDsmFuncNoPayloadOut {
/* the size of buffer filled by QEMU. */
uint32_t len;
uint32_t func_ret_status;
} QEMU_PACKED;
typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;
static uint64_t
nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
{
nvdimm_debug("BUG: we never read _DSM IO Port.\n");
return 0;
}
static void
nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
NvdimmDsmIn *in;
hwaddr dsm_mem_addr = val;
nvdimm_debug("dsm memory address %#" HWADDR_PRIx ".\n", dsm_mem_addr);
/*
* The DSM memory is mapped to guest address space so an evil guest
* can change its content while we are doing DSM emulation. Avoid
* this by copying DSM memory to QEMU local memory.
*/
in = g_malloc(TARGET_PAGE_SIZE);
cpu_physical_memory_read(dsm_mem_addr, in, TARGET_PAGE_SIZE);
le32_to_cpus(&in->revision);
le32_to_cpus(&in->function);
le32_to_cpus(&in->handle);
nvdimm_debug("Revision %#x Handler %#x Function %#x.\n", in->revision,
in->handle, in->function);
/*
* function 0 is called by the OSPM to inquire which functions are
* supported
*/
if (in->function == 0) {
NvdimmDsmFunc0Out func0 = {
.len = cpu_to_le32(sizeof(func0)),
/* No function supported other than function 0 */
.supported_func = cpu_to_le32(0),
};
cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof func0);
} else {
/* No function except function 0 is supported yet. */
NvdimmDsmFuncNoPayloadOut out = {
.len = cpu_to_le32(sizeof(out)),
.func_ret_status = cpu_to_le32(1) /* Not Supported */,
};
cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
}
g_free(in);
}
static const MemoryRegionOps nvdimm_dsm_ops = {
.read = nvdimm_dsm_read,
.write = nvdimm_dsm_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
},
};
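
nvdimm_dsm_write() above treats the written value as the guest-physical address of a page laid out as NvdimmDsmIn, snapshots the whole page before parsing it (so the guest cannot change it mid-emulation), and writes the reply back over the same page. A standalone sketch of that request/reply exchange over a plain buffer; the struct layouts mirror the definitions above, the le32 byte swapping is omitted (little-endian host assumed), and the page size is fixed at 4096 instead of TARGET_PAGE_SIZE:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DSM_PAGE_SIZE 4096   /* stand-in for TARGET_PAGE_SIZE */

    /* Layouts mirroring NvdimmDsmIn / NvdimmDsmFunc0Out /
     * NvdimmDsmFuncNoPayloadOut above (packed, little endian assumed). */
    struct dsm_in {
        uint32_t handle;
        uint32_t revision;
        uint32_t function;
        uint8_t  arg3[DSM_PAGE_SIZE - 12];     /* rest of the page */
    } __attribute__((packed));

    struct dsm_func0_out {
        uint32_t len;
        uint32_t supported_func;
    } __attribute__((packed));

    struct dsm_no_payload_out {
        uint32_t len;
        uint32_t func_ret_status;
    } __attribute__((packed));

    /* Model of the write handler: parse the request found in 'page' and
     * overwrite it with the reply, as QEMU does via cpu_physical_memory_*. */
    static void handle_dsm(uint8_t *page)
    {
        struct dsm_in in;

        memcpy(&in, page, sizeof(in));         /* snapshot before replying */

        if (in.function == 0) {
            /* function 0: report the supported-function bitmask (none yet) */
            struct dsm_func0_out out = { sizeof(out), 0 };
            memcpy(page, &out, sizeof(out));
        } else {
            /* everything else: status 1, "Not Supported" */
            struct dsm_no_payload_out out = { sizeof(out), 1 };
            memcpy(page, &out, sizeof(out));
        }
    }

    int main(void)
    {
        static uint8_t page[DSM_PAGE_SIZE];
        struct dsm_in req = { .handle = 0, .revision = 1, .function = 0 };
        struct dsm_func0_out reply;

        memcpy(page, &req, sizeof(req));
        handle_dsm(page);
        memcpy(&reply, page, sizeof(reply));
        printf("reply: len=%" PRIu32 " supported_func=%" PRIu32 "\n",
               reply.len, reply.supported_func);
        return 0;
    }
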
void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
FWCfgState *fw_cfg, Object *owner)
{
memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
"nvdimm-acpi-io", NVDIMM_ACPI_IO_LEN);
memory_region_add_subregion(io, NVDIMM_ACPI_IO_BASE, &state->io_mr);
state->dsm_mem = g_array_new(false, true /* clear */, 1);
acpi_data_push(state->dsm_mem, TARGET_PAGE_SIZE);
fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
state->dsm_mem->len);
}
#define NVDIMM_COMMON_DSM "NCAL"
#define NVDIMM_ACPI_MEM_ADDR "MEMA"
static void nvdimm_build_common_dsm(Aml *dev)
{
Aml *method, *ifctx, *function;
Aml *method, *ifctx, *function, *dsm_mem, *unpatched, *result_size;
uint8_t byte_list[1];
method = aml_method(NVDIMM_COMMON_DSM, 4, AML_NOTSERIALIZED);
method = aml_method(NVDIMM_COMMON_DSM, 4, AML_SERIALIZED);
function = aml_arg(2);
dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);
/*
* do not support any method if DSM memory address has not been
* patched.
*/
unpatched = aml_if(aml_equal(dsm_mem, aml_int(0x0)));
/*
* function 0 is called to inquire what functions are supported by
@ -387,12 +505,38 @@ static void nvdimm_build_common_dsm(Aml *dev)
ifctx = aml_if(aml_equal(function, aml_int(0)));
byte_list[0] = 0 /* No function Supported */;
aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
aml_append(method, ifctx);
aml_append(unpatched, ifctx);
/* No function is supported yet. */
byte_list[0] = 1 /* Not Supported */;
aml_append(method, aml_return(aml_buffer(1, byte_list)));
aml_append(unpatched, aml_return(aml_buffer(1, byte_list)));
aml_append(method, unpatched);
/*
* HDLE indicates which device issued the _DSM call; it is unused for
* now since no function is supported yet. It is always set to 0 for
* all devices and will get the appropriate value once a real function
* is implemented.
*/
aml_append(method, aml_store(aml_int(0x0), aml_name("HDLE")));
aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));
/*
* tell QEMU about the real address of DSM memory, then QEMU
* gets the control and fills the result in DSM memory.
*/
aml_append(method, aml_store(dsm_mem, aml_name("NTFI")));
result_size = aml_local(1);
aml_append(method, aml_store(aml_name("RLEN"), result_size));
aml_append(method, aml_store(aml_shiftleft(result_size, aml_int(3)),
result_size));
aml_append(method, aml_create_field(aml_name("ODAT"), aml_int(0),
result_size, "OBUF"));
aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
aml_arg(6)));
aml_append(method, aml_return(aml_arg(6)));
aml_append(dev, method);
}
@ -435,7 +579,8 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
GArray *table_data, GArray *linker)
{
Aml *ssdt, *sb_scope, *dev;
Aml *ssdt, *sb_scope, *dev, *field;
int mem_addr_offset, nvdimm_ssdt;
acpi_add_table(table_offsets, table_data);
@ -459,19 +604,89 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
*/
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
/* map DSM memory and IO into ACPI namespace. */
aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO,
aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
aml_name(NVDIMM_ACPI_MEM_ADDR), TARGET_PAGE_SIZE));
/*
* DSM notifier:
* NTFI: write the address of DSM memory and notify QEMU to emulate
* the access.
*
* NTFI is an I/O port, so accessing it causes a VM exit and transfers
* control to QEMU.
*/
field = aml_field("NPIO", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("NTFI",
sizeof(uint32_t) * BITS_PER_BYTE));
aml_append(dev, field);
/*
* DSM input:
* HDLE: store device's handle, it's zero if the _DSM call happens
* on NVDIMM Root Device.
* REVS: store the Arg1 of _DSM call.
* FUNC: store the Arg2 of _DSM call.
* ARG3: store the Arg3 of _DSM call.
*
* They are backed by host RAM, so these accesses never cause a
* VM exit.
*/
field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("HDLE",
sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
aml_append(field, aml_named_field("REVS",
sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
aml_append(field, aml_named_field("FUNC",
sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
aml_append(field, aml_named_field("ARG3",
(TARGET_PAGE_SIZE - offsetof(NvdimmDsmIn, arg3)) *
BITS_PER_BYTE));
aml_append(dev, field);
/*
* DSM output:
* RLEN: the size of the buffer filled by QEMU.
* ODAT: the buffer QEMU uses to store the result.
*
* Since the page is shared between input and output, the input data
* is lost once a new result is stored in ODAT, so all input data must
* be fetched before the result is written.
*/
field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("RLEN",
sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
aml_append(field, aml_named_field("ODAT",
(TARGET_PAGE_SIZE - offsetof(NvdimmDsmOut, data)) *
BITS_PER_BYTE));
aml_append(dev, field);
nvdimm_build_common_dsm(dev);
nvdimm_build_device_dsm(dev);
nvdimm_build_nvdimm_devices(device_list, dev);
aml_append(sb_scope, dev);
aml_append(ssdt, sb_scope);
nvdimm_ssdt = table_data->len;
/* copy AML table into ACPI tables blob and patch header there */
g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
mem_addr_offset = build_append_named_dword(table_data,
NVDIMM_ACPI_MEM_ADDR);
bios_linker_loader_alloc(linker, NVDIMM_DSM_MEM_FILE, TARGET_PAGE_SIZE,
false /* high memory */);
bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
NVDIMM_DSM_MEM_FILE, table_data,
table_data->data + mem_addr_offset,
sizeof(uint32_t));
build_header(linker, table_data,
(void *)(table_data->data + table_data->len - ssdt->buf->len),
"SSDT", ssdt->buf->len, 1, NULL, "NVDIMM");
(void *)(table_data->data + nvdimm_ssdt),
"SSDT", table_data->len - nvdimm_ssdt, 1, NULL, "NVDIMM");
free_aml_allocator();
}


@ -2557,6 +2557,29 @@ FloppyDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i)
return isa->state.drives[i].drive;
}
void isa_fdc_get_drive_max_chs(FloppyDriveType type,
uint8_t *maxc, uint8_t *maxh, uint8_t *maxs)
{
const FDFormat *fdf;
*maxc = *maxh = *maxs = 0;
for (fdf = fd_formats; fdf->drive != FLOPPY_DRIVE_TYPE_NONE; fdf++) {
if (fdf->drive != type) {
continue;
}
if (*maxc < fdf->max_track) {
*maxc = fdf->max_track;
}
if (*maxh < fdf->max_head) {
*maxh = fdf->max_head;
}
if (*maxs < fdf->last_sect) {
*maxs = fdf->last_sect;
}
}
(*maxc)--;
}
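
isa_fdc_get_drive_max_chs() above takes the per-field maximum over every format-table entry matching the requested drive type, then decrements the cylinder value so that it becomes the zero-based maximum cylinder number that the _FDI package reports. A standalone sketch of the same scan over a small made-up format table (the entries are illustrative, not QEMU's fd_formats):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative format table: { drive type, tracks, max head, last sector }.
     * The values are made up for this sketch; QEMU's real table is fd_formats. */
    enum drive_type { DRIVE_144, DRIVE_288, DRIVE_NONE };

    struct fd_format {
        enum drive_type drive;
        uint8_t max_track;
        uint8_t max_head;
        uint8_t last_sect;
    };

    static const struct fd_format formats[] = {
        { DRIVE_144, 80, 1, 18 },
        { DRIVE_144, 80, 1, 21 },
        { DRIVE_288, 80, 1, 36 },
        { DRIVE_NONE, 0, 0, 0 },               /* terminator */
    };

    /* Same shape as isa_fdc_get_drive_max_chs(): per-field maximum over all
     * entries of the requested type; the final decrement turns the track count
     * into the zero-based "maximum cylinder number" that _FDI reports. */
    static void get_drive_max_chs(enum drive_type type,
                                  uint8_t *maxc, uint8_t *maxh, uint8_t *maxs)
    {
        const struct fd_format *f;

        *maxc = *maxh = *maxs = 0;
        for (f = formats; f->drive != DRIVE_NONE; f++) {
            if (f->drive != type) {
                continue;
            }
            if (*maxc < f->max_track) {
                *maxc = f->max_track;
            }
            if (*maxh < f->max_head) {
                *maxh = f->max_head;
            }
            if (*maxs < f->last_sect) {
                *maxs = f->last_sect;
            }
        }
        (*maxc)--;
    }

    int main(void)
    {
        uint8_t c, h, s;

        get_drive_max_chs(DRIVE_144, &c, &h, &s);
        printf("max cylinder %u, max head %u, max sector %u\n",
               (unsigned)c, (unsigned)h, (unsigned)s);
        return 0;
    }
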
static const VMStateDescription vmstate_isa_fdc ={
.name = "fdc",
.version_id = 2,


@ -37,8 +37,8 @@
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/isa/isa.h"
#include "hw/block/fdc.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/mem/nvdimm.h"
#include "sysemu/tpm.h"
#include "hw/acpi/tpm.h"
#include "sysemu/tpm_backend.h"
@ -76,10 +76,6 @@
#define ACPI_BUILD_DPRINTF(fmt, ...)
#endif
typedef struct AcpiCpuInfo {
DECLARE_BITMAP(found_cpus, ACPI_CPU_HOTPLUG_ID_LIMIT);
} AcpiCpuInfo;
typedef struct AcpiMcfgInfo {
uint64_t mcfg_base;
uint32_t mcfg_size;
@ -121,31 +117,6 @@ typedef struct AcpiBuildPciBusHotplugState {
bool pcihp_bridge_en;
} AcpiBuildPciBusHotplugState;
static
int acpi_add_cpu_info(Object *o, void *opaque)
{
AcpiCpuInfo *cpu = opaque;
uint64_t apic_id;
if (object_dynamic_cast(o, TYPE_CPU)) {
apic_id = object_property_get_int(o, "apic-id", NULL);
assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
set_bit(apic_id, cpu->found_cpus);
}
object_child_foreach(o, acpi_add_cpu_info, opaque);
return 0;
}
static void acpi_get_cpu_info(AcpiCpuInfo *cpu)
{
Object *root = object_get_root();
memset(cpu->found_cpus, 0, sizeof cpu->found_cpus);
object_child_foreach(root, acpi_add_cpu_info, cpu);
}
static void acpi_get_pm_info(AcpiPmInfo *pm)
{
Object *piix = piix4_pm_find();
@ -362,9 +333,10 @@ build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm,
}
static void
build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu)
build_madt(GArray *table_data, GArray *linker, PCMachineState *pcms)
{
PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(pcms);
CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(pcms));
int madt_start = table_data->len;
AcpiMultipleApicTable *madt;
@ -377,18 +349,28 @@ build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu)
madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS);
madt->flags = cpu_to_le32(1);
for (i = 0; i < pcms->apic_id_limit; i++) {
for (i = 0; i < apic_ids->len; i++) {
AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic);
int apic_id = apic_ids->cpus[i].arch_id;
apic->type = ACPI_APIC_PROCESSOR;
apic->length = sizeof(*apic);
apic->processor_id = i;
apic->local_apic_id = i;
if (test_bit(i, cpu->found_cpus)) {
apic->processor_id = apic_id;
apic->local_apic_id = apic_id;
if (apic_ids->cpus[i].cpu != NULL) {
apic->flags = cpu_to_le32(1);
} else {
/* The ACPI spec says that the LAPIC entry for a non-present
* CPU may either be omitted from the MADT or marked as
* disabled. However, omitting non-present CPUs from the
* MADT breaks hotplug on Linux, so possible CPUs are put
* in the MADT but kept disabled.
*/
apic->flags = cpu_to_le32(0);
}
}
g_free(apic_ids);
io_apic = acpi_data_push(table_data, sizeof *io_apic);
io_apic->type = ACPI_APIC_IO;
io_apic->length = sizeof(*io_apic);
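
The MADT loop above enumerates every possible CPU, not only the present ones, and marks the LAPIC entries of absent CPUs as disabled, as the comment about Linux hotplug explains. A minimal standalone sketch of that enumeration, with a made-up possible-CPU list standing in for possible_cpu_arch_ids():

    #include <stdbool.h>
    #include <stdio.h>

    /* Made-up possible-CPU list: arch_id is the APIC ID, present mirrors
     * "apic_ids->cpus[i].cpu != NULL" in the loop above. */
    struct possible_cpu {
        unsigned arch_id;
        bool present;
    };

    int main(void)
    {
        static const struct possible_cpu cpus[] = {
            { 0, true }, { 1, true }, { 2, false }, { 3, false },
        };

        for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
            /* Possible-but-absent CPUs still get a LAPIC entry, just with
             * flags = 0 (disabled), so the guest can hotplug them later. */
            unsigned flags = cpus[i].present ? 1 : 0;

            printf("LAPIC: processor_id=%u local_apic_id=%u flags=%u\n",
                   cpus[i].arch_id, cpus[i].arch_id, flags);
        }
        return 0;
    }
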
@ -960,21 +942,24 @@ static Aml *build_crs(PCIHostState *host,
return crs;
}
static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus,
AcpiCpuInfo *cpu, AcpiPmInfo *pm)
static void build_processor_devices(Aml *sb_scope, MachineState *machine,
AcpiPmInfo *pm)
{
int i;
int i, apic_idx;
Aml *dev;
Aml *crs;
Aml *pkg;
Aml *field;
Aml *ifctx;
Aml *method;
MachineClass *mc = MACHINE_GET_CLASS(machine);
CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
PCMachineState *pcms = PC_MACHINE(machine);
/* The current AML generator can cover the APIC ID range [0..255],
* inclusive, for VCPU hotplug. */
QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT);
g_assert(pcms->apic_id_limit <= ACPI_CPU_HOTPLUG_ID_LIMIT);
/* create PCI0.PRES device and its _CRS to reserve CPU hotplug MMIO */
dev = aml_device("PCI0." stringify(CPU_HOTPLUG_RESOURCE_DEVICE));
@ -993,28 +978,33 @@ static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus,
aml_append(sb_scope, dev);
/* declare CPU hotplug MMIO region and PRS field to access it */
aml_append(sb_scope, aml_operation_region(
"PRST", AML_SYSTEM_IO, pm->cpu_hp_io_base, pm->cpu_hp_io_len));
"PRST", AML_SYSTEM_IO, aml_int(pm->cpu_hp_io_base), pm->cpu_hp_io_len));
field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PRS", 256));
aml_append(sb_scope, field);
/* build Processor object for each processor */
for (i = 0; i < acpi_cpus; i++) {
dev = aml_processor(i, 0, 0, "CP%.02X", i);
for (i = 0; i < apic_ids->len; i++) {
int apic_id = apic_ids->cpus[i].arch_id;
assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
dev = aml_processor(apic_id, 0, 0, "CP%.02X", apic_id);
method = aml_method("_MAT", 0, AML_NOTSERIALIZED);
aml_append(method,
aml_return(aml_call1(CPU_MAT_METHOD, aml_int(i))));
aml_return(aml_call1(CPU_MAT_METHOD, aml_int(apic_id))));
aml_append(dev, method);
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
aml_append(method,
aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(i))));
aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(apic_id))));
aml_append(dev, method);
method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
aml_append(method,
aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(i), aml_arg(0)))
aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(apic_id),
aml_arg(0)))
);
aml_append(dev, method);
@ -1026,10 +1016,12 @@ static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus,
*/
/* Arg0 = Processor ID = APIC ID */
method = aml_method(AML_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
for (i = 0; i < acpi_cpus; i++) {
ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i)));
for (i = 0; i < apic_ids->len; i++) {
int apic_id = apic_ids->cpus[i].arch_id;
ifctx = aml_if(aml_equal(aml_arg(0), aml_int(apic_id)));
aml_append(ifctx,
aml_notify(aml_name("CP%.02X", i), aml_arg(1))
aml_notify(aml_name("CP%.02X", apic_id), aml_arg(1))
);
aml_append(method, ifctx);
}
@ -1042,14 +1034,20 @@ static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus,
* with up to 255 elements. Windows guests up to win2k8 fail when
* VarPackageOp is used.
*/
pkg = acpi_cpus <= 255 ? aml_package(acpi_cpus) :
aml_varpackage(acpi_cpus);
pkg = pcms->apic_id_limit <= 255 ? aml_package(pcms->apic_id_limit) :
aml_varpackage(pcms->apic_id_limit);
for (i = 0; i < acpi_cpus; i++) {
uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00;
aml_append(pkg, aml_int(b));
for (i = 0, apic_idx = 0; i < apic_ids->len; i++) {
int apic_id = apic_ids->cpus[i].arch_id;
for (; apic_idx < apic_id; apic_idx++) {
aml_append(pkg, aml_int(0));
}
aml_append(pkg, aml_int(apic_ids->cpus[i].cpu ? 1 : 0));
apic_idx = apic_id + 1;
}
aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg));
g_free(apic_ids);
}
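
The CPON package built above is indexed by APIC ID, so the final loop pads any gap between consecutive arch_ids with zeros before appending the present/absent bit for each possible CPU; that is also why the package is sized from apic_id_limit rather than from the number of possible CPUs. A standalone sketch of the gap filling, using an intentionally sparse set of APIC IDs:

    #include <stdbool.h>
    #include <stdio.h>

    struct possible_cpu { unsigned arch_id; bool present; };

    int main(void)
    {
        /* Sparse, sorted APIC IDs, e.g. a topology where some IDs are unused. */
        static const struct possible_cpu cpus[] = {
            { 0, true }, { 2, true }, { 5, false }, { 6, false },
        };
        unsigned apic_id_limit = 7;   /* last arch_id + 1, as in pc_cpus_init() */
        unsigned apic_idx = 0;

        printf("CPON = {");
        for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
            /* pad the holes between arch_ids with 0 (no such APIC ID) */
            for (; apic_idx < cpus[i].arch_id; apic_idx++) {
                printf(" 0,");
            }
            printf(" %d,", cpus[i].present ? 1 : 0);
            apic_idx = cpus[i].arch_id + 1;
        }
        printf(" }  /* %u entries, one per APIC ID below the limit */\n",
               apic_id_limit);
        return 0;
    }
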
static void build_memory_devices(Aml *sb_scope, int nr_mem,
@ -1078,7 +1076,7 @@ static void build_memory_devices(Aml *sb_scope, int nr_mem,
aml_append(scope, aml_operation_region(
MEMORY_HOTPLUG_IO_REGION, AML_SYSTEM_IO,
io_base, io_len)
aml_int(io_base), io_len)
);
field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_DWORD_ACC,
@ -1192,7 +1190,8 @@ static void build_hpet_aml(Aml *table)
aml_append(dev, aml_name_decl("_UID", zero));
aml_append(dev,
aml_operation_region("HPTM", AML_SYSTEM_MEMORY, HPET_BASE, HPET_LEN));
aml_operation_region("HPTM", AML_SYSTEM_MEMORY, aml_int(HPET_BASE),
HPET_LEN));
field = aml_field("HPTM", AML_DWORD_ACC, AML_LOCK, AML_PRESERVE);
aml_append(field, aml_named_field("VEND", 32));
aml_append(field, aml_named_field("PRD", 32));
@ -1227,33 +1226,63 @@ static void build_hpet_aml(Aml *table)
aml_append(table, scope);
}
static Aml *build_fdc_device_aml(void)
static Aml *build_fdinfo_aml(int idx, FloppyDriveType type)
{
Aml *dev, *fdi;
uint8_t maxc, maxh, maxs;
isa_fdc_get_drive_max_chs(type, &maxc, &maxh, &maxs);
dev = aml_device("FLP%c", 'A' + idx);
aml_append(dev, aml_name_decl("_ADR", aml_int(idx)));
fdi = aml_package(16);
aml_append(fdi, aml_int(idx)); /* Drive Number */
aml_append(fdi,
aml_int(cmos_get_fd_drive_type(type))); /* Device Type */
/*
* the values below are the limits of the drive, and are thus independent
* of the inserted media
*/
aml_append(fdi, aml_int(maxc)); /* Maximum Cylinder Number */
aml_append(fdi, aml_int(maxs)); /* Maximum Sector Number */
aml_append(fdi, aml_int(maxh)); /* Maximum Head Number */
/*
* SeaBIOS returns the below values for int 0x13 func 0x08 regardless of
* the drive type, so shall we
*/
aml_append(fdi, aml_int(0xAF)); /* disk_specify_1 */
aml_append(fdi, aml_int(0x02)); /* disk_specify_2 */
aml_append(fdi, aml_int(0x25)); /* disk_motor_wait */
aml_append(fdi, aml_int(0x02)); /* disk_sector_siz */
aml_append(fdi, aml_int(0x12)); /* disk_eot */
aml_append(fdi, aml_int(0x1B)); /* disk_rw_gap */
aml_append(fdi, aml_int(0xFF)); /* disk_dtl */
aml_append(fdi, aml_int(0x6C)); /* disk_formt_gap */
aml_append(fdi, aml_int(0xF6)); /* disk_fill */
aml_append(fdi, aml_int(0x0F)); /* disk_head_sttl */
aml_append(fdi, aml_int(0x08)); /* disk_motor_strt */
aml_append(dev, aml_name_decl("_FDI", fdi));
return dev;
}
static Aml *build_fdc_device_aml(ISADevice *fdc)
{
int i;
Aml *dev;
Aml *crs;
Aml *method;
Aml *if_ctx;
Aml *else_ctx;
Aml *zero = aml_int(0);
Aml *is_present = aml_local(0);
#define ACPI_FDE_MAX_FD 4
uint32_t fde_buf[5] = {
0, 0, 0, 0, /* presence of floppy drives #0 - #3 */
cpu_to_le32(2) /* tape presence (2 == never present) */
};
dev = aml_device("FDC0");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0700")));
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
aml_append(method, aml_store(aml_name("FDEN"), is_present));
if_ctx = aml_if(aml_equal(is_present, zero));
{
aml_append(if_ctx, aml_return(aml_int(0x00)));
}
aml_append(method, if_ctx);
else_ctx = aml_else();
{
aml_append(else_ctx, aml_return(aml_int(0x0f)));
}
aml_append(method, else_ctx);
aml_append(dev, method);
crs = aml_resource_template();
aml_append(crs, aml_io(AML_DECODE16, 0x03F2, 0x03F2, 0x00, 0x04));
aml_append(crs, aml_io(AML_DECODE16, 0x03F7, 0x03F7, 0x00, 0x01));
@ -1262,6 +1291,17 @@ static Aml *build_fdc_device_aml(void)
aml_dma(AML_COMPATIBILITY, AML_NOTBUSMASTER, AML_TRANSFER8, 2));
aml_append(dev, aml_name_decl("_CRS", crs));
for (i = 0; i < MIN(MAX_FD, ACPI_FDE_MAX_FD); i++) {
FloppyDriveType type = isa_fdc_get_drive_type(fdc, i);
if (type < FLOPPY_DRIVE_TYPE_NONE) {
fde_buf[i] = cpu_to_le32(1); /* drive present */
aml_append(dev, build_fdinfo_aml(i, type));
}
}
aml_append(dev, aml_name_decl("_FDE",
aml_buffer(sizeof(fde_buf), (uint8_t *)fde_buf)));
return dev;
}
@ -1406,12 +1446,16 @@ static Aml *build_com_device_aml(uint8_t uid)
static void build_isa_devices_aml(Aml *table)
{
ISADevice *fdc = pc_find_fdc0();
Aml *scope = aml_scope("_SB.PCI0.ISA");
aml_append(scope, build_rtc_device_aml());
aml_append(scope, build_kbd_device_aml());
aml_append(scope, build_mouse_device_aml());
aml_append(scope, build_fdc_device_aml());
if (fdc) {
aml_append(scope, build_fdc_device_aml(fdc));
}
aml_append(scope, build_lpt_device_aml());
aml_append(scope, build_com_device_aml(1));
aml_append(scope, build_com_device_aml(2));
@ -1430,7 +1474,7 @@ static void build_dbg_aml(Aml *table)
Aml *idx = aml_local(2);
aml_append(scope,
aml_operation_region("DBG", AML_SYSTEM_IO, 0x0402, 0x01));
aml_operation_region("DBG", AML_SYSTEM_IO, aml_int(0x0402), 0x01));
field = aml_field("DBG", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("DBGB", 8));
aml_append(scope, field);
@ -1509,6 +1553,12 @@ static Aml *build_gsi_link_dev(const char *name, uint8_t uid, uint8_t gsi)
aml_append(dev, aml_name_decl("_CRS", crs));
/*
* _DIS can be no-op because the interrupt cannot be disabled.
*/
method = aml_method("_DIS", 0, AML_NOTSERIALIZED);
aml_append(dev, method);
method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
aml_append(dev, method);
@ -1742,18 +1792,14 @@ static void build_q35_pci0_int(Aml *table)
aml_append(sb_scope, build_link_dev("LNKG", 6, aml_name("PRQG")));
aml_append(sb_scope, build_link_dev("LNKH", 7, aml_name("PRQH")));
/*
* TODO: UID probably shouldn't be the same for GSIx devices
* but that's how it was in original ASL so keep it for now
*/
aml_append(sb_scope, build_gsi_link_dev("GSIA", 0, 0x10));
aml_append(sb_scope, build_gsi_link_dev("GSIB", 0, 0x11));
aml_append(sb_scope, build_gsi_link_dev("GSIC", 0, 0x12));
aml_append(sb_scope, build_gsi_link_dev("GSID", 0, 0x13));
aml_append(sb_scope, build_gsi_link_dev("GSIE", 0, 0x14));
aml_append(sb_scope, build_gsi_link_dev("GSIF", 0, 0x15));
aml_append(sb_scope, build_gsi_link_dev("GSIG", 0, 0x16));
aml_append(sb_scope, build_gsi_link_dev("GSIH", 0, 0x17));
aml_append(sb_scope, build_gsi_link_dev("GSIA", 0x10, 0x10));
aml_append(sb_scope, build_gsi_link_dev("GSIB", 0x11, 0x11));
aml_append(sb_scope, build_gsi_link_dev("GSIC", 0x12, 0x12));
aml_append(sb_scope, build_gsi_link_dev("GSID", 0x13, 0x13));
aml_append(sb_scope, build_gsi_link_dev("GSIE", 0x14, 0x14));
aml_append(sb_scope, build_gsi_link_dev("GSIF", 0x15, 0x15));
aml_append(sb_scope, build_gsi_link_dev("GSIG", 0x16, 0x16));
aml_append(sb_scope, build_gsi_link_dev("GSIH", 0x17, 0x17));
aml_append(table, sb_scope);
}
@ -1770,28 +1816,25 @@ static void build_q35_isa_bridge(Aml *table)
/* ICH9 PCI to ISA irq remapping */
aml_append(dev, aml_operation_region("PIRQ", AML_PCI_CONFIG,
0x60, 0x0C));
aml_int(0x60), 0x0C));
aml_append(dev, aml_operation_region("LPCD", AML_PCI_CONFIG,
0x80, 0x02));
aml_int(0x80), 0x02));
field = aml_field("LPCD", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("COMA", 3));
aml_append(field, aml_reserved_field(1));
aml_append(field, aml_named_field("COMB", 3));
aml_append(field, aml_reserved_field(1));
aml_append(field, aml_named_field("LPTD", 2));
aml_append(field, aml_reserved_field(2));
aml_append(field, aml_named_field("FDCD", 2));
aml_append(dev, field);
aml_append(dev, aml_operation_region("LPCE", AML_PCI_CONFIG,
0x82, 0x02));
aml_int(0x82), 0x02));
/* enable bits */
field = aml_field("LPCE", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("CAEN", 1));
aml_append(field, aml_named_field("CBEN", 1));
aml_append(field, aml_named_field("LPEN", 1));
aml_append(field, aml_named_field("FDEN", 1));
aml_append(dev, field);
aml_append(scope, dev);
@ -1808,7 +1851,7 @@ static void build_piix4_pm(Aml *table)
aml_append(dev, aml_name_decl("_ADR", aml_int(0x00010003)));
aml_append(dev, aml_operation_region("P13C", AML_PCI_CONFIG,
0x00, 0xff));
aml_int(0x00), 0xff));
aml_append(scope, dev);
aml_append(table, scope);
}
@ -1825,7 +1868,7 @@ static void build_piix4_isa_bridge(Aml *table)
/* PIIX PCI to ISA irq remapping */
aml_append(dev, aml_operation_region("P40C", AML_PCI_CONFIG,
0x60, 0x04));
aml_int(0x60), 0x04));
/* enable bits */
field = aml_field("^PX13.P13C", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
/* Offset(0x5f),, 7, */
@ -1839,7 +1882,6 @@ static void build_piix4_isa_bridge(Aml *table)
aml_append(field, aml_reserved_field(3));
aml_append(field, aml_named_field("CBEN", 1));
aml_append(dev, field);
aml_append(dev, aml_name_decl("FDEN", aml_int(1)));
aml_append(scope, dev);
aml_append(table, scope);
@ -1854,20 +1896,20 @@ static void build_piix4_pci_hotplug(Aml *table)
scope = aml_scope("_SB.PCI0");
aml_append(scope,
aml_operation_region("PCST", AML_SYSTEM_IO, 0xae00, 0x08));
aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(0xae00), 0x08));
field = aml_field("PCST", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("PCIU", 32));
aml_append(field, aml_named_field("PCID", 32));
aml_append(scope, field);
aml_append(scope,
aml_operation_region("SEJ", AML_SYSTEM_IO, 0xae08, 0x04));
aml_operation_region("SEJ", AML_SYSTEM_IO, aml_int(0xae08), 0x04));
field = aml_field("SEJ", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("B0EJ", 32));
aml_append(scope, field);
aml_append(scope,
aml_operation_region("BNMR", AML_SYSTEM_IO, 0xae10, 0x04));
aml_operation_region("BNMR", AML_SYSTEM_IO, aml_int(0xae10), 0x04));
field = aml_field("BNMR", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("BNUM", 32));
aml_append(scope, field);
@ -1937,14 +1979,13 @@ static Aml *build_q35_osc_method(void)
static void
build_dsdt(GArray *table_data, GArray *linker,
AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc,
PcPciInfo *pci)
AcpiPmInfo *pm, AcpiMiscInfo *misc,
PcPciInfo *pci, MachineState *machine)
{
CrsRangeEntry *entry;
Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs;
GPtrArray *mem_ranges = g_ptr_array_new_with_free_func(crs_range_free);
GPtrArray *io_ranges = g_ptr_array_new_with_free_func(crs_range_free);
MachineState *machine = MACHINE(qdev_get_machine());
PCMachineState *pcms = PC_MACHINE(machine);
uint32_t nr_mem = machine->ram_slots;
int root_bus_limit = 0xFF;
@ -1975,9 +2016,9 @@ build_dsdt(GArray *table_data, GArray *linker,
} else {
sb_scope = aml_scope("_SB");
aml_append(sb_scope,
aml_operation_region("PCST", AML_SYSTEM_IO, 0xae00, 0x0c));
aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(0xae00), 0x0c));
aml_append(sb_scope,
aml_operation_region("PCSB", AML_SYSTEM_IO, 0xae0c, 0x01));
aml_operation_region("PCSB", AML_SYSTEM_IO, aml_int(0xae0c), 0x01));
field = aml_field("PCSB", AML_ANY_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("PCIB", 8));
aml_append(sb_scope, field);
@ -2252,7 +2293,7 @@ build_dsdt(GArray *table_data, GArray *linker,
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO,
misc->pvpanic_port, 1));
aml_int(misc->pvpanic_port), 1));
field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PEPT", 8));
aml_append(dev, field);
@ -2275,7 +2316,7 @@ build_dsdt(GArray *table_data, GArray *linker,
sb_scope = aml_scope("\\_SB");
{
build_processor_devices(sb_scope, pcms->apic_id_limit, cpu, pm);
build_processor_devices(sb_scope, machine, pm);
build_memory_devices(sb_scope, nr_mem, pm->mem_hp_io_base,
pm->mem_hp_io_len);
@ -2396,7 +2437,7 @@ acpi_build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
}
static void
build_srat(GArray *table_data, GArray *linker)
build_srat(GArray *table_data, GArray *linker, MachineState *machine)
{
AcpiSystemResourceAffinityTable *srat;
AcpiSratProcessorAffinity *core;
@ -2406,7 +2447,9 @@ build_srat(GArray *table_data, GArray *linker)
uint64_t curnode;
int srat_start, numa_start, slots;
uint64_t mem_len, mem_base, next_base;
PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(machine);
CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
PCMachineState *pcms = PC_MACHINE(machine);
ram_addr_t hotplugabble_address_space_size =
object_property_get_int(OBJECT(pcms), PC_MACHINE_MEMHP_REGION_SIZE,
NULL);
@ -2415,14 +2458,15 @@ build_srat(GArray *table_data, GArray *linker)
srat = acpi_data_push(table_data, sizeof *srat);
srat->reserved1 = cpu_to_le32(1);
core = (void *)(srat + 1);
for (i = 0; i < pcms->apic_id_limit; ++i) {
for (i = 0; i < apic_ids->len; i++) {
int apic_id = apic_ids->cpus[i].arch_id;
core = acpi_data_push(table_data, sizeof *core);
core->type = ACPI_SRAT_PROCESSOR;
core->length = sizeof(*core);
core->local_apic_id = i;
curnode = pcms->node_cpu[i];
core->local_apic_id = apic_id;
curnode = pcms->node_cpu[apic_id];
core->proximity_lo = curnode;
memset(core->proximity_hi, 0, 3);
core->local_sapic_eid = 0;
@ -2487,6 +2531,7 @@ build_srat(GArray *table_data, GArray *linker)
(void *)(table_data->data + srat_start),
"SRAT",
table_data->len - srat_start, 1, NULL, NULL);
g_free(apic_ids);
}
static void
@ -2610,21 +2655,13 @@ static bool acpi_has_iommu(void)
return intel_iommu && !ambiguous;
}
static bool acpi_has_nvdimm(void)
{
PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
return pcms->nvdimm;
}
static
void acpi_build(AcpiBuildTables *tables)
void acpi_build(AcpiBuildTables *tables, MachineState *machine)
{
PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
PCMachineState *pcms = PC_MACHINE(machine);
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
GArray *table_offsets;
unsigned facs, dsdt, rsdt, fadt;
AcpiCpuInfo cpu;
AcpiPmInfo pm;
AcpiMiscInfo misc;
AcpiMcfgInfo mcfg;
@ -2634,7 +2671,6 @@ void acpi_build(AcpiBuildTables *tables)
GArray *tables_blob = tables->table_data;
AcpiSlicOem slic_oem = { .id = NULL, .table_id = NULL };
acpi_get_cpu_info(&cpu);
acpi_get_pm_info(&pm);
acpi_get_misc_info(&misc);
acpi_get_pci_info(&pci);
@ -2658,7 +2694,7 @@ void acpi_build(AcpiBuildTables *tables)
/* DSDT is pointed to by FADT */
dsdt = tables_blob->len;
build_dsdt(tables_blob, tables->linker, &cpu, &pm, &misc, &pci);
build_dsdt(tables_blob, tables->linker, &pm, &misc, &pci, machine);
/* Count the size of the DSDT and SSDT, we will need it for legacy
* sizing of ACPI tables.
@ -2673,7 +2709,7 @@ void acpi_build(AcpiBuildTables *tables)
aml_len += tables_blob->len - fadt;
acpi_add_table(table_offsets, tables_blob);
build_madt(tables_blob, tables->linker, &cpu);
build_madt(tables_blob, tables->linker, pcms);
if (misc.has_hpet) {
acpi_add_table(table_offsets, tables_blob);
@ -2690,7 +2726,7 @@ void acpi_build(AcpiBuildTables *tables)
}
if (pcms->numa_nodes) {
acpi_add_table(table_offsets, tables_blob);
build_srat(tables_blob, tables->linker);
build_srat(tables_blob, tables->linker, machine);
}
if (acpi_get_mcfg(&mcfg)) {
acpi_add_table(table_offsets, tables_blob);
@ -2700,8 +2736,7 @@ void acpi_build(AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_dmar_q35(tables_blob, tables->linker);
}
if (acpi_has_nvdimm()) {
if (pcms->acpi_nvdimm_state.is_enabled) {
nvdimm_build_acpi(table_offsets, tables_blob, tables->linker);
}
@ -2795,7 +2830,7 @@ static void acpi_build_update(void *build_opaque)
acpi_build_tables_init(&tables);
acpi_build(&tables);
acpi_build(&tables, MACHINE(qdev_get_machine()));
acpi_ram_update(build_state->table_mr, tables.table_data);
@ -2860,7 +2895,7 @@ void acpi_setup(void)
acpi_set_pci_info();
acpi_build_tables_init(&tables);
acpi_build(&tables);
acpi_build(&tables, MACHINE(pcms));
/* Now expose it all to Guest */
build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data,


@ -186,7 +186,7 @@ static void kvm_apic_realize(DeviceState *dev, Error **errp)
APIC_SPACE_SIZE);
if (kvm_has_gsi_routing()) {
msi_supported = true;
msi_nonbroken = true;
}
}


@ -199,7 +199,7 @@ static void pic_irq_request(void *opaque, int irq, int level)
#define REG_EQUIPMENT_BYTE 0x14
static int cmos_get_fd_drive_type(FloppyDriveType fd0)
int cmos_get_fd_drive_type(FloppyDriveType fd0)
{
int val;
@ -699,18 +699,6 @@ static uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
}
}
/* Calculates the limit to CPU APIC ID values
*
* This function returns the limit for the APIC ID value, so that all
* CPU APIC IDs are < pc_apic_id_limit().
*
* This is used for FW_CFG_MAX_CPUS. See comments on bochs_bios_init().
*/
static unsigned int pc_apic_id_limit(unsigned int max_cpus)
{
return x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
}
static void pc_build_smbios(FWCfgState *fw_cfg)
{
uint8_t *smbios_tables, *smbios_anchor;
@ -748,12 +736,11 @@ static void pc_build_smbios(FWCfgState *fw_cfg)
}
}
static FWCfgState *bochs_bios_init(AddressSpace *as)
static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms)
{
FWCfgState *fw_cfg;
uint64_t *numa_fw_cfg;
int i, j;
unsigned int apic_id_limit = pc_apic_id_limit(max_cpus);
fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, as);
@ -771,7 +758,7 @@ static FWCfgState *bochs_bios_init(AddressSpace *as)
* [1] The only kind of "CPU identifier" used between SeaBIOS and QEMU is
* the APIC ID, not the "CPU index"
*/
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)apic_id_limit);
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)pcms->apic_id_limit);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES,
acpi_tables, acpi_tables_len);
@ -789,11 +776,11 @@ static FWCfgState *bochs_bios_init(AddressSpace *as)
* of nodes, one word for each VCPU->node and one word for each node to
* hold the amount of memory.
*/
numa_fw_cfg = g_new0(uint64_t, 1 + apic_id_limit + nb_numa_nodes);
numa_fw_cfg = g_new0(uint64_t, 1 + pcms->apic_id_limit + nb_numa_nodes);
numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes);
for (i = 0; i < max_cpus; i++) {
unsigned int apic_id = x86_cpu_apic_id_from_index(i);
assert(apic_id < apic_id_limit);
assert(apic_id < pcms->apic_id_limit);
for (j = 0; j < nb_numa_nodes; j++) {
if (test_bit(i, numa_info[j].node_cpu)) {
numa_fw_cfg[apic_id + 1] = cpu_to_le64(j);
@ -802,10 +789,11 @@ static FWCfgState *bochs_bios_init(AddressSpace *as)
}
}
for (i = 0; i < nb_numa_nodes; i++) {
numa_fw_cfg[apic_id_limit + 1 + i] = cpu_to_le64(numa_info[i].node_mem);
numa_fw_cfg[pcms->apic_id_limit + 1 + i] =
cpu_to_le64(numa_info[i].node_mem);
}
fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg,
(1 + apic_id_limit + nb_numa_nodes) *
(1 + pcms->apic_id_limit + nb_numa_nodes) *
sizeof(*numa_fw_cfg));
return fw_cfg;
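
The fw_cfg NUMA blob assembled above is one 64-bit word holding the node count, then one word per APIC ID (up to apic_id_limit) giving that CPU's node, then one word per node giving its memory size, all little-endian. A standalone sketch of filling that layout with made-up topology values; the cpu_to_le64 conversions are omitted and APIC IDs are assumed to equal CPU indexes:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Made-up topology: 2 nodes, APIC IDs 0..3, CPUs 0,1 -> node 0,
         * CPUs 2,3 -> node 1; 1 GiB of RAM per node. */
        const unsigned nb_numa_nodes = 2;
        const unsigned apic_id_limit = 4;
        const unsigned cpu_to_node[] = { 0, 0, 1, 1 };
        const uint64_t node_mem[] = { 1ULL << 30, 1ULL << 30 };

        /* Layout mirrors bochs_bios_init(): [node count][node per APIC ID]
         * [memory per node].  Endianness conversion is omitted here. */
        size_t words = 1 + apic_id_limit + nb_numa_nodes;
        uint64_t *numa_fw_cfg = calloc(words, sizeof(*numa_fw_cfg));

        numa_fw_cfg[0] = nb_numa_nodes;
        for (unsigned apic_id = 0; apic_id < apic_id_limit; apic_id++) {
            numa_fw_cfg[1 + apic_id] = cpu_to_node[apic_id];
        }
        for (unsigned node = 0; node < nb_numa_nodes; node++) {
            numa_fw_cfg[1 + apic_id_limit + node] = node_mem[node];
        }

        for (size_t i = 0; i < words; i++) {
            printf("word %zu: %llu\n", i, (unsigned long long)numa_fw_cfg[i]);
        }
        free(numa_fw_cfg);
        return 0;
    }
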
@ -1119,7 +1107,6 @@ void pc_cpus_init(PCMachineState *pcms)
int i;
X86CPU *cpu = NULL;
MachineState *machine = MACHINE(pcms);
unsigned long apic_id_limit;
/* init CPUs */
if (machine->cpu_model == NULL) {
@ -1130,18 +1117,32 @@ void pc_cpus_init(PCMachineState *pcms)
#endif
}
apic_id_limit = pc_apic_id_limit(max_cpus);
if (apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) {
error_report("max_cpus is too large. APIC ID of last CPU is %lu",
apic_id_limit - 1);
/* Calculates the limit to CPU APIC ID values
*
* Limit for the APIC ID value, so that all
* CPU APIC IDs are < pcms->apic_id_limit.
*
* This is used for FW_CFG_MAX_CPUS. See comments on bochs_bios_init().
*/
pcms->apic_id_limit = x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
if (pcms->apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) {
error_report("max_cpus is too large. APIC ID of last CPU is %u",
pcms->apic_id_limit - 1);
exit(1);
}
for (i = 0; i < smp_cpus; i++) {
pcms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
sizeof(CPUArchId) * max_cpus);
for (i = 0; i < max_cpus; i++) {
pcms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i);
pcms->possible_cpus->len++;
if (i < smp_cpus) {
cpu = pc_new_cpu(machine->cpu_model, x86_cpu_apic_id_from_index(i),
&error_fatal);
pcms->possible_cpus->cpus[i].cpu = CPU(cpu);
object_unref(OBJECT(cpu));
}
}
/* tell smbios about cpuid version and features */
smbios_set_cpuid(cpu->env.cpuid_version, cpu->env.features[FEAT_1_EDX]);
@ -1186,7 +1187,6 @@ void pc_guest_info_init(PCMachineState *pcms)
{
int i, j;
pcms->apic_id_limit = pc_apic_id_limit(max_cpus);
pcms->apic_xrupt_override = kvm_allows_irq0_override();
pcms->numa_nodes = nb_numa_nodes;
pcms->node_mem = g_malloc0(pcms->numa_nodes *
@ -1371,7 +1371,7 @@ void pc_memory_init(PCMachineState *pcms,
option_rom_mr,
1);
fw_cfg = bochs_bios_init(&address_space_memory);
fw_cfg = bochs_bios_init(&address_space_memory, pcms);
rom_set_fw(fw_cfg);
@ -1664,9 +1664,19 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
error_propagate(errp, local_err);
}
static int pc_apic_cmp(const void *a, const void *b)
{
CPUArchId *apic_a = (CPUArchId *)a;
CPUArchId *apic_b = (CPUArchId *)b;
return apic_a->arch_id - apic_b->arch_id;
}
static void pc_cpu_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(dev);
CPUArchId apic_id, *found_cpu;
HotplugHandlerClass *hhc;
Error *local_err = NULL;
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
@ -1689,6 +1699,13 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
/* increment the number of CPUs */
rtc_set_memory(pcms->rtc, 0x5f, rtc_get_memory(pcms->rtc, 0x5f) + 1);
apic_id.arch_id = cc->get_arch_id(CPU(dev));
found_cpu = bsearch(&apic_id, pcms->possible_cpus->cpus,
pcms->possible_cpus->len, sizeof(*pcms->possible_cpus->cpus),
pc_apic_cmp);
assert(found_cpu);
found_cpu->cpu = CPU(dev);
out:
error_propagate(errp, local_err);
}
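
pc_cpu_plug() above finds the hot-added CPU's slot in the sorted possible_cpus array by its arch_id (the APIC ID) with bsearch() and stores the CPU object there. A standalone sketch of the same lookup; the slot type is a simplified stand-in for CPUArchId, and the comparator uses explicit comparisons rather than the subtraction in pc_apic_cmp():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for CPUArchId: sorted by arch_id, cpu is NULL
     * until a CPU with that APIC ID is actually plugged. */
    struct cpu_slot {
        uint64_t arch_id;
        void *cpu;
    };

    static int dummy_cpu;   /* stands in for a CPUState object in this sketch */

    static int cpu_slot_cmp(const void *a, const void *b)
    {
        const struct cpu_slot *sa = a, *sb = b;

        if (sa->arch_id < sb->arch_id) {
            return -1;
        }
        return sa->arch_id > sb->arch_id;
    }

    int main(void)
    {
        struct cpu_slot possible[] = {
            { 0, &dummy_cpu }, { 1, &dummy_cpu }, { 2, NULL }, { 3, NULL },
        };
        struct cpu_slot key = { .arch_id = 2 };  /* APIC ID of the new CPU */
        struct cpu_slot *slot;

        slot = bsearch(&key, possible, sizeof(possible) / sizeof(possible[0]),
                       sizeof(possible[0]), cpu_slot_cmp);
        if (slot) {
            slot->cpu = &dummy_cpu;              /* record the plugged CPU */
            printf("plugged CPU with APIC ID %llu\n",
                   (unsigned long long)slot->arch_id);
        }
        return 0;
    }
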
@ -1853,14 +1870,14 @@ static bool pc_machine_get_nvdimm(Object *obj, Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
return pcms->nvdimm;
return pcms->acpi_nvdimm_state.is_enabled;
}
static void pc_machine_set_nvdimm(Object *obj, bool value, Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
pcms->nvdimm = value;
pcms->acpi_nvdimm_state.is_enabled = value;
}
static void pc_machine_initfn(Object *obj)
@ -1899,7 +1916,7 @@ static void pc_machine_initfn(Object *obj)
&error_abort);
/* nvdimm is disabled on default. */
pcms->nvdimm = false;
pcms->acpi_nvdimm_state.is_enabled = false;
object_property_add_bool(obj, PC_MACHINE_NVDIMM, pc_machine_get_nvdimm,
pc_machine_set_nvdimm, &error_abort);
}
@ -1931,6 +1948,17 @@ static unsigned pc_cpu_index_to_socket_id(unsigned cpu_index)
return topo.pkg_id;
}
static CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *machine)
{
PCMachineState *pcms = PC_MACHINE(machine);
int len = sizeof(CPUArchIdList) +
sizeof(CPUArchId) * (pcms->possible_cpus->len);
CPUArchIdList *list = g_malloc(len);
memcpy(list, pcms->possible_cpus, len);
return list;
}
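
pcms->possible_cpus and the copy handed out by pc_possible_cpu_arch_ids() are a small header followed by a variable-length array of per-CPU slots, allocated as sizeof(header) + n * sizeof(slot) and duplicated with a single memcpy of that size. A standalone sketch of that allocate-and-copy pattern with a simplified slot type (the arch_id here is just the index, standing in for x86_cpu_apic_id_from_index()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-ins for CPUArchId / CPUArchIdList. */
    struct cpu_slot {
        uint64_t arch_id;
        void *cpu;
    };

    struct cpu_slot_list {
        int len;
        struct cpu_slot slots[];     /* flexible array member */
    };

    static struct cpu_slot_list *slot_list_new(int max_cpus)
    {
        struct cpu_slot_list *list =
            calloc(1, sizeof(*list) + sizeof(struct cpu_slot) * max_cpus);

        for (int i = 0; i < max_cpus; i++) {
            list->slots[i].arch_id = i;   /* simplified APIC ID */
            list->len++;
        }
        return list;
    }

    /* Same idea as pc_possible_cpu_arch_ids(): hand out a copy so callers
     * can free it independently of the machine's own list. */
    static struct cpu_slot_list *slot_list_copy(const struct cpu_slot_list *src)
    {
        size_t size = sizeof(*src) + sizeof(struct cpu_slot) * src->len;
        struct cpu_slot_list *copy = malloc(size);

        memcpy(copy, src, size);
        return copy;
    }

    int main(void)
    {
        struct cpu_slot_list *possible = slot_list_new(4);
        struct cpu_slot_list *copy = slot_list_copy(possible);

        printf("copy has %d possible CPUs, last arch_id %llu\n", copy->len,
               (unsigned long long)copy->slots[copy->len - 1].arch_id);
        free(copy);
        free(possible);
        return 0;
    }
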
static void pc_machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@ -1953,6 +1981,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
pcmc->save_tsc_khz = true;
mc->get_hotplug_handler = pc_get_hotpug_handler;
mc->cpu_index_to_socket_id = pc_cpu_index_to_socket_id;
mc->possible_cpu_arch_ids = pc_possible_cpu_arch_ids;
mc->default_boot_order = "cad";
mc->hot_add_cpu = pc_hot_add_cpu;
mc->max_cpus = 255;


@ -274,6 +274,11 @@ static void pc_init1(MachineState *machine,
if (pcmc->pci_enabled) {
pc_pci_device_init(pci_bus);
}
if (pcms->acpi_nvdimm_state.is_enabled) {
nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, system_io,
pcms->fw_cfg, OBJECT(pcms));
}
}
/* Looking for a pc_compat_2_4() function? It doesn't exist.


@ -61,6 +61,7 @@ static void pc_q35_init(MachineState *machine)
PCIDevice *lpc;
BusState *idebus[MAX_SATA_PORTS];
ISADevice *rtc_state;
MemoryRegion *system_io = get_system_io();
MemoryRegion *pci_memory;
MemoryRegion *rom_memory;
MemoryRegion *ram_memory;
@ -160,7 +161,7 @@ static void pc_q35_init(MachineState *machine)
q35_host->mch.ram_memory = ram_memory;
q35_host->mch.pci_address_space = pci_memory;
q35_host->mch.system_memory = get_system_memory();
q35_host->mch.address_space_io = get_system_io();
q35_host->mch.address_space_io = system_io;
q35_host->mch.below_4g_mem_size = pcms->below_4g_mem_size;
q35_host->mch.above_4g_mem_size = pcms->above_4g_mem_size;
/* pci */
@ -251,6 +252,11 @@ static void pc_q35_init(MachineState *machine)
if (pcmc->pci_enabled) {
pc_pci_device_init(host_bus);
}
if (pcms->acpi_nvdimm_state.is_enabled) {
nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, system_io,
pcms->fw_cfg, OBJECT(pcms));
}
}
#define DEFINE_Q35_MACHINE(suffix, name, compatfn, optionfn) \


@ -44,7 +44,7 @@ static void xen_apic_realize(DeviceState *dev, Error **errp)
s->vapic_control = 0;
memory_region_init_io(&s->io_memory, OBJECT(s), &xen_apic_io_ops, s,
"xen-apic-msi", APIC_SPACE_SIZE);
msi_supported = true;
msi_nonbroken = true;
}
static void xen_apic_set_base(APICCommonState *s, uint64_t val)


@ -874,7 +874,7 @@ static void apic_realize(DeviceState *dev, Error **errp)
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
local_apics[s->idx] = s;
msi_supported = true;
msi_nonbroken = true;
}
static void apic_class_init(ObjectClass *klass, void *data)


@ -148,7 +148,7 @@ static void gicv2m_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->spi[i]);
}
msi_supported = true;
msi_nonbroken = true;
kvm_gsi_direct_mapping = true;
kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
}


@ -1375,7 +1375,7 @@ static void fsl_common_init(OpenPICState *opp)
opp->irq_msi = 224;
msi_supported = true;
msi_nonbroken = true;
for (i = 0; i < opp->fsl->max_ext; i++) {
opp->src[i].level = false;
}


@ -239,7 +239,7 @@ static void kvm_openpic_realize(DeviceState *dev, Error **errp)
memory_listener_register(&opp->mem_listener, &address_space_memory);
/* indicate pic capabilities */
msi_supported = true;
msi_nonbroken = true;
kvm_kernel_irqchip = true;
kvm_async_interrupts_allowed = true;

(File diff suppressed because it is too large.)


@ -409,18 +409,18 @@ ich9_lpc_pmbase_update(ICH9LPCState *lpc)
ich9_pm_iospace_update(&lpc->pm, pm_io_base);
}
/* config:RBCA */
static void ich9_lpc_rcba_update(ICH9LPCState *lpc, uint32_t rbca_old)
/* config:RCBA */
static void ich9_lpc_rcba_update(ICH9LPCState *lpc, uint32_t rcba_old)
{
uint32_t rbca = pci_get_long(lpc->d.config + ICH9_LPC_RCBA);
uint32_t rcba = pci_get_long(lpc->d.config + ICH9_LPC_RCBA);
if (rbca_old & ICH9_LPC_RCBA_EN) {
memory_region_del_subregion(get_system_memory(), &lpc->rbca_mem);
if (rcba_old & ICH9_LPC_RCBA_EN) {
memory_region_del_subregion(get_system_memory(), &lpc->rcrb_mem);
}
if (rbca & ICH9_LPC_RCBA_EN) {
if (rcba & ICH9_LPC_RCBA_EN) {
memory_region_add_subregion_overlap(get_system_memory(),
rbca & ICH9_LPC_RCBA_BA_MASK,
&lpc->rbca_mem, 1);
rcba & ICH9_LPC_RCBA_BA_MASK,
&lpc->rcrb_mem, 1);
}
}
@ -444,7 +444,7 @@ static int ich9_lpc_post_load(void *opaque, int version_id)
ICH9LPCState *lpc = opaque;
ich9_lpc_pmbase_update(lpc);
ich9_lpc_rcba_update(lpc, 0 /* disabled ICH9_LPC_RBCA_EN */);
ich9_lpc_rcba_update(lpc, 0 /* disabled ICH9_LPC_RCBA_EN */);
ich9_lpc_pmcon_update(lpc);
return 0;
}
@ -453,14 +453,14 @@ static void ich9_lpc_config_write(PCIDevice *d,
uint32_t addr, uint32_t val, int len)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
uint32_t rbca_old = pci_get_long(d->config + ICH9_LPC_RCBA);
uint32_t rcba_old = pci_get_long(d->config + ICH9_LPC_RCBA);
pci_default_write_config(d, addr, val, len);
if (ranges_overlap(addr, len, ICH9_LPC_PMBASE, 4)) {
ich9_lpc_pmbase_update(lpc);
}
if (ranges_overlap(addr, len, ICH9_LPC_RCBA, 4)) {
ich9_lpc_rcba_update(lpc, rbca_old);
ich9_lpc_rcba_update(lpc, rcba_old);
}
if (ranges_overlap(addr, len, ICH9_LPC_PIRQA_ROUT, 4)) {
pci_bus_fire_intx_routing_notifier(lpc->d.bus);
@ -477,7 +477,7 @@ static void ich9_lpc_reset(DeviceState *qdev)
{
PCIDevice *d = PCI_DEVICE(qdev);
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
uint32_t rbca_old = pci_get_long(d->config + ICH9_LPC_RCBA);
uint32_t rcba_old = pci_get_long(d->config + ICH9_LPC_RCBA);
int i;
for (i = 0; i < 4; i++) {
@ -496,13 +496,14 @@ static void ich9_lpc_reset(DeviceState *qdev)
ich9_cc_reset(lpc);
ich9_lpc_pmbase_update(lpc);
ich9_lpc_rcba_update(lpc, rbca_old);
ich9_lpc_rcba_update(lpc, rcba_old);
lpc->sci_level = 0;
lpc->rst_cnt = 0;
}
static const MemoryRegionOps rbca_mmio_ops = {
/* root complex register block is mapped into memory space */
static const MemoryRegionOps rcrb_mmio_ops = {
.read = ich9_cc_read,
.write = ich9_cc_write,
.endianness = DEVICE_LITTLE_ENDIAN,
@ -616,8 +617,8 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
pci_set_long(d->wmask + ICH9_LPC_PMBASE,
ICH9_LPC_PMBASE_BASE_ADDRESS_MASK);
memory_region_init_io(&lpc->rbca_mem, OBJECT(d), &rbca_mmio_ops, lpc,
"lpc-rbca-mmio", ICH9_CC_SIZE);
memory_region_init_io(&lpc->rcrb_mem, OBJECT(d), &rcrb_mmio_ops, lpc,
"lpc-rcrb-mmio", ICH9_CC_SIZE);
lpc->isa_bus = isa_bus;


@ -364,15 +364,22 @@ static void pc_dimm_check_memdev_is_busy(Object *obj, const char *name,
Object *val, Error **errp)
{
MemoryRegion *mr;
Error *local_err = NULL;
mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), errp);
mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), &local_err);
if (local_err) {
goto out;
}
if (memory_region_is_mapped(mr)) {
char *path = object_get_canonical_path_component(val);
error_setg(errp, "can't use already busy memdev: %s", path);
error_setg(&local_err, "can't use already busy memdev: %s", path);
g_free(path);
} else {
qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
qdev_prop_allow_set_link_before_realize(obj, name, val, &local_err);
}
out:
error_propagate(errp, local_err);
}
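
The hunk above converts pc_dimm_check_memdev_is_busy() to the usual QEMU shape of collecting failures into a local error and propagating once at a single exit point, so a failure in the first call skips the later checks. The sketch below shows only that control flow with a tiny stand-in error type; it is not the real Error API (error_setg/error_propagate):

    #include <stdio.h>
    #include <stdlib.h>

    /* Tiny stand-in for QEMU's Error object: not the real API, just enough
     * to show the local_err/propagate control flow. */
    typedef struct Err {
        const char *msg;
    } Err;

    static void err_set(Err **errp, const char *msg)
    {
        if (errp && !*errp) {
            Err *e = malloc(sizeof(*e));
            e->msg = msg;
            *errp = e;
        }
    }

    /* Mirrors the intent of error_propagate(errp, local_err): hand the
     * collected error to the caller, or drop it if the caller passed NULL. */
    static void err_propagate(Err **dst, Err *local)
    {
        if (local && dst && !*dst) {
            *dst = local;
        } else {
            free(local);
        }
    }

    /* Stand-ins for the backend lookup and the busy check in the hunk above. */
    static void get_backend_memory(Err **errp)
    {
        (void)errp;                      /* succeeds in this sketch */
    }

    static void check_not_mapped(Err **errp)
    {
        err_set(errp, "can't use already busy memdev");
    }

    static void check_memdev(Err **errp)
    {
        Err *local_err = NULL;

        get_backend_memory(&local_err);
        if (local_err) {
            goto out;                    /* skip the rest on failure */
        }
        check_not_mapped(&local_err);
    out:
        err_propagate(errp, local_err);  /* single propagation point */
    }

    int main(void)
    {
        Err *err = NULL;

        check_memdev(&err);
        printf("result: %s\n", err ? err->msg : "ok");
        free(err);
        return 0;
    }
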
static void pc_dimm_init(Object *obj)


@ -72,7 +72,7 @@ static int pci_bridge_dev_initfn(PCIDevice *dev)
goto slotid_error;
}
if ((bridge_dev->flags & (1 << PCI_BRIDGE_DEV_F_MSI_REQ)) &&
msi_supported) {
msi_nonbroken) {
err = msi_init(dev, 0, 1, true, true);
if (err < 0) {
goto msi_error;


@ -283,7 +283,7 @@ static void pxb_dev_exitfn(PCIDevice *pci_dev)
}
static Property pxb_dev_properties[] = {
/* Note: 0 is not a legal a PXB bus number. */
/* Note: 0 is not a legal PXB bus number. */
DEFINE_PROP_UINT8("bus_nr", PXBDev, bus_nr, 0),
DEFINE_PROP_UINT16("numa_node", PXBDev, numa_node, NUMA_NODE_UNASSIGNED),
DEFINE_PROP_END_OF_LIST(),


@ -34,8 +34,21 @@
#define PCI_MSI_VECTORS_MAX 32
/* Flag for interrupt controller to declare MSI/MSI-X support */
bool msi_supported;
/*
* Flag for interrupt controllers to declare broken MSI/MSI-X support.
* values: false - broken; true - non-broken.
*
* Setting this flag to false will remove MSI/MSI-X capability from all devices.
*
* It is preferable for controllers to set this to true (non-broken) even if
* they do not actually support MSI/MSI-X: guests normally probe the controller
* type and do not attempt to enable MSI/MSI-X with interrupt controllers that
* do not support it, so removing the capability is not required, and
* it seems cleaner to have a given device look the same for all boards.
*
* TODO: some existing controllers violate the above rule. Identify and fix them.
*/
bool msi_nonbroken;
/* If we get rid of cap allocator, we won't need this. */
static inline uint8_t msi_cap_sizeof(uint16_t flags)
@ -160,7 +173,7 @@ int msi_init(struct PCIDevice *dev, uint8_t offset,
uint8_t cap_size;
int config_offset;
if (!msi_supported) {
if (!msi_nonbroken) {
return -ENOTSUP;
}


@ -249,7 +249,7 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries,
uint8_t *config;
/* Nothing to do if MSI is not supported by interrupt controller */
if (!msi_supported) {
if (!msi_nonbroken) {
return -ENOTSUP;
}


@ -439,7 +439,7 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
_FDT((fdt_property_cell(fdt, "rtas-event-scan-rate",
RTAS_EVENT_SCAN_RATE)));
if (msi_supported) {
if (msi_nonbroken) {
_FDT((fdt_property(fdt, "ibm,change-msix-capable", NULL, 0)));
}
@ -1743,7 +1743,7 @@ static void ppc_spapr_init(MachineState *machine)
bool kernel_le = false;
char *filename;
msi_supported = true;
msi_nonbroken = true;
QLIST_INIT(&spapr->phbs);


@ -1790,7 +1790,7 @@ void spapr_pci_rtas_init(void)
rtas_ibm_read_pci_config);
spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
rtas_ibm_write_pci_config);
if (msi_supported) {
if (msi_nonbroken) {
spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
"ibm,query-interrupt-source-number",
rtas_ibm_query_interrupt_source_number);


@ -597,7 +597,7 @@ static void s390_pcihost_class_init(ObjectClass *klass, void *data)
k->init = s390_pcihost_init;
hc->plug = s390_pcihost_hot_plug;
hc->unplug = s390_pcihost_hot_unplug;
msi_supported = true;
msi_nonbroken = true;
}
static const TypeInfo s390_pcihost_info = {

View File

@@ -53,6 +53,7 @@ static const char *balloon_stat_names[] = {
[VIRTIO_BALLOON_S_MINFLT] = "stat-minor-faults",
[VIRTIO_BALLOON_S_MEMFREE] = "stat-free-memory",
[VIRTIO_BALLOON_S_MEMTOT] = "stat-total-memory",
[VIRTIO_BALLOON_S_AVAIL] = "stat-available-memory",
[VIRTIO_BALLOON_S_NR] = NULL
};
@@ -101,7 +102,7 @@ static void balloon_stats_poll_cb(void *opaque)
VirtIOBalloon *s = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
if (!balloon_stats_supported(s)) {
if (s->stats_vq_elem == NULL || !balloon_stats_supported(s)) {
/* re-schedule */
balloon_stats_change_timer(s, s->stats_poll_interval);
return;
@@ -258,11 +259,20 @@ static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
size_t offset = 0;
qemu_timeval tv;
s->stats_vq_elem = elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
goto out;
}
if (s->stats_vq_elem != NULL) {
/* This should never happen if the driver follows the spec. */
virtqueue_push(vq, s->stats_vq_elem, 0);
virtio_notify(vdev, vq);
g_free(s->stats_vq_elem);
}
s->stats_vq_elem = elem;
/* Initialize the stats to get rid of any stale values. This is only
* needed to handle the case where a guest supports fewer stats than it
* used to (i.e. it has booted into an old kernel).
@@ -458,6 +468,16 @@ static void virtio_balloon_device_unrealize(DeviceState *dev, Error **errp)
virtio_cleanup(vdev);
}
static void virtio_balloon_device_reset(VirtIODevice *vdev)
{
VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
if (s->stats_vq_elem != NULL) {
g_free(s->stats_vq_elem);
s->stats_vq_elem = NULL;
}
}
static void virtio_balloon_instance_init(Object *obj)
{
VirtIOBalloon *s = VIRTIO_BALLOON(obj);
@@ -486,6 +506,7 @@ static void virtio_balloon_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
vdc->realize = virtio_balloon_device_realize;
vdc->unrealize = virtio_balloon_device_unrealize;
vdc->reset = virtio_balloon_device_reset;
vdc->get_config = virtio_balloon_get_config;
vdc->set_config = virtio_balloon_set_config;
vdc->get_features = virtio_balloon_get_features;
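The balloon changes above enforce a single-in-flight invariant for the stats virtqueue element: any stale element is returned to the guest before a new one is stored, and the element is freed (and the pointer cleared) in the new reset hook. A hypothetical helper, not part of the patch, capturing the store step:

/* Illustrative only: keep at most one popped stats element around. */
static void balloon_store_stats_elem(VirtIODevice *vdev, VirtQueue *vq,
                                     VirtIOBalloon *s, VirtQueueElement *elem)
{
    if (s->stats_vq_elem != NULL) {
        /* should never happen with a spec-compliant driver */
        virtqueue_push(vq, s->stats_vq_elem, 0);
        virtio_notify(vdev, vq);
        g_free(s->stats_vq_elem);
    }
    s->stats_vq_elem = elem;           /* freed again in the reset hook */
}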

View File

@@ -47,6 +47,7 @@
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);
/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
@@ -404,9 +405,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
case VIRTIO_PCI_QUEUE_PFN:
pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (pa == 0) {
virtio_pci_stop_ioeventfd(proxy);
virtio_reset(vdev);
msix_unuse_all_vectors(&proxy->pci_dev);
virtio_pci_reset(DEVICE(proxy));
}
else
virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
@@ -432,8 +431,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
}
if (vdev->status == 0) {
virtio_reset(vdev);
msix_unuse_all_vectors(&proxy->pci_dev);
virtio_pci_reset(DEVICE(proxy));
}
/* Linux before 2.6.34 drives the device without enabling
@@ -1353,8 +1351,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
}
if (vdev->status == 0) {
virtio_reset(vdev);
msix_unuse_all_vectors(&proxy->pci_dev);
virtio_pci_reset(DEVICE(proxy));
}
break;
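The three hunks above replace the open-coded stop-ioeventfd / virtio_reset / msix_unuse_all_vectors sequence with one call, so every legacy and modern reset path behaves the same. The body of virtio_pci_reset() is not shown in this diff; a sketch of the shape it is expected to have (the vdev lookup is an assumption):

static void virtio_pci_reset_sketch(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); /* assumed accessor */

    virtio_pci_stop_ioeventfd(proxy);
    if (vdev) {
        virtio_reset(vdev);
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
}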

View File

@@ -58,30 +58,33 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
#define VIRTIO_PCI_BUS_CLASS(klass) \
OBJECT_CLASS_CHECK(VirtioPCIBusClass, klass, TYPE_VIRTIO_PCI_BUS)
enum {
VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT,
VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT,
VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT,
VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT,
VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
};
/* Need to activate work-arounds for buggy guests at vmstate load. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT 0
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION \
(1 << VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT)
/* Performance improves when virtqueue kick processing is decoupled from the
* vcpu thread using ioeventfd for some devices. */
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
/* virtio version flags */
#define VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT 2
#define VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT 3
#define VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT 4
#define VIRTIO_PCI_FLAG_DISABLE_LEGACY (1 << VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT)
#define VIRTIO_PCI_FLAG_DISABLE_MODERN (1 << VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT)
#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
/* migrate extra state */
#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT 4
#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT)
/* have PIO notification for modern device? */
#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT 5
#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
(1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
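The _BIT constants above exist so the flags can be wired up as bit properties, while code tests the shifted masks. An illustrative pairing; the property name "disable-legacy" is an assumption, not taken from this diff:

static Property example_props[] = {
    DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static bool example_legacy_enabled(VirtIOPCIProxy *proxy)
{
    return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
}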

View File

@@ -285,7 +285,7 @@ Aml *aml_interrupt(AmlConsumerAndProducer con_and_pro,
Aml *aml_io(AmlIODecode dec, uint16_t min_base, uint16_t max_base,
uint8_t aln, uint8_t len);
Aml *aml_operation_region(const char *name, AmlRegionSpace rs,
uint32_t offset, uint32_t len);
Aml *offset, uint32_t len);
Aml *aml_irq_no_flags(uint8_t irq);
Aml *aml_named_field(const char *name, unsigned length);
Aml *aml_reserved_field(unsigned length);
@@ -344,6 +344,8 @@ Aml *aml_mutex(const char *name, uint8_t sync_level);
Aml *aml_acquire(Aml *mutex, uint16_t timeout);
Aml *aml_release(Aml *mutex);
Aml *aml_alias(const char *source_object, const char *alias_object);
Aml *aml_create_field(Aml *srcbuf, Aml *bit_index, Aml *num_bits,
const char *name);
Aml *aml_create_dword_field(Aml *srcbuf, Aml *index, const char *name);
Aml *aml_create_qword_field(Aml *srcbuf, Aml *index, const char *name);
Aml *aml_varpackage(uint32_t num_elements);
@@ -351,6 +353,7 @@ Aml *aml_touuid(const char *uuid);
Aml *aml_unicode(const char *str);
Aml *aml_derefof(Aml *arg);
Aml *aml_sizeof(Aml *arg);
Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target);
void
build_header(GArray *linker, GArray *table_data,
@@ -365,4 +368,7 @@ void
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets,
const char *oem_id, const char *oem_table_id);
int
build_append_named_dword(GArray *array, const char *name_format, ...);
#endif
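The new helpers (aml_create_field(), aml_concatenate(), and the Aml* offset form of aml_operation_region()) let table code describe offsets and field positions that are only known at runtime, which the NVDIMM _DSM buffer handling needs. A sketch combining them, using only the prototypes above plus the pre-existing aml_append()/aml_name()/aml_int()/aml_local() helpers; the names "NRAM", "MEMA" and "RLEN" are illustrative:

static void example_build(Aml *method)
{
    /* operation region whose offset is an AML object, not a C constant */
    aml_append(method, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
                                            aml_name("MEMA"), 4096));
    /* named field carved out of a buffer at a runtime bit offset/width */
    aml_append(method, aml_create_field(aml_local(0), aml_int(0),
                                        aml_int(32), "RLEN"));
    /* concatenate two buffers into a target buffer */
    aml_append(method, aml_concatenate(aml_local(1), aml_local(2),
                                       aml_local(0)));
}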

View File

@@ -15,5 +15,7 @@ void sun4m_fdctrl_init(qemu_irq irq, hwaddr io_base,
DriveInfo **fds, qemu_irq *fdc_tc);
FloppyDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i);
void isa_fdc_get_drive_max_chs(FloppyDriveType type,
uint8_t *maxc, uint8_t *maxh, uint8_t *maxs);
#endif

View File

@@ -8,6 +8,7 @@
#include "sysemu/accel.h"
#include "hw/qdev.h"
#include "qom/object.h"
#include "qom/cpu.h"
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
const char *name,
@@ -41,6 +42,26 @@ int machine_phandle_start(MachineState *machine);
bool machine_dump_guest_core(MachineState *machine);
bool machine_mem_merge(MachineState *machine);
/**
* CPUArchId:
* @arch_id - architecture-dependent CPU ID of a present or possible CPU
* @cpu - pointer to the corresponding CPU object if it's present, or NULL otherwise
*/
typedef struct {
uint64_t arch_id;
struct CPUState *cpu;
} CPUArchId;
/**
* CPUArchIdList:
* @len - number of @CPUArchId items in @cpus array
* @cpus - array of present or possible CPUs for current machine configuration
*/
typedef struct {
int len;
CPUArchId cpus[0];
} CPUArchIdList;
/**
* MachineClass:
* @get_hotplug_handler: this function is called during bus-less
@@ -57,6 +78,10 @@ bool machine_mem_merge(MachineState *machine);
* Set only by old machines because they need to keep
* compatibility on code that exposed QEMU_VERSION to guests in
* the past (and now use qemu_hw_version()).
* @possible_cpu_arch_ids:
* Returns an array of @CPUArchId architecture-dependent CPU IDs
* which includes CPU IDs of present CPUs and of CPUs that can be hotplugged.
* Caller is responsible for freeing the returned list.
*/
struct MachineClass {
/*< private >*/
@@ -98,6 +123,7 @@ struct MachineClass {
HotplugHandler *(*get_hotplug_handler)(MachineState *machine,
DeviceState *dev);
unsigned (*cpu_index_to_socket_id)(unsigned cpu_index);
CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine);
};
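A board implements the new hook by returning a heap-allocated CPUArchIdList sized for its maximum CPU count; the caller frees it. A minimal sketch assuming only the declarations above (the identity cpu_index-to-arch_id mapping and the hard-coded count are illustrative, not what pc.c does):

static CPUArchIdList *example_possible_cpu_arch_ids(MachineState *machine)
{
    int i, max_cpus = 4;              /* stand-in for the board's real limit */
    CPUArchIdList *list = g_malloc0(sizeof(*list) +
                                    sizeof(CPUArchId) * max_cpus);

    list->len = max_cpus;
    for (i = 0; i < max_cpus; i++) {
        list->cpus[i].arch_id = i;    /* arch-specific ID in a real board */
        list->cpus[i].cpu = NULL;     /* set when the CPU actually exists */
    }
    return list;                      /* caller is expected to g_free() it */
}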
/**

View File

@@ -23,7 +23,7 @@ I2CBus *ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base);
void ich9_generate_smi(void);
void ich9_generate_nmi(void);
#define ICH9_CC_SIZE (16 * 1024) /* 16KB */
#define ICH9_CC_SIZE (16 * 1024) /* 16KB. Chipset configuration registers */
#define TYPE_ICH9_LPC_DEVICE "ICH9-LPC"
#define ICH9_LPC_DEVICE(obj) \
@@ -65,7 +65,7 @@ typedef struct ICH9LPCState {
/* isa bus */
ISABus *isa_bus;
MemoryRegion rbca_mem;
MemoryRegion rcrb_mem; /* root complex register block */
Notifier machine_ready;
qemu_irq *pic;

View File

@@ -17,6 +17,7 @@
#include "hw/boards.h"
#include "hw/compat.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
#define HPET_INTCAP "hpet-intcap"
@@ -57,7 +58,8 @@ struct PCMachineState {
uint64_t max_ram_below_4g;
OnOffAuto vmport;
OnOffAuto smm;
bool nvdimm;
AcpiNVDIMMState acpi_nvdimm_state;
/* RAM information (sizes, addresses, configuration): */
ram_addr_t below_4g_mem_size, above_4g_mem_size;
@@ -65,6 +67,7 @@ struct PCMachineState {
/* CPU and apic information: */
bool apic_xrupt_override;
unsigned apic_id_limit;
CPUArchIdList *possible_cpus;
/* NUMA information: */
uint64_t numa_nodes;
@@ -265,6 +268,7 @@ typedef void (*cpu_set_smm_t)(int smm, void *arg);
void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name);
ISADevice *pc_find_fdc0(void);
int cmos_get_fd_drive_type(FloppyDriveType fd0);
#define FW_CFG_IO_BASE 0x510

View File

@@ -25,8 +25,38 @@
#include "hw/mem/pc-dimm.h"
#define NVDIMM_DEBUG 0
#define nvdimm_debug(fmt, ...) \
do { \
if (NVDIMM_DEBUG) { \
fprintf(stderr, "nvdimm: " fmt, ## __VA_ARGS__); \
} \
} while (0)
#define TYPE_NVDIMM "nvdimm"
#define NVDIMM_DSM_MEM_FILE "etc/acpi/nvdimm-mem"
/*
* A 32-bit IO port starting at 0x0a18 in the guest is reserved for
* NVDIMM ACPI emulation.
*/
#define NVDIMM_ACPI_IO_BASE 0x0a18
#define NVDIMM_ACPI_IO_LEN 4
struct AcpiNVDIMMState {
/* whether NVDIMM support is enabled. */
bool is_enabled;
/* the data of the fw_cfg file NVDIMM_DSM_MEM_FILE. */
GArray *dsm_mem;
/* the IO region used by OSPM to transfer control to QEMU. */
MemoryRegion io_mr;
};
typedef struct AcpiNVDIMMState AcpiNVDIMMState;
void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
FWCfgState *fw_cfg, Object *owner);
void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
GArray *linker);
#endif
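The header gives the machine two entry points: nvdimm_init_acpi_state(), which presumably wires up the NVDIMM_DSM_MEM_FILE fw_cfg blob and the 4-byte IO window at 0x0a18, and nvdimm_build_acpi() for the table-building pass. A sketch of how a board might call them, gated on the is_enabled flag; the exact call sites in pc/acpi-build are not shown in this header, so treat the placement as an assumption:

static void example_wire_nvdimm(PCMachineState *pcms, MemoryRegion *io,
                                FWCfgState *fw_cfg)
{
    if (pcms->acpi_nvdimm_state.is_enabled) {
        nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, io, fw_cfg,
                               OBJECT(pcms));
    }
}

static void example_nvdimm_tables(PCMachineState *pcms, GArray *table_offsets,
                                  GArray *table_data, GArray *linker)
{
    if (pcms->acpi_nvdimm_state.is_enabled) {
        nvdimm_build_acpi(table_offsets, table_data, linker);
    }
}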

View File

@@ -29,7 +29,7 @@ struct MSIMessage {
uint32_t data;
};
extern bool msi_supported;
extern bool msi_nonbroken;
void msi_set_message(PCIDevice *dev, MSIMessage msg);
MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector);

View File

@@ -51,7 +51,8 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */
#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
#define VIRTIO_BALLOON_S_NR 6
#define VIRTIO_BALLOON_S_AVAIL 6 /* Amount of available memory in guest */
#define VIRTIO_BALLOON_S_NR 7
/*
* Memory statistics structure.

View File

@@ -27,7 +27,6 @@ typedef struct VhostUserState {
typedef struct VhostUserChardevProps {
bool is_socket;
bool is_unix;
bool is_server;
} VhostUserChardevProps;
VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
@@ -179,6 +178,8 @@ static void net_vhost_user_event(void *opaque, int event)
queues = qemu_find_net_clients_except(name, ncs,
NET_CLIENT_OPTIONS_KIND_NIC,
MAX_QUEUE_NUM);
assert(queues < MAX_QUEUE_NUM);
s = DO_UPCAST(VhostUserState, nc, ncs[0]);
trace_vhost_user_event(s->chr->label, event);
switch (event) {
@@ -207,6 +208,9 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
VhostUserState *s;
int i;
assert(name);
assert(queues > 0);
for (i = 0; i < queues; i++) {
nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
@@ -219,7 +223,7 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
s->chr = chr;
}
qemu_chr_add_handlers(chr, NULL, NULL, net_vhost_user_event, (void*)name);
qemu_chr_add_handlers(chr, NULL, NULL, net_vhost_user_event, nc[0].name);
return 0;
}
@@ -235,7 +239,6 @@ static int net_vhost_chardev_opts(void *opaque,
} else if (strcmp(name, "path") == 0) {
props->is_unix = true;
} else if (strcmp(name, "server") == 0) {
props->is_server = true;
} else {
error_setg(errp,
"vhost-user does not support a chardev with option %s=%s",
@@ -317,9 +320,10 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
}
queues = vhost_user_opts->has_queues ? vhost_user_opts->queues : 1;
if (queues < 1) {
if (queues < 1 || queues > MAX_QUEUE_NUM) {
error_setg(errp,
"vhost-user number of queues must be bigger than zero");
"vhost-user number of queues must be in range [1, %d]",
MAX_QUEUE_NUM);
return -1;
}
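The switch from the caller's name parameter to nc[0].name as the qemu_chr_add_handlers() opaque is best read as a lifetime fix: the event callback runs long after net_vhost_user_init() has returned, so the opaque must point into storage owned by the long-lived net client (this reading is inferred from the diff, not quoted from a commit message). A self-contained toy program illustrating the idiom, with no QEMU types:

#include <stdio.h>

struct client { char name[32]; };
static const char *registered_opaque;

static void register_handler(const char *opaque) { registered_opaque = opaque; }
static void fire_event(void) { printf("event for %s\n", registered_opaque); }

static void init_client(struct client *c, const char *name)
{
    snprintf(c->name, sizeof(c->name), "%s", name);
    /* bad:  register_handler(name);  the caller's string may go away   */
    register_handler(c->name);        /* good: points into the client   */
}

int main(void)
{
    struct client c;
    init_client(&c, "net0");
    fire_event();                     /* safe: c.name is still alive */
    return 0;
}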

View File

@@ -2697,6 +2697,7 @@ static int tcp_set_msgfds(CharDriverState *chr, int *fds, int num)
}
/* clear old pending fd array */
g_free(s->write_msgfds);
s->write_msgfds = NULL;
if (num) {
s->write_msgfds = g_new(int, num);
@@ -2768,11 +2769,16 @@ static void tcp_chr_disconnect(CharDriverState *chr)
{
TCPCharDriver *s = chr->opaque;
if (!s->connected) {
return;
}
s->connected = 0;
if (s->listen_ioc) {
s->listen_tag = qio_channel_add_watch(
QIO_CHANNEL(s->listen_ioc), G_IO_IN, tcp_chr_accept, chr, NULL);
}
tcp_set_msgfds(chr, NULL, 0);
remove_fd_in_watch(chr);
object_unref(OBJECT(s->sioc));
s->sioc = NULL;
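Two small hardening idioms above: tcp_set_msgfds() now clears write_msgfds after freeing it, so the tcp_set_msgfds(chr, NULL, 0) call in the disconnect path cannot double-free, and tcp_chr_disconnect() returns early when already disconnected, making teardown safe to reach from several error paths. A self-contained sketch of both:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct conn {
    bool connected;
    int *write_fds;
};

static void conn_set_fds(struct conn *c, const int *fds, int num)
{
    free(c->write_fds);
    c->write_fds = NULL;              /* prevents a later double free */
    if (num) {
        c->write_fds = malloc(num * sizeof(int));
        memcpy(c->write_fds, fds, num * sizeof(int));
    }
}

static void conn_disconnect(struct conn *c)
{
    if (!c->connected) {
        return;                       /* idempotent: already torn down */
    }
    c->connected = false;
    conn_set_fds(c, NULL, 0);         /* safe even if called again later */
}

int main(void)
{
    struct conn c = { .connected = true, .write_fds = NULL };
    conn_disconnect(&c);
    conn_disconnect(&c);              /* second call is a no-op */
    return 0;
}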
