Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc,pci,virtio: bugfixes, improvements

Fixes all over the place. Faster boot for virtio. ioeventfd support for
mmio.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Fri 14 May 2021 15:27:13 BST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream:
  Fix build with 64 bits time_t
  vhost-vdpa: Make vhost_vdpa_get_device_id() static
  hw/virtio: enable ioeventfd configuring for mmio
  hw/smbios: support for type 41 (onboard devices extended information)
  checkpatch: Fix use of uninitialized value
  virtio-scsi: Configure all host notifiers in a single MR transaction
  virtio-scsi: Set host notifiers and callbacks separately
  virtio-blk: Configure all host notifiers in a single MR transaction
  virtio-blk: Fix rollback path in virtio_blk_data_plane_start()
  pc-dimm: remove unnecessary get_vmstate_memory_region() method
  amd_iommu: fix wrong MMIO operations
  virtio-net: Constify VirtIOFeature feature_sizes[]
  virtio-blk: Constify VirtIOFeature feature_sizes[]
  hw/virtio: Pass virtio_feature_get_config_size() a const argument
  x86: acpi: use offset instead of pointer when using build_header()
  amd_iommu: Fix pte_override_page_mask()

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
#	hw/arm/virt.c
commit 6005ee07c3 (Peter Maydell, 2021-05-16 17:22:46 +01:00)
23 changed files with 311 additions and 75 deletions

@@ -6,12 +6,13 @@
#include "qemu/osdep.h"
#include <linux/input.h>
#include <sys/ioctl.h>
#include "qemu/iov.h"
#include "qemu/bswap.h"
#include "qemu/sockets.h"
#include "libvhost-user-glib.h"
#include "standard-headers/linux/input.h"
#include "standard-headers/linux/virtio_input.h"
#include "qapi/error.h"
@@ -113,13 +114,16 @@ vi_evdev_watch(VuDev *dev, int condition, void *data)
static void vi_handle_status(VuInput *vi, virtio_input_event *event)
{
struct input_event evdev;
struct timeval tval;
int rc;
if (gettimeofday(&evdev.time, NULL)) {
if (gettimeofday(&tval, NULL)) {
perror("vi_handle_status: gettimeofday");
return;
}
evdev.input_event_sec = tval.tv_sec;
evdev.input_event_usec = tval.tv_usec;
evdev.type = le16toh(event->type);
evdev.code = le16toh(event->code);
evdev.value = le32toh(event->value);
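
A note on the "Fix build with 64 bits time_t" hunk above: with a 64-bit time_t, struct input_event from <linux/input.h> no longer exposes a struct timeval member, so the timestamp has to be filled through the input_event_sec/input_event_usec accessors, which exist for both layouts. A minimal sketch of the portable pattern (illustrative only):

    struct input_event evdev;
    struct timeval tval;

    /* fill the timestamp without naming evdev.time, which may not exist */
    if (gettimeofday(&tval, NULL) == 0) {
        evdev.input_event_sec  = tval.tv_sec;
        evdev.input_event_usec = tval.tv_usec;
    }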

@@ -1830,6 +1830,7 @@ build_rsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
int i;
unsigned rsdt_entries_offset;
AcpiRsdtDescriptorRev1 *rsdt;
int rsdt_start = table_data->len;
const unsigned table_data_len = (sizeof(uint32_t) * table_offsets->len);
const unsigned rsdt_entry_size = sizeof(rsdt->table_offset_entry[0]);
const size_t rsdt_len = sizeof(*rsdt) + table_data_len;
@@ -1846,7 +1847,8 @@ build_rsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
ACPI_BUILD_TABLE_FILE, ref_tbl_offset);
}
build_header(linker, table_data,
(void *)rsdt, "RSDT", rsdt_len, 1, oem_id, oem_table_id);
(void *)(table_data->data + rsdt_start),
"RSDT", rsdt_len, 1, oem_id, oem_table_id);
}
/* Build xsdt table */
@@ -1857,6 +1859,7 @@ build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
int i;
unsigned xsdt_entries_offset;
AcpiXsdtDescriptorRev2 *xsdt;
int xsdt_start = table_data->len;
const unsigned table_data_len = (sizeof(uint64_t) * table_offsets->len);
const unsigned xsdt_entry_size = sizeof(xsdt->table_offset_entry[0]);
const size_t xsdt_len = sizeof(*xsdt) + table_data_len;
@@ -1873,7 +1876,8 @@ build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
ACPI_BUILD_TABLE_FILE, ref_tbl_offset);
}
build_header(linker, table_data,
(void *)xsdt, "XSDT", xsdt_len, 1, oem_id, oem_table_id);
(void *)(table_data->data + xsdt_start),
"XSDT", xsdt_len, 1, oem_id, oem_table_id);
}
void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
@@ -2053,10 +2057,9 @@ void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog,
uint64_t control_area_start_address;
TPMIf *tpmif = tpm_find();
uint32_t start_method;
void *tpm2_ptr;
tpm2_start = table_data->len;
tpm2_ptr = acpi_data_push(table_data, sizeof(AcpiTableHeader));
acpi_data_push(table_data, sizeof(AcpiTableHeader));
/* Platform Class */
build_append_int_noprefix(table_data, TPM2_ACPI_CLASS_CLIENT, 2);
@@ -2095,8 +2098,8 @@ void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog,
log_addr_offset, 8,
ACPI_BUILD_TPMLOG_FILE, 0);
build_header(linker, table_data,
tpm2_ptr, "TPM2", table_data->len - tpm2_start, 4, oem_id,
oem_table_id);
(void *)(table_data->data + tpm2_start),
"TPM2", table_data->len - tpm2_start, 4, oem_id, oem_table_id);
}
Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset,
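
The build_header() callers above are switched from a saved pointer to an offset because acpi_data_push() grows the table_data GArray and can reallocate its backing storage: a pointer taken before the push may dangle by the time build_header() uses it, while table_data->data + offset is recomputed after all growth. A standalone GLib sketch of the hazard (illustrative, not QEMU code):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        GArray *table = g_array_new(FALSE, TRUE, 1);

        g_array_set_size(table, 64);
        guint start = 0;                              /* an offset survives growth */
        char *stale = &g_array_index(table, char, 0); /* a pointer may not         */

        g_array_set_size(table, 1 << 20);             /* may realloc and move data */

        printf("saved pointer %p, recomputed address %p\n",
               (void *)stale, (void *)(table->data + start));

        g_array_unref(table);
        return 0;
    }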

@@ -50,6 +50,7 @@
#include "sysemu/tpm.h"
#include "sysemu/kvm.h"
#include "hw/loader.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
@@ -1521,8 +1522,10 @@ static void virt_build_smbios(VirtMachineState *vms)
vmc->smbios_old_sys_ver ? "1.0" : mc->name, false,
true, SMBIOS_ENTRY_POINT_30);
smbios_get_tables(MACHINE(vms), NULL, 0, &smbios_tables, &smbios_tables_len,
&smbios_anchor, &smbios_anchor_len);
smbios_get_tables(MACHINE(vms), NULL, 0,
&smbios_tables, &smbios_tables_len,
&smbios_anchor, &smbios_anchor_len,
&error_fatal);
if (smbios_anchor) {
fw_cfg_add_file(vms->fw_cfg, "etc/smbios/smbios-tables",

@@ -198,19 +198,30 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
goto fail_guest_notifiers;
}
memory_region_transaction_begin();
/* Set up virtqueue notify */
for (i = 0; i < nvqs; i++) {
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
if (r != 0) {
int j = i;
fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
while (i--) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
memory_region_transaction_commit();
while (j--) {
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
}
goto fail_guest_notifiers;
goto fail_host_notifiers;
}
}
memory_region_transaction_commit();
s->starting = false;
vblk->dataplane_started = true;
trace_virtio_blk_data_plane_start(s);
@@ -221,7 +232,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
aio_context_release(old_context);
if (r < 0) {
error_report_err(local_err);
goto fail_guest_notifiers;
goto fail_aio_context;
}
/* Process queued requests before the ones in vring */
@@ -245,6 +256,20 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
aio_context_release(s->ctx);
return 0;
fail_aio_context:
memory_region_transaction_begin();
for (i = 0; i < nvqs; i++) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
memory_region_transaction_commit();
for (i = 0; i < nvqs; i++) {
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}
fail_host_notifiers:
k->set_guest_notifiers(qbus->parent, nvqs, false);
fail_guest_notifiers:
/*
* If we failed to set up the guest notifiers queued requests will be
@@ -305,8 +330,15 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
aio_context_release(s->ctx);
memory_region_transaction_begin();
for (i = 0; i < nvqs; i++) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
memory_region_transaction_commit();
for (i = 0; i < nvqs; i++) {
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}

@@ -40,7 +40,7 @@
* Starting from the discard feature, we can use this array to properly
* set the config size depending on the features enabled.
*/
static VirtIOFeature feature_sizes[] = {
static const VirtIOFeature feature_sizes[] = {
{.flags = 1ULL << VIRTIO_BLK_F_DISCARD,
.end = endof(struct virtio_blk_config, discard_sector_alignment)},
{.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES,

@@ -1815,6 +1815,7 @@ build_hpet(GArray *table_data, BIOSLinker *linker, const char *oem_id,
const char *oem_table_id)
{
Acpi20Hpet *hpet;
int hpet_start = table_data->len;
hpet = acpi_data_push(table_data, sizeof(*hpet));
/* Note timer_block_id value must be kept in sync with value advertised by
@@ -1823,13 +1824,15 @@ build_hpet(GArray *table_data, BIOSLinker *linker, const char *oem_id,
hpet->timer_block_id = cpu_to_le32(0x8086a201);
hpet->addr.address = cpu_to_le64(HPET_BASE);
build_header(linker, table_data,
(void *)hpet, "HPET", sizeof(*hpet), 1, oem_id, oem_table_id);
(void *)(table_data->data + hpet_start),
"HPET", sizeof(*hpet), 1, oem_id, oem_table_id);
}
static void
build_tpm_tcpa(GArray *table_data, BIOSLinker *linker, GArray *tcpalog,
const char *oem_id, const char *oem_table_id)
{
int tcpa_start = table_data->len;
Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa);
unsigned log_addr_size = sizeof(tcpa->log_area_start_address);
unsigned log_addr_offset =
@@ -1848,7 +1851,8 @@ build_tpm_tcpa(GArray *table_data, BIOSLinker *linker, GArray *tcpalog,
ACPI_BUILD_TPMLOG_FILE, 0);
build_header(linker, table_data,
(void *)tcpa, "TCPA", sizeof(*tcpa), 2, oem_id, oem_table_id);
(void *)(table_data->data + tcpa_start),
"TCPA", sizeof(*tcpa), 2, oem_id, oem_table_id);
}
#define HOLE_640K_START (640 * KiB)

@@ -99,7 +99,7 @@ static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
}
/* internal write */
static void amdvi_writeq_raw(AMDVIState *s, uint64_t val, hwaddr addr)
static void amdvi_writeq_raw(AMDVIState *s, hwaddr addr, uint64_t val)
{
stq_le_p(&s->mmior[addr], val);
}
@@ -382,7 +382,7 @@ static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
}
/* set completion interrupt */
if (extract64(cmd[0], 1, 1)) {
amdvi_test_mask(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
/* generate interrupt */
amdvi_generate_msi_interrupt(s);
}
@@ -553,7 +553,7 @@ static void amdvi_cmdbuf_run(AMDVIState *s)
trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
amdvi_cmdbuf_exec(s);
s->cmdbuf_head += AMDVI_COMMAND_SIZE;
amdvi_writeq_raw(s, s->cmdbuf_head, AMDVI_MMIO_COMMAND_HEAD);
amdvi_writeq_raw(s, AMDVI_MMIO_COMMAND_HEAD, s->cmdbuf_head);
/* wrap head pointer */
if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
@@ -860,8 +860,8 @@ static inline uint8_t get_pte_translation_mode(uint64_t pte)
static inline uint64_t pte_override_page_mask(uint64_t pte)
{
uint8_t page_mask = 12;
uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) ^ AMDVI_DEV_PT_ROOT_MASK;
uint8_t page_mask = 13;
uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) >> 12;
/* find the first zero bit */
while (addr & 1) {
page_mask++;
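
On the pte_override_page_mask() fix: in the AMD IOMMU's page-size-override encoding, the page size is given by the position of the first zero bit of the PTE's address field scanning up from bit 12, so the scan must start at bit 12 (hence the >> 12) and the smallest encodable page is 8 KiB (hence the initial 13-bit mask). A worked example with a made-up PTE value, assuming the loop continues with addr >>= 1 as in the full function:

    uint64_t pte = 0x3000;     /* address bits 12 and 13 set, bit 14 clear */
    uint8_t page_mask = 13;
    uint64_t addr = pte >> 12; /* AMDVI_DEV_PT_ROOT_MASK masking omitted   */

    while (addr & 1) {         /* find the first zero bit */
        page_mask++;
        addr >>= 1;
    }
    /* page_mask is now 15, i.e. a 32 KiB override page */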

@@ -22,6 +22,7 @@
#include "hw/nvram/fw_cfg.h"
#include "e820_memory_layout.h"
#include "kvm/kvm_i386.h"
#include "qapi/error.h"
#include CONFIG_DEVICES
struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX};
@@ -78,7 +79,8 @@ void fw_cfg_build_smbios(MachineState *ms, FWCfgState *fw_cfg)
}
smbios_get_tables(ms, mem_array, array_count,
&smbios_tables, &smbios_tables_len,
&smbios_anchor, &smbios_anchor_len);
&smbios_anchor, &smbios_anchor_len,
&error_fatal);
g_free(mem_array);
if (smbios_anchor) {

@@ -193,13 +193,16 @@ static void virtio_input_host_handle_status(VirtIOInput *vinput,
{
VirtIOInputHost *vih = VIRTIO_INPUT_HOST(vinput);
struct input_event evdev;
struct timeval tval;
int rc;
if (gettimeofday(&evdev.time, NULL)) {
if (gettimeofday(&tval, NULL)) {
perror("virtio_input_host_handle_status: gettimeofday");
return;
}
evdev.input_event_sec = tval.tv_sec;
evdev.input_event_usec = tval.tv_usec;
evdev.type = le16_to_cpu(event->type);
evdev.code = le16_to_cpu(event->code);
evdev.value = le32_to_cpu(event->value);

@@ -34,6 +34,16 @@
static int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp);
static MemoryRegion *pc_dimm_get_memory_region(PCDIMMDevice *dimm, Error **errp)
{
if (!dimm->hostmem) {
error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property must be set");
return NULL;
}
return host_memory_backend_get_memory(dimm->hostmem);
}
void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine,
const uint64_t *legacy_align, Error **errp)
{
@@ -66,8 +76,7 @@ void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine,
void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine)
{
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm,
MemoryRegion *vmstate_mr = pc_dimm_get_memory_region(dimm,
&error_abort);
memory_device_plug(MEMORY_DEVICE(dimm), machine);
@@ -76,8 +85,7 @@ void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine)
void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine)
{
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm,
MemoryRegion *vmstate_mr = pc_dimm_get_memory_region(dimm,
&error_abort);
memory_device_unplug(MEMORY_DEVICE(dimm), machine);
@@ -205,16 +213,6 @@ static void pc_dimm_unrealize(DeviceState *dev)
host_memory_backend_set_mapped(dimm->hostmem, false);
}
static MemoryRegion *pc_dimm_get_memory_region(PCDIMMDevice *dimm, Error **errp)
{
if (!dimm->hostmem) {
error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property must be set");
return NULL;
}
return host_memory_backend_get_memory(dimm->hostmem);
}
static uint64_t pc_dimm_md_get_addr(const MemoryDeviceState *md)
{
return object_property_get_uint(OBJECT(md), PC_DIMM_ADDR_PROP,
@@ -266,7 +264,6 @@ static void pc_dimm_md_fill_device_info(const MemoryDeviceState *md,
static void pc_dimm_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
dc->realize = pc_dimm_realize;
@@ -274,8 +271,6 @@ static void pc_dimm_class_init(ObjectClass *oc, void *data)
device_class_set_props(dc, pc_dimm_properties);
dc->desc = "DIMM memory module";
ddc->get_vmstate_memory_region = pc_dimm_get_memory_region;
mdc->get_addr = pc_dimm_md_get_addr;
mdc->set_addr = pc_dimm_md_set_addr;
/* for a dimm plugged_size == region_size */

@@ -89,7 +89,7 @@
VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
static VirtIOFeature feature_sizes[] = {
static const VirtIOFeature feature_sizes[] = {
{.flags = 1ULL << VIRTIO_NET_F_MAC,
.end = endof(struct virtio_net_config, mac)},
{.flags = 1ULL << VIRTIO_NET_F_STATUS,

@@ -94,8 +94,7 @@ static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
return progress;
}
static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
VirtIOHandleAIOOutput fn)
static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
int rc;
@@ -109,7 +108,6 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
return rc;
}
virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, fn);
return 0;
}
@@ -154,40 +152,55 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
goto fail_guest_notifiers;
}
aio_context_acquire(s->ctx);
rc = virtio_scsi_vring_init(s, vs->ctrl_vq, 0,
virtio_scsi_data_plane_handle_ctrl);
if (rc) {
goto fail_vrings;
memory_region_transaction_begin();
rc = virtio_scsi_set_host_notifier(s, vs->ctrl_vq, 0);
if (rc != 0) {
goto fail_host_notifiers;
}
vq_init_count++;
rc = virtio_scsi_vring_init(s, vs->event_vq, 1,
virtio_scsi_data_plane_handle_event);
if (rc) {
goto fail_vrings;
rc = virtio_scsi_set_host_notifier(s, vs->event_vq, 1);
if (rc != 0) {
goto fail_host_notifiers;
}
vq_init_count++;
for (i = 0; i < vs->conf.num_queues; i++) {
rc = virtio_scsi_vring_init(s, vs->cmd_vqs[i], i + 2,
virtio_scsi_data_plane_handle_cmd);
rc = virtio_scsi_set_host_notifier(s, vs->cmd_vqs[i], i + 2);
if (rc) {
goto fail_vrings;
goto fail_host_notifiers;
}
vq_init_count++;
}
memory_region_transaction_commit();
aio_context_acquire(s->ctx);
virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx,
virtio_scsi_data_plane_handle_ctrl);
virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx,
virtio_scsi_data_plane_handle_event);
for (i = 0; i < vs->conf.num_queues; i++) {
virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx,
virtio_scsi_data_plane_handle_cmd);
}
s->dataplane_starting = false;
s->dataplane_started = true;
aio_context_release(s->ctx);
return 0;
fail_vrings:
aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);
aio_context_release(s->ctx);
fail_host_notifiers:
for (i = 0; i < vq_init_count; i++) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
memory_region_transaction_commit();
for (i = 0; i < vq_init_count; i++) {
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}
k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
@@ -225,8 +238,15 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
blk_drain_all(); /* ensure there are no in-flight requests */
memory_region_transaction_begin();
for (i = 0; i < vs->conf.num_queues + 2; i++) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
memory_region_transaction_commit();
for (i = 0; i < vs->conf.num_queues + 2; i++) {
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}

@@ -27,6 +27,7 @@
#include "hw/firmware/smbios.h"
#include "hw/loader.h"
#include "hw/boards.h"
#include "hw/pci/pci_bus.h"
#include "smbios_build.h"
/* legacy structures and constants for <= 2.0 machines */
@@ -118,6 +119,28 @@ static struct {
uint16_t speed;
} type17;
static QEnumLookup type41_kind_lookup = {
.array = (const char *const[]) {
"other",
"unknown",
"video",
"scsi",
"ethernet",
"tokenring",
"sound",
"pata",
"sata",
"sas",
},
.size = 10
};
struct type41_instance {
const char *designation, *pcidev;
uint8_t instance, kind;
QTAILQ_ENTRY(type41_instance) next;
};
static QTAILQ_HEAD(, type41_instance) type41 = QTAILQ_HEAD_INITIALIZER(type41);
static QemuOptsList qemu_smbios_opts = {
.name = "smbios",
.head = QTAILQ_HEAD_INITIALIZER(qemu_smbios_opts.head),
@@ -358,6 +381,32 @@ static const QemuOptDesc qemu_smbios_type17_opts[] = {
{ /* end of list */ }
};
static const QemuOptDesc qemu_smbios_type41_opts[] = {
{
.name = "type",
.type = QEMU_OPT_NUMBER,
.help = "SMBIOS element type",
},{
.name = "designation",
.type = QEMU_OPT_STRING,
.help = "reference designation string",
},{
.name = "kind",
.type = QEMU_OPT_STRING,
.help = "device type",
.def_value_str = "other",
},{
.name = "instance",
.type = QEMU_OPT_NUMBER,
.help = "device type instance",
},{
.name = "pcidev",
.type = QEMU_OPT_STRING,
.help = "PCI device",
},
{ /* end of list */ }
};
static void smbios_register_config(void)
{
qemu_add_opts(&qemu_smbios_opts);
@@ -773,6 +822,53 @@ static void smbios_build_type_32_table(void)
SMBIOS_BUILD_TABLE_POST;
}
static void smbios_build_type_41_table(Error **errp)
{
unsigned instance = 0;
struct type41_instance *t41;
QTAILQ_FOREACH(t41, &type41, next) {
SMBIOS_BUILD_TABLE_PRE(41, 0x2900 + instance, true);
SMBIOS_TABLE_SET_STR(41, reference_designation_str, t41->designation);
t->device_type = t41->kind;
t->device_type_instance = t41->instance;
t->segment_group_number = cpu_to_le16(0);
t->bus_number = 0;
t->device_number = 0;
if (t41->pcidev) {
PCIDevice *pdev = NULL;
int rc = pci_qdev_find_device(t41->pcidev, &pdev);
if (rc != 0) {
error_setg(errp,
"No PCI device %s for SMBIOS type 41 entry %s",
t41->pcidev, t41->designation);
return;
}
/*
* We only handle the case where the device is attached to
* the PCI root bus. The general case is more complex as
* bridges are enumerated later and the table would need
* to be updated at this moment.
*/
if (!pci_bus_is_root(pci_get_bus(pdev))) {
error_setg(errp,
"Cannot create type 41 entry for PCI device %s: "
"not attached to the root bus",
t41->pcidev);
return;
}
t->segment_group_number = cpu_to_le16(0);
t->bus_number = pci_dev_bus_num(pdev);
t->device_number = pdev->devfn;
}
SMBIOS_BUILD_TABLE_POST;
instance++;
}
}
static void smbios_build_type_127_table(void)
{
SMBIOS_BUILD_TABLE_PRE(127, 0x7F00, true); /* required */
@@ -883,7 +979,8 @@ void smbios_get_tables(MachineState *ms,
const struct smbios_phys_mem_area *mem_array,
const unsigned int mem_array_size,
uint8_t **tables, size_t *tables_len,
uint8_t **anchor, size_t *anchor_len)
uint8_t **anchor, size_t *anchor_len,
Error **errp)
{
unsigned i, dimm_cnt;
@@ -928,6 +1025,7 @@ void smbios_get_tables(MachineState *ms,
smbios_build_type_32_table();
smbios_build_type_38_table();
smbios_build_type_41_table(errp);
smbios_build_type_127_table();
smbios_validate_table(ms);
@@ -1224,6 +1322,30 @@ void smbios_entry_add(QemuOpts *opts, Error **errp)
save_opt(&type17.part, opts, "part");
type17.speed = qemu_opt_get_number(opts, "speed", 0);
return;
case 41: {
struct type41_instance *t;
Error *local_err = NULL;
if (!qemu_opts_validate(opts, qemu_smbios_type41_opts, errp)) {
return;
}
t = g_new0(struct type41_instance, 1);
save_opt(&t->designation, opts, "designation");
t->kind = qapi_enum_parse(&type41_kind_lookup,
qemu_opt_get(opts, "kind"),
0, &local_err) + 1;
t->kind |= 0x80; /* enabled */
if (local_err != NULL) {
error_propagate(errp, local_err);
g_free(t);
return;
}
t->instance = qemu_opt_get_number(opts, "instance", 1);
save_opt(&t->pcidev, opts, "pcidev");
QTAILQ_INSERT_TAIL(&type41, t, next);
return;
}
default:
error_setg(errp,
"Don't know how to build fields for SMBIOS type %ld",

@@ -371,7 +371,7 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
return 0;
}
int vhost_vdpa_get_device_id(struct vhost_dev *dev,
static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
uint32_t *device_id)
{
int ret;

@@ -36,7 +36,9 @@
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
return kvm_eventfds_enabled();
VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
return (proxy->flags & VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD) != 0;
}
static int virtio_mmio_ioeventfd_assign(DeviceState *d,
@@ -720,6 +722,8 @@ static Property virtio_mmio_properties[] = {
DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
format_transport_address, true),
DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags,
VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_END_OF_LIST(),
};
@@ -731,6 +735,11 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
d, NULL);
sysbus_init_irq(sbd, &proxy->irq);
if (!kvm_eventfds_enabled()) {
proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
}
if (proxy->legacy) {
memory_region_init_io(&proxy->iomem, OBJECT(d),
&virtio_legacy_mem_ops, proxy,
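
With the "ioeventfd" property added above, ioeventfd is on by default for virtio-mmio transports whenever KVM eventfds are available (the realize hook clears the flag otherwise), and it can be disabled explicitly. A hypothetical command-line fragment, using the property name from the hunk above:

    -global virtio-mmio.ioeventfd=off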

@@ -2981,7 +2981,7 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
return ret;
}
size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes,
size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
uint64_t host_features)
{
size_t config_size = 0;
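
For context on the constification: the VirtIOFeature tables are read-only lookup data, and virtio_feature_get_config_size() only derives the config size from whichever feature bits are set in the passed mask, so const is the natural qualifier. A usage sketch, assuming a zero-terminated table as in the virtio-blk/virtio-net callers (host_features here stands for the device's feature bitmap):

    static const VirtIOFeature example_sizes[] = {
        { .flags = 1ULL << VIRTIO_BLK_F_DISCARD,
          .end = endof(struct virtio_blk_config, discard_sector_alignment) },
        {} /* terminator */
    };

    size_t config_size = virtio_feature_get_config_size(example_sizes,
                                                        host_features);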

@@ -258,6 +258,17 @@ struct smbios_type_32 {
uint8_t boot_status;
} QEMU_PACKED;
/* SMBIOS type 41 - Onboard Devices Extended Information */
struct smbios_type_41 {
struct smbios_structure_header header;
uint8_t reference_designation_str;
uint8_t device_type;
uint8_t device_type_instance;
uint16_t segment_group_number;
uint8_t bus_number;
uint8_t device_number;
} QEMU_PACKED;
/* SMBIOS type 127 -- End-of-table */
struct smbios_type_127 {
struct smbios_structure_header header;
@@ -273,5 +284,6 @@ void smbios_get_tables(MachineState *ms,
const struct smbios_phys_mem_area *mem_array,
const unsigned int mem_array_size,
uint8_t **tables, size_t *tables_len,
uint8_t **anchor, size_t *anchor_len);
uint8_t **anchor, size_t *anchor_len,
Error **errp);
#endif /* QEMU_SMBIOS_H */

@@ -56,9 +56,6 @@ struct PCDIMMDevice {
* PCDIMMDeviceClass:
* @realize: called after common dimm is realized so that the dimm based
* devices get the chance to do specified operations.
* @get_vmstate_memory_region: returns #MemoryRegion which indicates the
* memory of @dimm should be kept during live migration. Will not fail
* after the device was realized.
*/
struct PCDIMMDeviceClass {
/* private */
@@ -66,8 +63,6 @@ struct PCDIMMDeviceClass {
/* public */
void (*realize)(PCDIMMDevice *dimm, Error **errp);
MemoryRegion *(*get_vmstate_memory_region)(PCDIMMDevice *dimm,
Error **errp);
};
void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine,

@@ -22,6 +22,4 @@ typedef struct vhost_vdpa {
} VhostVDPA;
extern AddressSpace address_space_memory;
extern int vhost_vdpa_get_device_id(struct vhost_dev *dev,
uint32_t *device_id);
#endif

@@ -49,12 +49,17 @@ typedef struct VirtIOMMIOQueue {
uint32_t used[2];
} VirtIOMMIOQueue;
#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT 1
#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD \
(1 << VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT)
struct VirtIOMMIOProxy {
/* Generic */
SysBusDevice parent_obj;
MemoryRegion iomem;
qemu_irq irq;
bool legacy;
uint32_t flags;
/* Guest accessible state needing migration and reset */
uint32_t host_features_sel;
uint32_t guest_features_sel;

@@ -43,7 +43,7 @@ typedef struct VirtIOFeature {
size_t end;
} VirtIOFeature;
size_t virtio_feature_get_config_size(VirtIOFeature *features,
size_t virtio_feature_get_config_size(const VirtIOFeature *features,
uint64_t host_features);
typedef struct VirtQueue VirtQueue;

@@ -2370,7 +2370,9 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios,
" specify SMBIOS type 11 fields\n"
"-smbios type=17[,loc_pfx=str][,bank=str][,manufacturer=str][,serial=str]\n"
" [,asset=str][,part=str][,speed=%d]\n"
" specify SMBIOS type 17 fields\n",
" specify SMBIOS type 17 fields\n"
"-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n"
" specify SMBIOS type 41 fields\n",
QEMU_ARCH_I386 | QEMU_ARCH_ARM)
SRST
``-smbios file=binary``
@@ -2432,6 +2434,32 @@ SRST
``-smbios type=17[,loc_pfx=str][,bank=str][,manufacturer=str][,serial=str][,asset=str][,part=str][,speed=%d]``
Specify SMBIOS type 17 fields
``-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]``
Specify SMBIOS type 41 fields
This argument can be repeated multiple times. Its main use is to allow network interfaces to be created
as ``enoX`` on Linux, with X being the instance number, instead of a name derived from the interface's
position on the PCI bus.
Here is an example of use:
.. parsed-literal::
-netdev user,id=internet \\
-device virtio-net-pci,mac=50:54:00:00:00:42,netdev=internet,id=internet-dev \\
-smbios type=41,designation='Onboard LAN',instance=1,kind=ethernet,pcidev=internet-dev
In the guest OS, the device should then appear as ``eno1``:
.. parsed-literal::
$ ip -brief l
lo UNKNOWN 00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
eno1 UP 50:54:00:00:00:42 <BROADCAST,MULTICAST,UP,LOWER_UP>
Currently, the PCI device has to be attached to the root bus.
ERST
DEFHEADING()

@@ -1532,6 +1532,7 @@ sub process {
($line =~ /\{\s*([\w\/\.\-]*)\s*\=\>\s*([\w\/\.\-]*)\s*\}/ &&
(defined($1) || defined($2)))) &&
!(($realfile ne '') &&
defined($acpi_testexpected) &&
($realfile eq $acpi_testexpected))) {
$reported_maintainer_file = 1;
WARN("added, moved or deleted file(s), does MAINTAINERS need updating?\n" . $herecurr);