/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Paul Brook <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/boards.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/error-report.h"
|
2020-12-10 20:28:34 +03:00
|
|
|
#include "qemu/log.h"
|
2019-05-23 17:35:07 +03:00
|
|
|
#include "qemu/module.h"
|
2013-02-04 18:40:22 +04:00
|
|
|
#include "hw/pci/msi.h"
|
|
|
|
#include "hw/pci/msix.h"
|
|
|
|
#include "hw/loader.h"
|
2012-12-17 21:20:04 +04:00
|
|
|
#include "sysemu/kvm.h"
|
2022-03-21 18:30:25 +03:00
|
|
|
#include "hw/virtio/virtio-pci.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/range.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/virtio/virtio-bus.h"
|
2013-04-14 16:07:00 +04:00
|
|
|
#include "qapi/visitor.h"
|
2021-05-17 16:06:28 +03:00
|
|
|
#include "sysemu/replay.h"
|
2022-03-21 18:30:26 +03:00
|
|
|
#include "trace.h"
|
2009-05-18 17:51:59 +04:00
|
|
|
|
2015-02-17 00:36:43 +03:00
|
|
|
#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))
|
2009-06-21 20:50:26 +04:00
|
|
|
|
2015-06-04 13:34:21 +03:00
|
|
|
#undef VIRTIO_PCI_CONFIG
|
|
|
|
|
2009-06-21 20:50:26 +04:00
|
|
|
/* The remaining space is defined by each driver as the per-driver
|
|
|
|
* configuration space */
|
2015-02-17 00:36:43 +03:00
|
|
|
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
|
2009-05-18 17:51:59 +04:00
|
|
|
|
2013-08-23 22:35:18 +04:00
|
|
|
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

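/*
 * Guest interrupt delivery: with MSI-X enabled the configured vector is
 * signalled directly; otherwise the legacy INTx line is driven from bit 0
 * of the device ISR.
 */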
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        if (vector != VIRTIO_NO_VECTOR) {
            msix_notify(&proxy->pci_dev, vector);
        }
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, vdev->config_vector);
}

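/*
 * Extra migration state for the PCI proxy itself.  The top-level
 * "virtio_pci" vmstate below carries no fields of its own; the modern
 * (virtio 1.0) proxy registers travel in an optional subsection, gated
 * on virtio_pci_modern(), so legacy-only streams are unaffected.
 */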
static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);

        if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
            return -EINVAL;
        }
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    vdev->config_vector = vector;
    if (vector != VIRTIO_NO_VECTOR) {
        msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
        if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
            return -EINVAL;
        }
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

typedef struct VirtIOPCIIDInfo {
    /* virtio id */
    uint16_t vdev_id;
    /* pci device id for the transitional device */
    uint16_t trans_devid;
    uint16_t class_id;
} VirtIOPCIIDInfo;

static const VirtIOPCIIDInfo virtio_pci_id_info[] = {
    {
        .vdev_id = VIRTIO_ID_CRYPTO,
        .class_id = PCI_CLASS_OTHERS,
    }, {
        .vdev_id = VIRTIO_ID_FS,
        .class_id = PCI_CLASS_STORAGE_OTHER,
    }, {
        .vdev_id = VIRTIO_ID_NET,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_NET,
        .class_id = PCI_CLASS_NETWORK_ETHERNET,
    }, {
        .vdev_id = VIRTIO_ID_BLOCK,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_BLOCK,
        .class_id = PCI_CLASS_STORAGE_SCSI,
    }, {
        .vdev_id = VIRTIO_ID_CONSOLE,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_CONSOLE,
        .class_id = PCI_CLASS_COMMUNICATION_OTHER,
    }, {
        .vdev_id = VIRTIO_ID_SCSI,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_SCSI,
        .class_id = PCI_CLASS_STORAGE_SCSI
    }, {
        .vdev_id = VIRTIO_ID_9P,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_9P,
        .class_id = PCI_BASE_CLASS_NETWORK,
    }, {
        .vdev_id = VIRTIO_ID_BALLOON,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_BALLOON,
        .class_id = PCI_CLASS_OTHERS,
    }, {
        .vdev_id = VIRTIO_ID_RNG,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_RNG,
        .class_id = PCI_CLASS_OTHERS,
    },
};

static const VirtIOPCIIDInfo *virtio_pci_get_id_info(uint16_t vdev_id)
{
    const VirtIOPCIIDInfo *info = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(virtio_pci_id_info); i++) {
        if (virtio_pci_id_info[i].vdev_id == vdev_id) {
            info = &virtio_pci_id_info[i];
            break;
        }
    }

    if (!info) {
        /* The device id is invalid or not added to the id_info yet. */
        error_report("Invalid virtio device(id %u)", vdev_id);
        abort();
    }

    return info;
}

/*
 * Get the Transitional Device ID for the specific device, return
 * zero if the device is non-transitional.
 */
uint16_t virtio_pci_get_trans_devid(uint16_t device_id)
{
    return virtio_pci_get_id_info(device_id)->trans_devid;
}

/*
 * Get the Class ID for the specific device.
 */
uint16_t virtio_pci_get_class_id(uint16_t device_id)
{
    return virtio_pci_get_id_info(device_id)->class_id;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

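/*
 * Bind or unbind the host notifier for queue n on every notify window the
 * proxy exposes: the modern MMIO notify region (offset scaled by
 * virtio_pci_queue_mem_mult()), the optional modern PIO notify region, and
 * the legacy VIRTIO_PCI_QUEUE_NOTIFY ioport.
 */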
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                      false, n, notifier);
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                      false, n, notifier);
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

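/*
 * Legacy (virtio 0.9) register block: feature negotiation, queue PFN
 * programming, status updates and MSI-X vector selection are all handled
 * through this single ioport write path.
 */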
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        if (vdev->config_vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        }
        /* Make it possible for guest to discover an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        vector = virtio_queue_vector(vdev, vdev->queue_sel);
        if (vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vector);
        }
        /* Make it possible for guest to discover an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: unexpected address 0x%x value 0x%x\n",
                      __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = qatomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

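/*
 * Writes to the PCIe ATS capability's Enable bit are tracked so the
 * virtio device model can switch its device IOTLB on or off to match.
 */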
static void virtio_pci_ats_ctrl_trigger(PCIDevice *pci_dev, bool enable)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    vdev->device_iotlb_enabled = enable;

    if (k->toggle_device_iotlb) {
        k->toggle_device_iotlb(vdev);
    }
}

static void pcie_ats_config_write(PCIDevice *dev, uint32_t address,
                                  uint32_t val, int len)
{
    uint32_t off;
    uint16_t ats_cap = dev->exp.ats_cap;

    if (!ats_cap || address < ats_cap) {
        return;
    }
    off = address - ats_cap;
    if (off >= PCI_EXT_CAP_ATS_SIZEOF) {
        return;
    }

    if (range_covers_byte(off, len, PCI_ATS_CTRL + 1)) {
        virtio_pci_ats_ctrl_trigger(dev, !!(val & PCI_ATS_CTRL_ENABLE));
    }
}

static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
        pcie_ats_config_write(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t caplen;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        caplen = le32_to_cpu(cfg->cap.length);

        if (caplen == 1 || caplen == 2 || caplen == 4) {
            assert(caplen <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, caplen);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t caplen;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        caplen = le32_to_cpu(cfg->cap.length);

        if (caplen == 1 || caplen == 2 || caplen == 4) {
            assert(caplen <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, caplen);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

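/*
 * KVM irqfd plumbing.  Each MSI-X vector gets a single KVM MSI route;
 * the per-vector 'users' count lets virtqueues that share a vector reuse
 * the route and release it only when the last user is gone.
 */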
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
        ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        kvm_irqchip_commit_route_changes(&c);
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    EventNotifier *n,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         EventNotifier *n,
                                         unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

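/*
 * Map queue_no to its guest notifier and MSI-X vector.  The special index
 * VIRTIO_CONFIG_IRQ_IDX selects the configuration-change interrupt rather
 * than a virtqueue.
 */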
static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
                                   EventNotifier **n, unsigned int *vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq;

    if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
        *n = virtio_config_get_guest_notifier(vdev);
        *vector = vdev->config_vector;
    } else {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            return -1;
        }
        *vector = virtio_queue_vector(vdev, queue_no);
        vq = virtio_get_queue(vdev, queue_no);
        *n = virtio_queue_get_guest_notifier(vq);
    }
    return 0;
}

static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
{
    unsigned int vector;
    int ret;
    EventNotifier *n;
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return ret;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return 0;
    }
    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
    if (ret < 0) {
        goto undo;
    }
    /*
     * If guest supports masking, set up irqfd now.
     * Otherwise, delay until unmasked in the frontend.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
        if (ret < 0) {
            kvm_virtio_pci_vq_vector_release(proxy, vector);
            goto undo;
        }
    }

    return 0;
undo:

    vector = virtio_queue_vector(vdev, queue_no);
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return ret;
    }
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
        if (ret < 0) {
            return ret;
        }
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    return ret;
}

static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    int ret = 0;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            return -1;
        }
        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
    }
    return ret;
}

static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
{
    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}

static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
                                              int queue_no)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    EventNotifier *n;
    int ret;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    PCIDevice *dev = &proxy->pci_dev;

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return;
    }
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    kvm_virtio_pci_vq_vector_release(proxy, vector);
}

static void kvm_virtio_pci_vector_vq_release(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        kvm_virtio_pci_vector_release_one(proxy, queue_no);
    }
}

static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
{
    kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}

static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg,
                                        EventNotifier *n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
    }
    return ret;
}

static void virtio_pci_one_vector_mask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       EventNotifier *n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    EventNotifier *n;
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            n = virtio_queue_get_guest_notifier(vq);
            ret = virtio_pci_one_vector_unmask(proxy, index, vector, msg, n);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    /* unmask config intr */
    if (vector == vdev->config_vector) {
        n = virtio_config_get_guest_notifier(vdev);
        ret = virtio_pci_one_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector,
                                           msg, n);
        if (ret < 0) {
            goto undo_config;
        }
    }
    return 0;
undo_config:
    n = virtio_config_get_guest_notifier(vdev);
    virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            n = virtio_queue_get_guest_notifier(vq);
            virtio_pci_one_vector_mask(proxy, index, vector, n);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    EventNotifier *n;
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        n = virtio_queue_get_guest_notifier(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_one_vector_mask(proxy, index, vector, n);
        }
        vq = virtio_vector_next_queue(vq);
    }

    if (vector == vdev->config_vector) {
        n = virtio_config_get_guest_notifier(vdev);
        virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    int ret;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        ret = virtio_pci_get_notifier(proxy, queue_no, &notifier, &vector);
        if (ret < 0) {
            break;
        }
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
    /* poll the config intr */
    ret = virtio_pci_get_notifier(proxy, VIRTIO_CONFIG_IRQ_IDX, &notifier,
                                  &vector);
    if (ret < 0) {
        return;
    }
    if (vector < vector_start || vector >= vector_end ||
        !msix_is_masked(dev, vector)) {
        return;
    }
    if (k->guest_notifier_pending) {
        if (k->guest_notifier_pending(vdev, VIRTIO_CONFIG_IRQ_IDX)) {
            msix_set_pending(dev, vector);
        }
    } else if (event_notifier_test_and_clear(notifier)) {
        msix_set_pending(dev, vector);
    }
}

void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq,
|
|
|
|
int n, bool assign,
|
|
|
|
bool with_irqfd)
|
|
|
|
{
|
|
|
|
if (n == VIRTIO_CONFIG_IRQ_IDX) {
|
|
|
|
virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
|
|
|
|
} else {
|
|
|
|
virtio_queue_set_guest_notifier_fd_handler(vq, assign, with_irqfd);
|
|
|
|
}
|
2012-12-18 16:02:46 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
|
|
|
|
bool with_irqfd)
|
2010-03-17 14:08:13 +03:00
|
|
|
{
|
2012-12-17 15:01:07 +04:00
|
|
|
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
|
2013-09-20 15:36:40 +04:00
|
|
|
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
|
|
|
|
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
|
2022-12-22 10:04:51 +03:00
|
|
|
VirtQueue *vq = NULL;
|
|
|
|
EventNotifier *notifier = NULL;
|
|
|
|
|
|
|
|
if (n == VIRTIO_CONFIG_IRQ_IDX) {
|
|
|
|
notifier = virtio_config_get_guest_notifier(vdev);
|
|
|
|
} else {
|
|
|
|
vq = virtio_get_queue(vdev, n);
|
|
|
|
notifier = virtio_queue_get_guest_notifier(vq);
|
|
|
|
}
|
2010-03-17 14:08:13 +03:00
|
|
|
|
|
|
|
if (assign) {
|
|
|
|
int r = event_notifier_init(notifier, 0);
|
|
|
|
if (r < 0) {
|
|
|
|
return r;
|
|
|
|
}
|
2022-12-22 10:04:51 +03:00
|
|
|
virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, true, with_irqfd);
|
2010-03-17 14:08:13 +03:00
|
|
|
} else {
|
2022-12-22 10:04:51 +03:00
|
|
|
virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, false,
|
|
|
|
with_irqfd);
|
2010-03-17 14:08:13 +03:00
|
|
|
event_notifier_cleanup(notifier);
|
|
|
|
}
|
|
|
|
|
2016-02-18 17:12:23 +03:00
|
|
|
if (!msix_enabled(&proxy->pci_dev) &&
|
|
|
|
vdev->use_guest_notifier_mask &&
|
|
|
|
vdc->guest_notifier_mask) {
|
2013-09-20 15:36:40 +04:00
|
|
|
vdc->guest_notifier_mask(vdev, n, !assign);
|
2013-05-07 16:49:58 +04:00
|
|
|
}
|
|
|
|
|
2010-03-17 14:08:13 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-12-17 15:01:07 +04:00
|
|
|
static bool virtio_pci_query_guest_notifiers(DeviceState *d)
|
2011-02-01 23:13:42 +03:00
|
|
|
{
|
2012-12-17 15:01:07 +04:00
|
|
|
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
|
2011-02-01 23:13:42 +03:00
|
|
|
return msix_enabled(&proxy->pci_dev);
|
|
|
|
}
|
|
|
|
|
2012-12-20 16:28:58 +04:00
|
|
|
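/*
 * Assign or release guest notifiers for the first nvqs virtqueues and for the
 * config interrupt.  Ordering matters: vector notifiers are torn down while
 * the guest notifiers are still assigned, and are only installed again after
 * the guest notifiers have been (re)assigned.  The error labels unwind in the
 * reverse order of assignment.
 */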
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /*
     * When deassigning, pass a consistent nvqs value to avoid leaking
     * notifiers.  But first check that we've actually been configured;
     * exit early if we haven't.
     */
    if (!assign && !proxy->nvqs_with_notifiers) {
        return 0;
    }
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd ||
         (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) &&
        !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_vq_release(proxy, nvqs);
            kvm_virtio_pci_vector_config_release(proxy);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    r = virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, assign,
                                      with_irqfd);
    if (r < 0) {
        goto config_assign_error;
    }
    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd ||
         (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) &&
        assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_vq_use(proxy, nvqs);
            if (r < 0) {
                goto config_assign_error;
            }
            r = kvm_virtio_pci_vector_config_use(proxy);
            if (r < 0) {
                goto config_error;
            }
        }

        r = msix_set_vector_notifiers(&proxy->pci_dev, virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_vq_release(proxy, nvqs);
    }
config_error:
    if (with_irqfd) {
        kvm_virtio_pci_vector_config_release(proxy);
    }
config_assign_error:
    virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, !assign,
                                  with_irqfd);
assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    g_free(proxy->vector_irqfd);
    proxy->vector_irqfd = NULL;
    return r;
}

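/*
 * Map (or unmap) a caller-provided MemoryRegion over queue n's slot in the
 * modern notify region, typically so that guest notify writes can be handled
 * without trapping into QEMU.
 */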
static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static bool virtio_pci_iommu_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;
    AddressSpace *dma_as = pci_device_iommu_address_space(dev);

    if (dma_as == &address_space_memory) {
        return false;
    }

    return true;
}

static bool virtio_pci_queue_enabled(DeviceState *d, int n)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return proxy->vqs[n].enabled;
    }

    return virtio_queue_enabled_legacy(vdev, n);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

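/*
 * Add a VIRTIO_PCI_CAP_SHARED_MEMORY_CFG vendor capability describing a
 * shared memory region; the 64-bit offset and length are split across the
 * cap64 low/high fields.
 */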
int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy,
                           uint8_t bar, uint64_t offset, uint64_t length,
                           uint8_t id)
{
    struct virtio_pci_cap64 cap = {
        .cap.cap_len = sizeof cap,
        .cap.cfg_type = VIRTIO_PCI_CAP_SHARED_MEMORY_CFG,
    };

    cap.cap.bar = bar;
    cap.cap.length = cpu_to_le32(length);
    cap.length_hi = cpu_to_le32(length >> 32);
    cap.cap.offset = cpu_to_le32(offset);
    cap.offset_hi = cpu_to_le32(offset >> 32);
    cap.cap.id = id;
    return virtio_pci_add_mem_cap(proxy, &cap.cap);
}

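/*
 * MMIO handlers for the virtio 1.x "common configuration" structure
 * (VIRTIO_PCI_CAP_COMMON_CFG).  Reads return the shadow state kept in the
 * proxy (feature/queue selectors, ring addresses) or query the VirtIODevice
 * directly; an absent backing device reads as all-ones.
 */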
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    case VIRTIO_PCI_COMMON_Q_RESET:
        val = proxy->vqs[vdev->queue_sel].reset;
        break;
    default:
        val = 0;
    }

    return val;
}

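/*
 * Write side of the common configuration structure.  Queue addresses and
 * sizes are staged in the proxy and only committed to the VirtIODevice when
 * the guest writes 1 to queue_enable; writing 0 to the status field resets
 * the whole device.
 */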
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (vdev == NULL) {
        return;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        if (vdev->config_vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        }
        /* Make it possible for the guest to discover that an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_init_region_cache(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        vector = virtio_queue_vector(vdev, vdev->queue_sel);
        if (vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vector);
        }
        /* Make it possible for the guest to discover that an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        if (val == 1) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
            proxy->vqs[vdev->queue_sel].reset = 0;
            virtio_queue_enable(vdev, vdev->queue_sel);
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_RESET:
        if (val == 1) {
            proxy->vqs[vdev->queue_sel].reset = 1;

            virtio_queue_reset(vdev, vdev->queue_sel);

            proxy->vqs[vdev->queue_sel].reset = 0;
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    default:
        break;
    }
}

static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    if (virtio_bus_get_device(&proxy->bus) == NULL) {
        return UINT64_MAX;
    }

    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        trace_virtio_pci_notify_write(addr, val, size);
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = val;

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        trace_virtio_pci_notify_write_pio(addr, val, size);
        virtio_queue_notify(vdev, queue);
    }
}

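/*
 * The ISR status byte is read-to-clear: reading it atomically fetches and
 * clears the interrupt status and deasserts the INTx line.
 */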
static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    val = qatomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);
    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    default:
        val = 0;
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

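/*
 * Create the MMIO regions (common, isr, device, notify and the optional
 * notify-pio region) that back the modern capabilities; each region name is
 * suffixed with the device name.
 */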
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
                                           const char *vdev_name)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    g_autoptr(GString) name = g_string_new(NULL);

    g_string_printf(name, "virtio-pci-common-%s", vdev_name);
    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          name->str,
                          proxy->common.size);

    g_string_printf(name, "virtio-pci-isr-%s", vdev_name);
    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          name->str,
                          proxy->isr.size);

    g_string_printf(name, "virtio-pci-device-%s", vdev_name);
    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          proxy,
                          name->str,
                          proxy->device.size);

    g_string_printf(name, "virtio-pci-notify-%s", vdev_name);
    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          proxy,
                          name->str,
                          proxy->notify.size);

    g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name);
    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          proxy,
                          name->str,
                          proxy->notify_pio.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuse guests
     */
    if (!proxy->ignore_backend_features &&
        !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            /*
             * To avoid migration issues, we allow legacy mode when legacy
             * check is disabled in the old machine types (< 5.1).
             */
            if (virtio_legacy_check_disabled(vdev)) {
                warn_report("device is modern-only, but for backward "
                            "compatibility legacy is allowed");
            } else {
                error_setg(errp,
                           "device is modern-only, use disable-legacy=on");
                return;
            }
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
        if (proxy->trans_devid) {
            pci_config_set_device_id(config, proxy->trans_devid);
        }
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     PCI_DEVICE_ID_VIRTIO_10_BASE + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy, vdev->name);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}

static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

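/*
 * Common realize for every virtio-pci device: pick the default BAR layout,
 * size the modern BAR, decide between legacy/modern presentation and set up
 * the PCI Express capabilities before handing over to the subclass realize
 * callback.
 */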
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    msix_uninit_exclusive_bar(pci_dev);
    if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
        pci_is_express(pci_dev)) {
        pcie_aer_exit(pci_dev);
    }
}

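/*
 * Reset the virtio bus and clear the per-queue shadow state (enable, reset,
 * size and ring addresses) kept in the proxy.
 */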
static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].reset = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }
}

static void virtio_pci_bus_reset_hold(Object *obj)
{
    PCIDevice *dev = PCI_DEVICE(obj);
    DeviceState *qdev = DEVICE(obj);

    virtio_pci_reset(qdev);

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_AER_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    rc->phases.hold = virtio_pci_bus_reset_hold;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

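/*
 * Type registration helpers: virtio_pci_types_register() below creates an
 * abstract base type plus, depending on what the caller provides, a generic,
 * a transitional (legacy+modern, Conventional PCI only) and a
 * non-transitional (modern only) concrete type.
 */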
static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .instance_finalize = t->instance_finalize,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}


unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues)
{
    /*
     * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted
     * virtqueue buffers can handle their completion. When a different vCPU
     * handles completion it may need to IPI the vCPU that submitted the
     * request and this adds overhead.
     *
     * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in
     * guests with very many vCPUs and a device that is only used by a few
     * vCPUs. Unfortunately optimizing that case requires manual pinning inside
     * the guest, so those users might as well manually set the number of
     * queues. There is no upper limit that can be applied automatically and
     * doing so arbitrarily would result in a sudden performance drop once the
     * threshold number of vCPUs is exceeded.
     */
    unsigned num_queues = current_machine->smp.cpus;

    /*
     * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the
     * config change interrupt and the fixed virtqueues must be taken into
     * account too.
     */
    num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues);

    /* There is a limit to how many virtqueues a device can have. */
    return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues);
}
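
/*
 * Worked example (assuming PCI_MSIX_FLAGS_QSIZE is 0x7ff and
 * VIRTIO_QUEUE_MAX is 1024, the values in the current headers): a guest
 * with 8 vCPUs and a device with 2 fixed virtqueues gets
 *
 *     MIN(MIN(8, 0x7ff - 2), 1024 - 2) = 8
 *
 * request queues, so in practice the vCPU count is the deciding limit.  A
 * multiqueue device would typically call this from its realize path when
 * the user did not set num_queues explicitly, e.g. (illustrative only,
 * where conf and n_fixed_queues stand in for the device's own fields):
 *
 *     conf->num_queues = virtio_pci_optimal_num_queues(n_fixed_queues);
 */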

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
}
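
/*
 * Each VirtIOPCIProxy carries exactly one such bus, created from the proxy's
 * realize path (elsewhere in this file), onto which a single VirtIODevice is
 * plugged; max_dev is capped at 1 in the class_init below accordingly.
 */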

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}
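
/*
 * These VirtioBusClass hooks are the PCI transport's half of the generic
 * virtio-bus contract: the transport-independent virtio core calls them to
 * raise guest notifications, save/load transport state on migration, wire
 * up ioeventfd and guest notifiers, and query DMA/IOMMU properties, while
 * the virtio_pci_* implementations above supply the PCI/MSI-X specifics.
 */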

static const TypeInfo virtio_pci_bus_info = {
    .name = TYPE_VIRTIO_PCI_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size = sizeof(VirtioPCIBusClass),
    .class_init = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)