virtio,pc,pci: bugfixes

Tiny fixes: important but mostly obvious ones.  Revert VDPA network sim
 for this release as there are questions around it's maintainatiblity.
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmYU7qcPHG1zdEByZWRo
 YXQuY29tAAoJECgfDbjSjVRpn/cIAJBWRN67BS5ysdHjK0Hmw1zumbLpK+85wlAv
 dTfmJmUnIV6Ft5yaFFXCpxVH0/lh/vhG2ra5+lu53mX+GMtwjdqk4Sufvo4TukXu
 uweHUqlb4pdL37Yf7Q9N6kSX4Ay3ITEC7N18IvlBU8be5gRhidejMWlKq/gW/1rk
 +mnWeD5Qxs91Lh2pxShcnsRah0D4UY47dNu3VnglC9wYb4fupukGgj0qOnqYDF2K
 tG9Us0grU/qF1FgqWwbrlhOUO1Ntlp4uYn4JNOFhswAFDPm2XXIJRIPUhoYEi9G2
 HhxGSpDjJm8I9BBbllDnQVpIbBFxoG/EiQRT64Nt+rw+Tq01sPA=
 =AZIl
 -----END PGP SIGNATURE-----

Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pc,pci: bugfixes

Tiny fixes: important but mostly obvious ones.  Revert VDPA network sim
for this release as there are questions around its maintainability.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmYU7qcPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpn/cIAJBWRN67BS5ysdHjK0Hmw1zumbLpK+85wlAv
# dTfmJmUnIV6Ft5yaFFXCpxVH0/lh/vhG2ra5+lu53mX+GMtwjdqk4Sufvo4TukXu
# uweHUqlb4pdL37Yf7Q9N6kSX4Ay3ITEC7N18IvlBU8be5gRhidejMWlKq/gW/1rk
# +mnWeD5Qxs91Lh2pxShcnsRah0D4UY47dNu3VnglC9wYb4fupukGgj0qOnqYDF2K
# tG9Us0grU/qF1FgqWwbrlhOUO1Ntlp4uYn4JNOFhswAFDPm2XXIJRIPUhoYEi9G2
# HhxGSpDjJm8I9BBbllDnQVpIbBFxoG/EiQRT64Nt+rw+Tq01sPA=
# =AZIl
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 09 Apr 2024 08:30:47 BST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  qdev-monitor: fix error message in find_device_state()
  vhost-user-blk: simplify and fix vhost_user_blk_handle_config_change
  vdpa-dev: Fix the issue of device status not updating when configuration interruption is triggered
  hw/virtio: Fix packed virtqueue flush used_idx
  virtio-snd: rewrite invalid tx/rx message handling
  virtio-snd: Enhance error handling for invalid transfers
  Revert "hw/virtio: Add support for VDPA network simulation devices"

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2024-04-09 09:51:07 +01:00
commit bc0cd4ae88
14 changed files with 97 additions and 482 deletions

View File

@ -2371,11 +2371,6 @@ F: hw/virtio/vhost-user-scmi*
F: include/hw/virtio/vhost-user-scmi.h F: include/hw/virtio/vhost-user-scmi.h
F: tests/qtest/libqos/virtio-scmi.* F: tests/qtest/libqos/virtio-scmi.*
vdpa-net
M: Hao Chen <chenh@yusur.tech>
S: Maintained
F: docs/system/devices/vdpa-net.rst
virtio-crypto virtio-crypto
M: Gonglei <arei.gonglei@huawei.com> M: Gonglei <arei.gonglei@huawei.com>
S: Supported S: Supported

View File

@ -99,4 +99,3 @@ Emulated Devices
devices/canokey.rst devices/canokey.rst
devices/usb-u2f.rst devices/usb-u2f.rst
devices/igb.rst devices/igb.rst
devices/vdpa-net.rst

View File

@ -1,121 +0,0 @@
vdpa net
============
This document explains the setup and usage of the vdpa network device.
The vdpa network device is a paravirtualized vdpa emulate device.
Description
-----------
VDPA net devices support dirty page bitmap mark and vring state saving and recovery.
Users can use this VDPA device for live migration simulation testing in a nested virtualization environment.
Registers layout
----------------
The vdpa device add live migrate registers layout as follow::
Offset Register Name Bitwidth Associated vq
0x0 LM_LOGGING_CTRL 4bits
0x10 LM_BASE_ADDR_LOW 32bits
0x14 LM_BASE_ADDR_HIGH 32bits
0x18 LM_END_ADDR_LOW 32bits
0x1c LM_END_ADDR_HIGH 32bits
0x20 LM_RING_STATE_OFFSET 32bits vq0
0x24 LM_RING_STATE_OFFSET 32bits vq1
0x28 LM_RING_STATE_OFFSET 32bits vq2
......
0x20+1023*4 LM_RING_STATE_OFFSET 32bits vq1023
These registers are extended at the end of the notify bar space.
Architecture diagram
--------------------
::
|------------------------------------------------------------------------|
| guest-L1-user-space |
| |
| |----------------------------------------|
| | [virtio-net driver] |
| | ^ guest-L2-src(iommu=on) |
| |--------------|-------------------------|
| | | qemu-L2-src(viommu) |
| [dpdk-vdpa]<->[vhost socket]<-+->[vhost-user backend(iommu=on)] |
--------------------------------------------------------------------------
--------------------------------------------------------------------------
| ^ guest-L1-kernel-space |
| | |
| [VFIO] |
| ^ |
| | guest-L1-src(iommu=on) |
--------|-----------------------------------------------------------------
--------|-----------------------------------------------------------------
| [vdpa net device(iommu=on)] [manager nic device] |
| | | |
| | | |
| [tap device] qemu-L1-src(viommu) | |
------------------------------------------------+-------------------------
|
|
--------------------- |
| kernel net bridge |<-----
| virbr0 |<----------------------------------
--------------------- |
|
|
-------------------------------------------------------------------------- |
| guest-L1-user-space | |
| | |
| |----------------------------------------| |
| | [virtio-net driver] | |
| | ^ guest-L2-dst(iommu=on) | |
| |--------------|-------------------------| |
| | | qemu-L2-dst(viommu) | |
| [dpdk-vdpa]<->[vhost socket]<-+->[vhost-user backend(iommu=on)] | |
-------------------------------------------------------------------------- |
-------------------------------------------------------------------------- |
| ^ guest-L1-kernel-space | |
| | | |
| [VFIO] | |
| ^ | |
| | guest-L1-dst(iommu=on) | |
--------|----------------------------------------------------------------- |
--------|----------------------------------------------------------------- |
| [vdpa net device(iommu=on)] [manager nic device]----------------+----
| | |
| | |
| [tap device] qemu-L1-dst(viommu) |
--------------------------------------------------------------------------
Device properties
-----------------
The Virtio vdpa device can be configured with the following properties:
* ``vdpa=on`` open vdpa device emulated.
Usages
--------
This patch add virtio sriov support and vdpa live migrate support.
You can open vdpa by set xml file as follow::
<qemu:commandline xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
<qemu:arg value='-device'/>
<qemu:arg value='intel-iommu,intremap=on,device-iotlb=on,aw-bits=48'/>
<qemu:arg value='-netdev'/>
<qemu:arg value='tap,id=hostnet1,script=no,downscript=no,vhost=off'/>
<qemu:arg value='-device'/>
<qemu:arg value='virtio-net-pci,netdev=hostnet1,id=net1,mac=56:4a:b7:4f:4d:a9,bus=pci.6,addr=0x0,iommu_platform=on,ats=on,vdpa=on'/>
</qemu:commandline>
Limitations
-----------
1. Dependent on tap device with param ``vhost=off``.
2. Nested virtualization environment only supports ``q35`` machines.
3. Current only support split vring live migrate.

View File

@ -456,7 +456,6 @@ static uint32_t virtio_snd_pcm_prepare(VirtIOSound *s, uint32_t stream_id)
stream->s = s; stream->s = s;
qemu_mutex_init(&stream->queue_mutex); qemu_mutex_init(&stream->queue_mutex);
QSIMPLEQ_INIT(&stream->queue); QSIMPLEQ_INIT(&stream->queue);
QSIMPLEQ_INIT(&stream->invalid);
/* /*
* stream_id >= s->snd_conf.streams was checked before so this is * stream_id >= s->snd_conf.streams was checked before so this is
@ -611,9 +610,6 @@ static size_t virtio_snd_pcm_get_io_msgs_count(VirtIOSoundPCMStream *stream)
QSIMPLEQ_FOREACH_SAFE(buffer, &stream->queue, entry, next) { QSIMPLEQ_FOREACH_SAFE(buffer, &stream->queue, entry, next) {
count += 1; count += 1;
} }
QSIMPLEQ_FOREACH_SAFE(buffer, &stream->invalid, entry, next) {
count += 1;
}
} }
return count; return count;
} }
@ -831,25 +827,22 @@ static void virtio_snd_handle_event(VirtIODevice *vdev, VirtQueue *vq)
trace_virtio_snd_handle_event(); trace_virtio_snd_handle_event();
} }
/*
* Must only be called if vsnd->invalid is not empty.
*/
static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq) static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq)
{ {
VirtIOSoundPCMBuffer *buffer = NULL; VirtIOSoundPCMBuffer *buffer = NULL;
VirtIOSoundPCMStream *stream = NULL;
virtio_snd_pcm_status resp = { 0 }; virtio_snd_pcm_status resp = { 0 };
VirtIOSound *vsnd = VIRTIO_SND(vdev); VirtIOSound *vsnd = VIRTIO_SND(vdev);
bool any = false;
for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) { g_assert(!QSIMPLEQ_EMPTY(&vsnd->invalid));
stream = vsnd->pcm->streams[i];
if (stream) { while (!QSIMPLEQ_EMPTY(&vsnd->invalid)) {
any = false; buffer = QSIMPLEQ_FIRST(&vsnd->invalid);
WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) { /* If buffer->vq != vq, our logic is fundamentally wrong, so bail out */
while (!QSIMPLEQ_EMPTY(&stream->invalid)) { g_assert(buffer->vq == vq);
buffer = QSIMPLEQ_FIRST(&stream->invalid);
if (buffer->vq != vq) {
break;
}
any = true;
resp.status = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); resp.status = cpu_to_le32(VIRTIO_SND_S_BAD_MSG);
iov_from_buf(buffer->elem->in_sg, iov_from_buf(buffer->elem->in_sg,
buffer->elem->in_num, buffer->elem->in_num,
@ -859,19 +852,11 @@ static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq)
virtqueue_push(vq, virtqueue_push(vq,
buffer->elem, buffer->elem,
sizeof(virtio_snd_pcm_status)); sizeof(virtio_snd_pcm_status));
QSIMPLEQ_REMOVE_HEAD(&stream->invalid, entry); QSIMPLEQ_REMOVE_HEAD(&vsnd->invalid, entry);
virtio_snd_pcm_buffer_free(buffer); virtio_snd_pcm_buffer_free(buffer);
} }
if (any) { /* Notify vq about virtio_snd_pcm_status responses. */
/*
* Notify vq about virtio_snd_pcm_status responses.
* Buffer responses must be notified separately later.
*/
virtio_notify(vdev, vq); virtio_notify(vdev, vq);
}
}
}
}
} }
/* /*
@ -883,15 +868,14 @@ static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq)
*/ */
static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq) static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
{ {
VirtIOSound *s = VIRTIO_SND(vdev); VirtIOSound *vsnd = VIRTIO_SND(vdev);
VirtIOSoundPCMStream *stream = NULL;
VirtIOSoundPCMBuffer *buffer; VirtIOSoundPCMBuffer *buffer;
VirtQueueElement *elem; VirtQueueElement *elem;
size_t msg_sz, size; size_t msg_sz, size;
virtio_snd_pcm_xfer hdr; virtio_snd_pcm_xfer hdr;
uint32_t stream_id; uint32_t stream_id;
/* /*
* If any of the I/O messages are invalid, put them in stream->invalid and * If any of the I/O messages are invalid, put them in vsnd->invalid and
* return them after the for loop. * return them after the for loop.
*/ */
bool must_empty_invalid_queue = false; bool must_empty_invalid_queue = false;
@ -901,7 +885,7 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
} }
trace_virtio_snd_handle_tx_xfer(); trace_virtio_snd_handle_tx_xfer();
for (;;) { for (VirtIOSoundPCMStream *stream = NULL;; stream = NULL) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) { if (!elem) {
break; break;
@ -917,12 +901,12 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
} }
stream_id = le32_to_cpu(hdr.stream_id); stream_id = le32_to_cpu(hdr.stream_id);
if (stream_id >= s->snd_conf.streams if (stream_id >= vsnd->snd_conf.streams
|| s->pcm->streams[stream_id] == NULL) { || vsnd->pcm->streams[stream_id] == NULL) {
goto tx_err; goto tx_err;
} }
stream = s->pcm->streams[stream_id]; stream = vsnd->pcm->streams[stream_id];
if (stream->info.direction != VIRTIO_SND_D_OUTPUT) { if (stream->info.direction != VIRTIO_SND_D_OUTPUT) {
goto tx_err; goto tx_err;
} }
@ -942,13 +926,11 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
continue; continue;
tx_err: tx_err:
WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
must_empty_invalid_queue = true; must_empty_invalid_queue = true;
buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer)); buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer));
buffer->elem = elem; buffer->elem = elem;
buffer->vq = vq; buffer->vq = vq;
QSIMPLEQ_INSERT_TAIL(&stream->invalid, buffer, entry); QSIMPLEQ_INSERT_TAIL(&vsnd->invalid, buffer, entry);
}
} }
if (must_empty_invalid_queue) { if (must_empty_invalid_queue) {
@ -965,15 +947,14 @@ tx_err:
*/ */
static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq) static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
{ {
VirtIOSound *s = VIRTIO_SND(vdev); VirtIOSound *vsnd = VIRTIO_SND(vdev);
VirtIOSoundPCMStream *stream = NULL;
VirtIOSoundPCMBuffer *buffer; VirtIOSoundPCMBuffer *buffer;
VirtQueueElement *elem; VirtQueueElement *elem;
size_t msg_sz, size; size_t msg_sz, size;
virtio_snd_pcm_xfer hdr; virtio_snd_pcm_xfer hdr;
uint32_t stream_id; uint32_t stream_id;
/* /*
* if any of the I/O messages are invalid, put them in stream->invalid and * if any of the I/O messages are invalid, put them in vsnd->invalid and
* return them after the for loop. * return them after the for loop.
*/ */
bool must_empty_invalid_queue = false; bool must_empty_invalid_queue = false;
@ -983,7 +964,7 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
} }
trace_virtio_snd_handle_rx_xfer(); trace_virtio_snd_handle_rx_xfer();
for (;;) { for (VirtIOSoundPCMStream *stream = NULL;; stream = NULL) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) { if (!elem) {
break; break;
@ -999,12 +980,12 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
} }
stream_id = le32_to_cpu(hdr.stream_id); stream_id = le32_to_cpu(hdr.stream_id);
if (stream_id >= s->snd_conf.streams if (stream_id >= vsnd->snd_conf.streams
|| !s->pcm->streams[stream_id]) { || !vsnd->pcm->streams[stream_id]) {
goto rx_err; goto rx_err;
} }
stream = s->pcm->streams[stream_id]; stream = vsnd->pcm->streams[stream_id];
if (stream == NULL || stream->info.direction != VIRTIO_SND_D_INPUT) { if (stream == NULL || stream->info.direction != VIRTIO_SND_D_INPUT) {
goto rx_err; goto rx_err;
} }
@ -1021,13 +1002,11 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
continue; continue;
rx_err: rx_err:
WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
must_empty_invalid_queue = true; must_empty_invalid_queue = true;
buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer)); buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer));
buffer->elem = elem; buffer->elem = elem;
buffer->vq = vq; buffer->vq = vq;
QSIMPLEQ_INSERT_TAIL(&stream->invalid, buffer, entry); QSIMPLEQ_INSERT_TAIL(&vsnd->invalid, buffer, entry);
}
} }
if (must_empty_invalid_queue) { if (must_empty_invalid_queue) {
@ -1127,6 +1106,7 @@ static void virtio_snd_realize(DeviceState *dev, Error **errp)
virtio_add_queue(vdev, 64, virtio_snd_handle_rx_xfer); virtio_add_queue(vdev, 64, virtio_snd_handle_rx_xfer);
qemu_mutex_init(&vsnd->cmdq_mutex); qemu_mutex_init(&vsnd->cmdq_mutex);
QTAILQ_INIT(&vsnd->cmdq); QTAILQ_INIT(&vsnd->cmdq);
QSIMPLEQ_INIT(&vsnd->invalid);
for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) { for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) {
status = virtio_snd_set_pcm_params(vsnd, i, &default_params); status = virtio_snd_set_pcm_params(vsnd, i, &default_params);
@ -1376,13 +1356,20 @@ static void virtio_snd_unrealize(DeviceState *dev)
static void virtio_snd_reset(VirtIODevice *vdev) static void virtio_snd_reset(VirtIODevice *vdev)
{ {
VirtIOSound *s = VIRTIO_SND(vdev); VirtIOSound *vsnd = VIRTIO_SND(vdev);
virtio_snd_ctrl_command *cmd; virtio_snd_ctrl_command *cmd;
WITH_QEMU_LOCK_GUARD(&s->cmdq_mutex) { /*
while (!QTAILQ_EMPTY(&s->cmdq)) { * Sanity check that the invalid buffer message queue is emptied at the end
cmd = QTAILQ_FIRST(&s->cmdq); * of every virtio_snd_handle_tx_xfer/virtio_snd_handle_rx_xfer call, and
QTAILQ_REMOVE(&s->cmdq, cmd, next); * must be empty otherwise.
*/
g_assert(QSIMPLEQ_EMPTY(&vsnd->invalid));
WITH_QEMU_LOCK_GUARD(&vsnd->cmdq_mutex) {
while (!QTAILQ_EMPTY(&vsnd->cmdq)) {
cmd = QTAILQ_FIRST(&vsnd->cmdq);
QTAILQ_REMOVE(&vsnd->cmdq, cmd, next);
virtio_snd_ctrl_cmd_free(cmd); virtio_snd_ctrl_cmd_free(cmd);
} }
} }

View File

@ -91,7 +91,6 @@ static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
{ {
int ret; int ret;
struct virtio_blk_config blkcfg;
VirtIODevice *vdev = dev->vdev; VirtIODevice *vdev = dev->vdev;
VHostUserBlk *s = VHOST_USER_BLK(dev->vdev); VHostUserBlk *s = VHOST_USER_BLK(dev->vdev);
Error *local_err = NULL; Error *local_err = NULL;
@ -100,19 +99,15 @@ static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
return 0; return 0;
} }
ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg, ret = vhost_dev_get_config(dev, (uint8_t *)&s->blkcfg,
vdev->config_len, &local_err); vdev->config_len, &local_err);
if (ret < 0) { if (ret < 0) {
error_report_err(local_err); error_report_err(local_err);
return ret; return ret;
} }
/* valid for resize only */
if (blkcfg.capacity != s->blkcfg.capacity) {
s->blkcfg.capacity = blkcfg.capacity;
memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len); memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len);
virtio_notify_config(dev->vdev); virtio_notify_config(dev->vdev);
}
return 0; return 0;
} }

View File

@ -2039,22 +2039,6 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
goto err; goto err;
} }
/* Mark dirty page's bitmap of guest memory */
if (vdev->lm_logging_ctrl == LM_ENABLE) {
uint64_t chunk = elem->in_addr[i] / VHOST_LOG_CHUNK;
/* Get chunk index */
BitmapMemoryRegionCaches *caches = qatomic_rcu_read(&vdev->caches);
uint64_t index = chunk / 8;
uint64_t shift = chunk % 8;
uint8_t val = 0;
address_space_read_cached(&caches->bitmap, index, &val,
sizeof(val));
val |= 1 << shift;
address_space_write_cached(&caches->bitmap, index, &val,
sizeof(val));
address_space_cache_invalidate(&caches->bitmap, index, sizeof(val));
}
elems[i] = elem; elems[i] = elem;
lens[i] = total; lens[i] = total;
i++; i++;

View File

@ -195,7 +195,14 @@ static void
vhost_vdpa_device_get_config(VirtIODevice *vdev, uint8_t *config) vhost_vdpa_device_get_config(VirtIODevice *vdev, uint8_t *config)
{ {
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
int ret;
ret = vhost_dev_get_config(&s->dev, s->config, s->config_size,
NULL);
if (ret < 0) {
error_report("get device config space failed");
return;
}
memcpy(config, s->config, s->config_size); memcpy(config, s->config, s->config_size);
} }

View File

@ -1442,155 +1442,6 @@ int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy,
return virtio_pci_add_mem_cap(proxy, &cap.cap); return virtio_pci_add_mem_cap(proxy, &cap.cap);
} }
/* Called within call_rcu(). */
static void bitmap_free_region_cache(BitmapMemoryRegionCaches *caches)
{
assert(caches != NULL);
address_space_cache_destroy(&caches->bitmap);
g_free(caches);
}
static void lm_disable(VirtIODevice *vdev)
{
BitmapMemoryRegionCaches *caches;
caches = qatomic_read(&vdev->caches);
qatomic_rcu_set(&vdev->caches, NULL);
if (caches) {
call_rcu(caches, bitmap_free_region_cache, rcu);
}
}
static void lm_enable(VirtIODevice *vdev)
{
BitmapMemoryRegionCaches *old = vdev->caches;
BitmapMemoryRegionCaches *new = NULL;
hwaddr addr, end, size;
int64_t len;
addr = vdev->lm_base_addr_low | ((hwaddr)(vdev->lm_base_addr_high) << 32);
end = vdev->lm_end_addr_low | ((hwaddr)(vdev->lm_end_addr_high) << 32);
size = end - addr;
if (size <= 0) {
error_report("Invalid lm size.");
return;
}
new = g_new0(BitmapMemoryRegionCaches, 1);
len = address_space_cache_init(&new->bitmap, vdev->dma_as, addr, size,
true);
if (len < size) {
virtio_error(vdev, "Cannot map bitmap");
goto err_bitmap;
}
qatomic_rcu_set(&vdev->caches, new);
if (old) {
call_rcu(old, bitmap_free_region_cache, rcu);
}
return;
err_bitmap:
address_space_cache_destroy(&new->bitmap);
g_free(new);
}
static uint64_t virtio_pci_lm_read(void *opaque, hwaddr addr,
unsigned size)
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
hwaddr offset_end = LM_VRING_STATE_OFFSET +
virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
uint32_t val;
int qid;
if (vdev == NULL) {
return UINT64_MAX;
}
switch (addr) {
case LM_LOGGING_CTRL:
val = vdev->lm_logging_ctrl;
break;
case LM_BASE_ADDR_LOW:
val = vdev->lm_base_addr_low;
break;
case LM_BASE_ADDR_HIGH:
val = vdev->lm_base_addr_high;
break;
case LM_END_ADDR_LOW:
val = vdev->lm_end_addr_low;
break;
case LM_END_ADDR_HIGH:
val = vdev->lm_end_addr_high;
break;
default:
if (addr >= LM_VRING_STATE_OFFSET && addr <= offset_end) {
qid = (addr - LM_VRING_STATE_OFFSET) /
virtio_pci_queue_mem_mult(proxy);
val = virtio_queue_get_vring_states(vdev, qid);
} else
val = 0;
break;
}
return val;
}
static void virtio_pci_lm_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
hwaddr offset_end = LM_VRING_STATE_OFFSET +
virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
int qid;
if (vdev == NULL) {
return;
}
switch (addr) {
case LM_LOGGING_CTRL:
vdev->lm_logging_ctrl = val;
switch (val) {
case LM_DISABLE:
lm_disable(vdev);
break;
case LM_ENABLE:
lm_enable(vdev);
break;
default:
virtio_error(vdev, "Unsupport LM_LOGGING_CTRL value: %"PRIx64,
val);
break;
};
break;
case LM_BASE_ADDR_LOW:
vdev->lm_base_addr_low = val;
break;
case LM_BASE_ADDR_HIGH:
vdev->lm_base_addr_high = val;
break;
case LM_END_ADDR_LOW:
vdev->lm_end_addr_low = val;
break;
case LM_END_ADDR_HIGH:
vdev->lm_end_addr_high = val;
break;
default:
if (addr >= LM_VRING_STATE_OFFSET && addr <= offset_end) {
qid = (addr - LM_VRING_STATE_OFFSET) /
virtio_pci_queue_mem_mult(proxy);
virtio_queue_set_vring_states(vdev, qid, val);
} else
virtio_error(vdev, "Unsupport addr: %"PRIx64, addr);
break;
}
}
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
unsigned size) unsigned size)
{ {
@ -1972,15 +1823,6 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
}, },
.endianness = DEVICE_LITTLE_ENDIAN, .endianness = DEVICE_LITTLE_ENDIAN,
}; };
static const MemoryRegionOps lm_ops = {
.read = virtio_pci_lm_read,
.write = virtio_pci_lm_write,
.impl = {
.min_access_size = 1,
.max_access_size = 4,
},
.endianness = DEVICE_LITTLE_ENDIAN,
};
g_autoptr(GString) name = g_string_new(NULL); g_autoptr(GString) name = g_string_new(NULL);
g_string_printf(name, "virtio-pci-common-%s", vdev_name); g_string_printf(name, "virtio-pci-common-%s", vdev_name);
@ -2017,14 +1859,6 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
proxy, proxy,
name->str, name->str,
proxy->notify_pio.size); proxy->notify_pio.size);
if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
g_string_printf(name, "virtio-pci-lm-%s", vdev_name);
memory_region_init_io(&proxy->lm.mr, OBJECT(proxy),
&lm_ops,
proxy,
name->str,
proxy->lm.size);
}
} }
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy, static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
@ -2187,10 +2021,6 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap); virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap); virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap); virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
memory_region_add_subregion(&proxy->modern_bar,
proxy->lm.offset, &proxy->lm.mr);
}
if (modern_pio) { if (modern_pio) {
memory_region_init(&proxy->io_bar, OBJECT(proxy), memory_region_init(&proxy->io_bar, OBJECT(proxy),
@ -2260,9 +2090,6 @@ static void virtio_pci_device_unplugged(DeviceState *d)
virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
memory_region_del_subregion(&proxy->modern_bar, &proxy->lm.mr);
}
if (modern_pio) { if (modern_pio) {
virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
} }
@ -2317,17 +2144,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG; proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
/* subclasses can enforce modern, so do this unconditionally */ /* subclasses can enforce modern, so do this unconditionally */
if (!(proxy->flags & VIRTIO_PCI_FLAG_VDPA)) {
memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci", memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
/* PCI BAR regions must be powers of 2 */ /* PCI BAR regions must be powers of 2 */
pow2ceil(proxy->notify.offset + proxy->notify.size)); pow2ceil(proxy->notify.offset + proxy->notify.size));
} else {
proxy->lm.offset = proxy->notify.offset + proxy->notify.size;
proxy->lm.size = 0x20 + VIRTIO_QUEUE_MAX * 4;
memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
/* PCI BAR regions must be powers of 2 */
pow2ceil(proxy->lm.offset + proxy->lm.size));
}
if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) { if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
@ -2482,8 +2301,6 @@ static Property virtio_pci_properties[] = {
VIRTIO_PCI_FLAG_INIT_FLR_BIT, true), VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags, DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_AER_BIT, false), VIRTIO_PCI_FLAG_AER_BIT, false),
DEFINE_PROP_BIT("vdpa", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_VDPA_BIT, false),
DEFINE_PROP_END_OF_LIST(), DEFINE_PROP_END_OF_LIST(),
}; };

View File

@ -957,12 +957,20 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
return; return;
} }
/*
* For indirect element's 'ndescs' is 1.
* For all other elemment's 'ndescs' is the
* number of descriptors chained by NEXT (as set in virtqueue_packed_pop).
* So When the 'elem' be filled into the descriptor ring,
* The 'idx' of this 'elem' shall be
* the value of 'vq->used_idx' plus the 'ndescs'.
*/
ndescs += vq->used_elems[0].ndescs;
for (i = 1; i < count; i++) { for (i = 1; i < count; i++) {
virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false); virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
ndescs += vq->used_elems[i].ndescs; ndescs += vq->used_elems[i].ndescs;
} }
virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true); virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
ndescs += vq->used_elems[0].ndescs;
vq->inuse -= ndescs; vq->inuse -= ndescs;
vq->used_idx += ndescs; vq->used_idx += ndescs;
@ -3368,18 +3376,6 @@ static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
return vdev->vq[n].last_avail_idx; return vdev->vq[n].last_avail_idx;
} }
static uint32_t virtio_queue_split_get_vring_states(VirtIODevice *vdev,
int n)
{
struct VirtQueue *vq = &vdev->vq[n];
uint16_t avail, used;
avail = vq->last_avail_idx;
used = vq->used_idx;
return avail | (uint32_t)used << 16;
}
unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{ {
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
@ -3389,33 +3385,6 @@ unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
} }
} }
unsigned int virtio_queue_get_vring_states(VirtIODevice *vdev, int n)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return -1;
} else {
return virtio_queue_split_get_vring_states(vdev, n);
}
}
static void virtio_queue_split_set_vring_states(VirtIODevice *vdev,
int n, uint32_t idx)
{
struct VirtQueue *vq = &vdev->vq[n];
vq->last_avail_idx = (uint16_t)(idx & 0xffff);
vq->shadow_avail_idx = (uint16_t)(idx & 0xffff);
vq->used_idx = (uint16_t)(idx >> 16);
}
void virtio_queue_set_vring_states(VirtIODevice *vdev, int n, uint32_t idx)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return;
} else {
virtio_queue_split_set_vring_states(vdev, n, idx);
}
}
static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev, static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
int n, unsigned int idx) int n, unsigned int idx)
{ {

View File

@ -151,7 +151,6 @@ struct VirtIOSoundPCMStream {
QemuMutex queue_mutex; QemuMutex queue_mutex;
bool active; bool active;
QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) queue; QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) queue;
QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid;
}; };
/* /*
@ -223,6 +222,21 @@ struct VirtIOSound {
QemuMutex cmdq_mutex; QemuMutex cmdq_mutex;
QTAILQ_HEAD(, virtio_snd_ctrl_command) cmdq; QTAILQ_HEAD(, virtio_snd_ctrl_command) cmdq;
bool processing_cmdq; bool processing_cmdq;
/*
* Convenience queue to keep track of invalid tx/rx queue messages inside
* the tx/rx callbacks.
*
* In the callbacks as a first step we are emptying the virtqueue to handle
* each message and we cannot add an invalid message back to the queue: we
* would re-process it in subsequent loop iterations.
*
* Instead, we add them to this queue and after finishing examining every
* virtqueue element, we inform the guest for each invalid message.
*
* This queue must be empty at all times except for inside the tx/rx
* callbacks.
*/
QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid;
}; };
struct virtio_snd_ctrl_command { struct virtio_snd_ctrl_command {

View File

@ -43,7 +43,6 @@ enum {
VIRTIO_PCI_FLAG_INIT_FLR_BIT, VIRTIO_PCI_FLAG_INIT_FLR_BIT,
VIRTIO_PCI_FLAG_AER_BIT, VIRTIO_PCI_FLAG_AER_BIT,
VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT,
VIRTIO_PCI_FLAG_VDPA_BIT,
}; };
/* Need to activate work-arounds for buggy guests at vmstate load. */ /* Need to activate work-arounds for buggy guests at vmstate load. */
@ -90,9 +89,6 @@ enum {
#define VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED \ #define VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED \
(1 << VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT) (1 << VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT)
/* VDPA supported flags */
#define VIRTIO_PCI_FLAG_VDPA (1 << VIRTIO_PCI_FLAG_VDPA_BIT)
typedef struct { typedef struct {
MSIMessage msg; MSIMessage msg;
int virq; int virq;
@ -144,7 +140,6 @@ struct VirtIOPCIProxy {
}; };
VirtIOPCIRegion regs[5]; VirtIOPCIRegion regs[5];
}; };
VirtIOPCIRegion lm;
MemoryRegion modern_bar; MemoryRegion modern_bar;
MemoryRegion io_bar; MemoryRegion io_bar;
uint32_t legacy_io_bar_idx; uint32_t legacy_io_bar_idx;

View File

@ -35,9 +35,6 @@
(0x1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \ (0x1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
(0x1ULL << VIRTIO_F_ANY_LAYOUT)) (0x1ULL << VIRTIO_F_ANY_LAYOUT))
#define LM_DISABLE 0x00
#define LM_ENABLE 0x01
struct VirtQueue; struct VirtQueue;
static inline hwaddr vring_align(hwaddr addr, static inline hwaddr vring_align(hwaddr addr,
@ -98,11 +95,6 @@ enum virtio_device_endian {
VIRTIO_DEVICE_ENDIAN_BIG, VIRTIO_DEVICE_ENDIAN_BIG,
}; };
typedef struct BitmapMemoryRegionCaches {
struct rcu_head rcu;
MemoryRegionCache bitmap;
} BitmapMemoryRegionCaches;
/** /**
* struct VirtIODevice - common VirtIO structure * struct VirtIODevice - common VirtIO structure
* @name: name of the device * @name: name of the device
@ -136,14 +128,6 @@ struct VirtIODevice
uint32_t generation; uint32_t generation;
int nvectors; int nvectors;
VirtQueue *vq; VirtQueue *vq;
uint8_t lm_logging_ctrl;
uint32_t lm_base_addr_low;
uint32_t lm_base_addr_high;
uint32_t lm_end_addr_low;
uint32_t lm_end_addr_high;
BitmapMemoryRegionCaches *caches;
MemoryListener listener; MemoryListener listener;
uint16_t device_id; uint16_t device_id;
/* @vm_running: current VM running state via virtio_vmstate_change() */ /* @vm_running: current VM running state via virtio_vmstate_change() */
@ -395,11 +379,8 @@ hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n); hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n); hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n); unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
unsigned int virtio_queue_get_vring_states(VirtIODevice *vdev, int n);
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
unsigned int idx); unsigned int idx);
void virtio_queue_set_vring_states(VirtIODevice *vdev, int n,
unsigned int idx);
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n); void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n);
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n); void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n); void virtio_queue_update_used_idx(VirtIODevice *vdev, int n);

View File

@ -221,13 +221,6 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60 #define VIRTIO_PCI_COMMON_ADM_Q_IDX 60
#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62 #define VIRTIO_PCI_COMMON_ADM_Q_NUM 62
#define LM_LOGGING_CTRL 0
#define LM_BASE_ADDR_LOW 4
#define LM_BASE_ADDR_HIGH 8
#define LM_END_ADDR_LOW 12
#define LM_END_ADDR_HIGH 16
#define LM_VRING_STATE_OFFSET 0x20
#endif /* VIRTIO_PCI_NO_MODERN */ #endif /* VIRTIO_PCI_NO_MODERN */
/* Admin command status. */ /* Admin command status. */

View File

@ -891,7 +891,7 @@ static DeviceState *find_device_state(const char *id, Error **errp)
dev = (DeviceState *)object_dynamic_cast(obj, TYPE_DEVICE); dev = (DeviceState *)object_dynamic_cast(obj, TYPE_DEVICE);
if (!dev) { if (!dev) {
error_setg(errp, "%s is not a hotpluggable device", id); error_setg(errp, "%s is not a device", id);
return NULL; return NULL;
} }