qemu/hw/virtio/virtio.c

/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-virtio.h"
#include "trace.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "exec/tswap.h"
#include "qom/object_interfaces.h"
#include "hw/core/cpu.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "virtio-qmp.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_console.h"
#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "standard-headers/linux/virtio_i2c.h"
#include "standard-headers/linux/virtio_balloon.h"
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN 4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;
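
/*
 * Per the virtio 1.1 spec (not restated elsewhere in this file): in the
 * packed-ring event suppression structure, bits 0-14 of off_wrap hold
 * the descriptor ring offset at which a notification is requested and
 * bit 15 holds the expected wrap counter; flags selects the mode
 * (0 = enable, 1 = disable, 2 = notify at off_wrap, the last one valid
 * only when VIRTIO_RING_F_EVENT_IDX has been negotiated).
 */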

struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;
Revert "virtio: turn vq->notification into a nested counter" This reverts commit aff8fd18f1786fc5af259a9bc0077727222f51ca. Both virtio-net and virtio-crypto do not balance virtio_queue_set_notification() enable and disable calls. This makes the notifications_disabled counter unreliable and Doug Goldstein reported the following assertion failure: #3 0x00007ffff44d1c62 in __GI___assert_fail ( assertion=assertion@entry=0x555555ae8e8a "vq->notification_disabled > 0", file=file@entry=0x555555ae89c0 "/home/doug/work/qemu/hw/virtio/virtio.c", line=line@entry=215, function=function@entry=0x555555ae9630 <__PRETTY_FUNCTION__.43707> "virtio_queue_set_notification") at assert.c:101 #4 0x00005555557f25d6 in virtio_queue_set_notification (vq=0x55555666aa90, enable=enable@entry=1) at /home/doug/work/qemu/hw/virtio/virtio.c:215 #5 0x00005555557dc311 in virtio_net_has_buffers (q=<optimized out>, q=<optimized out>, bufsize=102) at /home/doug/work/qemu/hw/net/virtio-net.c:1008 #6 virtio_net_receive (nc=<optimized out>, buf=0x555557386b88 "", size=102) at /home/doug/work/qemu/hw/net/virtio-net.c:1148 #7 0x00005555559cad33 in nc_sendv_compat (flags=<optimized out>, iovcnt=1, iov=0x7fffead746d0, nc=0x55555788b340) at net/net.c:705 #8 qemu_deliver_packet_iov (sender=<optimized out>, flags=<optimized out>, iov=0x7fffead746d0, iovcnt=1, opaque=0x55555788b340) at net/net.c:732 #9 0x00005555559cd929 in qemu_net_queue_deliver (size=<optimized out>, data=<optimized out>, flags=<optimized out>, sender=<optimized out>, queue=0x55555788b550) at net/queue.c:164 #10 qemu_net_queue_flush (queue=0x55555788b550) at net/queue.c:261 This patch is safe to revert since it's just an optimization for virtqueue polling. The next patch will improve the situation again without resorting to nesting. Reported-by: Doug Goldstein <cardoe@cardoe.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Tested-by: Richard Henderson <rth@twiddle.net> Tested-by: Laszlo Ersek <lersek@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2017-01-12 14:46:10 +03:00

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    /* Number of elements popped from avail but not yet pushed to used */
    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;

    QLIST_ENTRY(VirtQueue) node;
};

const char *virtio_device_names[] = {
    [VIRTIO_ID_NET] = "virtio-net",
    [VIRTIO_ID_BLOCK] = "virtio-blk",
    [VIRTIO_ID_CONSOLE] = "virtio-serial",
    [VIRTIO_ID_RNG] = "virtio-rng",
    [VIRTIO_ID_BALLOON] = "virtio-balloon",
    [VIRTIO_ID_IOMEM] = "virtio-iomem",
    [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
    [VIRTIO_ID_SCSI] = "virtio-scsi",
    [VIRTIO_ID_9P] = "virtio-9p",
    [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
    [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
    [VIRTIO_ID_CAIF] = "virtio-caif",
    [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
    [VIRTIO_ID_GPU] = "virtio-gpu",
    [VIRTIO_ID_CLOCK] = "virtio-clk",
    [VIRTIO_ID_INPUT] = "virtio-input",
    [VIRTIO_ID_VSOCK] = "vhost-vsock",
    [VIRTIO_ID_CRYPTO] = "virtio-crypto",
    [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
    [VIRTIO_ID_PSTORE] = "virtio-pstore",
    [VIRTIO_ID_IOMMU] = "virtio-iommu",
    [VIRTIO_ID_MEM] = "virtio-mem",
    [VIRTIO_ID_SOUND] = "virtio-sound",
    [VIRTIO_ID_FS] = "virtio-user-fs",
    [VIRTIO_ID_PMEM] = "virtio-pmem",
    [VIRTIO_ID_RPMB] = "virtio-rpmb",
    [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
    [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
    [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
    [VIRTIO_ID_SCMI] = "virtio-scmi",
    [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
    [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
    [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
    [VIRTIO_ID_CAN] = "virtio-can",
    [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
    [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
    [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
    [VIRTIO_ID_BT] = "virtio-bluetooth",
    [VIRTIO_ID_GPIO] = "virtio-gpio"
};

static const char *virtio_id_to_name(uint16_t device_id)
{
    assert(device_id < G_N_ELEMENTS(virtio_device_names));
    const char *name = virtio_device_names[device_id];
    assert(name != NULL);
    return name;
}
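
/*
 * Usage sketch: virtio_id_to_name(VIRTIO_ID_NET) yields "virtio-net".
 * Out-of-range IDs and IDs without an entry in virtio_device_names both
 * trip the assertions above, so callers must pass known device IDs.
 */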

/* Called within call_rcu(). */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}
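
/*
 * Detach the vring's region caches. Readers may still hold the old
 * pointer under rcu_read_lock(), so the caches are unhooked first and
 * only destroyed via call_rcu() once all current readers have finished.
 */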
static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}

void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                     true : false;
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }

    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}
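
/*
 * Note on the mappings above: the used ring is always mapped writable
 * (the device writes used elements), the avail ring is mapped read-only,
 * and the descriptor area is mapped writable only for packed rings,
 * where the device writes descriptors back in place.
 */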

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}
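
/*
 * Worked example of the split-ring layout computed above, for a
 * 256-entry queue with the default 4096-byte (page) alignment and a
 * page-aligned desc base:
 *
 *   avail = desc + 256 * sizeof(VRingDesc)              = desc  + 4096
 *   used  = align(avail + offsetof(VRingAvail, ring[256]), 4096)
 *         = align(avail + 4 + 2 * 256, 4096)            = avail + 4096
 *
 * i.e. each part starts on its own page, as VIRTIO_PCI_VRING_ALIGN
 * intends.
 */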

/* Called within rcu_read_lock(). */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
}
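
/*
 * The smp_rmb() above pairs with the corresponding write barrier the
 * driver is expected to issue between publishing off_wrap and updating
 * flags, so a device that observes a new flags value also observes the
 * off_wrap that was written before it.
 */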

static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_stw_phys_cached(vdev, cache, off, flags);
    address_space_cache_invalidate(cache, off, sizeof(flags));
}
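
/*
 * In both helpers above, the address_space_cache_invalidate() call after
 * the cached store marks the written range dirty, so dirty tracking
 * clients such as migration observe the device-side update.
 */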

/* Called within rcu_read_lock(). */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}
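
/*
 * The result may legitimately be NULL: a guest can change or tear down
 * the vring address while other threads are still in virtqueue code.
 * Callers therefore treat missing caches as a nop, ignoring stores and
 * returning 0 for loads, as the accessors below do.
 */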

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
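
/*
 * With VIRTIO_RING_F_EVENT_IDX, used_event is not a dedicated field but
 * occupies the slot just past the last avail ring entry, which is why
 * the helper above reads avail ring entry vq->vring.num.
 */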

/* Called within rcu_read_lock(). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_used_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;
if (!caches) {
return;
}
flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
hwaddr pa = offsetof(VRingUsed, flags);
uint16_t flags;
if (!caches) {
return;
}
flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
/* Called within rcu_read_lock(). */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
VRingMemoryRegionCaches *caches;
hwaddr pa;
Revert "virtio: turn vq->notification into a nested counter" This reverts commit aff8fd18f1786fc5af259a9bc0077727222f51ca. Both virtio-net and virtio-crypto do not balance virtio_queue_set_notification() enable and disable calls. This makes the notifications_disabled counter unreliable and Doug Goldstein reported the following assertion failure: #3 0x00007ffff44d1c62 in __GI___assert_fail ( assertion=assertion@entry=0x555555ae8e8a "vq->notification_disabled > 0", file=file@entry=0x555555ae89c0 "/home/doug/work/qemu/hw/virtio/virtio.c", line=line@entry=215, function=function@entry=0x555555ae9630 <__PRETTY_FUNCTION__.43707> "virtio_queue_set_notification") at assert.c:101 #4 0x00005555557f25d6 in virtio_queue_set_notification (vq=0x55555666aa90, enable=enable@entry=1) at /home/doug/work/qemu/hw/virtio/virtio.c:215 #5 0x00005555557dc311 in virtio_net_has_buffers (q=<optimized out>, q=<optimized out>, bufsize=102) at /home/doug/work/qemu/hw/net/virtio-net.c:1008 #6 virtio_net_receive (nc=<optimized out>, buf=0x555557386b88 "", size=102) at /home/doug/work/qemu/hw/net/virtio-net.c:1148 #7 0x00005555559cad33 in nc_sendv_compat (flags=<optimized out>, iovcnt=1, iov=0x7fffead746d0, nc=0x55555788b340) at net/net.c:705 #8 qemu_deliver_packet_iov (sender=<optimized out>, flags=<optimized out>, iov=0x7fffead746d0, iovcnt=1, opaque=0x55555788b340) at net/net.c:732 #9 0x00005555559cd929 in qemu_net_queue_deliver (size=<optimized out>, data=<optimized out>, flags=<optimized out>, sender=<optimized out>, queue=0x55555788b550) at net/queue.c:164 #10 qemu_net_queue_flush (queue=0x55555788b550) at net/queue.c:261 This patch is safe to revert since it's just an optimization for virtqueue polling. The next patch will improve the situation again without resorting to nesting. Reported-by: Doug Goldstein <cardoe@cardoe.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Tested-by: Richard Henderson <rth@twiddle.net> Tested-by: Laszlo Ersek <lersek@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2017-01-12 14:46:10 +03:00
if (!vq->notification) {
return;
}
caches = vring_get_region_caches(vq);
if (!caches) {
return;
}
pa = offsetof(VRingUsed, ring[vq->vring.num]);
virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
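/*
 * Note: with VIRTIO_RING_F_EVENT_IDX the avail-event word sits right after
 * the last used ring entry, hence offsetof(VRingUsed, ring[vq->vring.num])
 * above.
 */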
static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
RCU_READ_LOCK_GUARD();
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_avail_event(vq, vring_avail_idx(vq));
} else if (enable) {
vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
} else {
vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}
if (enable) {
/* Expose avail event/used flags before caller checks the avail idx. */
smp_mb();
}
}
static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
uint16_t off_wrap;
VRingPackedDescEvent e;
VRingMemoryRegionCaches *caches;
RCU_READ_LOCK_GUARD();
caches = vring_get_region_caches(vq);
if (!caches) {
return;
}
vring_packed_event_read(vq->vdev, &caches->used, &e);
if (!enable) {
e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
} else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
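/* off_wrap layout: bits 0-14 hold the descriptor offset, bit 15 the wrap counter. */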
off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
/* Make sure off_wrap is written before flags */
smp_wmb();
e.flags = VRING_PACKED_EVENT_FLAG_DESC;
} else {
e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
}
vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
if (enable) {
/* Expose avail event/used flags before caller checks the avail idx. */
smp_mb();
}
}
bool virtio_queue_get_notification(VirtQueue *vq)
{
return vq->notification;
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
vq->notification = enable;
if (!vq->vring.desc) {
return;
}
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtio_queue_packed_set_notification(vq, enable);
} else {
virtio_queue_split_set_notification(vq, enable);
}
}
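/*
 * A minimal usage sketch (illustrative only; the handler name and body are
 * assumptions, not part of this file): device virtqueue handlers typically
 * disable notifications while draining the ring, then re-enable them and
 * re-check for buffers that raced in, avoiding redundant guest kicks
 * without missing late additions.
 */
#if 0 /* example only */
static void example_handle_vq(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement *elem;

    do {
        virtio_queue_set_notification(vq, 0);   /* suppress guest kicks */
        while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
            /* ... process and complete the request ... */
            virtqueue_push(vq, elem, 0);
            g_free(elem);
        }
        virtio_queue_set_notification(vq, 1);   /* re-enable, then recheck */
    } while (!virtio_queue_empty(vq));
    virtio_notify(vdev, vq);
}
#endif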
int virtio_queue_ready(VirtQueue *vq)
{
return vq->vring.avail != 0;
}
static void vring_packed_desc_read_flags(VirtIODevice *vdev,
uint16_t *flags,
MemoryRegionCache *cache,
int i)
{
hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
*flags = virtio_lduw_phys_cached(vdev, cache, off);
}
static void vring_packed_desc_read(VirtIODevice *vdev,
VRingPackedDesc *desc,
MemoryRegionCache *cache,
int i, bool strict_order)
{
hwaddr off = i * sizeof(VRingPackedDesc);
vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
if (strict_order) {
/* Make sure flags is read before the rest of the fields. */
smp_rmb();
}
address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
&desc->addr, sizeof(desc->addr));
address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
&desc->id, sizeof(desc->id));
address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
&desc->len, sizeof(desc->len));
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap16s(vdev, &desc->id);
virtio_tswap32s(vdev, &desc->len);
}
static void vring_packed_desc_write_data(VirtIODevice *vdev,
VRingPackedDesc *desc,
MemoryRegionCache *cache,
int i)
{
hwaddr off_id = i * sizeof(VRingPackedDesc) +
offsetof(VRingPackedDesc, id);
hwaddr off_len = i * sizeof(VRingPackedDesc) +
offsetof(VRingPackedDesc, len);
virtio_tswap32s(vdev, &desc->len);
virtio_tswap16s(vdev, &desc->id);
address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}
static void vring_packed_desc_write_flags(VirtIODevice *vdev,
VRingPackedDesc *desc,
MemoryRegionCache *cache,
int i)
{
hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
virtio_stw_phys_cached(vdev, cache, off, desc->flags);
address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}
static void vring_packed_desc_write(VirtIODevice *vdev,
VRingPackedDesc *desc,
MemoryRegionCache *cache,
int i, bool strict_order)
{
vring_packed_desc_write_data(vdev, desc, cache, i);
if (strict_order) {
/* Make sure data is written before flags. */
smp_wmb();
}
vring_packed_desc_write_flags(vdev, desc, cache, i);
}
static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
bool avail, used;
avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
return (avail != used) && (avail == wrap_counter);
}
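/*
 * Packed-ring encoding: the driver makes a descriptor available by setting
 * its AVAIL bit to its current wrap counter while USED keeps the opposite
 * value.  E.g. on the first lap (wrap_counter == 1) an available descriptor
 * has AVAIL=1/USED=0; after the ring wraps (wrap_counter == 0) it has
 * AVAIL=0/USED=1.
 */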
/* Fetch avail_idx from VQ memory only when we really need to know if
 * the guest has added some buffers.
 * Called within rcu_read_lock(). */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
if (virtio_device_disabled(vq->vdev)) {
return 1;
}
if (unlikely(!vq->vring.avail)) {
return 1;
}
if (vq->shadow_avail_idx != vq->last_avail_idx) {
return 0;
}
return vring_avail_idx(vq) == vq->last_avail_idx;
}
static int virtio_queue_split_empty(VirtQueue *vq)
{
bool empty;
if (virtio_device_disabled(vq->vdev)) {
return 1;
}
if (unlikely(!vq->vring.avail)) {
return 1;
}
if (vq->shadow_avail_idx != vq->last_avail_idx) {
return 0;
}
RCU_READ_LOCK_GUARD();
empty = vring_avail_idx(vq) == vq->last_avail_idx;
return empty;
}
/* Called within rcu_read_lock(). */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
struct VRingPackedDesc desc;
VRingMemoryRegionCaches *cache;
if (unlikely(!vq->vring.desc)) {
return 1;
}
cache = vring_get_region_caches(vq);
if (!cache) {
return 1;
}
vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
vq->last_avail_idx);
return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}
static int virtio_queue_packed_empty(VirtQueue *vq)
{
RCU_READ_LOCK_GUARD();
return virtio_queue_packed_empty_rcu(vq);
}
int virtio_queue_empty(VirtQueue *vq)
{
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
return virtio_queue_packed_empty(vq);
} else {
return virtio_queue_split_empty(vq);
}
}
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
AddressSpace *dma_as = vq->vdev->dma_as;
unsigned int offset;
int i;
offset = 0;
for (i = 0; i < elem->in_num; i++) {
size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
elem->in_sg[i].iov_len,
DMA_DIRECTION_FROM_DEVICE, size);
offset += size;
}
for (i = 0; i < elem->out_num; i++) {
    dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                     elem->out_sg[i].iov_len,
                     DMA_DIRECTION_TO_DEVICE,
                     elem->out_sg[i].iov_len);
}
}
/* virtqueue_detach_element:
* @vq: The #VirtQueue
* @elem: The #VirtQueueElement
* @len: number of bytes written
*
* Detach the element from the virtqueue. This function is suitable for device
* reset or other situations where a #VirtQueueElement is simply freed and will
* not be pushed or discarded.
*/
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
vq->inuse -= elem->ndescs;
virtqueue_unmap_sg(vq, elem, len);
}
static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
vq->last_avail_idx -= num;
}
static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
if (vq->last_avail_idx < num) {
vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
vq->last_avail_wrap_counter ^= 1;
} else {
vq->last_avail_idx -= num;
}
}
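/*
 * Worked example for virtqueue_packed_rewind(): with vring.num == 256,
 * last_avail_idx == 1 and num == 3, last_avail_idx wraps back to 254 and
 * last_avail_wrap_counter flips, because the rewound descriptors belong to
 * the previous lap of the ring.
 */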
/* virtqueue_unpop:
* @vq: The #VirtQueue
* @elem: The #VirtQueueElement
* @len: number of bytes written
*
* Pretend the most recent element wasn't popped from the virtqueue. The next
* call to virtqueue_pop() will refetch the element.
*/
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_rewind(vq, 1);
} else {
virtqueue_split_rewind(vq, 1);
}
virtqueue_detach_element(vq, elem, len);
}
/* virtqueue_rewind:
* @vq: The #VirtQueue
* @num: Number of elements to push back
*
* Pretend that elements weren't popped from the virtqueue. The next
* virtqueue_pop() will refetch the oldest element.
*
* Use virtqueue_unpop() instead if you have a VirtQueueElement.
*
* Returns: true on success, false if @num is greater than the number of in use
* elements.
*/
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
if (num > vq->inuse) {
return false;
}
vq->inuse -= num;
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_rewind(vq, num);
} else {
virtqueue_split_rewind(vq, num);
}
return true;
}
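/*
 * A minimal usage sketch (illustrative; the function and its "unfinished"
 * counter are assumptions, not part of this file): a device that stops
 * processing mid-batch can return elements it popped but never pushed, so
 * the next handler run re-pops them.
 */
#if 0 /* example only */
static void example_requeue(VirtQueue *vq, unsigned int unfinished)
{
    /* Fails (returns false) if unfinished exceeds vq->inuse. */
    if (virtqueue_rewind(vq, unfinished)) {
        /* the next virtqueue_pop() will refetch the oldest element */
    }
}
#endif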
static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx)
{
VRingUsedElem uelem;
if (unlikely(!vq->vring.used)) {
return;
}
idx = (idx + vq->used_idx) % vq->vring.num;
uelem.id = elem->index;
uelem.len = len;
vring_used_write(vq, &uelem, idx);
}
static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx)
{
vq->used_elems[idx].index = elem->index;
vq->used_elems[idx].len = len;
vq->used_elems[idx].ndescs = elem->ndescs;
}
static void virtqueue_packed_fill_desc(VirtQueue *vq,
const VirtQueueElement *elem,
unsigned int idx,
bool strict_order)
{
uint16_t head;
VRingMemoryRegionCaches *caches;
VRingPackedDesc desc = {
.id = elem->index,
.len = elem->len,
};
bool wrap_counter = vq->used_wrap_counter;
if (unlikely(!vq->vring.desc)) {
return;
}
head = vq->used_idx + idx;
if (head >= vq->vring.num) {
head -= vq->vring.num;
wrap_counter ^= 1;
}
if (wrap_counter) {
desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
} else {
desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
}
caches = vring_get_region_caches(vq);
if (!caches) {
return;
}
vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}
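/*
 * Note the inverse of is_desc_avail(): when the device writes a descriptor
 * back as used, it sets AVAIL == USED == its used wrap counter.
 */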
/* Called within rcu_read_lock(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx)
{
trace_virtqueue_fill(vq, elem, len, idx);
virtqueue_unmap_sg(vq, elem, len);
if (virtio_device_disabled(vq->vdev)) {
return;
}
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_fill(vq, elem, len, idx);
} else {
virtqueue_split_fill(vq, elem, len, idx);
}
}
/* Called within rcu_read_lock(). */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
uint16_t old, new;
if (unlikely(!vq->vring.used)) {
return;
}
/* Make sure buffer is written before we update index. */
smp_wmb();
trace_virtqueue_flush(vq, count);
old = vq->used_idx;
new = old + count;
vring_used_idx_set(vq, new);
vq->inuse -= count;
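    /*
     * If vq->signalled_used now falls within the window of entries we just
     * published (old, new], the cached value can no longer be used to
     * suppress notifications, so mark it invalid and let the next
     * notification decision signal the guest.
     */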
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
unsigned int i, ndescs = 0;
if (unlikely(!vq->vring.desc)) {
return;
}
    /*
     * For an indirect element, 'ndescs' is 1.
     * For all other elements, 'ndescs' is the number of descriptors
     * chained by NEXT (as set in virtqueue_packed_pop).
     * So when the 'elem' is filled into the descriptor ring, the 'idx'
     * of this 'elem' is the value of 'vq->used_idx' plus 'ndescs'.
     */
ndescs += vq->used_elems[0].ndescs;
for (i = 1; i < count; i++) {
virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
ndescs += vq->used_elems[i].ndescs;
}
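    /*
     * Write the batch's first used descriptor last, with strict ordering,
     * so that its flags update (the driver's cue that the buffer has been
     * used) becomes visible only after all the other descriptors in the
     * batch have been written.
     */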
virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
vq->inuse -= ndescs;
vq->used_idx += ndescs;
if (vq->used_idx >= vq->vring.num) {
vq->used_idx -= vq->vring.num;
vq->used_wrap_counter ^= 1;
vq->signalled_used_valid = false;
}
}
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
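    /*
     * If the device has been disabled (e.g. bus mastering turned off),
     * skip the used ring update but keep the inuse accounting balanced
     * for the elements that were already popped.
     */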
if (virtio_device_disabled(vq->vdev)) {
vq->inuse -= count;
return;
}
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_flush(vq, count);
} else {
virtqueue_split_flush(vq, count);
}
}
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
RCU_READ_LOCK_GUARD();
virtqueue_fill(vq, elem, len, 0);
virtqueue_flush(vq, 1);
}
/* Called within rcu_read_lock(). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
uint16_t avail_idx, num_heads;
/* Use shadow index whenever possible. */
avail_idx = (vq->shadow_avail_idx != idx) ? vq->shadow_avail_idx
: vring_avail_idx(vq);
num_heads = avail_idx - idx;
/* Check it isn't doing very strange things with descriptor numbers. */
if (num_heads > vq->vring.num) {
virtio_error(vq->vdev, "Guest moved used index from %u to %u",
idx, vq->shadow_avail_idx);
return -EINVAL;
}
/*
* On success, callers read a descriptor at vq->last_avail_idx.
* Make sure descriptor read does not bypass avail index read.
*
* This is necessary even if we are using a shadow index, since
* the shadow index could have been initialized by calling
* vring_avail_idx() outside of this function, i.e., by a guest
* memory read not accompanied by a barrier.
*/
if (num_heads) {
smp_rmb();
}
return num_heads;
}
/* Called within rcu_read_lock(). */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
unsigned int *head)
{
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
*head = vring_avail_ring(vq, idx % vq->vring.num);
/* If their number is silly, that's a fatal mistake. */
if (*head >= vq->vring.num) {
virtio_error(vq->vdev, "Guest says index %u is available", *head);
return false;
}
return true;
}
enum {
VIRTQUEUE_READ_DESC_ERROR = -1,
VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
};
/* Reads the 'desc->next' descriptor into '*desc'. */
static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
MemoryRegionCache *desc_cache,
unsigned int max)
{
/* If this descriptor says it doesn't chain, we're done. */
if (!(desc->flags & VRING_DESC_F_NEXT)) {
return VIRTQUEUE_READ_DESC_DONE;
}
/* Check they're not leading us off end of descriptors. */
if (desc->next >= max) {
virtio_error(vdev, "Desc next is %u", desc->next);
return VIRTQUEUE_READ_DESC_ERROR;
}
vring_split_desc_read(vdev, desc, desc_cache, desc->next);
return VIRTQUEUE_READ_DESC_MORE;
}
/* Called within rcu_read_lock(). */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
unsigned int *in_bytes, unsigned int *out_bytes,
unsigned max_in_bytes, unsigned max_out_bytes,
VRingMemoryRegionCaches *caches)
{
VirtIODevice *vdev = vq->vdev;
unsigned int idx;
unsigned int total_bufs, in_total, out_total;
MemoryRegionCache indirect_desc_cache;
int64_t len = 0;
int rc;
address_space_cache_init_empty(&indirect_desc_cache);
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
MemoryRegionCache *desc_cache = &caches->desc;
unsigned int num_bufs;
VRingDesc desc;
unsigned int i;
unsigned int max = vq->vring.num;
num_bufs = total_bufs;
if (!virtqueue_get_head(vq, idx++, &i)) {
goto err;
}
vring_split_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (!desc.len || (desc.len % sizeof(VRingDesc))) {
virtio_error(vdev, "Invalid size for indirect buffer table");
goto err;
}
/* If we've got too many, that implies a descriptor loop. */
if (num_bufs >= max) {
virtio_error(vdev, "Looped descriptor");
goto err;
}
/* loop over the indirect descriptor table */
len = address_space_cache_init(&indirect_desc_cache,
vdev->dma_as,
desc.addr, desc.len, false);
desc_cache = &indirect_desc_cache;
if (len < desc.len) {
virtio_error(vdev, "Cannot map indirect buffer");
goto err;
}
max = desc.len / sizeof(VRingDesc);
num_bufs = i = 0;
vring_split_desc_read(vdev, &desc, desc_cache, i);
}
do {
/* If we've got too many, that implies a descriptor loop. */
if (++num_bufs > max) {
virtio_error(vdev, "Looped descriptor");
goto err;
}
if (desc.flags & VRING_DESC_F_WRITE) {
in_total += desc.len;
} else {
out_total += desc.len;
}
if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
goto done;
}
rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (rc == VIRTQUEUE_READ_DESC_ERROR) {
goto err;
}
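        /*
         * An indirect table occupies a single slot in the main ring, so it
         * counts as one buffer toward the loop-detection limit no matter
         * how many descriptors it contains.
         */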
if (desc_cache == &indirect_desc_cache) {
address_space_cache_destroy(&indirect_desc_cache);
total_bufs++;
} else {
total_bufs = num_bufs;
}
}
if (rc < 0) {
goto err;
}
done:
address_space_cache_destroy(&indirect_desc_cache);
if (in_bytes) {
*in_bytes = in_total;
}
if (out_bytes) {
*out_bytes = out_total;
}
return;
err:
in_total = out_total = 0;
goto done;
}
static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
/* If this descriptor says it doesn't chain, we're done. */
if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
return VIRTQUEUE_READ_DESC_DONE;
}
++*next;
if (*next == max) {
if (indirect) {
return VIRTQUEUE_READ_DESC_DONE;
} else {
(*next) -= vq->vring.num;
}
}
vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
return VIRTQUEUE_READ_DESC_MORE;
}
/* Called within rcu_read_lock(). */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
unsigned int *in_bytes,
unsigned int *out_bytes,
unsigned max_in_bytes,
unsigned max_out_bytes,
VRingMemoryRegionCaches *caches)
{
VirtIODevice *vdev = vq->vdev;
unsigned int idx;
unsigned int total_bufs, in_total, out_total;
MemoryRegionCache indirect_desc_cache;
MemoryRegionCache *desc_cache;
int64_t len = 0;
VRingPackedDesc desc;
bool wrap_counter;
address_space_cache_init_empty(&indirect_desc_cache);
idx = vq->last_avail_idx;
wrap_counter = vq->last_avail_wrap_counter;
total_bufs = in_total = out_total = 0;
for (;;) {
unsigned int num_bufs = total_bufs;
unsigned int i = idx;
int rc;
unsigned int max = vq->vring.num;
desc_cache = &caches->desc;
vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
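        /*
         * Packed descriptors have no avail index: availability is encoded
         * in each descriptor's AVAIL/USED flag bits relative to the
         * current wrap counter, so stop at the first descriptor that is
         * not yet available.
         */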
if (!is_desc_avail(desc.flags, wrap_counter)) {
break;
}
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingPackedDesc)) {
virtio_error(vdev, "Invalid size for indirect buffer table");
goto err;
}
/* If we've got too many, that implies a descriptor loop. */
if (num_bufs >= max) {
virtio_error(vdev, "Looped descriptor");
goto err;
}
/* loop over the indirect descriptor table */
len = address_space_cache_init(&indirect_desc_cache,
vdev->dma_as,
desc.addr, desc.len, false);
desc_cache = &indirect_desc_cache;
if (len < desc.len) {
virtio_error(vdev, "Cannot map indirect buffer");
goto err;
}
max = desc.len / sizeof(VRingPackedDesc);
num_bufs = i = 0;
vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
}
do {
/* If we've got too many, that implies a descriptor loop. */
if (++num_bufs > max) {
virtio_error(vdev, "Looped descriptor");
goto err;
}
if (desc.flags & VRING_DESC_F_WRITE) {
in_total += desc.len;
} else {
out_total += desc.len;
}
if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
goto done;
}
rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
&i, desc_cache ==
&indirect_desc_cache);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (desc_cache == &indirect_desc_cache) {
address_space_cache_destroy(&indirect_desc_cache);
total_bufs++;
idx++;
} else {
idx += num_bufs - total_bufs;
total_bufs = num_bufs;
}
if (idx >= vq->vring.num) {
idx -= vq->vring.num;
wrap_counter ^= 1;
}
}
/* Record the index and wrap counter for a kick we want */
vq->shadow_avail_idx = idx;
vq->shadow_avail_wrap_counter = wrap_counter;
done:
address_space_cache_destroy(&indirect_desc_cache);
if (in_bytes) {
*in_bytes = in_total;
}
if (out_bytes) {
*out_bytes = out_total;
}
return;
err:
in_total = out_total = 0;
goto done;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
unsigned int *out_bytes,
unsigned max_in_bytes, unsigned max_out_bytes)
{
uint16_t desc_size;
VRingMemoryRegionCaches *caches;
RCU_READ_LOCK_GUARD();
if (unlikely(!vq->vring.desc)) {
goto err;
}
caches = vring_get_region_caches(vq);
if (!caches) {
goto err;
}
desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
sizeof(VRingPackedDesc) : sizeof(VRingDesc);
if (caches->desc.len < vq->vring.num * desc_size) {
virtio_error(vq->vdev, "Cannot map descriptor ring");
goto err;
}
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
max_in_bytes, max_out_bytes,
caches);
} else {
virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
max_in_bytes, max_out_bytes,
caches);
}
return;
err:
if (in_bytes) {
*in_bytes = 0;
}
if (out_bytes) {
*out_bytes = 0;
}
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
unsigned int out_bytes)
{
unsigned int in_total, out_total;
virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
return in_bytes <= in_total && out_bytes <= out_total;
}
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
hwaddr *addr, struct iovec *iov,
unsigned int max_num_sg, bool is_write,
hwaddr pa, size_t sz)
{
bool ok = false;
unsigned num_sg = *p_num_sg;
assert(num_sg <= max_num_sg);
if (!sz) {
virtio_error(vdev, "virtio: zero sized buffers are not allowed");
goto out;
}
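    /*
     * dma_memory_map() may map less than the requested length (for
     * example when the buffer crosses a memory region boundary), so keep
     * mapping until the whole guest range is covered, consuming one iovec
     * entry per contiguous mapping.
     */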
while (sz) {
hwaddr len = sz;
if (num_sg == max_num_sg) {
virtio_error(vdev, "virtio: too many write descriptors in "
"indirect table");
goto out;
}
iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
is_write ?
DMA_DIRECTION_FROM_DEVICE :
DMA_DIRECTION_TO_DEVICE,
MEMTXATTRS_UNSPECIFIED);
if (!iov[num_sg].iov_base) {
virtio_error(vdev, "virtio: bogus descriptor or out of resources");
goto out;
}
iov[num_sg].iov_len = len;
addr[num_sg] = pa;
sz -= len;
pa += len;
num_sg++;
}
ok = true;
out:
*p_num_sg = num_sg;
return ok;
}
/* Only used by error code paths before we have a VirtQueueElement (therefore
* virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to
* yet.
*/
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
struct iovec *iov)
{
unsigned int i;
for (i = 0; i < out_num + in_num; i++) {
int is_write = i >= out_num;
cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
iov++;
}
}
static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
hwaddr *addr, unsigned int num_sg,
bool is_write)
{
unsigned int i;
hwaddr len;
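    /*
     * Unlike virtqueue_map_desc(), there is no error path back to the
     * caller here: a mapping failure for an already-validated element is
     * fatal, so report it and exit.
     */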
for (i = 0; i < num_sg; i++) {
len = sg[i].iov_len;
sg[i].iov_base = dma_memory_map(vdev->dma_as,
addr[i], &len, is_write ?
DMA_DIRECTION_FROM_DEVICE :
DMA_DIRECTION_TO_DEVICE,
MEMTXATTRS_UNSPECIFIED);
if (!sg[i].iov_base) {
error_report("virtio: error trying to map MMIO memory");
exit(1);
}
if (len != sg[i].iov_len) {
error_report("virtio: unexpected memory split");
exit(1);
}
}
}
void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
false);
}
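/*
 * Allocate a VirtQueueElement and its variable-sized arrays in a single
 * allocation: the caller's structure ('sz' bytes, which must embed a
 * VirtQueueElement) is followed by the in_addr, out_addr, in_sg and
 * out_sg arrays, each placed at an offset aligned for its element type.
 */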
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
VirtQueueElement *elem;
size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
assert(sz >= sizeof(VirtQueueElement));
elem = g_malloc(out_sg_end);
trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
elem->out_num = out_num;
elem->in_num = in_num;
elem->in_addr = (void *)elem + in_addr_ofs;
elem->out_addr = (void *)elem + out_addr_ofs;
elem->in_sg = (void *)elem + in_sg_ofs;
elem->out_sg = (void *)elem + out_sg_ofs;
return elem;
}
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
unsigned int i, head, max;
VRingMemoryRegionCaches *caches;
MemoryRegionCache indirect_desc_cache;
MemoryRegionCache *desc_cache;
int64_t len;
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
unsigned out_num, in_num, elem_entries;
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
VRingDesc desc;
int rc;
address_space_cache_init_empty(&indirect_desc_cache);
RCU_READ_LOCK_GUARD();
if (virtio_queue_empty_rcu(vq)) {
goto done;
}
/* Needed after virtio_queue_empty(), see comment in
* virtqueue_num_heads(). */
smp_rmb();
    /* When we start there are no input or output buffers. */
out_num = in_num = elem_entries = 0;
max = vq->vring.num;
if (vq->inuse >= vq->vring.num) {
virtio_error(vdev, "Virtqueue size exceeded");
goto done;
}
if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
goto done;
}
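    /*
     * With VIRTIO_RING_F_EVENT_IDX, publish the next index we expect so
     * the driver knows to kick us again once it posts a buffer at or
     * past it.
     */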
if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_avail_event(vq, vq->last_avail_idx);
}
i = head;
caches = vring_get_region_caches(vq);
if (!caches) {
virtio_error(vdev, "Region caches not initialized");
goto done;
}
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto done;
}
desc_cache = &caches->desc;
vring_split_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (!desc.len || (desc.len % sizeof(VRingDesc))) {
virtio_error(vdev, "Invalid size for indirect buffer table");
goto done;
}
/* loop over the indirect descriptor table */
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
desc.addr, desc.len, false);
desc_cache = &indirect_desc_cache;
if (len < desc.len) {
virtio_error(vdev, "Cannot map indirect buffer");
goto done;
}
max = desc.len / sizeof(VRingDesc);
i = 0;
vring_split_desc_read(vdev, &desc, desc_cache, i);
}
/* Collect all the descriptors */
do {
bool map_ok;
if (desc.flags & VRING_DESC_F_WRITE) {
map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
iov + out_num,
VIRTQUEUE_MAX_SIZE - out_num, true,
desc.addr, desc.len);
} else {
if (in_num) {
virtio_error(vdev, "Incorrect order for descriptors");
goto err_undo_map;
}
map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
VIRTQUEUE_MAX_SIZE, false,
desc.addr, desc.len);
}
if (!map_ok) {
goto err_undo_map;
}
/* If we've got too many, that implies a descriptor loop. */
if (++elem_entries > max) {
virtio_error(vdev, "Looped descriptor");
goto err_undo_map;
}
rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (rc == VIRTQUEUE_READ_DESC_ERROR) {
goto err_undo_map;
}
/* Now copy what we have collected and mapped */
elem = virtqueue_alloc_element(sz, out_num, in_num);
elem->index = head;
elem->ndescs = 1;
for (i = 0; i < out_num; i++) {
elem->out_addr[i] = addr[i];
elem->out_sg[i] = iov[i];
}
for (i = 0; i < in_num; i++) {
elem->in_addr[i] = addr[out_num + i];
elem->in_sg[i] = iov[out_num + i];
}
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
address_space_cache_destroy(&indirect_desc_cache);
return elem;
err_undo_map:
virtqueue_undo_map_desc(out_num, in_num, iov);
goto done;
}
static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
{
unsigned int i, max;
VRingMemoryRegionCaches *caches;
MemoryRegionCache indirect_desc_cache;
MemoryRegionCache *desc_cache;
int64_t len;
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
unsigned out_num, in_num, elem_entries;
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
VRingPackedDesc desc;
uint16_t id;
int rc;
address_space_cache_init_empty(&indirect_desc_cache);
RCU_READ_LOCK_GUARD();
if (virtio_queue_packed_empty_rcu(vq)) {
goto done;
}
    /* When we start there are no input or output buffers. */
out_num = in_num = elem_entries = 0;
max = vq->vring.num;
if (vq->inuse >= vq->vring.num) {
virtio_error(vdev, "Virtqueue size exceeded");
goto done;
}
i = vq->last_avail_idx;
caches = vring_get_region_caches(vq);
if (!caches) {
virtio_error(vdev, "Region caches not initialized");
goto done;
}
    if (caches->desc.len < max * sizeof(VRingPackedDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto done;
}
desc_cache = &caches->desc;
vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
id = desc.id;
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingPackedDesc)) {
virtio_error(vdev, "Invalid size for indirect buffer table");
goto done;
}
/* loop over the indirect descriptor table */
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
desc.addr, desc.len, false);
desc_cache = &indirect_desc_cache;
if (len < desc.len) {
virtio_error(vdev, "Cannot map indirect buffer");
goto done;
}
max = desc.len / sizeof(VRingPackedDesc);
i = 0;
vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
}
/* Collect all the descriptors */
do {
bool map_ok;
if (desc.flags & VRING_DESC_F_WRITE) {
map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
iov + out_num,
VIRTQUEUE_MAX_SIZE - out_num, true,
desc.addr, desc.len);
} else {
if (in_num) {
virtio_error(vdev, "Incorrect order for descriptors");
goto err_undo_map;
}
map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
VIRTQUEUE_MAX_SIZE, false,
desc.addr, desc.len);
}
if (!map_ok) {
goto err_undo_map;
}
/* If we've got too many, that implies a descriptor loop. */
if (++elem_entries > max) {
virtio_error(vdev, "Looped descriptor");
goto err_undo_map;
}
rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
desc_cache ==
&indirect_desc_cache);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (desc_cache != &indirect_desc_cache) {
/* Buffer ID is included in the last descriptor in the list. */
id = desc.id;
}
/* Now copy what we have collected and mapped */
elem = virtqueue_alloc_element(sz, out_num, in_num);
for (i = 0; i < out_num; i++) {
elem->out_addr[i] = addr[i];
elem->out_sg[i] = iov[i];
}
for (i = 0; i < in_num; i++) {
elem->in_addr[i] = addr[out_num + i];
elem->in_sg[i] = iov[out_num + i];
}
elem->index = id;
elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
vq->last_avail_idx += elem->ndescs;
vq->inuse += elem->ndescs;
if (vq->last_avail_idx >= vq->vring.num) {
vq->last_avail_idx -= vq->vring.num;
vq->last_avail_wrap_counter ^= 1;
}
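    /*
     * The packed ring has no guest-visible avail index to re-read, so
     * keep the shadow copy and its wrap counter in step with the
     * position we have consumed up to.
     */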
vq->shadow_avail_idx = vq->last_avail_idx;
vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
address_space_cache_destroy(&indirect_desc_cache);
return elem;
err_undo_map:
virtqueue_undo_map_desc(out_num, in_num, iov);
goto done;
}
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
if (virtio_device_disabled(vq->vdev)) {
return NULL;
}
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
return virtqueue_packed_pop(vq, sz);
} else {
return virtqueue_split_pop(vq, sz);
}
}
static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
{
VRingMemoryRegionCaches *caches;
MemoryRegionCache *desc_cache;
unsigned int dropped = 0;
VirtQueueElement elem = {};
VirtIODevice *vdev = vq->vdev;
VRingPackedDesc desc;
RCU_READ_LOCK_GUARD();
caches = vring_get_region_caches(vq);
if (!caches) {
return 0;
}
desc_cache = &caches->desc;
virtio_queue_set_notification(vq, 0);
while (vq->inuse < vq->vring.num) {
unsigned int idx = vq->last_avail_idx;
/*
 * Works similarly to virtqueue_pop() but does not map buffers
 * and does not allocate any memory.
 */
vring_packed_desc_read(vdev, &desc, desc_cache,
vq->last_avail_idx, true);
if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
break;
}
elem.index = desc.id;
elem.ndescs = 1;
while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
vq->vring.num, &idx, false)) {
++elem.ndescs;
}
/*
 * Immediately push the element; there is nothing to unmap
 * as both in_num and out_num are set to 0.
 */
virtqueue_push(vq, &elem, 0);
dropped++;
vq->last_avail_idx += elem.ndescs;
if (vq->last_avail_idx >= vq->vring.num) {
vq->last_avail_idx -= vq->vring.num;
vq->last_avail_wrap_counter ^= 1;
}
}
return dropped;
}
static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
{
unsigned int dropped = 0;
VirtQueueElement elem = {};
VirtIODevice *vdev = vq->vdev;
bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
/* Works similarly to virtqueue_pop() but does not map buffers
 * and does not allocate any memory. */
smp_rmb();
if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
break;
}
vq->inuse++;
vq->last_avail_idx++;
if (fEventIdx) {
vring_set_avail_event(vq, vq->last_avail_idx);
}
/* Immediately push the element; there is nothing to unmap
 * as both in_num and out_num are set to 0. */
virtqueue_push(vq, &elem, 0);
dropped++;
}
return dropped;
}
/* virtqueue_drop_all:
* @vq: The #VirtQueue
* Drops all queued buffers and indicates them to the guest
* as if they were done. Useful when buffers cannot be
* processed but must be returned to the guest.
*/
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
struct VirtIODevice *vdev = vq->vdev;
if (virtio_device_disabled(vq->vdev)) {
return 0;
}
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return virtqueue_packed_drop_all(vq);
} else {
return virtqueue_split_drop_all(vq);
}
}
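/*
 * Illustrative sketch (not part of the original file): a device whose
 * backend can no longer service requests (e.g. a network peer going
 * away) can use virtqueue_drop_all() to hand every pending buffer back
 * to the guest, then notify it once.
 */
static void example_cancel_pending(VirtIODevice *vdev, VirtQueue *vq)
{
    if (virtqueue_drop_all(vq)) {
        /* The guest still has to learn that its buffers were "used". */
        virtio_notify(vdev, vq);
    }
}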
/* Reading and writing a structure directly to QEMUFile is *awful*, but
* it is what QEMU has always done by mistake. We can change it sooner
* or later by bumping the version number of the affected vm states.
* In the meantime, since the in-memory layout of VirtQueueElement
* has changed, we need to marshal to and from the layout that was
* used before the change.
*/
typedef struct VirtQueueElementOld {
unsigned int index;
unsigned int out_num;
unsigned int in_num;
hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
VirtQueueElement *elem;
VirtQueueElementOld data;
int i;
qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
/* TODO: teach all callers that this can fail, and return failure instead
* of asserting here.
* This is just one thing (there are probably more) that must be
* fixed before we can allow NDEBUG compilation.
*/
assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
elem->index = data.index;
for (i = 0; i < elem->in_num; i++) {
elem->in_addr[i] = data.in_addr[i];
}
for (i = 0; i < elem->out_num; i++) {
elem->out_addr[i] = data.out_addr[i];
}
for (i = 0; i < elem->in_num; i++) {
/* Base is overwritten by virtqueue_map. */
elem->in_sg[i].iov_base = NULL;
elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
}
for (i = 0; i < elem->out_num; i++) {
/* Base is overwritten by virtqueue_map. */
elem->out_sg[i].iov_base = NULL;
elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
}
if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
qemu_get_be32s(f, &elem->ndescs);
}
virtqueue_map(vdev, elem);
return elem;
}
void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
VirtQueueElement *elem)
{
VirtQueueElementOld data;
int i;
memset(&data, 0, sizeof(data));
data.index = elem->index;
data.in_num = elem->in_num;
data.out_num = elem->out_num;
for (i = 0; i < elem->in_num; i++) {
data.in_addr[i] = elem->in_addr[i];
}
for (i = 0; i < elem->out_num; i++) {
data.out_addr[i] = elem->out_addr[i];
}
for (i = 0; i < elem->in_num; i++) {
/* Base is overwritten by virtqueue_map when loading. Do not
* save it, as it would leak the QEMU address space layout. */
data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
}
for (i = 0; i < elem->out_num; i++) {
/* Do not save iov_base as above. */
data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
}
if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
qemu_put_be32s(f, &elem->ndescs);
}
qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
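/*
 * Illustrative sketch (not part of the original file): devices keep
 * in-flight requests with the VirtQueueElement as the *first* field so
 * that the sg arrays allocated past the end of the struct line up.
 * "ExampleReq" and the two helpers are hypothetical names.
 */
typedef struct ExampleReq {
    VirtQueueElement elem; /* must be first: sg arrays follow the struct */
    uint32_t cookie;       /* hypothetical per-request state */
} ExampleReq;

static void example_save_req(VirtIODevice *vdev, QEMUFile *f, ExampleReq *req)
{
    qemu_put_virtqueue_element(vdev, f, &req->elem);
    qemu_put_be32(f, req->cookie);
}

static ExampleReq *example_load_req(VirtIODevice *vdev, QEMUFile *f)
{
    /* sz covers the whole request; the sg arrays are appended after it */
    ExampleReq *req = qemu_get_virtqueue_element(vdev, f, sizeof(ExampleReq));

    req->cookie = qemu_get_be32(f);
    return req;
}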
/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
if (virtio_device_disabled(vdev)) {
return;
}
if (k->notify) {
k->notify(qbus->parent, vector);
}
}
void virtio_update_irq(VirtIODevice *vdev)
{
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
static int virtio_validate_features(VirtIODevice *vdev)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
!virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
return -EFAULT;
}
if (k->validate_features) {
return k->validate_features(vdev);
} else {
return 0;
}
}
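/*
 * Illustrative sketch (not part of the original file): a device class
 * can hook validate_features to veto the driver's selection before
 * FEATURES_OK is accepted; returning a negative errno makes
 * virtio_set_status() below refuse the status write. The feature
 * dependency checked here is hypothetical.
 */
static int example_validate_features(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return -EINVAL;
    }
    return 0;
}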
int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
trace_virtio_set_status(vdev, val);
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
val & VIRTIO_CONFIG_S_FEATURES_OK) {
int ret = virtio_validate_features(vdev);
if (ret) {
return ret;
}
}
}
if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
}
if (k->set_status) {
k->set_status(vdev, val);
}
vdev->status = val;
return 0;
}
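/*
 * Illustrative sketch (not part of the original file): a device class
 * set_status hook usually keys off DRIVER_OK to start or stop its
 * backend; the start/stop helpers named in the comments are
 * hypothetical.
 */
static void example_set_status(VirtIODevice *vdev, uint8_t status)
{
    if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
        /* example_backend_start(vdev); -- hypothetical */
    } else {
        /* example_backend_stop(vdev); -- hypothetical */
    }
}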
static enum virtio_device_endian virtio_default_endian(void)
{
if (target_words_bigendian()) {
return VIRTIO_DEVICE_ENDIAN_BIG;
} else {
return VIRTIO_DEVICE_ENDIAN_LITTLE;
}
}
static enum virtio_device_endian virtio_current_cpu_endian(void)
{
if (cpu_virtio_is_big_endian(current_cpu)) {
return VIRTIO_DEVICE_ENDIAN_BIG;
} else {
return VIRTIO_DEVICE_ENDIAN_LITTLE;
}
}
static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i)
{
vdev->vq[i].vring.desc = 0;
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
vdev->vq[i].shadow_avail_idx = 0;
vdev->vq[i].used_idx = 0;
vdev->vq[i].last_avail_wrap_counter = true;
vdev->vq[i].shadow_avail_wrap_counter = true;
vdev->vq[i].used_wrap_counter = true;
virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (k->queue_reset) {
k->queue_reset(vdev, queue_index);
}
__virtio_queue_reset(vdev, queue_index);
}
void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
/*
* TODO: SeaBIOS is currently out of spec and triggers this error.
* Once that is fixed in SeaBIOS, this check can be re-enabled for new
* machine types only, after being converted to LOG_GUEST_ERROR.
*
if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
error_report("queue_enable is only supported in devices of virtio "
"1.0 or later.");
}
*/
if (k->queue_enable) {
k->queue_enable(vdev, queue_index);
}
}
void virtio_reset(void *opaque)
{
VirtIODevice *vdev = opaque;
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
int i;
virtio_set_status(vdev, 0);
if (current_cpu) {
/* Guest initiated reset */
vdev->device_endian = virtio_current_cpu_endian();
} else {
/* System reset */
vdev->device_endian = virtio_default_endian();
}
if (vdev->vhost_started && k->get_vhost) {
vhost_reset_device(k->get_vhost(vdev));
}
if (k->reset) {
k->reset(vdev);
}
vdev->start_on_kick = false;
vdev->started = false;
vdev->broken = false;
vdev->guest_features = 0;
vdev->queue_sel = 0;
vdev->status = 0;
vdev->disabled = false;
qatomic_set(&vdev->isr, 0);
vdev->config_vector = VIRTIO_NO_VECTOR;
virtio_notify_vector(vdev, vdev->config_vector);
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
__virtio_queue_reset(vdev, i);
}
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
if (!vdev->vq[n].vring.num) {
return;
}
vdev->vq[n].vring.desc = addr;
virtio_queue_update_rings(vdev, n);
}
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.desc;
}
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
hwaddr avail, hwaddr used)
{
if (!vdev->vq[n].vring.num) {
return;
}
vdev->vq[n].vring.desc = desc;
vdev->vq[n].vring.avail = avail;
vdev->vq[n].vring.used = used;
virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
/* Don't allow the guest to flip a queue between existent and
 * nonexistent states, or to set it to an invalid size.
*/
if (!!num != !!vdev->vq[n].vring.num ||
num > VIRTQUEUE_MAX_SIZE ||
num < 0) {
return;
}
vdev->vq[n].vring.num = num;
}
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
return QLIST_FIRST(&vdev->vector_queues[vector]);
}
VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
return QLIST_NEXT(vq, node);
}
int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.num;
}
int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.num_default;
}
int virtio_get_num_queues(VirtIODevice *vdev)
{
int i;
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (!virtio_queue_get_num(vdev, i)) {
break;
}
}
return i;
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
/* virtio-1 compliant devices cannot change the alignment */
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
error_report("tried to modify queue alignment for virtio-1 device");
return;
}
/* Check that the transport told us it was going to do this
* (so a buggy transport will immediately assert rather than
* silently failing to migrate this state)
*/
assert(k->has_variable_vring_alignment);
if (align) {
vdev->vq[n].vring.align = align;
virtio_queue_update_rings(vdev, n);
}
}
void virtio_queue_set_shadow_avail_idx(VirtQueue *vq, uint16_t shadow_avail_idx)
{
if (!vq->vring.desc) {
return;
}
/*
* For packed VQs, the 16-bit value encodes a 1-bit wrap counter
* (bit 15) and a 15-bit shadow_avail_idx.
*/
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
vq->shadow_avail_wrap_counter = (shadow_avail_idx >> 15) & 0x1;
vq->shadow_avail_idx = shadow_avail_idx & 0x7FFF;
} else {
vq->shadow_avail_idx = shadow_avail_idx;
}
}
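/*
 * Illustrative sketch (not part of the original file): how a caller,
 * e.g. a vhost backend reporting its position back to QEMU, would
 * encode the 16-bit packed-ring value consumed above: the wrap counter
 * lives in bit 15 and the index in the low 15 bits.
 */
static inline uint16_t example_pack_avail_idx(uint16_t idx, bool wrap)
{
    return (idx & 0x7FFF) | ((uint16_t)wrap << 15);
}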
static void virtio_queue_notify_vq(VirtQueue *vq)
{
if (vq->vring.desc && vq->handle_output) {
VirtIODevice *vdev = vq->vdev;
if (unlikely(vdev->broken)) {
return;
}
trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
vq->handle_output(vdev, vq);
if (unlikely(vdev->start_on_kick)) {
virtio_set_started(vdev, true);
}
}
}
void virtio_queue_notify(VirtIODevice *vdev, int n)
{
VirtQueue *vq = &vdev->vq[n];
if (unlikely(!vq->vring.desc || vdev->broken)) {
return;
}
trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
if (vq->host_notifier_enabled) {
event_notifier_set(&vq->host_notifier);
} else if (vq->handle_output) {
vq->handle_output(vdev, vq);
if (unlikely(vdev->start_on_kick)) {
virtio_set_started(vdev, true);
}
}
}
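/*
 * Illustrative sketch (not part of the original file): a transport's
 * notify register handler just forwards the queue index written by the
 * guest; the MMIO callback shape is hypothetical.
 */
static void example_notify_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;

    if (val < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, val);
    }
}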
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
VIRTIO_NO_VECTOR;
}
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
VirtQueue *vq = &vdev->vq[n];
if (n < VIRTIO_QUEUE_MAX) {
if (vdev->vector_queues &&
vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
QLIST_REMOVE(vq, node);
}
vdev->vq[n].vector = vector;
if (vdev->vector_queues &&
vector != VIRTIO_NO_VECTOR) {
QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
}
}
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
VirtIOHandleOutput handle_output)
{
int i;
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
    if (vdev->vq[i].vring.num == 0) {
        break;
    }
}
if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
    abort();
}
vdev->vq[i].vring.num = queue_size;
vdev->vq[i].vring.num_default = queue_size;
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
vdev->vq[i].handle_output = handle_output;
vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
return &vdev->vq[i];
}
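/*
 * Illustrative sketch (not part of the original file): a device realize
 * function allocates its queues with virtio_add_queue() (here reusing
 * the hypothetical example_handle_output from earlier) and frees them
 * with virtio_delete_queue()/virtio_del_queue() on unrealize. The queue
 * size of 128 is arbitrary.
 */
static VirtQueue *example_realize_queue(VirtIODevice *vdev)
{
    return virtio_add_queue(vdev, 128, example_handle_output);
}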
void virtio_delete_queue(VirtQueue *vq)
{
vq->vring.num = 0;
vq->vring.num_default = 0;
vq->handle_output = NULL;
g_free(vq->used_elems);
vq->used_elems = NULL;
virtio_virtqueue_reset_region_cache(vq);
}
void virtio_del_queue(VirtIODevice *vdev, int n)
{
if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
abort();
}
virtio_delete_queue(&vdev->vq[n]);
}
static void virtio_set_isr(VirtIODevice *vdev, int value)
{
uint8_t old = qatomic_read(&vdev->isr);
/* Do not write ISR if it does not change, so that its cacheline remains
* shared in the common case where the guest does not read it.
*/
if ((old & value) != value) {
qatomic_or(&vdev->isr, value);
}
}
/* Called within rcu_read_lock(). */
static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
uint16_t old, new;
bool v;
/* We need to expose used array entries before checking used event. */
smp_mb();
/* Always notify when queue is empty (if the feature was acknowledged) */
if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
!vq->inuse && virtio_queue_empty(vq)) {
return true;
}
if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
}
v = vq->signalled_used_valid;
vq->signalled_used_valid = true;
old = vq->signalled_used;
new = vq->signalled_used = vq->used_idx;
return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
uint16_t off_wrap, uint16_t new,
uint16_t old)
{
int off = off_wrap & ~(1 << 15);
if (wrap != off_wrap >> 15) {
off -= vq->vring.num;
}
return vring_need_event(off, new, old);
}
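/*
 * For reference (not part of the original file): vring_need_event(),
 * used above, comes from the standard virtio ring header and checks in
 * modulo-2^16 arithmetic whether the event index was crossed while
 * moving from old to new. It is equivalent to:
 */
static inline int example_vring_need_event(uint16_t event_idx,
                                           uint16_t new_idx, uint16_t old)
{
    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}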
/* Called within rcu_read_lock(). */
static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
VRingPackedDescEvent e;
uint16_t old, new;
bool v;
VRingMemoryRegionCaches *caches;
caches = vring_get_region_caches(vq);
if (!caches) {
return false;
}
vring_packed_event_read(vdev, &caches->avail, &e);
old = vq->signalled_used;
new = vq->signalled_used = vq->used_idx;
v = vq->signalled_used_valid;
vq->signalled_used_valid = true;
if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
return false;
} else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
return true;
}
return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
e.off_wrap, new, old);
}
/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return virtio_packed_should_notify(vdev, vq);
} else {
return virtio_split_should_notify(vdev, vq);
}
}
/* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
static void virtio_notify_irqfd_deferred_fn(void *opaque)
{
EventNotifier *notifier = opaque;
VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier);
trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
event_notifier_set(notifier);
}
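/*
 * Minimal usage sketch (illustrative, not a specific caller): request
 * completion paths can coalesce guest interrupts by wrapping their
 * work in a defer section:
 *
 *     defer_call_begin();
 *     ... complete several requests, each calling virtio_notify_irqfd() ...
 *     defer_call_end();   // deferred fns run once here, batching the irqs
 */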
virtio: set ISR on dataplane notifications

Dataplane has been omitting forever the step of setting ISR when an
interrupt is raised.  This caused little breakage, because the
specification actually says that ISR may not be updated in MSI mode.

Some versions of the Windows drivers however didn't clear MSI mode
correctly, and proceeded using polling mode (using ISR, not the used
ring index!) for crashdump and hibernation.  If it were just crashdump
and hibernation it would not be a big deal, but recent releases of
Windows do not really shut down, but rather log out and hibernate to
make the next startup faster.  Hence, this manifested as a more
serious hang during shutdown with e.g. Windows 8.1 and virtio-win
1.8.0 RPMs.  Newer versions fixed this, while older versions do not
use MSI at all.

The failure has always been there for virtio dataplane, but it became
visible after commits 9ffe337 ("virtio-blk: always use dataplane path
if ioeventfd is active", 2016-10-30) and ad07cd6 ("virtio-scsi: always
use dataplane path if ioeventfd is active", 2016-10-30) made
virtio-blk and virtio-scsi always use the dataplane code under KVM.
The good news therefore is that it was not a bug in the patches---they
were doing exactly what they were meant for, i.e. shake out remaining
dataplane bugs.

The fix is not hard, so it's worth arranging for the broken drivers.
The virtio_should_notify+event_notifier_set pair that is common to
virtio-blk and virtio-scsi dataplane is replaced with a new public
function virtio_notify_irqfd that also sets ISR.  The irqfd emulation
code now need not set ISR anymore, so virtio_irq is removed.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Farhan Ali <alifm@linux.vnet.ibm.com>
Tested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2016-11-18 18:07:02 +03:00
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
WITH_RCU_READ_LOCK_GUARD() {
if (!virtio_should_notify(vdev, vq)) {
return;
}
}
trace_virtio_notify_irqfd(vdev, vq);
/*
* virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
* windows drivers included in virtio-win 1.8.0 (circa 2015) are
* incorrectly polling this bit during crashdump and hibernation
* in MSI mode, causing a hang if this bit is never updated.
* Recent releases of Windows do not really shut down, but rather
* log out and hibernate to make the next startup faster. Hence,
* this manifested as a more serious hang during shutdown with
* e.g. Windows 8.1 and virtio-win 1.8.0 RPMs.
*
* Next driver release from 2016 fixed this problem, so working around it
* is not a must, but it's easy to do so let's do it here.
*
* Note: it's safe to update ISR from any thread as it was switched
* to an atomic operation.
*/
virtio_set_isr(vq->vdev, 0x1);
defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
}
static void virtio_irq(VirtQueue *vq)
{
virtio_set_isr(vq->vdev, 0x1);
virtio_notify_vector(vq->vdev, vq->vector);
}
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
WITH_RCU_READ_LOCK_GUARD() {
if (!virtio_should_notify(vdev, vq)) {
return;
}
}
trace_virtio_notify(vdev, vq);
virtio_irq(vq);
}
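/*
 * Configuration-change interrupt: set ISR bit 1 as well as bit 0
 * (hence 0x3) and bump the config generation so a VIRTIO 1.0 driver
 * can detect that the config space changed underneath its reads.
 */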
void virtio_notify_config(VirtIODevice *vdev)
{
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
return;
virtio_set_isr(vdev, 0x3);
vdev->generation++;
virtio_notify_vector(vdev, vdev->config_vector);
}
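/*
 * The .needed callbacks below gate the optional vmstate subsections:
 * a subsection is only written to the migration stream when its
 * callback returns true, which keeps streams readable by older QEMUs
 * that know nothing about these fields.
 */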
static bool virtio_device_endian_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
return vdev->device_endian != virtio_default_endian();
}
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}
static bool virtio_64bit_features_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
return (vdev->host_features >> 32) != 0;
}
static bool virtio_virtqueue_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}
static bool virtio_packed_virtqueue_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
}
static bool virtio_ringsize_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
int i;
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
return true;
}
}
return false;
}
static bool virtio_extra_state_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
return k->has_extra_state &&
k->has_extra_state(qbus->parent);
}
static bool virtio_broken_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
return vdev->broken;
}
static bool virtio_started_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
return vdev->started;
}
virtio-pci: disable vring processing when bus-mastering is disabled

Currently the SLOF firmware for pseries guests will disable/re-enable
a PCI device multiple times via IO/MEM/MASTER bits of PCI_COMMAND
register after the initial probe/feature negotiation, as it tends to
work with a single device at a time at various stages like probing and
running block/network bootloaders without doing a full reset
in-between.

In QEMU, when PCI_COMMAND_MASTER is disabled we disable the
corresponding IOMMU memory region, so DMA accesses (including to vring
fields like idx/flags) will no longer undergo the necessary
translation.  Normally we wouldn't expect this to happen since it
would be misbehavior on the driver side to continue driving DMA
requests.  However, in the case of pseries, with iommu_platform=on, we
trigger the following sequence when tearing down the virtio-blk
dataplane ioeventfd in response to the guest unsetting
PCI_COMMAND_MASTER:

#2  0x0000555555922651 in virtqueue_map_desc (vdev=vdev@entry=0x555556dbcfb0, p_num_sg=p_num_sg@entry=0x7fffe657e1a8, addr=addr@entry=0x7fffe657e240, iov=iov@entry=0x7fffe6580240, max_num_sg=max_num_sg@entry=1024, is_write=is_write@entry=false, pa=0, sz=0) at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:757
#3  0x0000555555922a89 in virtqueue_pop (vq=vq@entry=0x555556dc8660, sz=sz@entry=184) at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:950
#4  0x00005555558d3eca in virtio_blk_get_request (vq=0x555556dc8660, s=0x555556dbcfb0) at /home/mdroth/w/qemu.git/hw/block/virtio-blk.c:255
#5  0x00005555558d3eca in virtio_blk_handle_vq (s=0x555556dbcfb0, vq=0x555556dc8660) at /home/mdroth/w/qemu.git/hw/block/virtio-blk.c:776
#6  0x000055555591dd66 in virtio_queue_notify_aio_vq (vq=vq@entry=0x555556dc8660) at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:1550
#7  0x000055555591ecef in virtio_queue_notify_aio_vq (vq=0x555556dc8660) at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:1546
#8  0x000055555591ecef in virtio_queue_host_notifier_aio_poll (opaque=0x555556dc86c8) at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:2527
#9  0x0000555555d02164 in run_poll_handlers_once (ctx=ctx@entry=0x55555688bfc0, timeout=timeout@entry=0x7fffe65844a8) at /home/mdroth/w/qemu.git/util/aio-posix.c:520
#10 0x0000555555d02d1b in try_poll_mode (timeout=0x7fffe65844a8, ctx=0x55555688bfc0) at /home/mdroth/w/qemu.git/util/aio-posix.c:607
#11 0x0000555555d02d1b in aio_poll (ctx=ctx@entry=0x55555688bfc0, blocking=blocking@entry=true) at /home/mdroth/w/qemu.git/util/aio-posix.c:639
#12 0x0000555555d0004d in aio_wait_bh_oneshot (ctx=0x55555688bfc0, cb=cb@entry=0x5555558d5130 <virtio_blk_data_plane_stop_bh>, opaque=opaque@entry=0x555556de86f0) at /home/mdroth/w/qemu.git/util/aio-wait.c:71
#13 0x00005555558d59bf in virtio_blk_data_plane_stop (vdev=<optimized out>) at /home/mdroth/w/qemu.git/hw/block/dataplane/virtio-blk.c:288
#14 0x0000555555b906a1 in virtio_bus_stop_ioeventfd (bus=bus@entry=0x555556dbcf38) at /home/mdroth/w/qemu.git/hw/virtio/virtio-bus.c:245
#15 0x0000555555b90dbb in virtio_bus_stop_ioeventfd (bus=bus@entry=0x555556dbcf38) at /home/mdroth/w/qemu.git/hw/virtio/virtio-bus.c:237
#16 0x0000555555b92a8e in virtio_pci_stop_ioeventfd (proxy=0x555556db4e40) at /home/mdroth/w/qemu.git/hw/virtio/virtio-pci.c:292
#17 0x0000555555b92a8e in virtio_write_config (pci_dev=0x555556db4e40, address=<optimized out>, val=1048832, len=<optimized out>) at /home/mdroth/w/qemu.git/hw/virtio/virtio-pci.c:613

I.e. the calling code is only scheduling a one-shot BH for
virtio_blk_data_plane_stop_bh, but somehow we end up trying to process
an additional virtqueue entry before we get there.  This is likely due
to the following check in virtio_queue_host_notifier_aio_poll:

  static bool virtio_queue_host_notifier_aio_poll(void *opaque)
  {
      EventNotifier *n = opaque;
      VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
      bool progress;

      if (!vq->vring.desc || virtio_queue_empty(vq)) {
          return false;
      }

      progress = virtio_queue_notify_aio_vq(vq);

namely the call to virtio_queue_empty().  In this case, since no new
requests have actually been issued, shadow_avail_idx == last_avail_idx,
so we actually try to access the vring via vring_avail_idx() to get
the latest non-shadowed idx:

  int virtio_queue_empty(VirtQueue *vq)
  {
      bool empty;
      ...

      if (vq->shadow_avail_idx != vq->last_avail_idx) {
          return 0;
      }

      rcu_read_lock();
      empty = vring_avail_idx(vq) == vq->last_avail_idx;
      rcu_read_unlock();
      return empty;

but since the IOMMU region has been disabled we get a bogus value (0
usually), which causes virtio_queue_empty() to falsely report that
there are entries to be processed, which causes errors such as:

  "virtio: zero sized buffers are not allowed" or
  "virtio-blk missing headers"

and puts the device in an error state.

This patch works around the issue by introducing virtio_set_disabled(),
which sets a 'disabled' flag to bypass checks like virtio_queue_empty()
when bus-mastering is disabled.  Since we'd check this flag at all the
same sites as vdev->broken, we replace those checks with an inline
function which checks for either vdev->broken or vdev->disabled.

The 'disabled' flag is only migrated when set, which should be fairly
rare, but to maintain migration compatibility we disable its use for
older machine types.  Users requiring the use of the flag in
conjunction with older machine types can set it explicitly as a
virtio-device option.

NOTES:

 - This leaves some other oddities in play, like the fact that
   DRIVER_OK also gets unset in response to bus-mastering being
   disabled, but not restored (however the device seems to continue
   working)

 - Similarly, we disable the host notifier via
   virtio_bus_stop_ioeventfd(), which seems to move the handling out
   of virtio-blk dataplane and back into the main IO thread, and it
   ends up staying there till a reset (but otherwise continues working
   normally)

Cc: David Gibson <david@gibson.dropbear.id.au>,
Cc: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Message-Id: <20191120005003.27035-1-mdroth@linux.vnet.ibm.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2019-11-20 03:50:03 +03:00
static bool virtio_disabled_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
return vdev->disabled;
}
static const VMStateDescription vmstate_virtqueue = {
.name = "virtqueue_state",
.version_id = 1,
.minimum_version_id = 1,
.fields = (const VMStateField[]) {
VMSTATE_UINT64(vring.avail, struct VirtQueue),
VMSTATE_UINT64(vring.used, struct VirtQueue),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_packed_virtqueue = {
.name = "packed_virtqueue_state",
.version_id = 1,
.minimum_version_id = 1,
.fields = (const VMStateField[]) {
VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
VMSTATE_UINT16(used_idx, struct VirtQueue),
VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
VMSTATE_UINT32(inuse, struct VirtQueue),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio_virtqueues = {
.name = "virtio/virtqueues",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_virtqueue_needed,
.fields = (const VMStateField[]) {
VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio_packed_virtqueues = {
.name = "virtio/packed_virtqueues",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_packed_virtqueue_needed,
.fields = (const VMStateField[]) {
VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_ringsize = {
.name = "ringsize_state",
.version_id = 1,
.minimum_version_id = 1,
.fields = (const VMStateField[]) {
VMSTATE_UINT32(vring.num_default, struct VirtQueue),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio_ringsize = {
.name = "virtio/ringsize",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_ringsize_needed,
.fields = (const VMStateField[]) {
VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
VMSTATE_END_OF_LIST()
}
};
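/*
 * Some transports carry additional state that the virtio core cannot
 * interpret; it is saved and loaded opaquely through the bus class
 * save_extra_state/load_extra_state hooks below.
 */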
static int get_extra_state(QEMUFile *f, void *pv, size_t size,
const VMStateField *field)
{
VirtIODevice *vdev = pv;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
if (!k->load_extra_state) {
return -1;
} else {
return k->load_extra_state(qbus->parent, f);
}
}
static int put_extra_state(QEMUFile *f, void *pv, size_t size,
const VMStateField *field, JSONWriter *vmdesc)
{
VirtIODevice *vdev = pv;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
k->save_extra_state(qbus->parent, f);
return 0;
}
static const VMStateInfo vmstate_info_extra_state = {
.name = "virtqueue_extra_state",
.get = get_extra_state,
.put = put_extra_state,
};
static const VMStateDescription vmstate_virtio_extra_state = {
.name = "virtio/extra_state",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_extra_state_needed,
.fields = (const VMStateField[]) {
{
.name = "extra_state",
.version_id = 0,
.field_exists = NULL,
.size = 0,
.info = &vmstate_info_extra_state,
.flags = VMS_SINGLE,
.offset = 0,
},
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio_device_endian = {
.name = "virtio/device_endian",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_device_endian_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINT8(device_endian, VirtIODevice),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio_64bit_features = {
.name = "virtio/64bit_features",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_64bit_features_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINT64(guest_features, VirtIODevice),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio_broken = {
.name = "virtio/broken",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_broken_needed,
.fields = (const VMStateField[]) {
VMSTATE_BOOL(broken, VirtIODevice),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio_started = {
.name = "virtio/started",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_started_needed,
.fields = (const VMStateField[]) {
VMSTATE_BOOL(started, VirtIODevice),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio_disabled = {
.name = "virtio/disabled",
.version_id = 1,
.minimum_version_id = 1,
.needed = &virtio_disabled_needed,
.fields = (const VMStateField[]) {
VMSTATE_BOOL(disabled, VirtIODevice),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_virtio = {
.name = "virtio",
.version_id = 1,
.minimum_version_id = 1,
.fields = (const VMStateField[]) {
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {
&vmstate_virtio_device_endian,
&vmstate_virtio_64bit_features,
&vmstate_virtio_virtqueues,
&vmstate_virtio_ringsize,
&vmstate_virtio_broken,
&vmstate_virtio_extra_state,
&vmstate_virtio_started,
&vmstate_virtio_packed_virtqueues,
&vmstate_virtio_disabled,
NULL
}
};
int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
int i;
if (k->save_config) {
k->save_config(qbus->parent, f);
}
qemu_put_8s(f, &vdev->status);
qemu_put_8s(f, &vdev->isr);
qemu_put_be16s(f, &vdev->queue_sel);
qemu_put_be32s(f, &guest_features_lo);
qemu_put_be32(f, vdev->config_len);
qemu_put_buffer(f, vdev->config, vdev->config_len);
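/* Only the leading run of configured virtqueues is migrated: count the
 * queues with a non-zero ring size and emit that count first. */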
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (vdev->vq[i].vring.num == 0)
break;
}
qemu_put_be32(f, i);
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (vdev->vq[i].vring.num == 0)
break;
qemu_put_be32(f, vdev->vq[i].vring.num);
if (k->has_variable_vring_alignment) {
qemu_put_be32(f, vdev->vq[i].vring.align);
}
/*
* Save desc now, the rest of the ring addresses are saved in
* subsections for VIRTIO-1 devices.
*/
qemu_put_be64(f, vdev->vq[i].vring.desc);
qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
if (k->save_queue) {
k->save_queue(qbus->parent, i, f);
}
}
if (vdc->save != NULL) {
vdc->save(vdev, f);
}
if (vdc->vmsd) {
int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
if (ret) {
return ret;
}
}
/* Subsections */
return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field, JSONWriter *vmdesc)
{
return virtio_save(VIRTIO_DEVICE(opaque), f);
}
/* A wrapper for use as a VMState .get function */
virtio: Drop out of coroutine context in virtio_load()

virtio_load() as a whole should run in coroutine context because it
reads from the migration stream and we don't want this to block.

However, it calls virtio_set_features_nocheck() and devices don't
expect their .set_features callback to run in a coroutine and
therefore call functions that may not be called in coroutine context.
To fix this, drop out of coroutine context for calling
virtio_set_features_nocheck().

Without this fix, the following crash was reported:

#0  __pthread_kill_implementation (threadid=<optimized out>, signo=signo@entry=6, no_tid=no_tid@entry=0) at pthread_kill.c:44
#1  0x00007efc738c05d3 in __pthread_kill_internal (signo=6, threadid=<optimized out>) at pthread_kill.c:78
#2  0x00007efc73873d26 in __GI_raise (sig=sig@entry=6) at ../sysdeps/posix/raise.c:26
#3  0x00007efc738477f3 in __GI_abort () at abort.c:79
#4  0x00007efc7384771b in __assert_fail_base (fmt=0x7efc739dbcb8 "", assertion=assertion@entry=0x560aebfbf5cf "!qemu_in_coroutine()", file=file@entry=0x560aebfcd2d4 "../block/graph-lock.c", line=line@entry=275, function=function@entry=0x560aebfcd34d "void bdrv_graph_rdlock_main_loop(void)") at assert.c:92
#5  0x00007efc7386ccc6 in __assert_fail (assertion=0x560aebfbf5cf "!qemu_in_coroutine()", file=0x560aebfcd2d4 "../block/graph-lock.c", line=275, function=0x560aebfcd34d "void bdrv_graph_rdlock_main_loop(void)") at assert.c:101
#6  0x0000560aebcd8dd6 in bdrv_register_buf ()
#7  0x0000560aeb97ed97 in ram_block_added.llvm ()
#8  0x0000560aebb8303f in ram_block_add.llvm ()
#9  0x0000560aebb834fa in qemu_ram_alloc_internal.llvm ()
#10 0x0000560aebb2ac98 in vfio_region_mmap ()
#11 0x0000560aebb3ea0f in vfio_bars_register ()
#12 0x0000560aebb3c628 in vfio_realize ()
#13 0x0000560aeb90f0c2 in pci_qdev_realize ()
#14 0x0000560aebc40305 in device_set_realized ()
#15 0x0000560aebc48e07 in property_set_bool.llvm ()
#16 0x0000560aebc46582 in object_property_set ()
#17 0x0000560aebc4cd58 in object_property_set_qobject ()
#18 0x0000560aebc46ba7 in object_property_set_bool ()
#19 0x0000560aeb98b3ca in qdev_device_add_from_qdict ()
#20 0x0000560aebb1fbaf in virtio_net_set_features ()
#21 0x0000560aebb46b51 in virtio_set_features_nocheck ()
#22 0x0000560aebb47107 in virtio_load ()
#23 0x0000560aeb9ae7ce in vmstate_load_state ()
#24 0x0000560aeb9d2ee9 in qemu_loadvm_state_main ()
#25 0x0000560aeb9d45e1 in qemu_loadvm_state ()
#26 0x0000560aeb9bc32c in process_incoming_migration_co.llvm ()
#27 0x0000560aebeace56 in coroutine_trampoline.llvm ()

Cc: qemu-stable@nongnu.org
Buglink: https://issues.redhat.com/browse/RHEL-832
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20230905145002.46391-3-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2023-09-05 17:50:02 +03:00
static int coroutine_mixed_fn
virtio_device_get(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
return virtio_load(vdev, f, dc->vmsd->version_id);
}
const VMStateInfo virtio_vmstate_info = {
.name = "virtio",
.get = virtio_device_get,
.put = virtio_device_put,
};
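/*
 * Mask the requested features down to what the device actually offers
 * before calling the device class hook; the return value reports (as
 * -1) whether the driver tried to ack bits the host never advertised.
 */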
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
bool bad = (val & ~(vdev->host_features)) != 0;
val &= vdev->host_features;
if (k->set_features) {
k->set_features(vdev, val);
}
vdev->guest_features = val;
return bad ? -1 : 0;
}
typedef struct VirtioSetFeaturesNocheckData {
Coroutine *co;
VirtIODevice *vdev;
uint64_t val;
int ret;
} VirtioSetFeaturesNocheckData;
static void virtio_set_features_nocheck_bh(void *opaque)
{
VirtioSetFeaturesNocheckData *data = opaque;
data->ret = virtio_set_features_nocheck(data->vdev, data->val);
aio_co_wake(data->co);
}
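/*
 * Device .set_features callbacks are not coroutine-safe, so when we
 * are called from coroutine context (e.g. during migration load),
 * bounce the call to a bottom half on the current AioContext and
 * yield until it has run.
 */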
static int coroutine_mixed_fn
virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val)
{
if (qemu_in_coroutine()) {
VirtioSetFeaturesNocheckData data = {
.co = qemu_coroutine_self(),
.vdev = vdev,
.val = val,
};
aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
virtio_set_features_nocheck_bh, &data);
qemu_coroutine_yield();
return data.ret;
} else {
return virtio_set_features_nocheck(vdev, val);
}
}
int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
int ret;
/*
* The driver must not attempt to set features after feature negotiation
* has finished.
*/
if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
return -EINVAL;
}
if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
__func__, vdev->name);
}
ret = virtio_set_features_nocheck(vdev, val);
if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
/* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
int i;
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (vdev->vq[i].vring.num != 0) {
virtio_init_region_cache(vdev, i);
}
}
}
if (!ret) {
if (!virtio_device_started(vdev, vdev->status) &&
!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
vdev->start_on_kick = true;
}
}
return ret;
}
static void virtio_device_check_notification_compatibility(VirtIODevice *vdev,
Error **errp)
{
VirtioBusState *bus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
DeviceState *proxy = DEVICE(BUS(bus)->parent);
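    /*
     * Note (rationale, not stated in the code): an ioeventfd kick can
     * only convey "this queue was notified"; it cannot carry the extra
     * payload that VIRTIO_F_NOTIFICATION_DATA requires, so the two are
     * mutually exclusive.
     */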
if (virtio_host_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA) &&
k->ioeventfd_enabled(proxy)) {
error_setg(errp,
"notification_data=on without ioeventfd=off is not supported");
}
}
virtio: introduce VirtIOConfigSizeParams & virtio_get_config_size

This is the first step towards moving all device config size
calculation logic into the virtio core code.  In particular, this adds
a struct that contains all the necessary information for common virtio
code to be able to calculate the final config size for a device.  This
is expected to be used with the new virtio_get_config_size helper,
which calculates the final length based on the provided host features.

This builds on top of already existing code like VirtIOFeature and
virtio_feature_get_config_size(), but adds additional fields, as well
as sanity checking so that device-specific code doesn't have to
duplicate it.

An example usage would be:

    static const VirtIOFeature dev_features[] = {
        {.flags = 1ULL << FEATURE_1_BIT,
         .end = endof(struct virtio_dev_config, feature_1)},
        {.flags = 1ULL << FEATURE_2_BIT,
         .end = endof(struct virtio_dev_config, feature_2)},
        {}
    };

    static const VirtIOConfigSizeParams dev_cfg_size_params = {
        .min_size = DEV_BASE_CONFIG_SIZE,
        .max_size = sizeof(struct virtio_dev_config),
        .feature_sizes = dev_features
    };

    // code inside my_dev_device_realize()
    size_t config_size = virtio_get_config_size(&dev_cfg_size_params,
                                                host_features);
    virtio_init(vdev, VIRTIO_ID_MYDEV, config_size);

Currently every device is expected to write its own boilerplate from
the example above in device_realize(), however, the next step of this
transition is moving VirtIOConfigSizeParams into VirtioDeviceClass, so
that it can be done automatically by the virtio initialization code.

All of the users of virtio_feature_get_config_size have been converted
to use virtio_get_config_size so it's no longer needed and is removed
with this commit.

Signed-off-by: Daniil Tatianin <d-tatianin@yandex-team.ru>
Message-Id: <20220906073111.353245-2-d-tatianin@yandex-team.ru>
Reviewed-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2022-09-06 10:31:07 +03:00
size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
uint64_t host_features)
{
size_t config_size = params->min_size;
const VirtIOFeature *feature_sizes = params->feature_sizes;
size_t i;
for (i = 0; feature_sizes[i].flags != 0; i++) {
if (host_features & feature_sizes[i].flags) {
config_size = MAX(feature_sizes[i].end, config_size);
}
}
assert(config_size <= params->max_size);
return config_size;
}
int coroutine_mixed_fn
virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
int i, ret;
int32_t config_len;
uint32_t num;
uint32_t features;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
/*
* We poison the endianness to ensure it does not get used before
* subsections have been loaded.
*/
vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
if (k->load_config) {
ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
}
qemu_get_8s(f, &vdev->status);
qemu_get_8s(f, &vdev->isr);
qemu_get_be16s(f, &vdev->queue_sel);
if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
return -1;
}
qemu_get_be32s(f, &features);
/*
* Temporarily set guest_features low bits - needed by
 * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
*
* Note: devices should always test host features in future - don't create
* new dependencies like this.
*/
vdev->guest_features = features;
config_len = qemu_get_be32(f);
/*
* There are cases where the incoming config can be bigger or smaller
* than what we have; so load what we have space for, and skip
* any excess that's in the stream.
*/
qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
while (config_len > vdev->config_len) {
qemu_get_byte(f);
config_len--;
}
num = qemu_get_be32(f);
if (num > VIRTIO_QUEUE_MAX) {
error_report("Invalid number of virtqueues: 0x%x", num);
return -1;
}
for (i = 0; i < num; i++) {
vdev->vq[i].vring.num = qemu_get_be32(f);
if (k->has_variable_vring_alignment) {
vdev->vq[i].vring.align = qemu_get_be32(f);
}
vdev->vq[i].vring.desc = qemu_get_be64(f);
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
vdev->vq[i].signalled_used_valid = false;
Revert "virtio: turn vq->notification into a nested counter" This reverts commit aff8fd18f1786fc5af259a9bc0077727222f51ca. Both virtio-net and virtio-crypto do not balance virtio_queue_set_notification() enable and disable calls. This makes the notifications_disabled counter unreliable and Doug Goldstein reported the following assertion failure: #3 0x00007ffff44d1c62 in __GI___assert_fail ( assertion=assertion@entry=0x555555ae8e8a "vq->notification_disabled > 0", file=file@entry=0x555555ae89c0 "/home/doug/work/qemu/hw/virtio/virtio.c", line=line@entry=215, function=function@entry=0x555555ae9630 <__PRETTY_FUNCTION__.43707> "virtio_queue_set_notification") at assert.c:101 #4 0x00005555557f25d6 in virtio_queue_set_notification (vq=0x55555666aa90, enable=enable@entry=1) at /home/doug/work/qemu/hw/virtio/virtio.c:215 #5 0x00005555557dc311 in virtio_net_has_buffers (q=<optimized out>, q=<optimized out>, bufsize=102) at /home/doug/work/qemu/hw/net/virtio-net.c:1008 #6 virtio_net_receive (nc=<optimized out>, buf=0x555557386b88 "", size=102) at /home/doug/work/qemu/hw/net/virtio-net.c:1148 #7 0x00005555559cad33 in nc_sendv_compat (flags=<optimized out>, iovcnt=1, iov=0x7fffead746d0, nc=0x55555788b340) at net/net.c:705 #8 qemu_deliver_packet_iov (sender=<optimized out>, flags=<optimized out>, iov=0x7fffead746d0, iovcnt=1, opaque=0x55555788b340) at net/net.c:732 #9 0x00005555559cd929 in qemu_net_queue_deliver (size=<optimized out>, data=<optimized out>, flags=<optimized out>, sender=<optimized out>, queue=0x55555788b550) at net/queue.c:164 #10 qemu_net_queue_flush (queue=0x55555788b550) at net/queue.c:261 This patch is safe to revert since it's just an optimization for virtqueue polling. The next patch will improve the situation again without resorting to nesting. Reported-by: Doug Goldstein <cardoe@cardoe.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Tested-by: Richard Henderson <rth@twiddle.net> Tested-by: Laszlo Ersek <lersek@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2017-01-12 14:46:10 +03:00
vdev->vq[i].notification = true;
if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
error_report("VQ %d address 0x0 "
"inconsistent with Host index 0x%x",
i, vdev->vq[i].last_avail_idx);
return -1;
}
if (k->load_queue) {
ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
}
}
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
if (vdc->load != NULL) {
ret = vdc->load(vdev, f, version_id);
if (ret) {
return ret;
}
}
if (vdc->vmsd) {
ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
if (ret) {
return ret;
}
}
/* Subsections */
ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
if (ret) {
return ret;
}
if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
vdev->device_endian = virtio_default_endian();
}
if (virtio_64bit_features_needed(vdev)) {
/*
* Subsection load filled vdev->guest_features. Run them
* through virtio_set_features to sanity-check them against
* host_features.
*/
uint64_t features64 = vdev->guest_features;
if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) {
error_report("Features 0x%" PRIx64 " unsupported. "
"Allowed features: 0x%" PRIx64,
features64, vdev->host_features);
return -1;
}
} else {
if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) {
error_report("Features 0x%x unsupported. "
"Allowed features: 0x%" PRIx64,
features, vdev->host_features);
return -1;
}
}
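    /*
     * Legacy (pre-1.0) drivers may kick a queue before the device is
     * otherwise started, so arrange for the first kick to start it.
     */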
if (!virtio_device_started(vdev, vdev->status) &&
!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
vdev->start_on_kick = true;
}
RCU_READ_LOCK_GUARD();
for (i = 0; i < num; i++) {
if (vdev->vq[i].vring.desc) {
uint16_t nheads;
/*
* VIRTIO-1 devices migrate desc, used, and avail ring addresses so
* only the region cache needs to be set up. Legacy devices need
* to calculate used and avail ring addresses based on the desc
* address.
*/
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
virtio_init_region_cache(vdev, i);
} else {
virtio_queue_update_rings(vdev, i);
}
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
vdev->vq[i].shadow_avail_wrap_counter =
vdev->vq[i].last_avail_wrap_counter;
continue;
}
nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
/* Check it isn't doing strange things with descriptor numbers. */
if (nheads > vdev->vq[i].vring.num) {
virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
"inconsistent with Host index 0x%x: delta 0x%x",
i, vdev->vq[i].vring.num,
vring_avail_idx(&vdev->vq[i]),
vdev->vq[i].last_avail_idx, nheads);
vdev->vq[i].used_idx = 0;
vdev->vq[i].shadow_avail_idx = 0;
vdev->vq[i].inuse = 0;
continue;
}
vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
/*
* Some devices migrate VirtQueueElements that have been popped
* from the avail ring but not yet returned to the used ring.
* Since max ring size < UINT16_MAX it's safe to use modulo
* UINT16_MAX + 1 subtraction.
*/
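            /*
             * Worked example: with last_avail_idx = 0x0002 (just after a
             * wrap) and used_idx = 0xfffe, inuse computes to
             * (uint16_t)(0x0002 - 0xfffe) = 4 elements still in flight.
             */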
vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
vdev->vq[i].used_idx);
if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
"used_idx 0x%x",
i, vdev->vq[i].vring.num,
vdev->vq[i].last_avail_idx,
vdev->vq[i].used_idx);
return -1;
}
}
}
if (vdc->post_load) {
ret = vdc->post_load(vdev);
if (ret) {
return ret;
}
}
return 0;
}
void virtio_cleanup(VirtIODevice *vdev)
{
qemu_del_vm_change_state_handler(vdev->vmstate);
}
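/*
 * Note the ordering below: when the VM resumes, virtio_set_status() runs
 * before the transport's vmstate_change hook; when it stops, the hook
 * runs first and the device status is applied afterwards.
 */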
static void virtio_vmstate_change(void *opaque, bool running, RunState state)
{
VirtIODevice *vdev = opaque;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
bool backend_run = running && virtio_device_started(vdev, vdev->status);
vdev->vm_running = running;
if (backend_run) {
virtio_set_status(vdev, vdev->status);
}
if (k->vmstate_change) {
k->vmstate_change(qbus->parent, backend_run);
}
if (!backend_run) {
virtio_set_status(vdev, vdev->status);
}
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
size_t vdev_size, const char *vdev_name)
{
DeviceState *vdev = data;
object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
vdev_size, vdev_name, &error_abort,
NULL);
qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int i;
int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
if (nvectors) {
vdev->vector_queues =
g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
}
vdev->start_on_kick = false;
vdev->started = false;
vdev->vhost_started = false;
vdev->device_id = device_id;
vdev->status = 0;
qatomic_set(&vdev->isr, 0);
vdev->queue_sel = 0;
vdev->config_vector = VIRTIO_NO_VECTOR;
vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
vdev->vm_running = runstate_is_running();
vdev->broken = false;
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
vdev->vq[i].vector = VIRTIO_NO_VECTOR;
vdev->vq[i].vdev = vdev;
vdev->vq[i].queue_index = i;
vdev->vq[i].host_notifier_enabled = false;
}
vdev->name = virtio_id_to_name(device_id);
vdev->config_len = config_size;
if (vdev->config_len) {
vdev->config = g_malloc0(config_size);
} else {
vdev->config = NULL;
}
vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
virtio_vmstate_change, vdev);
vdev->device_endian = virtio_default_endian();
vdev->use_guest_notifier_mask = true;
}
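/*
 * Typical realize-time pairing with this initializer (sketch;
 * VIRTIO_ID_MYDEV, "my_dev_config" and "my_dev_handle_output" are
 * placeholders):
 *
 *   virtio_init(vdev, VIRTIO_ID_MYDEV, sizeof(struct my_dev_config));
 *   virtio_add_queue(vdev, 256, my_dev_handle_output);
 *
 * The matching teardown path calls virtio_cleanup() from the device's
 * unrealize method.
 */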
/*
 * Only devices that predate the virtio standard support legacy mode;
 * this includes devices not specified in the standard. All newer devices
 * conform to the virtio standard only.
*/
bool virtio_legacy_allowed(VirtIODevice *vdev)
{
switch (vdev->device_id) {
case VIRTIO_ID_NET:
case VIRTIO_ID_BLOCK:
case VIRTIO_ID_CONSOLE:
case VIRTIO_ID_RNG:
case VIRTIO_ID_BALLOON:
case VIRTIO_ID_RPMSG:
case VIRTIO_ID_SCSI:
case VIRTIO_ID_9P:
case VIRTIO_ID_RPROC_SERIAL:
case VIRTIO_ID_CAIF:
return true;
default:
return false;
}
}
bool virtio_legacy_check_disabled(VirtIODevice *vdev)
{
return vdev->disable_legacy_check;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.desc;
}
bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
{
return virtio_queue_get_desc_addr(vdev, n) != 0;
}
bool virtio_queue_enabled(VirtIODevice *vdev, int n)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
if (k->queue_enabled) {
return k->queue_enabled(qbus->parent, n);
}
return virtio_queue_enabled_legacy(vdev, n);
}
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.avail;
}
hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.used;
}
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
int s;
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return sizeof(struct VRingPackedDescEvent);
}
s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return offsetof(VRingAvail, ring) +
sizeof(uint16_t) * vdev->vq[n].vring.num + s;
}
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
int s;
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return sizeof(struct VRingPackedDescEvent);
}
s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return offsetof(VRingUsed, ring) +
sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
}
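/*
 * Worked example, assuming the standard split-ring layouts: with 256
 * descriptors and VIRTIO_RING_F_EVENT_IDX negotiated, the avail area is
 * 4 + 2 * 256 + 2 = 518 bytes and the used area is 4 + 8 * 256 + 2 =
 * 2054 bytes (each VRingUsedElem is 8 bytes).
 */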
static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
int n)
{
unsigned int avail, used;
avail = vdev->vq[n].last_avail_idx;
avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
used = vdev->vq[n].used_idx;
used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
return avail | used << 16;
}
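/*
 * Layout of the combined value above (mirrored by the setter below):
 * bits 0-14 hold the avail index, bit 15 its wrap counter, bits 16-30
 * the used index, and bit 31 the used-ring wrap counter.
 */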
static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
int n)
{
return vdev->vq[n].last_avail_idx;
}
unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return virtio_queue_packed_get_last_avail_idx(vdev, n);
} else {
return virtio_queue_split_get_last_avail_idx(vdev, n);
}
}
static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
int n, unsigned int idx)
{
struct VirtQueue *vq = &vdev->vq[n];
vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
vq->last_avail_wrap_counter =
vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
idx >>= 16;
vq->used_idx = idx & 0x7fff;
vq->used_wrap_counter = !!(idx & 0x8000);
}
static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
int n, unsigned int idx)
{
vdev->vq[n].last_avail_idx = idx;
vdev->vq[n].shadow_avail_idx = idx;
}
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
unsigned int idx)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
} else {
virtio_queue_split_set_last_avail_idx(vdev, n, idx);
}
}
static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
int n)
{
/* We don't have a reference like avail idx in shared memory */
}
static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
int n)
{
RCU_READ_LOCK_GUARD();
if (vdev->vq[n].vring.desc) {
vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
}
}
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
virtio_queue_packed_restore_last_avail_idx(vdev, n);
} else {
virtio_queue_split_restore_last_avail_idx(vdev, n);
}
}
static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
{
/* used idx was updated through set_last_avail_idx() */
}
static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n)
{
RCU_READ_LOCK_GUARD();
if (vdev->vq[n].vring.desc) {
vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
}
}
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_update_used_idx(vdev, n);
    } else {
        virtio_queue_split_update_used_idx(vdev, n);
    }
}
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
vdev->vq[n].signalled_used_valid = false;
}
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
return vdev->vq + n;
}
uint16_t virtio_get_queue_index(VirtQueue *vq)
{
return vq->queue_index;
}
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
if (event_notifier_test_and_clear(n)) {
virtio_irq(vq);
}
}
static void virtio_config_guest_notifier_read(EventNotifier *n)
{
VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
if (event_notifier_test_and_clear(n)) {
virtio_notify_config(vdev);
}
}
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
bool with_irqfd)
{
if (assign && !with_irqfd) {
event_notifier_set_handler(&vq->guest_notifier,
virtio_queue_guest_notifier_read);
} else {
event_notifier_set_handler(&vq->guest_notifier, NULL);
}
if (!assign) {
        /*
         * Test and clear notifier before closing it,
         * in case poll callback didn't have time to run.
         */
virtio_queue_guest_notifier_read(&vq->guest_notifier);
}
}
void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
bool assign, bool with_irqfd)
{
EventNotifier *n;
n = &vdev->config_notifier;
if (assign && !with_irqfd) {
event_notifier_set_handler(n, virtio_config_guest_notifier_read);
} else {
event_notifier_set_handler(n, NULL);
}
if (!assign) {
        /*
         * Test and clear notifier before closing it,
         * in case poll callback didn't have time to run.
         */
virtio_config_guest_notifier_read(n);
}
}
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
return &vq->guest_notifier;
}
static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
virtio_queue_set_notification(vq, 0);
}
static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
EventNotifier *n = opaque;
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
return vq->vring.desc && !virtio_queue_empty(vq);
}
static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
virtio_queue_notify_vq(vq);
}
static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
/* Caller polls once more after this to catch requests that race with us */
virtio_queue_set_notification(vq, 1);
}
void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
/*
* virtio_queue_aio_detach_host_notifier() can leave notifications disabled.
* Re-enable them. (And if detach has not been used before, notifications
* being enabled is still the default state while a notifier is attached;
* see virtio_queue_host_notifier_aio_poll_end(), which will always leave
* notifications enabled once the polling section is left.)
*/
if (!virtio_queue_get_notification(vq)) {
virtio_queue_set_notification(vq, 1);
}
aio_set_event_notifier(ctx, &vq->host_notifier,
virtio_queue_host_notifier_read,
virtio_queue_host_notifier_aio_poll,
virtio_queue_host_notifier_aio_poll_ready);
aio_set_event_notifier_poll(ctx, &vq->host_notifier,
virtio_queue_host_notifier_aio_poll_begin,
virtio_queue_host_notifier_aio_poll_end);
/*
* We will have ignored notifications about new requests from the guest
* while no notifiers were attached, so "kick" the virt queue to process
* those requests now.
*/
event_notifier_set(&vq->host_notifier);
}
/*
* Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
* this for rx virtqueues and similar cases where the virtqueue handler
* function does not pop all elements. When the virtqueue is left non-empty
* polling consumes CPU cycles and should not be used.
*/
void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
{
/* See virtio_queue_aio_attach_host_notifier() */
if (!virtio_queue_get_notification(vq)) {
virtio_queue_set_notification(vq, 1);
}
aio_set_event_notifier(ctx, &vq->host_notifier,
virtio_queue_host_notifier_read,
NULL, NULL);
/*
* See virtio_queue_aio_attach_host_notifier().
* Note that this may be unnecessary for the type of virtqueues this
* function is used for. Still, it will not hurt to have a quick look into
* whether we can/should process any of the virtqueue elements.
*/
event_notifier_set(&vq->host_notifier);
}
void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
/*
* aio_set_event_notifier_poll() does not guarantee whether io_poll_end()
* will run after io_poll_begin(), so by removing the notifier, we do not
* know whether virtio_queue_host_notifier_aio_poll_end() has run after a
* previous virtio_queue_host_notifier_aio_poll_begin(), i.e. whether
* notifications are enabled or disabled. It does not really matter anyway;
* we just removed the notifier, so we do not care about notifications until
* we potentially re-attach it. The attach_host_notifier functions will
* ensure that notifications are enabled again when they are needed.
*/
}
void virtio_queue_host_notifier_read(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
if (event_notifier_test_and_clear(n)) {
virtio_queue_notify_vq(vq);
}
}
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
return &vq->host_notifier;
}
EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev)
{
return &vdev->config_notifier;
}
void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
{
vq->host_notifier_enabled = enabled;
}
int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
MemoryRegion *mr, bool assign)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
if (k->set_host_notifier_mr) {
return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
}
return -1;
}
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
g_free(vdev->bus_name);
vdev->bus_name = g_strdup(bus_name);
}
void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
error_vreport(fmt, ap);
va_end(ap);
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
virtio_notify_config(vdev);
}
vdev->broken = true;
}
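/*
 * Devices call this for fatal guest-visible inconsistencies, as in the
 * ring-index check in virtio_load() above; once broken is set, virtqueue
 * processing is suppressed until the device is reset.
 */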
static void virtio_memory_listener_commit(MemoryListener *listener)
{
VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
int i;
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (vdev->vq[i].vring.num == 0) {
break;
}
virtio_init_region_cache(vdev, i);
}
}
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
Error *err = NULL;
/* Devices should either use vmsd or the load/save methods */
assert(!vdc->vmsd || !vdc->load);
if (vdc->realize != NULL) {
vdc->realize(dev, &err);
if (err != NULL) {
error_propagate(errp, err);
return;
}
}
    /* Devices should not use both ioeventfd and the notification data feature */
virtio_device_check_notification_compatibility(vdev, &err);
if (err != NULL) {
error_propagate(errp, err);
vdc->unrealize(dev);
return;
}
virtio_bus_device_plugged(vdev, &err);
if (err != NULL) {
error_propagate(errp, err);
vdc->unrealize(dev);
return;
}
vdev->listener.commit = virtio_memory_listener_commit;
vdev->listener.name = "virtio";
memory_listener_register(&vdev->listener, vdev->dma_as);
}
static void virtio_device_unrealize(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
memory_listener_unregister(&vdev->listener);
virtio_bus_device_unplugged(vdev);
if (vdc->unrealize != NULL) {
vdc->unrealize(dev);
}
g_free(vdev->bus_name);
vdev->bus_name = NULL;
}
static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
int i;
if (!vdev->vq) {
return;
}
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (vdev->vq[i].vring.num == 0) {
break;
}
virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
g_free(vdev->vq);
}
static void virtio_device_instance_finalize(Object *obj)
{
VirtIODevice *vdev = VIRTIO_DEVICE(obj);
virtio_device_free_virtqueues(vdev);
g_free(vdev->config);
g_free(vdev->vector_queues);
}
static Property virtio_properties[] = {
DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
virtio-pci: disable vring processing when bus-mastering is disabled

Currently the SLOF firmware for pseries guests will disable/re-enable
a PCI device multiple times via IO/MEM/MASTER bits of PCI_COMMAND
register after the initial probe/feature negotiation, as it tends to
work with a single device at a time at various stages like probing
and running block/network bootloaders without doing a full reset
in-between.

In QEMU, when PCI_COMMAND_MASTER is disabled we disable the
corresponding IOMMU memory region, so DMA accesses (including to
vring fields like idx/flags) will no longer undergo the necessary
translation. Normally we wouldn't expect this to happen since it
would be misbehavior on the driver side to continue driving DMA
requests. However, in the case of pseries, with iommu_platform=on, we
trigger the following sequence when tearing down the virtio-blk
dataplane ioeventfd in response to the guest unsetting
PCI_COMMAND_MASTER:

  #2  0x0000555555922651 in virtqueue_map_desc (vdev=vdev@entry=0x555556dbcfb0, p_num_sg=p_num_sg@entry=0x7fffe657e1a8, addr=addr@entry=0x7fffe657e240, iov=iov@entry=0x7fffe6580240, max_num_sg=max_num_sg@entry=1024, is_write=is_write@entry=false, pa=0, sz=0)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:757
  #3  0x0000555555922a89 in virtqueue_pop (vq=vq@entry=0x555556dc8660, sz=sz@entry=184)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:950
  #4  0x00005555558d3eca in virtio_blk_get_request (vq=0x555556dc8660, s=0x555556dbcfb0)
      at /home/mdroth/w/qemu.git/hw/block/virtio-blk.c:255
  #5  0x00005555558d3eca in virtio_blk_handle_vq (s=0x555556dbcfb0, vq=0x555556dc8660)
      at /home/mdroth/w/qemu.git/hw/block/virtio-blk.c:776
  #6  0x000055555591dd66 in virtio_queue_notify_aio_vq (vq=vq@entry=0x555556dc8660)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:1550
  #7  0x000055555591ecef in virtio_queue_notify_aio_vq (vq=0x555556dc8660)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:1546
  #8  0x000055555591ecef in virtio_queue_host_notifier_aio_poll (opaque=0x555556dc86c8)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio.c:2527
  #9  0x0000555555d02164 in run_poll_handlers_once (ctx=ctx@entry=0x55555688bfc0, timeout=timeout@entry=0x7fffe65844a8)
      at /home/mdroth/w/qemu.git/util/aio-posix.c:520
  #10 0x0000555555d02d1b in try_poll_mode (timeout=0x7fffe65844a8, ctx=0x55555688bfc0)
      at /home/mdroth/w/qemu.git/util/aio-posix.c:607
  #11 0x0000555555d02d1b in aio_poll (ctx=ctx@entry=0x55555688bfc0, blocking=blocking@entry=true)
      at /home/mdroth/w/qemu.git/util/aio-posix.c:639
  #12 0x0000555555d0004d in aio_wait_bh_oneshot (ctx=0x55555688bfc0, cb=cb@entry=0x5555558d5130 <virtio_blk_data_plane_stop_bh>, opaque=opaque@entry=0x555556de86f0)
      at /home/mdroth/w/qemu.git/util/aio-wait.c:71
  #13 0x00005555558d59bf in virtio_blk_data_plane_stop (vdev=<optimized out>)
      at /home/mdroth/w/qemu.git/hw/block/dataplane/virtio-blk.c:288
  #14 0x0000555555b906a1 in virtio_bus_stop_ioeventfd (bus=bus@entry=0x555556dbcf38)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio-bus.c:245
  #15 0x0000555555b90dbb in virtio_bus_stop_ioeventfd (bus=bus@entry=0x555556dbcf38)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio-bus.c:237
  #16 0x0000555555b92a8e in virtio_pci_stop_ioeventfd (proxy=0x555556db4e40)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio-pci.c:292
  #17 0x0000555555b92a8e in virtio_write_config (pci_dev=0x555556db4e40, address=<optimized out>, val=1048832, len=<optimized out>)
      at /home/mdroth/w/qemu.git/hw/virtio/virtio-pci.c:613

I.e. the calling code is only scheduling a one-shot BH for
virtio_blk_data_plane_stop_bh, but somehow we end up trying to
process an additional virtqueue entry before we get there. This is
likely due to the following check in
virtio_queue_host_notifier_aio_poll:

  static bool virtio_queue_host_notifier_aio_poll(void *opaque)
  {
      EventNotifier *n = opaque;
      VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
      bool progress;

      if (!vq->vring.desc || virtio_queue_empty(vq)) {
          return false;
      }

      progress = virtio_queue_notify_aio_vq(vq);

namely the call to virtio_queue_empty(). In this case, since no new
requests have actually been issued, shadow_avail_idx == last_avail_idx,
so we actually try to access the vring via vring_avail_idx() to get
the latest non-shadowed idx:

  int virtio_queue_empty(VirtQueue *vq)
  {
      bool empty;
      ...

      if (vq->shadow_avail_idx != vq->last_avail_idx) {
          return 0;
      }

      rcu_read_lock();
      empty = vring_avail_idx(vq) == vq->last_avail_idx;
      rcu_read_unlock();
      return empty;

but since the IOMMU region has been disabled we get a bogus value (0
usually), which causes virtio_queue_empty() to falsely report that
there are entries to be processed, which causes errors such as:

  "virtio: zero sized buffers are not allowed"

or

  "virtio-blk missing headers"

and puts the device in an error state.

This patch works around the issue by introducing virtio_set_disabled(),
which sets a 'disabled' flag to bypass checks like virtio_queue_empty()
when bus-mastering is disabled. Since we'd check this flag at all the
same sites as vdev->broken, we replace those checks with an inline
function which checks for either vdev->broken or vdev->disabled.

The 'disabled' flag is only migrated when set, which should be fairly
rare, but to maintain migration compatibility we disable its use for
older machine types. Users requiring the use of the flag in
conjunction with older machine types can set it explicitly as a
virtio-device option.

NOTES:
 - This leaves some other oddities in play, like the fact that
   DRIVER_OK also gets unset in response to bus-mastering being
   disabled, but not restored (however the device seems to continue
   working)
 - Similarly, we disable the host notifier via
   virtio_bus_stop_ioeventfd(), which seems to move the handling out
   of virtio-blk dataplane and back into the main IO thread, and it
   ends up staying there till a reset (but otherwise continues
   working normally)

Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Message-Id: <20191120005003.27035-1-mdroth@linux.vnet.ibm.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2019-11-20 03:50:03 +03:00
DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
disable_legacy_check, false),
DEFINE_PROP_END_OF_LIST(),
};
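/*
 * A minimal sketch of what the "use-disabled-flag" commit above
 * describes: the open-coded vdev->broken tests were replaced by a
 * helper along these lines. The real helper lives in the virtio
 * headers; this rendering is illustrative only:
 *
 *     static inline bool virtio_device_disabled(VirtIODevice *vdev)
 *     {
 *         return unlikely(vdev->disabled || vdev->broken);
 *     }
 *
 * so a device whose bus-mastering was disabled is skipped by
 * virtio_queue_empty() and friends exactly as a broken one is.
 */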
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
int i, n, r, err;
/*
* Batch all the host notifiers in a single transaction to avoid
* quadratic time complexity in address_space_update_ioeventfds().
*/
memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
r = virtio_bus_set_host_notifier(qbus, n, true);
if (r < 0) {
err = r;
goto assign_error;
}
event_notifier_set_handler(&vq->host_notifier,
virtio_queue_host_notifier_read);
}
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
/* Kick right away to begin processing requests already in vring */
VirtQueue *vq = &vdev->vq[n];
if (!vq->vring.num) {
continue;
}
event_notifier_set(&vq->host_notifier);
}
memory_region_transaction_commit();
return 0;
assign_error:
i = n; /* save n for a second iteration after transaction is committed. */
while (--n >= 0) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
event_notifier_set_handler(&vq->host_notifier, NULL);
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
/*
* The transaction expects the ioeventfds to be open when it
* commits. Do it now, before the cleanup loop.
*/
memory_region_transaction_commit();
while (--i >= 0) {
if (!virtio_queue_get_num(vdev, i)) {
continue;
}
virtio_bus_cleanup_host_notifier(qbus, i);
}
return err;
}
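/*
 * The begin/commit bracketing above is the generic memory-region
 * transaction pattern; in outline (a sketch, not additional code):
 *
 *     memory_region_transaction_begin();
 *     for (each queue) {
 *         virtio_bus_set_host_notifier(qbus, n, true);   // queued up
 *     }
 *     memory_region_transaction_commit();    // one ioeventfd update
 *
 * Assigning each notifier individually would rescan the address
 * space's ioeventfd list once per queue, which is the quadratic cost
 * the comment at the top of the function refers to.
 */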
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
return virtio_bus_start_ioeventfd(vbus);
}
static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
int n, r;
/*
* Batch all the host notifiers in a single transaction to avoid
* quadratic time complexity in address_space_update_ioeventfds().
*/
memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
event_notifier_set_handler(&vq->host_notifier, NULL);
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
/*
* The transaction expects the ioeventfds to be open when it
* commits. Do it now, before the cleanup loop.
*/
memory_region_transaction_commit();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
virtio_bus_cleanup_host_notifier(qbus, n);
}
}
virtio: introduce grab/release_ioeventfd to fix vhost

Following the recent refactoring of virtio notifiers [1], more
specifically the patch ed08a2a0b ("virtio: use
virtio_bus_set_host_notifier to start/stop ioeventfd") that uses
virtio_bus_set_host_notifier [2] by default, core virtio code
requires 'ioeventfd_started' to be set to true/false when the host
notifiers are configured.

When vhost is stopped and started, however, there is a stop followed
by another start. Since ioeventfd_started was never set to true, the
'stop' operation triggered by virtio_bus_set_host_notifier() will not
result in a call to virtio_pci_ioeventfd_assign(assign=false). This
leaves the memory regions with stale notifiers and results in the
next start triggering the following assertion:

  kvm_mem_ioeventfd_add: error adding ioeventfd: File exists
  Aborted

This patch reintroduces (hopefully in a cleaner way) the concept that
was present with ioeventfd_disabled before the refactoring. When
ioeventfd_grabbed > 0, ioeventfd_started tracks whether ioeventfd
should be enabled or not, but ioeventfd is actually not started at
all until vhost releases the host notifiers.

[1] http://lists.nongnu.org/archive/html/qemu-devel/2016-10/msg07748.html
[2] http://lists.nongnu.org/archive/html/qemu-devel/2016-10/msg07760.html

Reported-by: Felipe Franciosi <felipe@nutanix.com>
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reported-by: Alex Williamson <alex.williamson@redhat.com>
Fixes: ed08a2a0b ("virtio: use virtio_bus_set_host_notifier to start/stop ioeventfd")
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Tested-by: Farhan Ali <alifm@linux.vnet.ibm.com>
Tested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2016-11-18 18:07:00 +03:00
int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
return virtio_bus_grab_ioeventfd(vbus);
}
void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
virtio_bus_release_ioeventfd(vbus);
}
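/*
 * Usage sketch, per the grab/release commit message above; the error
 * handling shown is illustrative, not lifted from a caller. A vhost
 * backend brackets its ownership of the host notifiers with the pair:
 *
 *     if (virtio_device_grab_ioeventfd(vdev) < 0) {
 *         return -ENOSYS;    // transport cannot provide notifiers
 *     }
 *     ... vhost drives the queues; core virtio keeps ioeventfd off ...
 *     virtio_device_release_ioeventfd(vdev);  // core may restart it
 */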
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
/* Set the default value here. */
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = virtio_device_realize;
dc->unrealize = virtio_device_unrealize;
dc->bus_type = TYPE_VIRTIO_BUS;
device_class_set_props(dc, virtio_properties);
vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
return virtio_bus_ioeventfd_enabled(vbus);
}
VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
uint16_t queue,
Error **errp)
{
VirtIODevice *vdev;
VirtQueueStatus *status;
vdev = qmp_find_virtio_device(path);
if (vdev == NULL) {
error_setg(errp, "Path %s is not a VirtIODevice", path);
return NULL;
}
if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
error_setg(errp, "Invalid virtqueue number %d", queue);
return NULL;
}
status = g_new0(VirtQueueStatus, 1);
status->name = g_strdup(vdev->name);
status->queue_index = vdev->vq[queue].queue_index;
status->inuse = vdev->vq[queue].inuse;
status->vring_num = vdev->vq[queue].vring.num;
status->vring_num_default = vdev->vq[queue].vring.num_default;
status->vring_align = vdev->vq[queue].vring.align;
status->vring_desc = vdev->vq[queue].vring.desc;
status->vring_avail = vdev->vq[queue].vring.avail;
status->vring_used = vdev->vq[queue].vring.used;
status->used_idx = vdev->vq[queue].used_idx;
status->signalled_used = vdev->vq[queue].signalled_used;
status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
if (vdev->vhost_started) {
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
struct vhost_dev *hdev = vdc->get_vhost(vdev);
/* check if vq index exists for vhost as well */
if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
status->has_last_avail_idx = true;
int vhost_vq_index =
hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
struct vhost_vring_state state = {
.index = vhost_vq_index,
};
status->last_avail_idx =
hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
}
} else {
status->has_shadow_avail_idx = true;
status->has_last_avail_idx = true;
status->last_avail_idx = vdev->vq[queue].last_avail_idx;
status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
}
return status;
}
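/*
 * Example QMP exchange for the command above. The values and QOM path
 * are illustrative; the exact path depends on how the device was
 * created:
 *
 *     -> { "execute": "x-query-virtio-queue-status",
 *          "arguments": { "path": "/machine/peripheral/vblk/virtio-backend",
 *                         "queue": 0 } }
 *     <- { "return": { "name": "virtio-blk", "queue-index": 0,
 *                      "vring-num": 256, ... } }
 */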
static strList *qmp_decode_vring_desc_flags(uint16_t flags)
{
strList *list = NULL;
strList *node;
int i;
struct {
uint16_t flag;
const char *value;
} map[] = {
{ VRING_DESC_F_NEXT, "next" },
{ VRING_DESC_F_WRITE, "write" },
{ VRING_DESC_F_INDIRECT, "indirect" },
{ 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
{ 1 << VRING_PACKED_DESC_F_USED, "used" },
{ 0, "" }
};
for (i = 0; map[i].flag; i++) {
if ((map[i].flag & flags) == 0) {
continue;
}
node = g_malloc0(sizeof(strList));
node->value = g_strdup(map[i].value);
node->next = list;
list = node;
}
return list;
}
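/*
 * Worked example: for a split-ring descriptor with
 * flags == (VRING_DESC_F_NEXT | VRING_DESC_F_WRITE), the function
 * above yields a two-entry list, "write" then "next": nodes are
 * prepended, so the most recently matched flag comes first.
 */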
VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
uint16_t queue,
bool has_index,
uint16_t index,
Error **errp)
{
VirtIODevice *vdev;
VirtQueue *vq;
VirtioQueueElement *element = NULL;
vdev = qmp_find_virtio_device(path);
if (vdev == NULL) {
error_setg(errp, "Path %s is not a VirtIO device", path);
return NULL;
}
if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
error_setg(errp, "Invalid virtqueue number %d", queue);
return NULL;
}
vq = &vdev->vq[queue];
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
error_setg(errp, "Packed ring not supported");
return NULL;
} else {
unsigned int head, i, max;
VRingMemoryRegionCaches *caches;
MemoryRegionCache indirect_desc_cache;
MemoryRegionCache *desc_cache;
VRingDesc desc;
VirtioRingDescList *list = NULL;
VirtioRingDescList *node;
int rc, ndescs;
address_space_cache_init_empty(&indirect_desc_cache);
RCU_READ_LOCK_GUARD();
max = vq->vring.num;
if (!has_index) {
head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
} else {
head = vring_avail_ring(vq, index % vq->vring.num);
}
i = head;
caches = vring_get_region_caches(vq);
if (!caches) {
error_setg(errp, "Region caches not initialized");
return NULL;
}
if (caches->desc.len < max * sizeof(VRingDesc)) {
error_setg(errp, "Cannot map descriptor ring");
return NULL;
}
desc_cache = &caches->desc;
vring_split_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
int64_t len;
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
desc.addr, desc.len, false);
desc_cache = &indirect_desc_cache;
if (len < desc.len) {
error_setg(errp, "Cannot map indirect buffer");
goto done;
}
max = desc.len / sizeof(VRingDesc);
i = 0;
vring_split_desc_read(vdev, &desc, desc_cache, i);
}
element = g_new0(VirtioQueueElement, 1);
element->avail = g_new0(VirtioRingAvail, 1);
element->used = g_new0(VirtioRingUsed, 1);
element->name = g_strdup(vdev->name);
element->index = head;
element->avail->flags = vring_avail_flags(vq);
element->avail->idx = vring_avail_idx(vq);
element->avail->ring = head;
element->used->flags = vring_used_flags(vq);
element->used->idx = vring_used_idx(vq);
ndescs = 0;
do {
/* A buggy driver may produce an infinite loop */
if (ndescs >= max) {
break;
}
node = g_new0(VirtioRingDescList, 1);
node->value = g_new0(VirtioRingDesc, 1);
node->value->addr = desc.addr;
node->value->len = desc.len;
node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
node->next = list;
list = node;
ndescs++;
rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
element->descs = list;
done:
address_space_cache_destroy(&indirect_desc_cache);
}
return element;
}
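/*
 * Example QMP exchange for the command above (illustrative values):
 * pass "index" to inspect a specific ring slot, or omit it to look at
 * the current head, as the has_index branch shows:
 *
 *     -> { "execute": "x-query-virtio-queue-element",
 *          "arguments": { "path": "/machine/peripheral/vblk/virtio-backend",
 *                         "queue": 0 } }
 *     <- { "return": { "name": "virtio-blk", "index": 5,
 *                      "descs": [ { "addr": 84639744, "len": 16,
 *                                   "flags": [ "write" ] } ], ... } }
 */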
static const TypeInfo virtio_device_info = {
.name = TYPE_VIRTIO_DEVICE,
.parent = TYPE_DEVICE,
.instance_size = sizeof(VirtIODevice),
.class_init = virtio_device_class_init,
.instance_finalize = virtio_device_instance_finalize,
.abstract = true,
.class_size = sizeof(VirtioDeviceClass),
};
static void virtio_register_types(void)
{
type_register_static(&virtio_device_info);
}
type_init(virtio_register_types)
QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
QEMUBHFunc *cb, void *opaque,
const char *name)
{
DeviceState *transport = qdev_get_parent_bus(dev)->parent;
return qemu_bh_new_full(cb, opaque, name,
&transport->mem_reentrancy_guard);
}
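/*
 * Usage sketch; the device state and callback names are hypothetical.
 * A virtio device model allocates its bottom half via the helper
 * above so it is covered by the transport's re-entrancy guard:
 *
 *     s->bh = virtio_bh_new_guarded_full(DEVICE(s), my_bh_handler, s,
 *                                        "my-device-bh");
 *
 * A BH created this way is deferred rather than dispatched while an
 * MMIO handler for the same transport is already on the stack, which
 * is intended to block guest-triggered re-entrancy bugs.
 */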