qemu/hw/block/dataplane/virtio-blk.c


/*
 * Dedicated thread for virtio-blk I/O processing
 *
 * Copyright 2012 IBM, Corp.
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *   Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk.h"
#include "virtio-blk.h"
#include "block/aio.h"
#include "hw/virtio/virtio-bus.h"
#include "qom/object_interfaces.h"
struct VirtIOBlockDataPlane {
    bool starting;
    bool stopping;

    VirtIOBlkConf *conf;
    VirtIODevice *vdev;
    QEMUBH *bh; /* bh for guest notification */
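    /* Bitmap of virtqueue indices with completions waiting for a guest
     * notification; consumed by notify_guest_bh().
     */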
    unsigned long *batch_notify_vqs;
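    /* True when VIRTIO_RING_F_EVENT_IDX was not negotiated; completions are
     * then coalesced into a single interrupt via the notify_guest_bh() BH.
     */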
    bool batch_notifications;

    /* Note that these EventNotifiers are assigned by value. This is
     * fine as long as you do not call event_notifier_cleanup on them
     * (because you don't own the file descriptor or handle; you just
     * use it).
     */
    IOThread *iothread;
    AioContext *ctx;
};

/* Raise an interrupt to signal guest, if necessary */
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
{
    if (s->batch_notifications) {
        set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
        qemu_bh_schedule(s->bh);
    } else {
        virtio_notify_irqfd(s->vdev, vq);
    }
}

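/* Deliver batched guest notifications for all flagged virtqueues.
 *
 * Context: BH in the dataplane AioContext (s->ctx)
 */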
static void notify_guest_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned nvqs = s->conf->num_queues;
    unsigned long bitmap[BITS_TO_LONGS(nvqs)];
    unsigned j;

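    /* Take a snapshot of the pending bitmap and clear it, so completions that
     * arrive while we are notifying simply re-schedule the BH.
     */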
    memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap));
    memset(s->batch_notify_vqs, 0, sizeof(bitmap));

    for (j = 0; j < nvqs; j += BITS_PER_LONG) {
        unsigned long bits = bitmap[j / BITS_PER_LONG];

        while (bits != 0) {
            unsigned i = j + ctzl(bits);
            VirtQueue *vq = virtio_get_queue(s->vdev, i);

            virtio_notify_irqfd(s->vdev, vq);
            bits &= bits - 1; /* clear right-most bit */
        }
    }
}

/* Context: QEMU global mutex held */
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                  VirtIOBlockDataPlane **dataplane,
                                  Error **errp)
{
    VirtIOBlockDataPlane *s;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    *dataplane = NULL;

    if (conf->iothread) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return false;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return false;
        }

        /* If dataplane is (re-)enabled while the guest is running there could
         * be block jobs that can conflict.
         */
        if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            error_prepend(errp, "cannot start virtio-blk dataplane: ");
            return false;
        }
    }
    /* Don't try if transport does not support notifiers. */
    if (!virtio_device_ioeventfd_enabled(vdev)) {
        return false;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->conf = conf;

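    /* Run request processing in the configured IOThread's AioContext, or fall
     * back to the main loop context when no iothread was given.
     */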
    if (conf->iothread) {
        s->iothread = conf->iothread;
        object_ref(OBJECT(s->iothread));
        s->ctx = iothread_get_aio_context(s->iothread);
    } else {
        s->ctx = qemu_get_aio_context();
    }
    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
    s->batch_notify_vqs = bitmap_new(conf->num_queues);

    *dataplane = s;
    return true;
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    VirtIOBlock *vblk;

    if (!s) {
        return;
    }

    vblk = VIRTIO_BLK(s->vdev);
    assert(!vblk->dataplane_started);
    g_free(s->batch_notify_vqs);
    qemu_bh_delete(s->bh);
    if (s->iothread) {
        object_unref(OBJECT(s->iothread));
    }
    g_free(s);
}

/* Context: QEMU global mutex held */
int virtio_blk_data_plane_start(VirtIODevice *vdev)
{
    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    AioContext *old_context;
    unsigned i;
    unsigned nvqs = s->conf->num_queues;
    Error *local_err = NULL;
    int r;

    if (vblk->dataplane_started || s->starting) {
        return 0;
    }

    s->starting = true;
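
    /*
     * Batching defers the guest interrupt to notify_guest_bh() so that several
     * completions can share one interrupt, at the cost of slightly higher
     * per-request latency.  When VIRTIO_RING_F_EVENT_IDX is negotiated the
     * guest already suppresses unnecessary notifications, so notify
     * immediately in that case.
     */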
    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        s->batch_notifications = true;
    } else {
        s->batch_notifications = false;
    }

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
    if (r != 0) {
        error_report("virtio-blk failed to set guest notifier (%d), "
                     "ensure -accel kvm is set.", r);
        goto fail_guest_notifiers;
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    /* Set up virtqueue notify */
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
        if (r != 0) {
            int j = i;

            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
            while (i--) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
            }

            /*
             * The transaction expects the ioeventfds to be open when it
             * commits. Do it now, before the cleanup loop.
             */
            memory_region_transaction_commit();

            while (j--) {
                virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
            }
            goto fail_host_notifiers;
        }
    }

    memory_region_transaction_commit();

    /*
     * These fields are visible to the IOThread so we rely on implicit barriers
     * in aio_context_acquire() on the write side and aio_notify_accept() on
     * the read side.
     */
    s->starting = false;
    vblk->dataplane_started = true;
    trace_virtio_blk_data_plane_start(s);

    old_context = blk_get_aio_context(s->conf->conf.blk);
    aio_context_acquire(old_context);
    r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
    aio_context_release(old_context);
    if (r < 0) {
        error_report_err(local_err);
        goto fail_aio_context;
    }

    /* Kick right away to begin processing requests already in vring */
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        event_notifier_set(virtio_queue_get_host_notifier(vq));
    }

    /* Get this show started by hooking up our callbacks */
    aio_context_acquire(s->ctx);
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_attach_host_notifier(vq, s->ctx);
    }
    aio_context_release(s->ctx);
    return 0;

  fail_aio_context:
    memory_region_transaction_begin();
    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }
  fail_host_notifiers:
    k->set_guest_notifiers(qbus->parent, nvqs, false);
  fail_guest_notifiers:
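    /* Fall back to the non-dataplane code path: mark dataplane disabled but
     * "started" so that start is not retried for every request kick.
     */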
    vblk->dataplane_disabled = true;
    s->starting = false;
    vblk->dataplane_started = true;
    return -ENOSYS;
}

/* Stop notifications for new requests from guest.
 *
 * Context: BH in IOThread
 */
static void virtio_blk_data_plane_stop_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned i;

    for (i = 0; i < s->conf->num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_detach_host_notifier(vq, s->ctx);
    }
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_stop(VirtIODevice *vdev)
{
    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vblk));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf->num_queues;

    if (!vblk->dataplane_started || s->stopping) {
        return;
    }

    /* Better luck next time. */
    if (vblk->dataplane_disabled) {
        vblk->dataplane_disabled = false;
        vblk->dataplane_started = false;
        return;
    }
    s->stopping = true;
    trace_virtio_blk_data_plane_stop(s);

    aio_context_acquire(s->ctx);
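
    /* Detach the host notifier handlers from the dataplane AioContext so that
     * no new requests are dispatched there while we tear things down.
     */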
    aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
    blk_drain(s->conf->conf.blk);

    /*
     * Try to switch bs back to the QEMU main loop. If other users keep the
     * BlockBackend in the iothread, that's ok
     */
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);

    aio_context_release(s->ctx);

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }
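
    /*
     * Cancel the deferred notification BH before the vring caches can go away
     * (e.g. across a system reset) and deliver any pending notifications now.
     */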
    qemu_bh_cancel(s->bh);
    notify_guest_bh(s); /* final chance to notify guest */

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    vblk->dataplane_started = false;
    s->stopping = false;
}