3edf660a91
There is no need for the AioContext lock in aio_wait_bh_oneshot(). It's easy to
remove the lock from existing callers and then switch from AIO_WAIT_WHILE() to
AIO_WAIT_WHILE_UNLOCKED() in aio_wait_bh_oneshot(). Document that the
AioContext lock should not be held across aio_wait_bh_oneshot(). Holding a
lock across aio_poll() can cause deadlock, so we don't want callers to do that.

This is a step towards getting rid of the AioContext lock.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230404153307.458883-1-stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
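aio_wait_bh_oneshot() itself is defined elsewhere in QEMU; the file below is one of its callers (see virtio_scsi_dataplane_stop()). The following is a hedged sketch of the change the message describes, not the exact upstream diff; the AioWaitBHData/aio_wait_bh helper names are assumptions and only the AIO_WAIT_WHILE() -> AIO_WAIT_WHILE_UNLOCKED() switch is taken from the commit message:

typedef struct {
    QEMUBHFunc *cb;
    void *opaque;
    bool done;
} AioWaitBHData;

/* Runs as a one-shot BH in the target AioContext (e.g. an IOThread) */
static void aio_wait_bh(void *opaque)
{
    AioWaitBHData *data = opaque;

    data->cb(data->opaque);
    data->done = true;
    aio_wait_kick(); /* wake the main loop waiting below */
}

/* Context: main loop; the AioContext lock must NOT be held across this call */
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    AioWaitBHData data = { .cb = cb, .opaque = opaque };

    aio_bh_schedule_oneshot(ctx, aio_wait_bh, &data);

    /* was AIO_WAIT_WHILE(ctx, !data.done), which required holding ctx */
    AIO_WAIT_WHILE_UNLOCKED(NULL, !data.done);
}

Callers such as virtio_scsi_dataplane_stop() below therefore invoke aio_wait_bh_oneshot() without taking the AioContext lock first.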
229 lines | 6.5 KiB | C
/*
 * Virtio SCSI dataplane
 *
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/virtio-scsi.h"
#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"

/* Context: QEMU global mutex held */
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

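    /*
     * Choose the AioContext that will run the dataplane: the configured
     * IOThread's context if there is one, otherwise the main loop context.
     */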
    if (vs->conf.iothread) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return;
        }
        s->ctx = iothread_get_aio_context(vs->conf.iothread);
    } else {
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            return;
        }
        s->ctx = qemu_get_aio_context();
    }
}

static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    int rc;

    /* Set up virtqueue notify */
    rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, true);
    if (rc != 0) {
        fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n",
                rc);
        s->dataplane_fenced = true;
        return rc;
    }

    return 0;
}

/* Context: BH in IOThread */
static void virtio_scsi_dataplane_stop_bh(void *opaque)
{
    VirtIOSCSI *s = opaque;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    int i;

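    /*
     * Detach the host notifier (ioeventfd) handlers from the dataplane
     * AioContext so the IOThread stops dequeuing new requests for these
     * virtqueues.
     */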
    virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx);
    virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx);
    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx);
    }
}

/* Context: QEMU global mutex held */
int virtio_scsi_dataplane_start(VirtIODevice *vdev)
{
    int i;
    int rc;
    int vq_init_count = 0;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

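    /*
     * Nothing to do if dataplane is already running, currently starting, or
     * has been fenced by an earlier failure.
     */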
    if (s->dataplane_started ||
        s->dataplane_starting ||
        s->dataplane_fenced) {
        return 0;
    }

    s->dataplane_starting = true;

    /* Set up guest notifier (irq) */
    rc = k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, true);
    if (rc != 0) {
        error_report("virtio-scsi: Failed to set guest notifiers (%d), "
                     "ensure -accel kvm is set.", rc);
        goto fail_guest_notifiers;
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    rc = virtio_scsi_set_host_notifier(s, vs->ctrl_vq, 0);
    if (rc != 0) {
        goto fail_host_notifiers;
    }

    vq_init_count++;
    rc = virtio_scsi_set_host_notifier(s, vs->event_vq, 1);
    if (rc != 0) {
        goto fail_host_notifiers;
    }

    vq_init_count++;

    for (i = 0; i < vs->conf.num_queues; i++) {
        rc = virtio_scsi_set_host_notifier(s, vs->cmd_vqs[i], i + 2);
        if (rc) {
            goto fail_host_notifiers;
        }
        vq_init_count++;
    }

    memory_region_transaction_commit();

    /*
     * These fields are visible to the IOThread so we rely on implicit barriers
     * in aio_context_acquire() on the write side and aio_notify_accept() on
     * the read side.
     */
    s->dataplane_starting = false;
    s->dataplane_started = true;

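    /*
     * Attach the virtqueue host notifier handlers in the dataplane
     * AioContext; from this point virtqueue kicks are serviced by the
     * IOThread (or by the main loop when no iothread is configured).
     */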
    aio_context_acquire(s->ctx);
    virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
    virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);

    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
    }
    aio_context_release(s->ctx);
    return 0;

fail_host_notifiers:
    for (i = 0; i < vq_init_count; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < vq_init_count; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }
    k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
fail_guest_notifiers:
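    /*
     * Fence off dataplane: dataplane_started is set even though startup
     * failed, so requests are handled by the regular virtqueue handlers in
     * the main loop instead of retrying dataplane start.
     */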
    s->dataplane_fenced = true;
    s->dataplane_starting = false;
    s->dataplane_started = true;
    return -ENOSYS;
}

/* Context: QEMU global mutex held */
void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    int i;

    if (!s->dataplane_started || s->dataplane_stopping) {
        return;
    }

    /* Better luck next time. */
    if (s->dataplane_fenced) {
        s->dataplane_fenced = false;
        s->dataplane_started = false;
        return;
    }
    s->dataplane_stopping = true;

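    /*
     * Run virtio_scsi_dataplane_stop_bh() in the dataplane AioContext and
     * wait for it to finish. Per the commit above, the AioContext lock must
     * not be held across aio_wait_bh_oneshot(): it polls, and holding a lock
     * across aio_poll() can deadlock.
     */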
    aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);

    blk_drain_all(); /* ensure there are no in-flight requests */

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < vs->conf.num_queues + 2; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < vs->conf.num_queues + 2; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
    s->dataplane_stopping = false;
    s->dataplane_started = false;
}