virtio: use defer_call() in virtio_irqfd_notify()

virtio-blk and virtio-scsi invoke virtio_irqfd_notify() to send Used
Buffer Notifications from an IOThread. This involves an eventfd
write(2) syscall. Calling this repeatedly when completing multiple I/O
requests in a row is wasteful.

Use the defer_call() API to batch together virtio_irqfd_notify() calls
made during thread pool (aio=threads), Linux AIO (aio=native), and
io_uring (aio=io_uring) completion processing.
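
In each of those code paths the change is just to bracket the existing
completion loop, as the hunks below show. A minimal sketch of the pattern,
assuming QEMU's "qemu/defer-call.h" API; everything except the defer_call*()
calls (MyAioState, my_dequeue_completion(), and so on) is a hypothetical
placeholder, not part of this patch:

    #include "qemu/defer-call.h"

    /* Hypothetical completion loop; only the defer_call*() calls are real API */
    static void my_process_completions(MyAioState *s)
    {
        defer_call_begin();

        while (my_dequeue_completion(s)) {
            /*
             * Request completion callbacks run here. A virtio device's
             * virtio_notify_irqfd() now goes through defer_call() instead
             * of writing the irqfd eventfd immediately.
             */
        }

        defer_call_end(); /* coalesced notifications are sent once, here */
    }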

Behavior is unchanged for emulated devices that do not use
defer_call_begin()/defer_call_end() since defer_call() immediately
invokes the callback when called outside a
defer_call_begin()/defer_call_end() region.
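
For illustration only, a self-contained toy model of that contract (this is
not QEMU's actual implementation; it only demonstrates the
immediate-vs-deferred behavior and keeps at most one pending callback):

    #include <stddef.h>

    typedef void DeferFn(void *opaque);

    static unsigned nesting;      /* depth of nested begin/end regions */
    static DeferFn *pending_fn;   /* at most one deferred call in this toy */
    static void *pending_opaque;

    static void toy_defer_call_begin(void)
    {
        nesting++;
    }

    static void toy_defer_call(DeferFn *fn, void *opaque)
    {
        if (nesting == 0) {
            fn(opaque);           /* outside a region: run immediately */
        } else {
            pending_fn = fn;      /* inside a region: run at outermost end */
            pending_opaque = opaque;
        }
    }

    static void toy_defer_call_end(void)
    {
        if (--nesting == 0 && pending_fn) {
            DeferFn *fn = pending_fn;

            pending_fn = NULL;
            fn(pending_opaque);
        }
    }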

With fio rw=randread bs=4k iodepth=64 numjobs=8, IOPS increases by ~9% with a
single IOThread and 8 vCPUs. With iodepth=1, IOPS decreases by ~1%, but this
could be noise. Detailed performance data and configuration specifics are
available here:
https://gitlab.com/stefanha/virt-playbooks/-/tree/blk_io_plug-irqfd

This duplicates the BH that virtio-blk uses for batching. The next
commit will remove it.

Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20230913200045.1024233-4-stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Author: Stefan Hajnoczi, 2023-09-13 16:00:44 -04:00 (committed by Kevin Wolf)
parent 433fcea40c
commit 84d61e5f36
5 changed files with 28 additions and 1 deletion

block/io_uring.c

@@ -125,6 +125,9 @@ static void luring_process_completions(LuringState *s)
 {
     struct io_uring_cqe *cqes;
     int total_bytes;
+
+    defer_call_begin();
+
     /*
      * Request completion callbacks can run the nested event loop.
      * Schedule ourselves so the nested event loop will "see" remaining
@@ -217,7 +220,10 @@ end:
             aio_co_wake(luringcb->co);
         }
     }
+
     qemu_bh_cancel(s->completion_bh);
+
+    defer_call_end();
 }
 
 static int ioq_submit(LuringState *s)

block/linux-aio.c

@@ -205,6 +205,8 @@ static void qemu_laio_process_completions(LinuxAioState *s)
 {
     struct io_event *events;
 
+    defer_call_begin();
+
     /* Reschedule so nested event loops see currently pending completions */
     qemu_bh_schedule(s->completion_bh);
 
@@ -231,6 +233,8 @@ static void qemu_laio_process_completions(LinuxAioState *s)
      * own `for` loop. If we are the last all counters dropped to zero. */
     s->event_max = 0;
     s->event_idx = 0;
+
+    defer_call_end();
 }
 
 static void qemu_laio_process_completions_and_submit(LinuxAioState *s)

hw/virtio/trace-events

@@ -73,6 +73,7 @@ virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "
 virtqueue_flush(void *vq, unsigned int count) "vq %p count %u"
 virtqueue_pop(void *vq, void *elem, unsigned int in_num, unsigned int out_num) "vq %p elem %p in_num %u out_num %u"
 virtio_queue_notify(void *vdev, int n, void *vq) "vdev %p n %d vq %p"
+virtio_notify_irqfd_deferred_fn(void *vdev, void *vq) "vdev %p vq %p"
 virtio_notify_irqfd(void *vdev, void *vq) "vdev %p vq %p"
 virtio_notify(void *vdev, void *vq) "vdev %p vq %p"
 virtio_set_status(void *vdev, uint8_t val) "vdev %p val %u"

hw/virtio/virtio.c

@@ -15,6 +15,7 @@
 #include "qapi/error.h"
 #include "qapi/qapi-commands-virtio.h"
 #include "trace.h"
+#include "qemu/defer-call.h"
 #include "qemu/error-report.h"
 #include "qemu/log.h"
 #include "qemu/main-loop.h"
@@ -2445,6 +2446,16 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
     }
 }
 
+/* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
+static void virtio_notify_irqfd_deferred_fn(void *opaque)
+{
+    EventNotifier *notifier = opaque;
+    VirtQueue *vq = container_of(notifier, VirtQueue, guest_notifier);
+
+    trace_virtio_notify_irqfd_deferred_fn(vq->vdev, vq);
+    event_notifier_set(notifier);
+}
+
 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
 {
     WITH_RCU_READ_LOCK_GUARD() {
@@ -2471,7 +2482,7 @@ void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
      * to an atomic operation.
      */
     virtio_set_isr(vq->vdev, 0x1);
-    event_notifier_set(&vq->guest_notifier);
+    defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
 }
 
 static void virtio_irq(VirtQueue *vq)

util/thread-pool.c

@@ -15,6 +15,7 @@
  * GNU GPL, version 2 or (at your option) any later version.
  */
 #include "qemu/osdep.h"
+#include "qemu/defer-call.h"
 #include "qemu/queue.h"
 #include "qemu/thread.h"
 #include "qemu/coroutine.h"
@@ -175,6 +176,8 @@ static void thread_pool_completion_bh(void *opaque)
     ThreadPool *pool = opaque;
     ThreadPoolElement *elem, *next;
 
+    defer_call_begin(); /* cb() may use defer_call() to coalesce work */
+
 restart:
     QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
         if (elem->state != THREAD_DONE) {
@@ -208,6 +211,8 @@ restart:
             qemu_aio_unref(elem);
         }
     }
+
+    defer_call_end();
 }
 
 static void thread_pool_cancel(BlockAIOCB *acb)