aio: Introduce aio-epoll.c

To minimize code duplication, epoll is hooked into aio-posix's
aio_poll() instead of rolling its own event loop. This approach is also
switchable at both compile time and run time:

1) When QEMU starts with a small number of fds in the event loop, ppoll
is used.

2) When QEMU starts with a large number of fds, or when more devices
are hot-plugged, epoll kicks in once the number of fds hits the
threshold.

3) Some fds may not support epoll, such as tty-based stdio. In that
case, we fall back to ppoll (a standalone probe for this condition is
sketched after this list).
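
For case 3, the kernel signals lack of epoll support at EPOLL_CTL_ADD
time: epoll_ctl() fails with EPERM for fds that cannot be epolled
(regular files, for example). The probe below is a minimal standalone
illustration of that check and is not part of this patch;
aio_epoll_try_enable() in the diff turns the same failure into a ppoll
fallback:

    /* epoll-probe.c: report whether stdin can live in an epoll set.
     * Build with: gcc -o epoll-probe epoll-probe.c */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/epoll.h>
    #include <unistd.h>

    static bool fd_supports_epoll(int fd, int *err)
    {
        struct epoll_event event = { .events = EPOLLIN };
        int epollfd = epoll_create1(EPOLL_CLOEXEC);
        bool ok;

        if (epollfd < 0) {
            *err = errno;
            return false;
        }
        ok = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event) == 0;
        *err = ok ? 0 : errno;
        close(epollfd);
        return ok;
    }

    int main(void)
    {
        int err;

        if (fd_supports_epoll(STDIN_FILENO, &err)) {
            printf("stdin is epoll-able\n");
        } else {
            /* EPERM here means this fd would force the ppoll path */
            printf("stdin needs ppoll: %s\n", strerror(err));
        }
        return 0;
    }

Running it with stdin redirected from a regular file exercises the
EPERM case.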

A rough benchmark with scsi-disk on virtio-scsi dataplane (epoll is
enabled from 64 disks onward); numbers are in MB/s.

===============================================
             |     master     |     epoll
             |                |
scsi disks # | read    randrw | read    randrw
-------------|----------------|----------------
1            | 86      36     | 92      45
8            | 87      43     | 86      41
64           | 71      32     | 70      38
128          | 48      24     | 58      31
256          | 37      19     | 57      28
===============================================

To comply with aio_{disable,enable}_external, we always use ppoll when
aio_external_disabled() is true.
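
As a sketch of the caller side (my illustration, not code from this
patch): a section that must keep external clients quiescent brackets
its work with the existing aio_{disable,enable}_external helpers,
assuming an AioContext *ctx is already in scope:

    /* While external clients are disabled, aio_epoll_enabled() returns
     * false and aio_poll() falls back to ppoll, with external handlers
     * filtered out by aio_node_check(). */
    aio_disable_external(ctx);

    /* ... work that must not race with external handlers ... */

    aio_enable_external(ctx);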

[Removed #ifdef CONFIG_EPOLL around AioContext epollfd field declaration
since the field is also referenced outside CONFIG_EPOLL code.
--Stefan]

Signed-off-by: Fam Zheng <famz@redhat.com>
Message-id: 1446177989-6702-4-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Fam Zheng, 2015-10-30 12:06:29 +08:00; committed by Stefan Hajnoczi
parent 37fcee5d11, commit fbe3fc5cb3
2 changed files with 188 additions and 1 deletion

diff --git a/aio-posix.c b/aio-posix.c

@@ -17,6 +17,9 @@
 #include "block/block.h"
 #include "qemu/queue.h"
 #include "qemu/sockets.h"
+#ifdef CONFIG_EPOLL
+#include <sys/epoll.h>
+#endif

 struct AioHandler
 {
@@ -29,6 +32,162 @@ struct AioHandler
     QLIST_ENTRY(AioHandler) node;
 };

+#ifdef CONFIG_EPOLL
+
+/* The fd number threshold to switch to epoll */
+#define EPOLL_ENABLE_THRESHOLD 64
+
+static void aio_epoll_disable(AioContext *ctx)
+{
+    ctx->epoll_available = false;
+    if (!ctx->epoll_enabled) {
+        return;
+    }
+    ctx->epoll_enabled = false;
+    close(ctx->epollfd);
+}
+
+static inline int epoll_events_from_pfd(int pfd_events)
+{
+    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
+           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
+           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
+           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
+}
+
+static bool aio_epoll_try_enable(AioContext *ctx)
+{
+    AioHandler *node;
+    struct epoll_event event;
+
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        int r;
+        if (node->deleted || !node->pfd.events) {
+            continue;
+        }
+        event.events = epoll_events_from_pfd(node->pfd.events);
+        event.data.ptr = node;
+        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
+        if (r) {
+            return false;
+        }
+    }
+    ctx->epoll_enabled = true;
+    return true;
+}
+
+static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
+{
+    struct epoll_event event;
+    int r;
+
+    if (!ctx->epoll_enabled) {
+        return;
+    }
+    if (!node->pfd.events) {
+        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, node->pfd.fd, &event);
+        if (r) {
+            aio_epoll_disable(ctx);
+        }
+    } else {
+        event.data.ptr = node;
+        event.events = epoll_events_from_pfd(node->pfd.events);
+        if (is_new) {
+            r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
+            if (r) {
+                aio_epoll_disable(ctx);
+            }
+        } else {
+            r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, node->pfd.fd, &event);
+            if (r) {
+                aio_epoll_disable(ctx);
+            }
+        }
+    }
+}
+
+static int aio_epoll(AioContext *ctx, GPollFD *pfds,
+                     unsigned npfd, int64_t timeout)
+{
+    AioHandler *node;
+    int i, ret = 0;
+    struct epoll_event events[128];
+
+    assert(npfd == 1);
+    assert(pfds[0].fd == ctx->epollfd);
+    if (timeout > 0) {
+        ret = qemu_poll_ns(pfds, npfd, timeout);
+    }
+    if (timeout <= 0 || ret > 0) {
+        ret = epoll_wait(ctx->epollfd, events,
+                         sizeof(events) / sizeof(events[0]),
+                         timeout);
+        if (ret <= 0) {
+            goto out;
+        }
+        for (i = 0; i < ret; i++) {
+            int ev = events[i].events;
+            node = events[i].data.ptr;
+            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
+                (ev & EPOLLOUT ? G_IO_OUT : 0) |
+                (ev & EPOLLHUP ? G_IO_HUP : 0) |
+                (ev & EPOLLERR ? G_IO_ERR : 0);
+        }
+    }
+out:
+    return ret;
+}
+
+static bool aio_epoll_enabled(AioContext *ctx)
+{
+    /* Fall back to ppoll when external clients are disabled. */
+    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
+}
+
+static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
+                                 unsigned npfd, int64_t timeout)
+{
+    if (!ctx->epoll_available) {
+        return false;
+    }
+    if (aio_epoll_enabled(ctx)) {
+        return true;
+    }
+    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
+        if (aio_epoll_try_enable(ctx)) {
+            return true;
+        } else {
+            aio_epoll_disable(ctx);
+        }
+    }
+    return false;
+}
+
+#else
+
+static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
+{
+}
+
+static int aio_epoll(AioContext *ctx, GPollFD *pfds,
+                     unsigned npfd, int64_t timeout)
+{
+    assert(false);
+}
+
+static bool aio_epoll_enabled(AioContext *ctx)
+{
+    return false;
+}
+
+static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
+                                 unsigned npfd, int64_t timeout)
+{
+    return false;
+}
+
+#endif
+
 static AioHandler *find_aio_handler(AioContext *ctx, int fd)
 {
     AioHandler *node;
@@ -50,6 +209,7 @@ void aio_set_fd_handler(AioContext *ctx,
                         void *opaque)
 {
     AioHandler *node;
+    bool is_new = false;

     node = find_aio_handler(ctx, fd);
@@ -79,6 +239,7 @@ void aio_set_fd_handler(AioContext *ctx,
         QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
         g_source_add_poll(&ctx->source, &node->pfd);
+        is_new = true;
     }
     /* Update handler with latest information */
     node->io_read = io_read;
@@ -90,6 +251,7 @@ void aio_set_fd_handler(AioContext *ctx,
         node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
     }

+    aio_epoll_update(ctx, node, is_new);
     aio_notify(ctx);
 }
@@ -262,6 +424,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* fill pollfds */
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         if (!node->deleted && node->pfd.events
+            && !aio_epoll_enabled(ctx)
             && aio_node_check(ctx, node->is_external)) {
             add_pollfd(node);
         }
@@ -273,7 +436,17 @@ bool aio_poll(AioContext *ctx, bool blocking)
     if (timeout) {
         aio_context_release(ctx);
     }
-    ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
+    if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
+        AioHandler epoll_handler;
+
+        epoll_handler.pfd.fd = ctx->epollfd;
+        epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
+        npfd = 0;
+        add_pollfd(&epoll_handler);
+        ret = aio_epoll(ctx, pollfds, npfd, timeout);
+    } else {
+        ret = qemu_poll_ns(pollfds, npfd, timeout);
+    }
     if (blocking) {
         atomic_sub(&ctx->notify_me, 2);
     }
@@ -305,4 +478,13 @@ bool aio_poll(AioContext *ctx, bool blocking)

 void aio_context_setup(AioContext *ctx, Error **errp)
 {
+#ifdef CONFIG_EPOLL
+    assert(!ctx->epollfd);
+    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
+    if (ctx->epollfd == -1) {
+        ctx->epoll_available = false;
+    } else {
+        ctx->epoll_available = true;
+    }
+#endif
 }

diff --git a/include/block/aio.h b/include/block/aio.h

@@ -124,6 +124,11 @@ struct AioContext {
     QEMUTimerListGroup tlg;

     int external_disable_cnt;
+
+    /* epoll(7) state used when built with CONFIG_EPOLL */
+    int epollfd;
+    bool epoll_enabled;
+    bool epoll_available;
 };

 /**