aio: add I/O handlers to the AioContext interface

With this patch, I/O handlers (including event notifier handlers) can be
attached to a single AioContext.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Paolo Bonzini, 2012-09-13 12:28:51 +02:00
Commit: a915f4bc97 (parent f627aab1cc)
4 changed files with 101 additions and 48 deletions
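
For illustration, here is a minimal sketch of how a caller might use the per-context interface added by this patch (hypothetical code, not part of the commit; the handler names and the always-busy flush callback are invented):

/* Hypothetical caller of the AioContext API declared in qemu-aio.h below. */
#include "qemu-aio.h"
#include "event_notifier.h"

static void my_notifier_read(EventNotifier *e)
{
    event_notifier_test_and_clear(e);
    /* ... complete the requests signalled through this notifier ... */
}

static int my_notifier_active(EventNotifier *e)
{
    return 1;   /* pretend requests are always in flight */
}

static void example(EventNotifier *e)
{
    AioContext *ctx = aio_context_new();

    /* Attach the notifier's handlers to this context only. */
    aio_set_event_notifier(ctx, e, my_notifier_read, my_notifier_active);

    /* Run pending bottom halves, then block until the notifier fires
     * and its handler has been dispatched. */
    aio_wait(ctx);

    /* Passing NULL handlers unregisters the notifier again. */
    aio_set_event_notifier(ctx, e, NULL, NULL);
}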

aio.c

@@ -18,15 +18,6 @@
 #include "qemu-queue.h"
 #include "qemu_socket.h"
 
-/* The list of registered AIO handlers */
-static QLIST_HEAD(, AioHandler) aio_handlers;
-
-/* This is a simple lock used to protect the aio_handlers list.  Specifically,
- * it's used to ensure that no callbacks are removed while we're walking and
- * dispatching callbacks.
- */
-static int walking_handlers;
-
 struct AioHandler
 {
     int fd;
@@ -38,11 +29,11 @@ struct AioHandler
     QLIST_ENTRY(AioHandler) node;
 };
 
-static AioHandler *find_aio_handler(int fd)
+static AioHandler *find_aio_handler(AioContext *ctx, int fd)
 {
     AioHandler *node;
 
-    QLIST_FOREACH(node, &aio_handlers, node) {
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         if (node->fd == fd)
             if (!node->deleted)
                 return node;
@@ -51,7 +42,8 @@ static AioHandler *find_aio_handler(int fd)
     return NULL;
 }
 
-void qemu_aio_set_fd_handler(int fd,
+void aio_set_fd_handler(AioContext *ctx,
+                        int fd,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioFlushHandler *io_flush,
@@ -59,13 +51,13 @@ void qemu_aio_set_fd_handler(int fd,
 {
     AioHandler *node;
 
-    node = find_aio_handler(fd);
+    node = find_aio_handler(ctx, fd);
 
     /* Are we deleting the fd handler? */
     if (!io_read && !io_write) {
         if (node) {
             /* If the lock is held, just mark the node as deleted */
-            if (walking_handlers)
+            if (ctx->walking_handlers)
                 node->deleted = 1;
             else {
                 /* Otherwise, delete it for real.  We can't just mark it as
@@ -81,7 +73,7 @@ void qemu_aio_set_fd_handler(int fd,
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->fd = fd;
-           QLIST_INSERT_HEAD(&aio_handlers, node, node);
+           QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
@@ -89,25 +81,19 @@ void qemu_aio_set_fd_handler(int fd,
         node->io_flush = io_flush;
         node->opaque = opaque;
     }
-
-    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
 }
 
-void qemu_aio_set_event_notifier(EventNotifier *notifier,
+void aio_set_event_notifier(AioContext *ctx,
+                            EventNotifier *notifier,
                             EventNotifierHandler *io_read,
                             AioFlushEventNotifierHandler *io_flush)
 {
-    qemu_aio_set_fd_handler(event_notifier_get_fd(notifier),
+    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                        (IOHandler *)io_read, NULL,
                        (AioFlushHandler *)io_flush, notifier);
 }
 
-void qemu_aio_flush(void)
-{
-    while (qemu_aio_wait());
-}
-
-bool qemu_aio_wait(void)
+bool aio_wait(AioContext *ctx)
 {
     AioHandler *node;
     fd_set rdfds, wrfds;
@@ -120,18 +106,18 @@ bool qemu_aio_wait(void)
      * Do not call select in this case, because it is possible that the caller
      * does not need a complete flush (as is the case for qemu_aio_wait loops).
      */
-    if (qemu_bh_poll()) {
+    if (aio_bh_poll(ctx)) {
         return true;
     }
 
-    walking_handlers++;
+    ctx->walking_handlers++;
 
     FD_ZERO(&rdfds);
     FD_ZERO(&wrfds);
 
     /* fill fd sets */
     busy = false;
-    QLIST_FOREACH(node, &aio_handlers, node) {
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         /* If there aren't pending AIO operations, don't invoke callbacks.
          * Otherwise, if there are no AIO requests, qemu_aio_wait() would
          * wait indefinitely.
@@ -152,7 +138,7 @@ bool qemu_aio_wait(void)
         }
     }
 
-    walking_handlers--;
+    ctx->walking_handlers--;
 
     /* No AIO operations?  Get us out of here */
     if (!busy) {
@@ -166,11 +152,11 @@ bool qemu_aio_wait(void)
     if (ret > 0) {
         /* we have to walk very carefully in case
          * qemu_aio_set_fd_handler is called while we're walking */
-        node = QLIST_FIRST(&aio_handlers);
+        node = QLIST_FIRST(&ctx->aio_handlers);
         while (node) {
             AioHandler *tmp;
 
-            walking_handlers++;
+            ctx->walking_handlers++;
 
             if (!node->deleted &&
                 FD_ISSET(node->fd, &rdfds) &&
@@ -186,9 +172,9 @@ bool qemu_aio_wait(void)
             tmp = node;
             node = QLIST_NEXT(node, node);
 
-            walking_handlers--;
+            ctx->walking_handlers--;
 
-            if (!walking_handlers && tmp->deleted) {
+            if (!ctx->walking_handlers && tmp->deleted) {
                 QLIST_REMOVE(tmp, node);
                 g_free(tmp);
             }

async.c

@@ -136,7 +136,13 @@ void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
     }
 }
 
 AioContext *aio_context_new(void)
 {
     return g_new0(AioContext, 1);
 }
+
+void aio_flush(AioContext *ctx)
+{
+    while (aio_wait(ctx));
+}

main-loop.c

@@ -526,3 +526,36 @@ int qemu_bh_poll(void)
 {
     return aio_bh_poll(qemu_aio_context);
 }
+
+void qemu_aio_flush(void)
+{
+    aio_flush(qemu_aio_context);
+}
+
+bool qemu_aio_wait(void)
+{
+    return aio_wait(qemu_aio_context);
+}
+
+void qemu_aio_set_fd_handler(int fd,
+                             IOHandler *io_read,
+                             IOHandler *io_write,
+                             AioFlushHandler *io_flush,
+                             void *opaque)
+{
+    aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, io_flush,
+                       opaque);
+
+    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
+}
+
+#ifdef CONFIG_POSIX
+void qemu_aio_set_event_notifier(EventNotifier *notifier,
+                                 EventNotifierHandler *io_read,
+                                 AioFlushEventNotifierHandler *io_flush)
+{
+    qemu_aio_set_fd_handler(event_notifier_get_fd(notifier),
+                            (IOHandler *)io_read, NULL,
+                            (AioFlushHandler *)io_flush, notifier);
+}
+#endif

qemu-aio.h

@@ -15,6 +15,7 @@
 #define QEMU_AIO_H
 
 #include "qemu-common.h"
+#include "qemu-queue.h"
 #include "event_notifier.h"
 
 typedef struct BlockDriverAIOCB BlockDriverAIOCB;
@@ -43,6 +44,15 @@ typedef void QEMUBHFunc(void *opaque);
 typedef void IOHandler(void *opaque);
 
 typedef struct AioContext {
+    /* The list of registered AIO handlers */
+    QLIST_HEAD(, AioHandler) aio_handlers;
+
+    /* This is a simple lock used to protect the aio_handlers list.
+     * Specifically, it's used to ensure that no callbacks are removed while
+     * we're walking and dispatching callbacks.
+     */
+    int walking_handlers;
+
     /* Anchor of the list of Bottom Halves belonging to the context */
     struct QEMUBH *first_bh;
@@ -121,7 +131,7 @@ void qemu_bh_delete(QEMUBH *bh);
 
 /* Flush any pending AIO operation.  This function will block until all
  * outstanding AIO operations have been completed or cancelled. */
-void qemu_aio_flush(void);
+void aio_flush(AioContext *ctx);
 
 /* Wait for a single AIO completion to occur.  This function will wait
  * until a single AIO event has completed and it will ensure something
@@ -129,7 +139,7 @@ void qemu_aio_flush(void);
  * result of executing I/O completion or bh callbacks.
  *
  * Return whether there is still any pending AIO operation. */
-bool qemu_aio_wait(void);
+bool aio_wait(AioContext *ctx);
 
 #ifdef CONFIG_POSIX
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
@@ -142,7 +152,8 @@ typedef int (AioFlushHandler)(void *opaque);
  * Code that invokes AIO completion functions should rely on this function
  * instead of qemu_set_fd_handler[2].
  */
-void qemu_aio_set_fd_handler(int fd,
+void aio_set_fd_handler(AioContext *ctx,
+                        int fd,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioFlushHandler *io_flush,
@@ -156,8 +167,25 @@ void qemu_aio_set_fd_handler(int fd,
  * Code that invokes AIO completion functions should rely on this function
  * instead of event_notifier_set_handler.
  */
+void aio_set_event_notifier(AioContext *ctx,
+                            EventNotifier *notifier,
+                            EventNotifierHandler *io_read,
+                            AioFlushEventNotifierHandler *io_flush);
+
+/* Functions to operate on the main QEMU AioContext.  */
+
+void qemu_aio_flush(void);
+bool qemu_aio_wait(void);
 void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                  EventNotifierHandler *io_read,
                                  AioFlushEventNotifierHandler *io_flush);
 
+#ifdef CONFIG_POSIX
+void qemu_aio_set_fd_handler(int fd,
+                             IOHandler *io_read,
+                             IOHandler *io_write,
+                             AioFlushHandler *io_flush,
+                             void *opaque);
+#endif
+
 #endif
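
Finally, a hypothetical fd-based user of the same interface (all names are invented for this sketch; the io_flush callback returns nonzero while requests are outstanding, which is what keeps aio_flush() looping):

#include "qemu-aio.h"

/* Invented state for the example: a file descriptor that carries
 * completion messages plus a counter of requests still in flight. */
typedef struct ExampleClient {
    int fd;
    int in_flight;
} ExampleClient;

static void example_read(void *opaque)
{
    ExampleClient *c = opaque;
    /* ... read one completion from c->fd and finish that request ... */
    c->in_flight--;
}

static int example_flush(void *opaque)
{
    ExampleClient *c = opaque;
    return c->in_flight > 0;    /* nonzero: keep waiting in aio_wait() */
}

static void example_drain(AioContext *ctx, ExampleClient *c)
{
    aio_set_fd_handler(ctx, c->fd, example_read, NULL, example_flush, c);

    /* Block until every outstanding request has completed. */
    aio_flush(ctx);

    /* NULL read/write handlers remove the fd from the context again. */
    aio_set_fd_handler(ctx, c->fd, NULL, NULL, NULL, NULL);
}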