gluster: Convert aio routines into coroutines

Convert the read, write, flush and discard implementations from aio-based
ones to coroutine-based ones.

Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Authored by Bharata B Rao on 2013-12-21 14:51:24 +05:30, committed by Kevin Wolf
Parent: 92397116a6
Commit: 15744b0b8f

View File

@@ -21,19 +21,15 @@
 #include "qemu/uri.h"
 
 typedef struct GlusterAIOCB {
-    BlockDriverAIOCB common;
     int64_t size;
     int ret;
-    bool *finished;
     QEMUBH *bh;
+    Coroutine *coroutine;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
     struct glfs *glfs;
-    int fds[2];
     struct glfs_fd *fd;
-    int event_reader_pos;
-    GlusterAIOCB *event_acb;
 } BDRVGlusterState;
 
 #define GLUSTER_FD_READ 0
@@ -231,46 +227,13 @@ out:
     return NULL;
 }
 
-static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s)
+static void qemu_gluster_complete_aio(void *opaque)
 {
-    int ret;
-    bool *finished = acb->finished;
-    BlockDriverCompletionFunc *cb = acb->common.cb;
-    void *opaque = acb->common.opaque;
+    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
 
-    if (!acb->ret || acb->ret == acb->size) {
-        ret = 0; /* Success */
-    } else if (acb->ret < 0) {
-        ret = acb->ret; /* Read/Write failed */
-    } else {
-        ret = -EIO; /* Partial read/write - fail it */
-    }
-
-    qemu_aio_release(acb);
-    cb(opaque, ret);
-    if (finished) {
-        *finished = true;
-    }
-}
-
-static void qemu_gluster_aio_event_reader(void *opaque)
-{
-    BDRVGlusterState *s = opaque;
-    ssize_t ret;
-
-    do {
-        char *p = (char *)&s->event_acb;
-        ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
-                   sizeof(s->event_acb) - s->event_reader_pos);
-        if (ret > 0) {
-            s->event_reader_pos += ret;
-            if (s->event_reader_pos == sizeof(s->event_acb)) {
-                s->event_reader_pos = 0;
-                qemu_gluster_complete_aio(s->event_acb, s);
-            }
-        }
-    } while (ret < 0 && errno == EINTR);
+    qemu_bh_delete(acb->bh);
+    acb->bh = NULL;
+    qemu_coroutine_enter(acb->coroutine, NULL);
 }
 
 /* TODO Convert to fine grained options */
@@ -309,7 +272,6 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
     filename = qemu_opt_get(opts, "filename");
 
-
     s->glfs = qemu_gluster_init(gconf, filename);
     if (!s->glfs) {
         ret = -errno;
@@ -329,18 +291,8 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
     s->fd = glfs_open(s->glfs, gconf->image, open_flags);
     if (!s->fd) {
         ret = -errno;
-        goto out;
     }
 
-    ret = qemu_pipe(s->fds);
-    if (ret < 0) {
-        ret = -errno;
-        goto out;
-    }
-    fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
-    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
-                            qemu_gluster_aio_event_reader, NULL, s);
-
 out:
     qemu_opts_del(opts);
     qemu_gluster_gconf_free(gconf);
@@ -398,58 +350,37 @@ out:
     return ret;
 }
 
-static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
-{
-    GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
-    bool finished = false;
-
-    acb->finished = &finished;
-    while (!finished) {
-        qemu_aio_wait();
-    }
-}
-
-static const AIOCBInfo gluster_aiocb_info = {
-    .aiocb_size = sizeof(GlusterAIOCB),
-    .cancel = qemu_gluster_aio_cancel,
-};
-
+/*
+ * AIO callback routine called from GlusterFS thread.
+ */
 static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
 {
     GlusterAIOCB *acb = (GlusterAIOCB *)arg;
-    BlockDriverState *bs = acb->common.bs;
-    BDRVGlusterState *s = bs->opaque;
-    int retval;
 
-    acb->ret = ret;
-    retval = qemu_write_full(s->fds[GLUSTER_FD_WRITE], &acb, sizeof(acb));
-    if (retval != sizeof(acb)) {
-        /*
-         * Gluster AIO callback thread failed to notify the waiting
-         * QEMU thread about IO completion.
-         */
-        error_report("Gluster AIO completion failed: %s", strerror(errno));
-        abort();
+    if (!ret || ret == acb->size) {
+        acb->ret = 0; /* Success */
+    } else if (ret < 0) {
+        acb->ret = ret; /* Read/Write failed */
+    } else {
+        acb->ret = -EIO; /* Partial read/write - fail it */
     }
+
+    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
+    qemu_bh_schedule(acb->bh);
 }
 
-static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
-        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
-        BlockDriverCompletionFunc *cb, void *opaque, int write)
+static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
 {
     int ret;
-    GlusterAIOCB *acb;
+    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
     BDRVGlusterState *s = bs->opaque;
-    size_t size;
-    off_t offset;
+    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
+    off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
-    offset = sector_num * BDRV_SECTOR_SIZE;
-    size = nb_sectors * BDRV_SECTOR_SIZE;
-
-    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
     acb->size = size;
     acb->ret = 0;
-    acb->finished = NULL;
+    acb->coroutine = qemu_coroutine_self();
 
     if (write) {
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
@@ -460,13 +391,16 @@ static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
     }
 
     if (ret < 0) {
+        ret = -errno;
         goto out;
     }
-    return &acb->common;
 
+    qemu_coroutine_yield();
+    ret = acb->ret;
+
 out:
-    qemu_aio_release(acb);
-    return NULL;
+    g_slice_free(GlusterAIOCB, acb);
+    return ret;
 }
 
 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
@@ -482,71 +416,68 @@ static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
     return 0;
 }
 
-static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
-        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
-        BlockDriverCompletionFunc *cb, void *opaque)
+static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
 {
-    return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
+    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
 }
 
-static BlockDriverAIOCB *qemu_gluster_aio_writev(BlockDriverState *bs,
-        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
-        BlockDriverCompletionFunc *cb, void *opaque)
+static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
 {
-    return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
+    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
 }
 
-static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
-        BlockDriverCompletionFunc *cb, void *opaque)
+static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
 {
     int ret;
-    GlusterAIOCB *acb;
+    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
     BDRVGlusterState *s = bs->opaque;
 
-    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
     acb->size = 0;
     acb->ret = 0;
-    acb->finished = NULL;
+    acb->coroutine = qemu_coroutine_self();
 
     ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
     if (ret < 0) {
+        ret = -errno;
         goto out;
     }
-    return &acb->common;
 
+    qemu_coroutine_yield();
+    ret = acb->ret;
+
 out:
-    qemu_aio_release(acb);
-    return NULL;
+    g_slice_free(GlusterAIOCB, acb);
+    return ret;
 }
 
 #ifdef CONFIG_GLUSTERFS_DISCARD
-static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs,
-        int64_t sector_num, int nb_sectors, BlockDriverCompletionFunc *cb,
-        void *opaque)
+static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors)
 {
     int ret;
-    GlusterAIOCB *acb;
+    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
     BDRVGlusterState *s = bs->opaque;
-    size_t size;
-    off_t offset;
+    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
+    off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
-    offset = sector_num * BDRV_SECTOR_SIZE;
-    size = nb_sectors * BDRV_SECTOR_SIZE;
-
-    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
     acb->size = 0;
     acb->ret = 0;
-    acb->finished = NULL;
+    acb->coroutine = qemu_coroutine_self();
 
     ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
     if (ret < 0) {
+        ret = -errno;
        goto out;
     }
-    return &acb->common;
 
+    qemu_coroutine_yield();
+    ret = acb->ret;
+
 out:
-    qemu_aio_release(acb);
-    return NULL;
+    g_slice_free(GlusterAIOCB, acb);
+    return ret;
 }
 #endif
@@ -581,10 +512,6 @@ static void qemu_gluster_close(BlockDriverState *bs)
 {
     BDRVGlusterState *s = bs->opaque;
 
-    close(s->fds[GLUSTER_FD_READ]);
-    close(s->fds[GLUSTER_FD_WRITE]);
-    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
-
     if (s->fd) {
         glfs_close(s->fd);
         s->fd = NULL;
@@ -618,12 +545,12 @@ static BlockDriver bdrv_gluster = {
     .bdrv_getlength               = qemu_gluster_getlength,
     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
     .bdrv_truncate                = qemu_gluster_truncate,
-    .bdrv_aio_readv               = qemu_gluster_aio_readv,
-    .bdrv_aio_writev              = qemu_gluster_aio_writev,
-    .bdrv_aio_flush               = qemu_gluster_aio_flush,
+    .bdrv_co_readv                = qemu_gluster_co_readv,
+    .bdrv_co_writev               = qemu_gluster_co_writev,
+    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
 #ifdef CONFIG_GLUSTERFS_DISCARD
-    .bdrv_aio_discard             = qemu_gluster_aio_discard,
+    .bdrv_co_discard              = qemu_gluster_co_discard,
 #endif
     .create_options               = qemu_gluster_create_options,
 };
@@ -639,12 +566,12 @@ static BlockDriver bdrv_gluster_tcp = {
     .bdrv_getlength               = qemu_gluster_getlength,
     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
     .bdrv_truncate                = qemu_gluster_truncate,
-    .bdrv_aio_readv               = qemu_gluster_aio_readv,
-    .bdrv_aio_writev              = qemu_gluster_aio_writev,
-    .bdrv_aio_flush               = qemu_gluster_aio_flush,
+    .bdrv_co_readv                = qemu_gluster_co_readv,
+    .bdrv_co_writev               = qemu_gluster_co_writev,
+    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
 #ifdef CONFIG_GLUSTERFS_DISCARD
-    .bdrv_aio_discard             = qemu_gluster_aio_discard,
+    .bdrv_co_discard              = qemu_gluster_co_discard,
 #endif
     .create_options               = qemu_gluster_create_options,
 };
@@ -660,12 +587,12 @@ static BlockDriver bdrv_gluster_unix = {
     .bdrv_getlength               = qemu_gluster_getlength,
     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
     .bdrv_truncate                = qemu_gluster_truncate,
-    .bdrv_aio_readv               = qemu_gluster_aio_readv,
-    .bdrv_aio_writev              = qemu_gluster_aio_writev,
-    .bdrv_aio_flush               = qemu_gluster_aio_flush,
+    .bdrv_co_readv                = qemu_gluster_co_readv,
+    .bdrv_co_writev               = qemu_gluster_co_writev,
+    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
 #ifdef CONFIG_GLUSTERFS_DISCARD
-    .bdrv_aio_discard             = qemu_gluster_aio_discard,
+    .bdrv_co_discard              = qemu_gluster_co_discard,
 #endif
     .create_options               = qemu_gluster_create_options,
 };
@@ -681,12 +608,12 @@ static BlockDriver bdrv_gluster_rdma = {
     .bdrv_getlength               = qemu_gluster_getlength,
     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
     .bdrv_truncate                = qemu_gluster_truncate,
-    .bdrv_aio_readv               = qemu_gluster_aio_readv,
-    .bdrv_aio_writev              = qemu_gluster_aio_writev,
-    .bdrv_aio_flush               = qemu_gluster_aio_flush,
+    .bdrv_co_readv                = qemu_gluster_co_readv,
+    .bdrv_co_writev               = qemu_gluster_co_writev,
+    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
 #ifdef CONFIG_GLUSTERFS_DISCARD
-    .bdrv_aio_discard             = qemu_gluster_aio_discard,
+    .bdrv_co_discard              = qemu_gluster_co_discard,
 #endif
     .create_options               = qemu_gluster_create_options,
 };