AioContext: take bottom halves into account when computing aio_poll timeout
Right now, QEMU invokes aio_bh_poll before the "poll" phase of aio_poll.  It
is simpler to do it afterwards and skip the "poll" phase altogether when the
OS-dependent parts of AioContext are invoked from GSource.  This way,
AioContext behaves more similarly when used as a GSource vs. when used as
stand-alone.

As a start, take bottom halves into account when computing the poll timeout.
If a bottom half is ready, do a non-blocking poll.  As a side effect, this
makes idle bottom halves work with aio_poll; an improvement, but not really
an important one since they are deprecated.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 845ca10dd0
parent 3cbbe9fd1f
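The policy this commit introduces is easy to state on its own.  The sketch
below is a minimal, self-contained model of it, not QEMU code
(compute_timeout and soonest are illustrative stand-ins for
aio_compute_timeout and qemu_soonest_timeout): a scheduled non-idle bottom
half forces a zero timeout so the poll does not block; an idle one caps the
timeout at 10 ms; otherwise the earliest timer deadline wins, with -1
meaning "block forever".

    /* Minimal model of the timeout policy introduced by this commit.
     * Illustrative code only, not QEMU's implementation. */
    #include <stdint.h>
    #include <stdio.h>

    /* Paraphrase of QEMU's qemu_soonest_timeout(): pick the smaller of two
     * nanosecond timeouts, treating -1 (infinite) as the largest value by
     * comparing as unsigned. */
    static int64_t soonest(int64_t a, int64_t b)
    {
        return ((uint64_t)a < (uint64_t)b) ? a : b;
    }

    /* Model of aio_compute_timeout(): bh_scheduled/bh_idle stand in for the
     * walk over ctx->first_bh, deadline_ns for the result of
     * timerlistgroup_deadline_ns(). */
    static int64_t compute_timeout(int bh_scheduled, int bh_idle,
                                   int64_t deadline_ns)
    {
        int64_t timeout = -1;

        if (bh_scheduled) {
            if (bh_idle) {
                timeout = 10000000;   /* idle BHs polled at least every 10 ms */
            } else {
                return 0;             /* ready BH: the poll must not block */
            }
        }
        return soonest(timeout, deadline_ns);
    }

    int main(void)
    {
        printf("%lld\n", (long long)compute_timeout(1, 0, -1));       /* 0 */
        printf("%lld\n", (long long)compute_timeout(1, 1, 50000000)); /* 10000000 */
        printf("%lld\n", (long long)compute_timeout(0, 0, 5000000));  /* 5000000 */
        printf("%lld\n", (long long)compute_timeout(0, 0, -1));       /* -1 */
        return 0;
    }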
aio-posix.c
@@ -249,7 +249,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* wait until next event */
     ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
                        ctx->pollfds->len,
-                       blocking ? timerlistgroup_deadline_ns(&ctx->tlg) : 0);
+                       blocking ? aio_compute_timeout(ctx) : 0);
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
aio-win32.c
@@ -165,8 +165,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     while (count > 0) {
         int ret;
 
-        timeout = blocking ?
-            qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)) : 0;
+        timeout = blocking
+            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
 
         /* if we have any signaled events, dispatch event */
async.c (32 lines changed)
@@ -152,39 +152,43 @@ void qemu_bh_delete(QEMUBH *bh)
     bh->deleted = 1;
 }
 
-static gboolean
-aio_ctx_prepare(GSource *source, gint *timeout)
+int64_t
+aio_compute_timeout(AioContext *ctx)
 {
-    AioContext *ctx = (AioContext *) source;
+    int64_t deadline;
+    int timeout = -1;
     QEMUBH *bh;
-    int deadline;
 
-    /* We assume there is no timeout already supplied */
-    *timeout = -1;
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
-                *timeout = 10;
+                timeout = 10000000;
             } else {
                 /* non-idle bottom halves will be executed
                  * immediately */
-                *timeout = 0;
-                return true;
+                return 0;
             }
         }
     }
 
-    deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
+    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
     if (deadline == 0) {
-        *timeout = 0;
-        return true;
+        return 0;
     } else {
-        *timeout = qemu_soonest_timeout(*timeout, deadline);
+        return qemu_soonest_timeout(timeout, deadline);
     }
+}
 
-    return false;
+static gboolean
+aio_ctx_prepare(GSource *source, gint *timeout)
+{
+    AioContext *ctx = (AioContext *) source;
+
+    /* We assume there is no timeout already supplied */
+    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
+    return *timeout == 0;
 }
 
 static gboolean
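For context, glib's GSourceFuncs.prepare contract is: return TRUE if the
source can be dispatched without polling, and store a millisecond timeout
(or -1 for none) through *timeout.  The new aio_ctx_prepare therefore
reports "ready" exactly when the computed timeout is zero.  Below is a
hedged sketch of driving an AioContext through a glib main loop instead of
calling aio_poll() directly; aio_get_g_source() is existing QEMU API, but
the surrounding loop is illustrative:

    #include <glib.h>
    #include "block/aio.h"

    /* Sketch: attach the AioContext's GSource to the default glib main
     * context and let glib call aio_ctx_prepare()/check()/dispatch().
     * With the change above, a scheduled bottom half makes prepare()
     * return TRUE with *timeout == 0, so the glib poll does not block. */
    static void run_with_glib(AioContext *ctx)
    {
        GSource *source = aio_get_g_source(ctx);

        g_source_attach(source, g_main_context_default());
        g_source_unref(source);   /* the main context holds a reference now */

        for (;;) {
            g_main_context_iteration(g_main_context_default(), TRUE);
        }
    }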
include/block/aio.h
@@ -303,4 +303,12 @@ static inline void aio_timer_init(AioContext *ctx,
     timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque);
 }
 
+/**
+ * aio_compute_timeout:
+ * @ctx: the aio context
+ *
+ * Compute the timeout that a blocking aio_poll should use.
+ */
+int64_t aio_compute_timeout(AioContext *ctx);
+
 #endif