block: Let bdrv_drain_all() to call aio_poll() for each AioContext

After commit 9b536adc ("block: acquire AioContext in
bdrv_drain_all()"), aio_poll() is called for every BlockDriverState on
the assumption that every device may have its own AioContext. With
thousands of disks attached there are many BlockDriverStates but only a
few AioContexts, which results in a large number of unnecessary
aio_poll() calls.

This patch changes bdrv_drain_all() so that it tracks which AioContexts
it has already seen and calls aio_poll() only once per unique
AioContext.
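
In miniature, the approach is: remember each AioContext the first time
it is seen, then poll each remembered context once per iteration instead
of once per disk. The following self-contained GLib sketch illustrates
only that GSList-based deduplication pattern; FakeCtx, disk_ctx and the
printf are made-up stand-ins for AioContext, the per-disk contexts and
aio_poll(), and are not part of the patch itself.

  /* build: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
  #include <glib.h>
  #include <stdio.h>

  typedef struct { const char *name; } FakeCtx;  /* stand-in for AioContext */

  int main(void)
  {
      FakeCtx ctx_a = { "ctx-a" }, ctx_b = { "ctx-b" };
      /* many "disks", but only two distinct contexts */
      FakeCtx *disk_ctx[] = { &ctx_a, &ctx_a, &ctx_b, &ctx_a, &ctx_b };
      GSList *aio_ctxs = NULL, *ctx;
      guint i;

      /* first pass: remember each context only once */
      for (i = 0; i < G_N_ELEMENTS(disk_ctx); i++) {
          if (!g_slist_find(aio_ctxs, disk_ctx[i])) {
              aio_ctxs = g_slist_prepend(aio_ctxs, disk_ctx[i]);
          }
      }

      /* second pass: "poll" each unique context exactly once */
      for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
          FakeCtx *c = ctx->data;
          printf("aio_poll(%s)\n", c->name);  /* stands in for aio_poll() */
      }

      g_slist_free(aio_ctxs);
      return 0;
  }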

Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Alexander Yarygin <yarygin@linux.vnet.ibm.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-id: 1433936297-7098-4-git-send-email-yarygin@linux.vnet.ibm.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit f406c03c09
parent a3206972a9
Author: Alexander Yarygin, 2015-06-10 14:38:17 +03:00 (committed by Stefan Hajnoczi)

@@ -233,17 +233,6 @@ static bool bdrv_requests_pending(BlockDriverState *bs)
     return false;
 }
 
-static bool bdrv_drain_one(BlockDriverState *bs)
-{
-    bool bs_busy;
-
-    bdrv_flush_io_queue(bs);
-    bdrv_start_throttled_reqs(bs);
-    bs_busy = bdrv_requests_pending(bs);
-    bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
-    return bs_busy;
-}
-
 /*
  * Wait for pending requests to complete on a single BlockDriverState subtree
  *
@@ -256,8 +245,13 @@ static bool bdrv_drain_one(BlockDriverState *bs)
  */
 void bdrv_drain(BlockDriverState *bs)
 {
-    while (bdrv_drain_one(bs)) {
+    bool busy = true;
+
+    while (busy) {
         /* Keep iterating */
+        bdrv_flush_io_queue(bs);
+        busy = bdrv_requests_pending(bs);
+        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
     }
 }
 
@@ -278,6 +272,7 @@ void bdrv_drain_all(void)
     /* Always run first iteration so any pending completion BHs run */
     bool busy = true;
     BlockDriverState *bs = NULL;
+    GSList *aio_ctxs = NULL, *ctx;
 
     while ((bs = bdrv_next(bs))) {
         AioContext *aio_context = bdrv_get_aio_context(bs);
@@ -287,17 +282,30 @@ void bdrv_drain_all(void)
             block_job_pause(bs->job);
         }
         aio_context_release(aio_context);
+
+        if (!aio_ctxs || !g_slist_find(aio_ctxs, aio_context)) {
+            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
+        }
     }
 
     while (busy) {
         busy = false;
-        bs = NULL;
 
-        while ((bs = bdrv_next(bs))) {
-            AioContext *aio_context = bdrv_get_aio_context(bs);
+        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
+            AioContext *aio_context = ctx->data;
+            bs = NULL;
 
             aio_context_acquire(aio_context);
-            busy |= bdrv_drain_one(bs);
+            while ((bs = bdrv_next(bs))) {
+                if (aio_context == bdrv_get_aio_context(bs)) {
+                    bdrv_flush_io_queue(bs);
+                    if (bdrv_requests_pending(bs)) {
+                        busy = true;
+                        aio_poll(aio_context, busy);
+                    }
+                }
+            }
+            busy |= aio_poll(aio_context, false);
             aio_context_release(aio_context);
         }
     }
@@ -312,6 +320,7 @@ void bdrv_drain_all(void)
         }
         aio_context_release(aio_context);
     }
+    g_slist_free(aio_ctxs);
 }
 
 /**
@@ -2562,4 +2571,5 @@ void bdrv_flush_io_queue(BlockDriverState *bs)
     } else if (bs->file) {
         bdrv_flush_io_queue(bs->file);
     }
+    bdrv_start_throttled_reqs(bs);
 }