aio-wait: delegate polling of main AioContext if BQL not held
Any thread that is not an iothread returns NULL for qemu_get_current_aio_context(). As a result, it would also return true for in_aio_context_home_thread(qemu_get_aio_context()), causing AIO_WAIT_WHILE to invoke aio_poll() directly. This is incorrect if the BQL is not held, because aio_poll() does not expect to run concurrently from multiple threads, and it can actually happen when savevm writes to the vmstate file from the migration thread.

Therefore, restrict in_aio_context_home_thread to return true for the main AioContext only if the BQL is held.

The function is moved to aio-wait.h because it is mostly used there and to avoid a circular reference between main-loop.h and block/aio.h.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200407140746.8041-5-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 3c18a92dc4
parent 636b836d5f
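Before the diff, a minimal sketch of the decision that AIO_WAIT_WHILE() makes may help; this is not the actual macro (the real one also coordinates waiters through aio_wait_kick()), and wait_done() and delegate_to_home_thread() are placeholders invented for the illustration:

/* Minimal sketch, not the real AIO_WAIT_WHILE() from include/qemu/aio-wait.h. */
static void aio_wait_while_sketch(AioContext *ctx)
{
    if (ctx && in_aio_context_home_thread(ctx)) {
        /* aio_poll() must not run concurrently from several threads, so only
         * the context's home thread polls it directly.  Before this patch, a
         * thread without the BQL (e.g. the migration thread during savevm)
         * could take this branch for the main AioContext and race with the
         * main loop's own polling. */
        while (!wait_done()) {
            aio_poll(ctx, true);
        }
    } else {
        /* Placeholder for the macro's non-home-thread path. */
        delegate_to_home_thread(ctx);
    }
}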
--- a/include/qemu/aio-wait.h
+++ b/include/qemu/aio-wait.h
@@ -26,6 +26,7 @@
 #define QEMU_AIO_WAIT_H
 
 #include "block/aio.h"
+#include "qemu/main-loop.h"
 
 /**
  * AioWait:
@@ -124,4 +125,25 @@ void aio_wait_kick(void);
  */
 void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
 
+/**
+ * in_aio_context_home_thread:
+ * @ctx: the aio context
+ *
+ * Return whether we are running in the thread that normally runs @ctx. Note
+ * that acquiring/releasing ctx does not affect the outcome, each AioContext
+ * still only has one home thread that is responsible for running it.
+ */
+static inline bool in_aio_context_home_thread(AioContext *ctx)
+{
+    if (ctx == qemu_get_current_aio_context()) {
+        return true;
+    }
+
+    if (ctx == qemu_get_aio_context()) {
+        return qemu_mutex_iothread_locked();
+    } else {
+        return false;
+    }
+}
+
 #endif /* QEMU_AIO_WAIT_H */
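A short walkthrough of what the new helper returns in the situations named in the commit message; this is illustration only, not code from the patch:

/* Walkthrough of in_aio_context_home_thread(), illustration only. */
static void home_thread_examples(void)
{
    AioContext *main_ctx = qemu_get_aio_context();

    /* From an IOThread, about that IOThread's own context: the first test
     * matches qemu_get_current_aio_context(), so the result is true.
     *
     * From the main loop thread (not an IOThread, so per the commit message
     * qemu_get_current_aio_context() is NULL): the first test fails, the
     * ctx == qemu_get_aio_context() test matches, and the result is
     * qemu_mutex_iothread_locked(), i.e. true while the BQL is held.
     *
     * From the migration thread during savevm (no BQL): same path, but
     * qemu_mutex_iothread_locked() is false, so AIO_WAIT_WHILE() no longer
     * polls the main context behind the main loop's back. */
    (void)in_aio_context_home_thread(main_ctx);
}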
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -133,12 +133,16 @@ struct AioContext {
     AioHandlerList deleted_aio_handlers;
 
     /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
-     * accessed with atomic primitives. If this field is 0, everything
-     * (file descriptors, bottom halves, timers) will be re-evaluated
-     * before the next blocking poll(), thus the event_notifier_set call
-     * can be skipped. If it is non-zero, you may need to wake up a
-     * concurrent aio_poll or the glib main event loop, making
-     * event_notifier_set necessary.
+     * only written from the AioContext home thread, or under the BQL in
+     * the case of the main AioContext. However, it is read from any
+     * thread so it is still accessed with atomic primitives.
+     *
+     * If this field is 0, everything (file descriptors, bottom halves,
+     * timers) will be re-evaluated before the next blocking poll() or
+     * io_uring wait; therefore, the event_notifier_set call can be
+     * skipped. If it is non-zero, you may need to wake up a concurrent
+     * aio_poll or the glib main event loop, making event_notifier_set
+     * necessary.
      *
      * Bit 0 is reserved for GSource usage of the AioContext, and is 1
      * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
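The comment above documents AioContext's notify_me counter. As a reminder of why the atomic access matters, a simplified sketch of the consumer side in aio_notify() follows; the real function in util/async.c also issues memory barriers, so treat this only as an outline:

/* Simplified sketch of how aio_notify() consults notify_me; not the real code. */
static void aio_notify_sketch(AioContext *ctx)
{
    /* notify_me is written only by the context's home thread (or under the
     * BQL for the main context), but it is read here from any thread, hence
     * the atomic read. */
    if (atomic_read(&ctx->notify_me)) {
        /* Someone is about to block in poll() or an io_uring wait: wake them
         * so they re-evaluate file descriptors, bottom halves and timers. */
        event_notifier_set(&ctx->notifier);
    }
    /* When notify_me is 0, the next blocking wait re-evaluates everything
     * anyway, so the event_notifier_set() call can be skipped. */
}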
@@ -681,19 +685,6 @@ void aio_co_enter(AioContext *ctx, struct Coroutine *co);
  */
 AioContext *qemu_get_current_aio_context(void);
 
-/**
- * in_aio_context_home_thread:
- * @ctx: the aio context
- *
- * Return whether we are running in the thread that normally runs @ctx. Note
- * that acquiring/releasing ctx does not affect the outcome, each AioContext
- * still only has one home thread that is responsible for running it.
- */
-static inline bool in_aio_context_home_thread(AioContext *ctx)
-{
-    return ctx == qemu_get_current_aio_context();
-}
-
 /**
  * aio_context_setup:
  * @ctx: the aio context