/*
 * AioContext wait support
 *
 * Copyright (C) 2018 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef QEMU_AIO_WAIT_H
#define QEMU_AIO_WAIT_H

#include "block/aio.h"
#include "qemu/main-loop.h"

/**
 * AioWait:
 *
 * An object that facilitates synchronous waiting on a condition. A single
 * global AioWait object (global_aio_wait) is used internally.
 *
 * The main loop can wait on an operation running in an IOThread as follows:
 *
 *   AioContext *ctx = ...;
 *   MyWork work = { .done = false };
 *   schedule_my_work_in_iothread(ctx, &work);
 *   AIO_WAIT_WHILE(ctx, !work.done);
 *
 * The IOThread must call aio_wait_kick() to notify the main loop when
 * work.done changes:
 *
 *   static void do_work(...)
 *   {
 *       ...
 *       work.done = true;
 *       aio_wait_kick();
 *   }
 */
typedef struct {
    /* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
    unsigned num_waiters;
} AioWait;

extern AioWait global_aio_wait;

/**
 * AIO_WAIT_WHILE_INTERNAL:
 * @ctx: the aio context, or NULL if multiple aio contexts (for which the
 *       caller does not hold a lock) are involved in the polling condition.
 * @cond: wait while this conditional expression is true
 * @unlock: whether to unlock and then lock again @ctx. This applies
 *          only when waiting for another AioContext from the main loop.
 *          Otherwise it's ignored.
 *
 * Wait while a condition is true. Use this to implement synchronous
 * operations that require event loop activity.
 *
 * The caller must be sure that something calls aio_wait_kick() when the value
 * of @cond might have changed.
 *
 * The caller's thread must be the IOThread that owns @ctx or the main loop
 * thread (with @ctx acquired exactly once). This macro cannot be used to
 * wait on conditions between two IOThreads since that could lead to deadlock;
 * go via the main loop instead.
 */
#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \
    bool waited_ = false; \
    AioWait *wait_ = &global_aio_wait; \
    AioContext *ctx_ = (ctx); \
    /* Increment wait_->num_waiters before evaluating cond. */ \
    qatomic_inc(&wait_->num_waiters); \
    /* Paired with smp_mb in aio_wait_kick(). */ \
    smp_mb__after_rmw(); \
    if (ctx_ && in_aio_context_home_thread(ctx_)) { \
        while ((cond)) { \
            aio_poll(ctx_, true); \
            waited_ = true; \
        } \
    } else { \
        assert(qemu_get_current_aio_context() == \
               qemu_get_aio_context()); \
        while ((cond)) { \
            if (unlock && ctx_) { \
                aio_context_release(ctx_); \
            } \
            aio_poll(qemu_get_aio_context(), true); \
            if (unlock && ctx_) { \
                aio_context_acquire(ctx_); \
            } \
            waited_ = true; \
        } \
    } \
    qatomic_dec(&wait_->num_waiters); \
    waited_; })

#define AIO_WAIT_WHILE(ctx, cond) \
    AIO_WAIT_WHILE_INTERNAL(ctx, cond, true)

#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \
    AIO_WAIT_WHILE_INTERNAL(ctx, cond, false)

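/*
 * Example sketch (illustrative only, reusing the hypothetical MyWork and
 * schedule_my_work_in_iothread() from the AioWait documentation above):
 * waiting from the main loop without holding the AioContext lock.
 *
 *   MyWork work = { .done = false };
 *   schedule_my_work_in_iothread(ctx, &work);
 *   AIO_WAIT_WHILE_UNLOCKED(ctx, !work.done);
 */
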
/**
 * aio_wait_kick:
 * Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
 * synchronous operations performed in an IOThread, the main thread lets the
 * IOThread's event loop run, waiting for the operation to complete. An
 * aio_wait_kick() call will wake up the main thread.
 */
void aio_wait_kick(void);

/**
 * aio_wait_bh_oneshot:
 * @ctx: the aio context
 * @cb: the BH callback function
 * @opaque: user data for the BH callback function
 *
 * Run a BH in @ctx and wait for it to complete.
 *
 * Must be called from the main loop thread without @ctx acquired.
 * Note that main loop event processing may occur.
 */
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

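/*
 * Example sketch (illustrative only; the callback and flag below are made up
 * for the example): running a one-shot BH in an IOThread's AioContext from
 * the main loop and waiting for it to finish.
 *
 *   static void set_done_bh(void *opaque)
 *   {
 *       *(bool *)opaque = true;
 *   }
 *
 *   bool done = false;
 *   aio_wait_bh_oneshot(iothread_ctx, set_done_bh, &done);
 *   assert(done);
 */
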
/**
 * in_aio_context_home_thread:
 * @ctx: the aio context
 *
 * Return whether we are running in the thread that normally runs @ctx. Note
 * that acquiring/releasing ctx does not affect the outcome; each AioContext
 * still has only one home thread that is responsible for running it.
 */
static inline bool in_aio_context_home_thread(AioContext *ctx)
{
    if (ctx == qemu_get_current_aio_context()) {
        return true;
    }

    if (ctx == qemu_get_aio_context()) {
        return qemu_mutex_iothread_locked();
    } else {
        return false;
    }
}

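/*
 * Example sketch (illustrative only): code that may only run in the thread
 * owning @ctx can document that requirement with an assertion along these
 * lines.
 *
 *   assert(in_aio_context_home_thread(ctx));
 */
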
#endif /* QEMU_AIO_WAIT_H */