/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Deferred calls
 *
 * Copyright Red Hat.
 *
 * This API defers a function call within a defer_call_begin()/defer_call_end()
 * section, allowing multiple calls to batch up. This is a performance
 * optimization that is used in the block layer to submit several I/O requests
 * at once instead of individually:
 *
 *   defer_call_begin(); <-- start of section
 *   ...
 *   defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
 *   defer_call(my_func, my_obj); <-- another
 *   defer_call(my_func, my_obj); <-- another
 *   ...
 *   defer_call_end(); <-- end of section, my_func(my_obj) is called once
 */
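
/*
 * A concrete sketch of the pattern above (illustrative only; submit_request()
 * and vq_kick() are hypothetical names, not part of this API):
 *
 *   defer_call_begin();
 *   for (size_t i = 0; i < n; i++) {
 *       submit_request(&reqs[i]);  // internally calls defer_call(vq_kick, vq)
 *   }
 *   defer_call_end();              // vq_kick(vq) runs once here
 */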

#include "qemu/osdep.h"
#include "qemu/coroutine-tls.h"
#include "qemu/notify.h"
#include "qemu/thread.h"
#include "qemu/defer-call.h"

/* A function call that has been deferred until defer_call_end() */
typedef struct {
    void (*fn)(void *);
    void *opaque;
} DeferredCall;

/* Per-thread state */
typedef struct {
    unsigned nesting_level;
    GArray *deferred_call_array;
} DeferCallThreadState;

/*
 * Use get_ptr_defer_call_thread_state() to fetch this thread-local value.
 * Coroutine TLS is used instead of plain __thread because sections may be
 * entered from coroutine context, where the compiler could cache a stale
 * TLS address across a yield if the coroutine resumes on another thread.
 */
QEMU_DEFINE_STATIC_CO_TLS(DeferCallThreadState, defer_call_thread_state);

/* Called at thread cleanup time */
static void defer_call_atexit(Notifier *n, void *value)
{
    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
    g_array_free(thread_state->deferred_call_array, TRUE);
}

/* This won't involve coroutines, so use __thread */
static __thread Notifier defer_call_atexit_notifier;

/**
 * defer_call:
 * @fn: a function pointer to be invoked
 * @opaque: a user-defined argument to @fn()
 *
 * Call @fn(@opaque) immediately if not within a
 * defer_call_begin()/defer_call_end() section.
 *
 * Otherwise defer the call until the end of the outermost
 * defer_call_begin()/defer_call_end() section in this thread. If the same
 * @fn/@opaque pair has already been deferred, it will only be called once upon
 * defer_call_end() so that accumulated calls are batched into a single call.
 *
 * The caller must ensure that @opaque is not freed before @fn() is invoked.
 */
void defer_call(void (*fn)(void *), void *opaque)
{
    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();

    /* Call immediately if we're not deferring calls */
    if (thread_state->nesting_level == 0) {
        fn(opaque);
        return;
    }

    GArray *array = thread_state->deferred_call_array;
    if (!array) {
        array = g_array_new(FALSE, FALSE, sizeof(DeferredCall));
        thread_state->deferred_call_array = array;
        defer_call_atexit_notifier.notify = defer_call_atexit;
        qemu_thread_atexit_add(&defer_call_atexit_notifier);
    }

    DeferredCall *fns = (DeferredCall *)array->data;
    DeferredCall new_fn = {
        .fn = fn,
        .opaque = opaque,
    };

    /*
     * There won't be many, so do a linear search. If this becomes a bottleneck
     * then a binary search (glib 2.62+) or different data structure could be
     * used.
     */
    for (guint i = 0; i < array->len; i++) {
        if (memcmp(&fns[i], &new_fn, sizeof(new_fn)) == 0) {
            return; /* already exists */
        }
    }

    g_array_append_val(array, new_fn);
}
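
/*
 * Illustration (not part of this file): outside a section the call is
 * immediate; inside one, duplicate @fn/@opaque pairs collapse into a single
 * deferred call. fn and obj are placeholder names:
 *
 *   defer_call(fn, obj);    // runs fn(obj) right away (nesting_level == 0)
 *
 *   defer_call_begin();
 *   defer_call(fn, obj);    // deferred
 *   defer_call(fn, obj);    // duplicate, ignored
 *   defer_call_end();       // fn(obj) runs exactly once here
 */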

/**
 * defer_call_begin: Defer defer_call() functions until defer_call_end()
 *
 * defer_call_begin() and defer_call_end() are thread-local operations. The
 * caller must ensure that each defer_call_begin() has a matching
 * defer_call_end() in the same thread.
 *
 * Nesting is supported. defer_call() functions are only called at the
 * outermost defer_call_end().
 */
void defer_call_begin(void)
{
    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();

    assert(thread_state->nesting_level < UINT32_MAX);

    thread_state->nesting_level++;
}

/**
 * defer_call_end: Run any pending defer_call() functions
 *
 * There must have been a matching defer_call_begin() call in the same thread
 * prior to this defer_call_end() call.
 */
void defer_call_end(void)
{
    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();

    assert(thread_state->nesting_level > 0);

    if (--thread_state->nesting_level > 0) {
        return;
    }

    GArray *array = thread_state->deferred_call_array;
    if (!array) {
        return;
    }

    DeferredCall *fns = (DeferredCall *)array->data;

    for (guint i = 0; i < array->len; i++) {
        fns[i].fn(fns[i].opaque);
    }

    /*
     * This resets the array without freeing memory so that appending is cheap
     * in the future.
     */
    g_array_set_size(array, 0);
}
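
/*
 * Nesting sketch (illustrative; fn and obj are placeholder names): deferred
 * functions only run at the outermost defer_call_end():
 *
 *   defer_call_begin();     // nesting_level 0 -> 1
 *   defer_call_begin();     // nesting_level 1 -> 2
 *   defer_call(fn, obj);    // deferred
 *   defer_call_end();       // nesting_level 2 -> 1, nothing runs yet
 *   defer_call_end();       // nesting_level 1 -> 0, fn(obj) runs here
 */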