2014-03-03 14:30:05 +04:00
|
|
|
/*
|
|
|
|
* Event loop thread
|
|
|
|
*
|
2021-01-14 01:10:12 +03:00
|
|
|
* Copyright Red Hat Inc., 2013, 2020
|
2014-03-03 14:30:05 +04:00
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Stefan Hajnoczi <stefanha@redhat.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2016-01-29 20:50:05 +03:00
|
|
|
#include "qemu/osdep.h"
|
2014-03-03 14:30:05 +04:00
|
|
|
#include "qom/object.h"
|
|
|
|
#include "qom/object_interfaces.h"
|
|
|
|
#include "qemu/module.h"
|
|
|
|
#include "block/aio.h"
|
2016-10-27 13:49:00 +03:00
|
|
|
#include "block/block.h"
|
2014-03-03 14:30:05 +04:00
|
|
|
#include "sysemu/iothread.h"
|
2018-02-01 14:18:31 +03:00
|
|
|
#include "qapi/error.h"
|
2018-02-27 02:13:27 +03:00
|
|
|
#include "qapi/qapi-commands-misc.h"
|
2014-09-18 15:30:49 +04:00
|
|
|
#include "qemu/error-report.h"
|
2015-07-09 09:55:38 +03:00
|
|
|
#include "qemu/rcu.h"
|
2016-10-27 13:48:59 +03:00
|
|
|
#include "qemu/main-loop.h"
|
2014-03-03 14:30:05 +04:00
|
|
|
|
|
|
|
/* IOThread has no class-specific state, so the plain ObjectClass suffices */
typedef ObjectClass IOThreadClass;

DECLARE_CLASS_CHECKERS(IOThreadClass, IOTHREAD,
                       TYPE_IOTHREAD)
|
2014-03-03 14:30:05 +04:00
|
|
|
|
2018-03-22 11:56:30 +03:00
|
|
|
#ifdef CONFIG_POSIX
/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
 * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
 * workloads.
 */
#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL
#else
/* Adaptive polling is disabled by default on non-POSIX hosts */
#define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL
#endif
|
2017-01-26 20:01:19 +03:00
|
|
|
|
2014-03-03 14:30:05 +04:00
|
|
|
/*
 * Entry point of the IOThread's event loop thread.  Spins until
 * iothread_stop_bh() clears iothread->running, then unwinds and exits.
 */
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();
    /*
     * g_main_context_push_thread_default() must be called before anything
     * in this new thread uses glib.
     */
    g_main_context_push_thread_default(iothread->worker_context);
    qemu_set_current_aio_context(iothread->ctx);
    iothread->thread_id = qemu_get_thread_id();
    /* Wake up the creator, which waits until thread_id becomes valid */
    qemu_sem_post(&iothread->init_done_sem);

    while (iothread->running) {
        /*
         * Note: from functional-wise the g_main_loop_run() below can
         * already cover the aio_poll() events, but we can't run the
         * main loop unconditionally because explicit aio_poll() here
         * is faster than g_main_loop_run() when we do not need the
         * gcontext at all (e.g., pure block layer iothreads). In
         * other words, when we want to run the gcontext with the
         * iothread we need to pay some performance for functionality.
         */
        aio_poll(iothread->ctx, true);

        /*
         * We must check the running state again in case it was
         * changed in previous aio_poll()
         */
        if (iothread->running && qatomic_read(&iothread->run_gcontext)) {
            g_main_loop_run(iothread->main_loop);
        }
    }

    g_main_context_pop_thread_default(iothread->worker_context);
    rcu_unregister_thread();
    return NULL;
}
|
|
|
|
|
iothread: fix iothread_stop() race condition
There is a small chance that iothread_stop() hangs as follows:
Thread 3 (Thread 0x7f63eba5f700 (LWP 16105)):
#0 0x00007f64012c09b6 in ppoll () at /lib64/libc.so.6
#1 0x000055959992eac9 in ppoll (__ss=0x0, __timeout=0x0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/bits/poll2.h:77
#2 0x000055959992eac9 in qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=<optimized out>) at util/qemu-timer.c:322
#3 0x0000559599930711 in aio_poll (ctx=0x55959bdb83c0, blocking=blocking@entry=true) at util/aio-posix.c:629
#4 0x00005595996806fe in iothread_run (opaque=0x55959bd78400) at iothread.c:59
#5 0x00007f640159f609 in start_thread () at /lib64/libpthread.so.0
#6 0x00007f64012cce6f in clone () at /lib64/libc.so.6
Thread 1 (Thread 0x7f640b45b280 (LWP 16103)):
#0 0x00007f64015a0b6d in pthread_join () at /lib64/libpthread.so.0
#1 0x00005595999332ef in qemu_thread_join (thread=<optimized out>) at util/qemu-thread-posix.c:547
#2 0x00005595996808ae in iothread_stop (iothread=<optimized out>) at iothread.c:91
#3 0x000055959968094d in iothread_stop_iter (object=<optimized out>, opaque=<optimized out>) at iothread.c:102
#4 0x0000559599857d97 in do_object_child_foreach (obj=obj@entry=0x55959bdb8100, fn=fn@entry=0x559599680930 <iothread_stop_iter>, opaque=opaque@entry=0x0, recurse=recurse@entry=false) at qom/object.c:852
#5 0x0000559599859477 in object_child_foreach (obj=obj@entry=0x55959bdb8100, fn=fn@entry=0x559599680930 <iothread_stop_iter>, opaque=opaque@entry=0x0) at qom/object.c:867
#6 0x0000559599680a6e in iothread_stop_all () at iothread.c:341
#7 0x000055959955b1d5 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4913
The relevant code from iothread_run() is:
while (!atomic_read(&iothread->stopping)) {
aio_poll(iothread->ctx, true);
and iothread_stop():
iothread->stopping = true;
aio_notify(iothread->ctx);
...
qemu_thread_join(&iothread->thread);
The following scenario can occur:
1. IOThread:
while (!atomic_read(&iothread->stopping)) -> stopping=false
2. Main loop:
iothread->stopping = true;
aio_notify(iothread->ctx);
3. IOThread:
aio_poll(iothread->ctx, true); -> hang
The bug is explained by the AioContext->notify_me doc comments:
"If this field is 0, everything (file descriptors, bottom halves,
timers) will be re-evaluated before the next blocking poll(), thus the
event_notifier_set call can be skipped."
The problem is that "everything" does not include checking
iothread->stopping. This means iothread_run() will block in aio_poll()
if aio_notify() was called just before aio_poll().
This patch fixes the hang by replacing aio_notify() with
aio_bh_schedule_oneshot(). This makes aio_poll() or g_main_loop_run()
to return.
Implementing this properly required a new bool running flag. The new
flag prevents races that are tricky if we try to use iothread->stopping.
Now iothread->stopping is purely for iothread_stop() and
iothread->running is purely for the iothread_run() thread.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20171207201320.19284-6-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2017-12-07 23:13:19 +03:00
|
|
|
/* Runs in iothread_run() thread */
|
|
|
|
static void iothread_stop_bh(void *opaque)
|
|
|
|
{
|
|
|
|
IOThread *iothread = opaque;
|
|
|
|
|
|
|
|
iothread->running = false; /* stop iothread_run() */
|
|
|
|
|
|
|
|
if (iothread->main_loop) {
|
|
|
|
g_main_loop_quit(iothread->main_loop);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-28 05:59:56 +03:00
|
|
|
/*
 * Stop the event loop thread and join it.  Safe to call when the thread was
 * never started or when a stop is already in progress.
 */
void iothread_stop(IOThread *iothread)
{
    if (!iothread->ctx || iothread->stopping) {
        return;
    }
    iothread->stopping = true;
    /*
     * Use a oneshot bottom half instead of aio_notify(): a scheduled BH is
     * always dispatched by the next aio_poll()/g_main_loop_run() iteration,
     * so iothread_stop_bh() reliably clears iothread->running and quits the
     * glib loop, guaranteeing iothread_run() returns and the join below
     * cannot hang.
     */
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}
|
|
|
|
|
2017-01-26 20:01:19 +03:00
|
|
|
static void iothread_instance_init(Object *obj)
{
    IOThread *self = IOTHREAD(obj);

    /* By default, we don't run gcontext */
    qatomic_set(&self->run_gcontext, 0);
    self->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
    /* -1 marks "event loop thread not started yet" */
    self->thread_id = -1;
    qemu_sem_init(&self->init_done_sem, 0);
}
|
|
|
|
|
2016-09-08 12:28:51 +03:00
|
|
|
static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    /* Make sure the event loop thread has exited before freeing anything */
    iothread_stop(iothread);

    /*
     * Before glib2 2.33.10, there is a glib2 bug that GSource context
     * pointer may not be cleared even if the context has already been
     * destroyed (while it should). Here let's free the AIO context
     * earlier to bypass that glib bug.
     *
     * We can remove this comment after the minimum supported glib2
     * version boosts to 2.33.10. Before that, let's free the
     * GSources first before destroying any GMainContext.
     */
    if (iothread->ctx) {
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
    }
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
        g_main_loop_unref(iothread->main_loop);
        iothread->main_loop = NULL;
    }
    qemu_sem_destroy(&iothread->init_done_sem);
}
|
|
|
|
|
2019-03-06 14:55:29 +03:00
|
|
|
/* Create the iothread's glib context and attach its AioContext GSource */
static void iothread_init_gcontext(IOThread *iothread)
{
    GSource *aio_source;

    iothread->worker_context = g_main_context_new();
    aio_source = aio_get_g_source(iothread_get_aio_context(iothread));
    g_source_attach(aio_source, iothread->worker_context);
    /* The context holds its own reference now; drop ours */
    g_source_unref(aio_source);
    iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}
|
|
|
|
|
2021-07-21 12:42:10 +03:00
|
|
|
/* Apply the iothread's polling and batching tunables to its AioContext */
static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
{
    ERRP_GUARD();

    /* Configure adaptive polling first and bail out on invalid values */
    aio_context_set_poll_params(iothread->ctx, iothread->poll_max_ns,
                                iothread->poll_grow, iothread->poll_shrink,
                                errp);
    if (*errp) {
        return;
    }

    /* Then apply the request batching limit */
    aio_context_set_aio_params(iothread->ctx, iothread->aio_max_batch, errp);
}
|
|
|
|
|
2014-03-03 14:30:05 +04:00
|
|
|
/*
 * UserCreatable completion hook: create the AioContext and glib context,
 * apply the configured parameters, spawn the event loop thread, and wait
 * until it has fully initialized.  On failure, errp is set and the
 * partially-built state is torn down.
 *
 * Fix: drop the Error *local_error + error_propagate() boilerplate in
 * favor of ERRP_GUARD(), matching iothread_set_aio_context_params() in
 * this file; behavior is unchanged.
 */
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    ERRP_GUARD();
    IOThread *iothread = IOTHREAD(obj);
    char *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->ctx = aio_context_new(errp);
    if (!iothread->ctx) {
        return;
    }

    /*
     * Init one GMainContext for the iothread unconditionally, even if
     * it's not used
     */
    iothread_init_gcontext(iothread);

    iothread_set_aio_context_params(iothread, errp);
    if (*errp) {
        /* Undo aio_context_new() so finalize sees a clean object */
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    thread_name = g_strdup_printf("IO %s",
                        object_get_canonical_path_component(OBJECT(obj)));
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);

    /* Wait for initialization to complete */
    while (iothread->thread_id == -1) {
        qemu_sem_wait(&iothread->init_done_sem);
    }
}
|
|
|
|
|
2016-12-01 22:26:52 +03:00
|
|
|
/* Describes one int64_t tunable exposed as a QOM property on IOThread */
typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in IOThread struct */
} IOThreadParamInfo;

static IOThreadParamInfo poll_max_ns_info = {
    "poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static IOThreadParamInfo poll_grow_info = {
    "poll-grow", offsetof(IOThread, poll_grow),
};
static IOThreadParamInfo poll_shrink_info = {
    "poll-shrink", offsetof(IOThread, poll_shrink),
};
static IOThreadParamInfo aio_max_batch_info = {
    "aio-max-batch", offsetof(IOThread, aio_max_batch),
};
|
2016-12-01 22:26:52 +03:00
|
|
|
|
2021-07-21 12:42:09 +03:00
|
|
|
/* Generic QOM getter for the int64_t tunable described by @info */
static void iothread_get_param(Object *obj, Visitor *v,
        const char *name, IOThreadParamInfo *info, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    /* Locate the member inside the IOThread struct via its byte offset */
    int64_t *value = (void *)iothread + info->offset;

    visit_type_int64(v, name, value, errp);
}
|
|
|
|
|
2021-07-21 12:42:09 +03:00
|
|
|
/*
 * Generic setter for an int64 IOThread property.
 *
 * Reads an int64 from visitor @v, rejects negative values, and stores the
 * result into the IOThread field identified by @info->offset.
 *
 * Returns true on success, false with @errp set otherwise.
 */
static bool iothread_set_param(Object *obj, Visitor *v,
        const char *name, IOThreadParamInfo *info, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    int64_t *field = (int64_t *)((char *)iothread + info->offset);
    int64_t value;

    if (!visit_type_int64(v, name, &value, errp)) {
        return false;
    }
    /* All IOThread tunables are non-negative counts/durations */
    if (value < 0) {
        error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
                   info->name, INT64_MAX);
        return false;
    }

    *field = value;
    return true;
}
|
|
|
|
|
|
|
|
/*
 * QOM property getter for the polling tunables (poll-max-ns, poll-grow,
 * poll-shrink).  @opaque is the property's IOThreadParamInfo descriptor.
 */
static void iothread_get_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    iothread_get_param(obj, v, name, (IOThreadParamInfo *)opaque, errp);
}
|
|
|
|
|
|
|
|
/*
 * QOM property setter for the polling tunables.  After storing the new
 * value, apply all three polling parameters to the live AioContext (if
 * the iothread has already been started).
 */
static void iothread_set_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThreadParamInfo *info = opaque;
    IOThread *iothread = IOTHREAD(obj);

    if (!iothread_set_param(obj, v, name, info, errp)) {
        return;
    }

    /* ctx is NULL until the iothread is brought up; in that case the new
     * value takes effect when the AioContext is created. */
    if (iothread->ctx) {
        aio_context_set_poll_params(iothread->ctx,
                                    iothread->poll_max_ns,
                                    iothread->poll_grow,
                                    iothread->poll_shrink,
                                    errp);
    }
}
|
|
|
|
|
2021-07-21 12:42:10 +03:00
|
|
|
/*
 * QOM property getter for the aio-max-batch tunable.  @opaque is the
 * property's IOThreadParamInfo descriptor.
 */
static void iothread_get_aio_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    iothread_get_param(obj, v, name, (IOThreadParamInfo *)opaque, errp);
}
|
|
|
|
|
|
|
|
/*
 * QOM property setter for the aio-max-batch tunable.  After storing the
 * new value, push it to the live AioContext when one exists.
 */
static void iothread_set_aio_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThreadParamInfo *info = opaque;
    IOThread *iothread = IOTHREAD(obj);

    if (!iothread_set_param(obj, v, name, info, errp)) {
        return;
    }

    /* ctx is NULL before the iothread starts; the value is applied when
     * the AioContext comes up. */
    if (iothread->ctx) {
        aio_context_set_aio_params(iothread->ctx,
                                   iothread->aio_max_batch,
                                   errp);
    }
}
|
|
|
|
|
2014-03-03 14:30:05 +04:00
|
|
|
/*
 * QOM class initializer: hook up user-creatable completion and expose the
 * event-loop tunables as class properties.
 */
static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
    /* Called after -object/object-add sets all properties */
    ucc->complete = iothread_complete;

    /* Adaptive polling tunables; each property's IOThreadParamInfo is
     * passed as the opaque argument to the shared accessors. */
    object_class_property_add(klass, "poll-max-ns", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_max_ns_info);
    object_class_property_add(klass, "poll-grow", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_grow_info);
    object_class_property_add(klass, "poll-shrink", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_shrink_info);
    /* Request batching limit for the AioContext */
    object_class_property_add(klass, "aio-max-batch", "int",
                              iothread_get_aio_param,
                              iothread_set_aio_param,
                              NULL, &aio_max_batch_info);
}
|
|
|
|
|
|
|
|
/* QOM type registration table for TYPE_IOTHREAD: a user-creatable object
 * (usable with -object iothread,id=...) */
static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_OBJECT,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_init = iothread_instance_init,
    .instance_finalize = iothread_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        {TYPE_USER_CREATABLE},
        {}
    },
};
|
|
|
|
|
|
|
|
/* Register the iothread QOM type; invoked at startup via type_init() */
static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}
|
|
|
|
|
|
|
|
type_init(iothread_register_types)
|
|
|
|
|
|
|
|
/*
 * Return a newly allocated copy of the iothread's QOM id (its canonical
 * path component).  The caller owns the returned string and must g_free()
 * it.
 */
char *iothread_get_id(IOThread *iothread)
{
    const char *component =
        object_get_canonical_path_component(OBJECT(iothread));

    return g_strdup(component);
}
|
|
|
|
|
|
|
|
/* Return the iothread's AioContext (not a new reference) */
AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}
|
2014-02-27 14:48:42 +04:00
|
|
|
|
|
|
|
/*
 * object_child_foreach() visitor: if @object is an IOThread, append an
 * IOThreadInfo describing it to the list tail passed via @opaque.
 * Always returns 0 so iteration continues over every child.
 */
static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***tail = opaque;
    IOThread *iothread =
        (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);

    if (!iothread) {
        /* Not an iothread: skip and keep iterating */
        return 0;
    }

    IOThreadInfo *info = g_new0(IOThreadInfo, 1);

    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;
    info->poll_max_ns = iothread->poll_max_ns;
    info->poll_grow = iothread->poll_grow;
    info->poll_shrink = iothread->poll_shrink;
    info->aio_max_batch = iothread->aio_max_batch;

    QAPI_LIST_APPEND(*tail, info);
    return 0;
}
|
|
|
|
|
|
|
|
/*
 * QMP query-iothreads: build a list with one IOThreadInfo per iothread
 * found under the /objects container.  Caller owns the returned list.
 */
IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **tail = &head;

    object_child_foreach(object_get_objects_root(),
                         query_one_iothread, &tail);
    return head;
}
|
2016-09-08 12:28:51 +03:00
|
|
|
|
2017-08-29 10:22:37 +03:00
|
|
|
/* Return the iothread's GMainContext, enabling glib event processing in
 * its event loop if it wasn't enabled yet. */
GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
    /* Flag the iothread to start running the gcontext... */
    qatomic_set(&iothread->run_gcontext, 1);
    /* ...and kick its event loop so it notices the flag promptly.
     * Keep the set-then-notify order: notifying first could let the loop
     * run before the flag is visible. */
    aio_notify(iothread->ctx);
    return iothread->worker_context;
}
|
2017-09-28 05:59:55 +03:00
|
|
|
|
|
|
|
/*
 * Create an internal IOThread with the given @id, parented under the QOM
 * internal root (so it is not user-visible).  On failure @errp is set;
 * the result of IOTHREAD() then reflects the failed creation.
 */
IOThread *iothread_create(const char *id, Error **errp)
{
    Object *obj = object_new_with_props(TYPE_IOTHREAD,
                                        object_get_internal_root(),
                                        id, errp, NULL);

    return IOTHREAD(obj);
}
|
|
|
|
|
|
|
|
/* Destroy an iothread by unparenting it; finalization (thread join and
 * context teardown) happens via instance_finalize when the last reference
 * is dropped. */
void iothread_destroy(IOThread *iothread)
{
    object_unparent(OBJECT(iothread));
}
|
2017-12-06 17:45:48 +03:00
|
|
|
|
|
|
|
/* Lookup IOThread by its id.  Only finds user-created objects (under
 * /objects), not internal iothread_create() objects. */
IOThread *iothread_by_id(const char *id)
{
    return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
}
|
2021-01-29 19:46:10 +03:00
|
|
|
|
|
|
|
/*
 * Return true when the caller runs inside an iothread (i.e. its current
 * AioContext is not the main loop's global AioContext).
 */
bool qemu_in_iothread(void)
{
    /* Direct comparison instead of the redundant `cond ? false : true` */
    return qemu_get_current_aio_context() != qemu_get_aio_context();
}
|