coroutine: add ./configure --disable-coroutine-pool
The 'gthread' coroutine backend was written before the freelist (aka pool) existed in qemu-coroutine.c. This means that every thread is expected to exit when its coroutine terminates. It is not possible to reuse threads from a pool.

This patch automatically disables the pool when 'gthread' is used. This allows the 'gthread' backend to work again (for example, tests/test-coroutine completes successfully instead of hanging).

I considered implementing thread reuse but I don't want quirks like CPU affinity differences due to coroutine threads being recycled. The 'gthread' backend is a reference backend and it's therefore okay to skip the pool optimization.

Note this patch also makes it easy to toggle the pool for benchmarking purposes:

  ./configure --with-coroutine-backend=ucontext \
              --disable-coroutine-pool

Reported-by: Gabriel Kerneis <gabriel@kerneis.info>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Gabriel Kerneis <gabriel@kerneis.info>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
Parent: 2c78857bf6
Commit: 70c60c089f
Changed files: configure (vendored) — 24 changed lines; qemu-coroutine.c
@@ -238,6 +238,7 @@ win_sdk="no"
 want_tools="yes"
 libiscsi=""
 coroutine=""
+coroutine_pool=""
 seccomp=""
 glusterfs=""
 glusterfs_discard="no"
@@ -888,6 +889,10 @@ for opt do
   ;;
   --with-coroutine=*) coroutine="$optarg"
   ;;
+  --disable-coroutine-pool) coroutine_pool="no"
+  ;;
+  --enable-coroutine-pool) coroutine_pool="yes"
+  ;;
   --disable-docs) docs="no"
   ;;
   --enable-docs) docs="yes"
@@ -1189,6 +1194,8 @@ echo "  --disable-seccomp        disable seccomp support"
 echo "  --enable-seccomp         enables seccomp support"
 echo "  --with-coroutine=BACKEND coroutine backend. Supported options:"
 echo "                           gthread, ucontext, sigaltstack, windows"
+echo "  --disable-coroutine-pool disable coroutine freelist (worse performance)"
+echo "  --enable-coroutine-pool  enable coroutine freelist (better performance)"
 echo "  --enable-glusterfs       enable GlusterFS backend"
 echo "  --disable-glusterfs      disable GlusterFS backend"
 echo "  --enable-gcov            enable test coverage analysis with gcov"
@@ -3362,6 +3369,17 @@ else
   esac
 fi

+if test "$coroutine_pool" = ""; then
+  if test "$coroutine" = "gthread"; then
+    coroutine_pool=no
+  else
+    coroutine_pool=yes
+  fi
+fi
+if test "$coroutine" = "gthread" -a "$coroutine_pool" = "yes"; then
+  error_exit "'gthread' coroutine backend does not support pool (use --disable-coroutine-pool)"
+fi
+
 ##########################################
 # check if we have open_by_handle_at

@@ -3733,6 +3751,7 @@ echo "build guest agent $guest_agent"
 echo "QGA VSS support   $guest_agent_with_vss"
 echo "seccomp support   $seccomp"
 echo "coroutine backend $coroutine"
+echo "coroutine pool    $coroutine_pool"
 echo "GlusterFS support $glusterfs"
 echo "virtio-blk-data-plane $virtio_blk_data_plane"
 echo "gcov              $gcov_tool"
@@ -4092,6 +4111,11 @@ if test "$rbd" = "yes" ; then
 fi

 echo "CONFIG_COROUTINE_BACKEND=$coroutine" >> $config_host_mak
+if test "$coroutine_pool" = "yes" ; then
+  echo "CONFIG_COROUTINE_POOL=1" >> $config_host_mak
+else
+  echo "CONFIG_COROUTINE_POOL=0" >> $config_host_mak
+fi

 if test "$open_by_handle_at" = "yes" ; then
   echo "CONFIG_OPEN_BY_HANDLE=y" >> $config_host_mak
@@ -30,15 +30,17 @@ static unsigned int pool_size;

 Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
 {
-    Coroutine *co;
+    Coroutine *co = NULL;

-    qemu_mutex_lock(&pool_lock);
-    co = QSLIST_FIRST(&pool);
-    if (co) {
-        QSLIST_REMOVE_HEAD(&pool, pool_next);
-        pool_size--;
+    if (CONFIG_COROUTINE_POOL) {
+        qemu_mutex_lock(&pool_lock);
+        co = QSLIST_FIRST(&pool);
+        if (co) {
+            QSLIST_REMOVE_HEAD(&pool, pool_next);
+            pool_size--;
+        }
+        qemu_mutex_unlock(&pool_lock);
     }
-    qemu_mutex_unlock(&pool_lock);

     if (!co) {
         co = qemu_coroutine_new();
@@ -51,15 +53,17 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry)

 static void coroutine_delete(Coroutine *co)
 {
-    qemu_mutex_lock(&pool_lock);
-    if (pool_size < POOL_MAX_SIZE) {
-        QSLIST_INSERT_HEAD(&pool, co, pool_next);
-        co->caller = NULL;
-        pool_size++;
+    if (CONFIG_COROUTINE_POOL) {
+        qemu_mutex_lock(&pool_lock);
+        if (pool_size < POOL_MAX_SIZE) {
+            QSLIST_INSERT_HEAD(&pool, co, pool_next);
+            co->caller = NULL;
+            pool_size++;
+            qemu_mutex_unlock(&pool_lock);
+            return;
+        }
         qemu_mutex_unlock(&pool_lock);
-        return;
     }
-    qemu_mutex_unlock(&pool_lock);

     qemu_coroutine_delete(co);
 }
Loading…
Reference in New Issue
Block a user