aio: convert from RFifoLock to QemuRecMutex
It is simpler and a bit faster, and QEMU does not need the contention
callbacks (and thus the fairness) anymore.

Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1477565348-5458-21-git-send-email-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
parent feadec6384
commit 3fe7122337
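The QemuRecMutex that replaces RFifoLock is a thin wrapper around an ordinary recursive mutex, which is why the ticket and fairness machinery can simply go away. A minimal sketch of such a wrapper on POSIX threads, assuming a recursive pthread mutex underneath (illustrative only, not QEMU's actual util/qemu-thread-posix.c code):

    /* Sketch of a recursive mutex wrapper on POSIX threads.  Assumption: this
     * mirrors the idea behind QemuRecMutex, not QEMU's exact implementation. */
    #include <pthread.h>

    typedef struct {
        pthread_mutex_t m;
    } RecMutexSketch;

    static void rec_mutex_init(RecMutexSketch *r)
    {
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&r->m, &attr);
        pthread_mutexattr_destroy(&attr);
    }

    static void rec_mutex_lock(RecMutexSketch *r)    { pthread_mutex_lock(&r->m); }
    static void rec_mutex_unlock(RecMutexSketch *r)  { pthread_mutex_unlock(&r->m); }
    static void rec_mutex_destroy(RecMutexSketch *r) { pthread_mutex_destroy(&r->m); }

Unlike RFifoLock below, no ticket bookkeeping, condition variable or contention callback is needed; recursion is handled by the mutex itself.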
--- a/async.c
+++ b/async.c
@@ -284,7 +284,7 @@ aio_ctx_finalize(GSource *source)
 
     aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
     event_notifier_cleanup(&ctx->notifier);
-    rfifolock_destroy(&ctx->lock);
+    qemu_rec_mutex_destroy(&ctx->lock);
     qemu_mutex_destroy(&ctx->bh_lock);
     timerlistgroup_deinit(&ctx->tlg);
 }
@@ -372,7 +372,7 @@ AioContext *aio_context_new(Error **errp)
 #endif
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
-    rfifolock_init(&ctx->lock, NULL, NULL);
+    qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
     return ctx;
@@ -393,10 +393,10 @@ void aio_context_unref(AioContext *ctx)
 
 void aio_context_acquire(AioContext *ctx)
 {
-    rfifolock_lock(&ctx->lock);
+    qemu_rec_mutex_lock(&ctx->lock);
 }
 
 void aio_context_release(AioContext *ctx)
 {
-    rfifolock_unlock(&ctx->lock);
+    qemu_rec_mutex_unlock(&ctx->lock);
 }
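Because the new lock is recursive, a caller that re-acquires an AioContext it already holds still works after the conversion. A hypothetical caller, for illustration only (example_nested_use is not a function in the tree):

    /* Hypothetical caller; assumes #include "block/aio.h". */
    static void example_nested_use(AioContext *ctx)
    {
        aio_context_acquire(ctx);
        aio_context_acquire(ctx);   /* same thread, recursive, no deadlock */
        /* ... operate on the AioContext ... */
        aio_context_release(ctx);
        aio_context_release(ctx);
    }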
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -18,7 +18,6 @@
 #include "qemu/queue.h"
 #include "qemu/event_notifier.h"
 #include "qemu/thread.h"
-#include "qemu/rfifolock.h"
 #include "qemu/timer.h"
 
 typedef struct BlockAIOCB BlockAIOCB;
@@ -54,7 +53,7 @@ struct AioContext {
     GSource source;
 
     /* Protects all fields from multi-threaded access */
-    RFifoLock lock;
+    QemuRecMutex lock;
 
     /* The list of registered AIO handlers */
     QLIST_HEAD(, AioHandler) aio_handlers;
--- a/include/qemu/rfifolock.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Recursive FIFO lock
- *
- * Copyright Red Hat, Inc. 2013
- *
- * Authors:
- *  Stefan Hajnoczi   <stefanha@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_RFIFOLOCK_H
-#define QEMU_RFIFOLOCK_H
-
-#include "qemu/thread.h"
-
-/* Recursive FIFO lock
- *
- * This lock provides more features than a plain mutex:
- *
- * 1. Fairness - enforces FIFO order.
- * 2. Nesting - can be taken recursively.
- * 3. Contention callback - optional, called when thread must wait.
- *
- * The recursive FIFO lock is heavyweight so prefer other synchronization
- * primitives if you do not need its features.
- */
-typedef struct {
-    QemuMutex lock;             /* protects all fields */
-
-    /* FIFO order */
-    unsigned int head;          /* active ticket number */
-    unsigned int tail;          /* waiting ticket number */
-    QemuCond cond;              /* used to wait for our ticket number */
-
-    /* Nesting */
-    QemuThread owner_thread;    /* thread that currently has ownership */
-    unsigned int nesting;       /* amount of nesting levels */
-
-    /* Contention callback */
-    void (*cb)(void *);         /* called when thread must wait, with ->lock
-                                 * held so it may not recursively lock/unlock
-                                 */
-    void *cb_opaque;
-} RFifoLock;
-
-void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque);
-void rfifolock_destroy(RFifoLock *r);
-void rfifolock_lock(RFifoLock *r);
-void rfifolock_unlock(RFifoLock *r);
-
-#endif /* QEMU_RFIFOLOCK_H */
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -67,7 +67,6 @@ test-qmp-marshal.c
 test-qobject-output-visitor
 test-rcu-list
 test-replication
-test-rfifolock
 test-string-input-visitor
 test-string-output-visitor
 test-thread-pool
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -45,7 +45,6 @@ check-unit-y += tests/test-visitor-serialization$(EXESUF)
 check-unit-y += tests/test-iov$(EXESUF)
 gcov-files-test-iov-y = util/iov.c
 check-unit-y += tests/test-aio$(EXESUF)
-check-unit-$(CONFIG_POSIX) += tests/test-rfifolock$(EXESUF)
 check-unit-y += tests/test-throttle$(EXESUF)
 gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
 gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
@@ -490,7 +489,6 @@ tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)
 tests/test-char$(EXESUF): tests/test-char.o qemu-char.o qemu-timer.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y)
 tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
 tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
-tests/test-rfifolock$(EXESUF): tests/test-rfifolock.o $(test-util-obj-y)
 tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
 tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y)
 tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)
--- a/tests/test-rfifolock.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * RFifoLock tests
- *
- * Copyright Red Hat, Inc. 2013
- *
- * Authors:
- *  Stefan Hajnoczi   <stefanha@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2 or later.
- * See the COPYING.LIB file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "qemu/rfifolock.h"
-
-static void test_nesting(void)
-{
-    RFifoLock lock;
-
-    /* Trivial test, ensure the lock is recursive */
-    rfifolock_init(&lock, NULL, NULL);
-    rfifolock_lock(&lock);
-    rfifolock_lock(&lock);
-    rfifolock_lock(&lock);
-    rfifolock_unlock(&lock);
-    rfifolock_unlock(&lock);
-    rfifolock_unlock(&lock);
-    rfifolock_destroy(&lock);
-}
-
-typedef struct {
-    RFifoLock lock;
-    int fd[2];
-} CallbackTestData;
-
-static void rfifolock_cb(void *opaque)
-{
-    CallbackTestData *data = opaque;
-    int ret;
-    char c = 0;
-
-    ret = write(data->fd[1], &c, sizeof(c));
-    g_assert(ret == 1);
-}
-
-static void *callback_thread(void *opaque)
-{
-    CallbackTestData *data = opaque;
-
-    /* The other thread holds the lock so the contention callback will be
-     * invoked...
-     */
-    rfifolock_lock(&data->lock);
-    rfifolock_unlock(&data->lock);
-    return NULL;
-}
-
-static void test_callback(void)
-{
-    CallbackTestData data;
-    QemuThread thread;
-    int ret;
-    char c;
-
-    rfifolock_init(&data.lock, rfifolock_cb, &data);
-    ret = qemu_pipe(data.fd);
-    g_assert(ret == 0);
-
-    /* Hold lock but allow the callback to kick us by writing to the pipe */
-    rfifolock_lock(&data.lock);
-    qemu_thread_create(&thread, "callback_thread",
-                       callback_thread, &data, QEMU_THREAD_JOINABLE);
-    ret = read(data.fd[0], &c, sizeof(c));
-    g_assert(ret == 1);
-    rfifolock_unlock(&data.lock);
-    /* If we got here then the callback was invoked, as expected */
-
-    qemu_thread_join(&thread);
-    close(data.fd[0]);
-    close(data.fd[1]);
-    rfifolock_destroy(&data.lock);
-}
-
-int main(int argc, char **argv)
-{
-    g_test_init(&argc, &argv, NULL);
-    g_test_add_func("/nesting", test_nesting);
-    g_test_add_func("/callback", test_callback);
-    return g_test_run();
-}
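The nesting behaviour exercised by the removed test is now provided directly by the recursive mutex, so no replacement test is added. For illustration, the same trivial check written against the qemu_rec_mutex_* calls used in the async.c hunk above would look roughly like this (a sketch only, not code from this commit):

    /* Illustrative sketch; assumes #include "qemu/thread.h". */
    static void example_rec_mutex_nesting(void)
    {
        QemuRecMutex lock;

        qemu_rec_mutex_init(&lock);
        qemu_rec_mutex_lock(&lock);
        qemu_rec_mutex_lock(&lock);     /* recursive acquisition is allowed */
        qemu_rec_mutex_unlock(&lock);
        qemu_rec_mutex_unlock(&lock);
        qemu_rec_mutex_destroy(&lock);
    }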
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -25,7 +25,6 @@ util-obj-y += uuid.o
 util-obj-y += throttle.o
 util-obj-y += getauxval.o
 util-obj-y += readline.o
-util-obj-y += rfifolock.o
 util-obj-y += rcu.o
 util-obj-y += qemu-coroutine.o qemu-coroutine-lock.o qemu-coroutine-io.o
 util-obj-y += qemu-coroutine-sleep.o
--- a/util/rfifolock.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Recursive FIFO lock
- *
- * Copyright Red Hat, Inc. 2013
- *
- * Authors:
- *  Stefan Hajnoczi   <stefanha@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2 or later.
- * See the COPYING.LIB file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include "qemu/rfifolock.h"
-
-void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque)
-{
-    qemu_mutex_init(&r->lock);
-    r->head = 0;
-    r->tail = 0;
-    qemu_cond_init(&r->cond);
-    r->nesting = 0;
-    r->cb = cb;
-    r->cb_opaque = opaque;
-}
-
-void rfifolock_destroy(RFifoLock *r)
-{
-    qemu_cond_destroy(&r->cond);
-    qemu_mutex_destroy(&r->lock);
-}
-
-/*
- * Theory of operation:
- *
- * In order to ensure FIFO ordering, implement a ticketlock.  Threads acquiring
- * the lock enqueue themselves by incrementing the tail index.  When the lock
- * is unlocked, the head is incremented and waiting threads are notified.
- *
- * Recursive locking does not take a ticket since the head is only incremented
- * when the outermost recursive caller unlocks.
- */
-void rfifolock_lock(RFifoLock *r)
-{
-    qemu_mutex_lock(&r->lock);
-
-    /* Take a ticket */
-    unsigned int ticket = r->tail++;
-
-    if (r->nesting > 0 && qemu_thread_is_self(&r->owner_thread)) {
-        r->tail--; /* put ticket back, we're nesting */
-    } else {
-        while (ticket != r->head) {
-            /* Invoke optional contention callback */
-            if (r->cb) {
-                r->cb(r->cb_opaque);
-            }
-            qemu_cond_wait(&r->cond, &r->lock);
-        }
-        qemu_thread_get_self(&r->owner_thread);
-    }
-
-    r->nesting++;
-    qemu_mutex_unlock(&r->lock);
-}
-
-void rfifolock_unlock(RFifoLock *r)
-{
-    qemu_mutex_lock(&r->lock);
-    assert(r->nesting > 0);
-    assert(qemu_thread_is_self(&r->owner_thread));
-    if (--r->nesting == 0) {
-        r->head++;
-        qemu_cond_broadcast(&r->cond);
-    }
-    qemu_mutex_unlock(&r->lock);
-}
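Stripped of the nesting and contention-callback support, the mechanism described in the "Theory of operation" comment above is a classic ticket lock. A minimal standalone sketch in plain pthreads, kept only as an illustration of the removed algorithm (not code from the QEMU tree):

    /* FIFO ticket lock sketch: lock() takes a ticket, unlock() admits the next
     * waiter.  Illustrative only; recursion and callbacks are omitted. */
    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;   /* protects head/tail */
        pthread_cond_t cond;    /* waiters sleep here until their ticket is up */
        unsigned int head;      /* ticket currently allowed to run */
        unsigned int tail;      /* next ticket to hand out */
    } TicketLock;

    static void ticket_lock_init(TicketLock *t)
    {
        pthread_mutex_init(&t->lock, NULL);
        pthread_cond_init(&t->cond, NULL);
        t->head = 0;
        t->tail = 0;
    }

    static void ticket_lock(TicketLock *t)
    {
        pthread_mutex_lock(&t->lock);
        unsigned int ticket = t->tail++;    /* enqueue by taking a ticket */
        while (ticket != t->head) {
            pthread_cond_wait(&t->cond, &t->lock);
        }
        pthread_mutex_unlock(&t->lock);
    }

    static void ticket_unlock(TicketLock *t)
    {
        pthread_mutex_lock(&t->lock);
        t->head++;                          /* admit the next ticket holder */
        pthread_cond_broadcast(&t->cond);
        pthread_mutex_unlock(&t->lock);
    }

The condition-variable broadcast on every outermost unlock is the price of the FIFO fairness that QEMU no longer needs, which helps explain the "simpler and a bit faster" in the commit message.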