2012-11-23 19:13:23 +04:00
|
|
|
/*
|
|
|
|
* AioContext tests
|
|
|
|
*
|
|
|
|
* Copyright Red Hat, Inc. 2012
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Paolo Bonzini <pbonzini@redhat.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
|
|
|
|
* See the COPYING.LIB file in the top-level directory.
|
|
|
|
*/
|
|
|
|
|
2016-02-08 21:08:51 +03:00
|
|
|
#include "qemu/osdep.h"
|
2012-12-17 21:19:44 +04:00
|
|
|
#include "block/aio.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 11:01:28 +03:00
|
|
|
#include "qapi/error.h"
|
2013-08-21 19:02:49 +04:00
|
|
|
#include "qemu/timer.h"
|
2013-08-29 20:48:16 +04:00
|
|
|
#include "qemu/sockets.h"
|
2014-09-18 15:30:49 +04:00
|
|
|
#include "qemu/error-report.h"
|
2012-11-23 19:13:23 +04:00
|
|
|
|
2014-07-07 23:03:38 +04:00
|
|
|
static AioContext *ctx;
|
2012-11-23 19:13:23 +04:00
|
|
|
|
2013-04-17 13:01:02 +04:00
|
|
|
/* Per-notifier bookkeeping shared between a test and event_ready_cb(). */
typedef struct {
    EventNotifier e;    /* the notifier under test */
    int n;              /* number of times the ready callback has fired */
    int active;         /* remaining expected activations (decremented by cb) */
    bool auto_set;      /* re-arm the notifier from the callback while active */
} EventNotifierTestData;
|
|
|
|
|
|
|
|
/* Wait until event notifier becomes inactive */
|
|
|
|
/* Block in aio_poll() until the notifier's expected activations drain to 0. */
static void wait_until_inactive(EventNotifierTestData *data)
{
    for (;;) {
        if (data->active <= 0) {
            break;
        }
        aio_poll(ctx, true);
    }
}
|
|
|
|
|
2012-11-23 19:13:23 +04:00
|
|
|
/* Simple callbacks for testing. */
|
|
|
|
|
|
|
|
/* Bookkeeping for the bottom-half test callbacks. */
typedef struct {
    QEMUBH *bh;  /* bottom half under test */
    int n;       /* invocation count */
    int max;     /* callbacks stop rescheduling once n reaches this */
} BHTestData;
|
|
|
|
|
2013-08-21 19:03:06 +04:00
|
|
|
/* Bookkeeping for the timer test callback. */
typedef struct {
    QEMUTimer timer;          /* timer under test */
    QEMUClockType clock_type; /* clock the timer is armed against */
    int n;                    /* number of expirations so far */
    int max;                  /* callback stops re-arming once n reaches this */
    int64_t ns;               /* relative expiry interval in nanoseconds */
    AioContext *ctx;          /* context servicing the timer */
} TimerTestData;
|
|
|
|
|
2012-11-23 19:13:23 +04:00
|
|
|
static void bh_test_cb(void *opaque)
|
|
|
|
{
|
|
|
|
BHTestData *data = opaque;
|
|
|
|
if (++data->n < data->max) {
|
|
|
|
qemu_bh_schedule(data->bh);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-21 19:03:06 +04:00
|
|
|
static void timer_test_cb(void *opaque)
|
|
|
|
{
|
|
|
|
TimerTestData *data = opaque;
|
|
|
|
if (++data->n < data->max) {
|
|
|
|
timer_mod(&data->timer,
|
|
|
|
qemu_clock_get_ns(data->clock_type) + data->ns);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-09 13:53:06 +04:00
|
|
|
/* No-op read handler; only exists to give aio_poll() an fd to wait on. */
static void dummy_io_handler_read(EventNotifier *e)
{
}
|
|
|
|
|
2012-11-23 19:13:23 +04:00
|
|
|
static void bh_delete_cb(void *opaque)
|
|
|
|
{
|
|
|
|
BHTestData *data = opaque;
|
|
|
|
if (++data->n < data->max) {
|
|
|
|
qemu_bh_schedule(data->bh);
|
|
|
|
} else {
|
|
|
|
qemu_bh_delete(data->bh);
|
|
|
|
data->bh = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Notifier callback: consume the event, count it, and optionally re-arm. */
static void event_ready_cb(EventNotifier *e)
{
    EventNotifierTestData *d = container_of(e, EventNotifierTestData, e);

    g_assert(event_notifier_test_and_clear(e));
    d->n++;
    if (d->active > 0) {
        d->active--;
    }
    /* In auto_set mode, keep the notifier pending while activations remain. */
    if (d->auto_set && d->active) {
        event_notifier_set(e);
    }
}
|
|
|
|
|
|
|
|
/* Tests using aio_*. */
|
|
|
|
|
2014-03-03 14:30:04 +04:00
|
|
|
/* Shared state between test_acquire() and its worker thread. */
typedef struct {
    QemuMutex start_lock;   /* held by main thread until the worker may run */
    EventNotifier notifier; /* kicks the main thread out of blocking aio_poll */
    bool thread_acquired;   /* set once the worker acquired/released ctx */
} AcquireTestData;
|
|
|
|
|
|
|
|
static void *test_acquire_thread(void *opaque)
|
|
|
|
{
|
|
|
|
AcquireTestData *data = opaque;
|
|
|
|
|
|
|
|
/* Wait for other thread to let us start */
|
|
|
|
qemu_mutex_lock(&data->start_lock);
|
|
|
|
qemu_mutex_unlock(&data->start_lock);
|
|
|
|
|
2016-10-27 13:49:06 +03:00
|
|
|
/* event_notifier_set might be called either before or after
|
|
|
|
* the main thread's call to poll(). The test case's outcome
|
|
|
|
* should be the same in either case.
|
|
|
|
*/
|
|
|
|
event_notifier_set(&data->notifier);
|
2014-03-03 14:30:04 +04:00
|
|
|
aio_context_acquire(ctx);
|
|
|
|
aio_context_release(ctx);
|
|
|
|
|
|
|
|
data->thread_acquired = true; /* success, we got here */
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
/* Convenience wrapper: register handler as a non-external event notifier. */
static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
                               EventNotifierHandler *handler)
{
    aio_set_event_notifier(ctx, notifier, false, handler);
}
|
|
|
|
|
2016-10-27 13:49:06 +03:00
|
|
|
/* Read handler that just consumes the event without recording anything. */
static void dummy_notifier_read(EventNotifier *n)
{
    event_notifier_test_and_clear(n);
}
|
|
|
|
|
|
|
|
/* Verify that a second thread cannot acquire the AioContext while the main
 * thread holds it inside a blocking aio_poll(); the worker only succeeds
 * after the main thread releases the context.
 */
static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    /* start_lock gates the worker until we are ready to block. */
    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    /* The worker must not have acquired the context while we held it. */
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    /* After release + join the worker must have gotten through. */
    g_assert(data.thread_acquired);
}
|
|
|
|
|
2012-11-23 19:13:23 +04:00
|
|
|
static void test_bh_schedule(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(aio_poll(ctx, true));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_bh_schedule10(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0, .max = 10 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
g_assert(aio_poll(ctx, true));
|
|
|
|
g_assert_cmpint(data.n, ==, 2);
|
|
|
|
|
AioContext: do not rely on aio_poll(ctx, true) result to end a loop
Currently, whenever aio_poll(ctx, true) has completed all pending
work it returns true *and* the next call to aio_poll(ctx, true)
will not block.
This invariant has its roots in qemu_aio_flush()'s implementation
as "while (qemu_aio_wait()) {}". However, qemu_aio_flush() does
not exist anymore and bdrv_drain_all() is implemented differently;
and this invariant is complicated to maintain and subtly different
from the return value of GMainLoop's g_main_context_iteration.
All calls to aio_poll(ctx, true) except one are guarded by a
while() loop checking for a request to be incomplete, or a
BlockDriverState to be idle. The one remaining call (in
iothread.c) uses this to delay the aio_context_release/acquire
pair until the AioContext is quiescent, however:
- we can do the same just by using non-blocking aio_poll,
similar to how vl.c invokes main_loop_wait
- it is buggy, because it does not ensure that the AioContext
is released between an aio_notify and the next time the
iothread goes to sleep. This leads to hangs when stopping
the dataplane thread.
In the end, these semantics are a bad match for the current
users of AioContext. So modify that one exception in iothread.c,
which also fixes the hangs, as well as the testcase so that
it use the same idiom as the actual QEMU code.
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-07-09 12:49:46 +04:00
|
|
|
while (data.n < 10) {
|
|
|
|
aio_poll(ctx, true);
|
|
|
|
}
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 10);
|
|
|
|
|
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 10);
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_bh_cancel(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
qemu_bh_cancel(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_bh_delete(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_bh_delete_from_cb(void)
|
|
|
|
{
|
|
|
|
BHTestData data1 = { .n = 0, .max = 1 };
|
|
|
|
|
|
|
|
data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data1.bh);
|
|
|
|
g_assert_cmpint(data1.n, ==, 0);
|
|
|
|
|
AioContext: do not rely on aio_poll(ctx, true) result to end a loop
Currently, whenever aio_poll(ctx, true) has completed all pending
work it returns true *and* the next call to aio_poll(ctx, true)
will not block.
This invariant has its roots in qemu_aio_flush()'s implementation
as "while (qemu_aio_wait()) {}". However, qemu_aio_flush() does
not exist anymore and bdrv_drain_all() is implemented differently;
and this invariant is complicated to maintain and subtly different
from the return value of GMainLoop's g_main_context_iteration.
All calls to aio_poll(ctx, true) except one are guarded by a
while() loop checking for a request to be incomplete, or a
BlockDriverState to be idle. The one remaining call (in
iothread.c) uses this to delay the aio_context_release/acquire
pair until the AioContext is quiescent, however:
- we can do the same just by using non-blocking aio_poll,
similar to how vl.c invokes main_loop_wait
- it is buggy, because it does not ensure that the AioContext
is released between an aio_notify and the next time the
iothread goes to sleep. This leads to hangs when stopping
the dataplane thread.
In the end, these semantics are a bad match for the current
users of AioContext. So modify that one exception in iothread.c,
which also fixes the hangs, as well as the testcase so that
it use the same idiom as the actual QEMU code.
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-07-09 12:49:46 +04:00
|
|
|
while (data1.n < data1.max) {
|
|
|
|
aio_poll(ctx, true);
|
|
|
|
}
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data1.n, ==, data1.max);
|
|
|
|
g_assert(data1.bh == NULL);
|
|
|
|
|
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_bh_delete_from_cb_many(void)
|
|
|
|
{
|
|
|
|
BHTestData data1 = { .n = 0, .max = 1 };
|
|
|
|
BHTestData data2 = { .n = 0, .max = 3 };
|
|
|
|
BHTestData data3 = { .n = 0, .max = 2 };
|
|
|
|
BHTestData data4 = { .n = 0, .max = 4 };
|
|
|
|
|
|
|
|
data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
|
|
|
|
data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
|
|
|
|
data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
|
|
|
|
data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data1.bh);
|
|
|
|
qemu_bh_schedule(data2.bh);
|
|
|
|
qemu_bh_schedule(data3.bh);
|
|
|
|
qemu_bh_schedule(data4.bh);
|
|
|
|
g_assert_cmpint(data1.n, ==, 0);
|
|
|
|
g_assert_cmpint(data2.n, ==, 0);
|
|
|
|
g_assert_cmpint(data3.n, ==, 0);
|
|
|
|
g_assert_cmpint(data4.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data1.n, ==, 1);
|
|
|
|
g_assert_cmpint(data2.n, ==, 1);
|
|
|
|
g_assert_cmpint(data3.n, ==, 1);
|
|
|
|
g_assert_cmpint(data4.n, ==, 1);
|
|
|
|
g_assert(data1.bh == NULL);
|
|
|
|
|
AioContext: do not rely on aio_poll(ctx, true) result to end a loop
Currently, whenever aio_poll(ctx, true) has completed all pending
work it returns true *and* the next call to aio_poll(ctx, true)
will not block.
This invariant has its roots in qemu_aio_flush()'s implementation
as "while (qemu_aio_wait()) {}". However, qemu_aio_flush() does
not exist anymore and bdrv_drain_all() is implemented differently;
and this invariant is complicated to maintain and subtly different
from the return value of GMainLoop's g_main_context_iteration.
All calls to aio_poll(ctx, true) except one are guarded by a
while() loop checking for a request to be incomplete, or a
BlockDriverState to be idle. The one remaining call (in
iothread.c) uses this to delay the aio_context_release/acquire
pair until the AioContext is quiescent, however:
- we can do the same just by using non-blocking aio_poll,
similar to how vl.c invokes main_loop_wait
- it is buggy, because it does not ensure that the AioContext
is released between an aio_notify and the next time the
iothread goes to sleep. This leads to hangs when stopping
the dataplane thread.
In the end, these semantics are a bad match for the current
users of AioContext. So modify that one exception in iothread.c,
which also fixes the hangs, as well as the testcase so that
it use the same idiom as the actual QEMU code.
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-07-09 12:49:46 +04:00
|
|
|
while (data1.n < data1.max ||
|
|
|
|
data2.n < data2.max ||
|
|
|
|
data3.n < data3.max ||
|
|
|
|
data4.n < data4.max) {
|
|
|
|
aio_poll(ctx, true);
|
|
|
|
}
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data1.n, ==, data1.max);
|
|
|
|
g_assert_cmpint(data2.n, ==, data2.max);
|
|
|
|
g_assert_cmpint(data3.n, ==, data3.max);
|
|
|
|
g_assert_cmpint(data4.n, ==, data4.max);
|
|
|
|
g_assert(data1.bh == NULL);
|
|
|
|
g_assert(data2.bh == NULL);
|
|
|
|
g_assert(data3.bh == NULL);
|
|
|
|
g_assert(data4.bh == NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_bh_flush(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
AioContext: do not rely on aio_poll(ctx, true) result to end a loop
Currently, whenever aio_poll(ctx, true) has completed all pending
work it returns true *and* the next call to aio_poll(ctx, true)
will not block.
This invariant has its roots in qemu_aio_flush()'s implementation
as "while (qemu_aio_wait()) {}". However, qemu_aio_flush() does
not exist anymore and bdrv_drain_all() is implemented differently;
and this invariant is complicated to maintain and subtly different
from the return value of GMainLoop's g_main_context_iteration.
All calls to aio_poll(ctx, true) except one are guarded by a
while() loop checking for a request to be incomplete, or a
BlockDriverState to be idle. The one remaining call (in
iothread.c) uses this to delay the aio_context_release/acquire
pair until the AioContext is quiescent, however:
- we can do the same just by using non-blocking aio_poll,
similar to how vl.c invokes main_loop_wait
- it is buggy, because it does not ensure that the AioContext
is released between an aio_notify and the next time the
iothread goes to sleep. This leads to hangs when stopping
the dataplane thread.
In the end, these semantics are a bad match for the current
users of AioContext. So modify that one exception in iothread.c,
which also fixes the hangs, as well as the testcase so that
it use the same idiom as the actual QEMU code.
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-07-09 12:49:46 +04:00
|
|
|
g_assert(aio_poll(ctx, true));
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_set_event_notifier(void)
|
|
|
|
{
|
|
|
|
EventNotifierTestData data = { .n = 0, .active = 0 };
|
|
|
|
event_notifier_init(&data.e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, event_ready_cb);
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, NULL);
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
event_notifier_cleanup(&data.e);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_wait_event_notifier(void)
|
|
|
|
{
|
|
|
|
EventNotifierTestData data = { .n = 0, .active = 1 };
|
|
|
|
event_notifier_init(&data.e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, event_ready_cb);
|
2015-07-21 17:07:49 +03:00
|
|
|
while (aio_poll(ctx, false));
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
g_assert_cmpint(data.active, ==, 1);
|
|
|
|
|
|
|
|
event_notifier_set(&data.e);
|
|
|
|
g_assert(aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
g_assert_cmpint(data.active, ==, 0);
|
|
|
|
|
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
g_assert_cmpint(data.active, ==, 0);
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, NULL);
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
event_notifier_cleanup(&data.e);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_flush_event_notifier(void)
|
|
|
|
{
|
|
|
|
EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
|
|
|
|
event_notifier_init(&data.e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, event_ready_cb);
|
2015-07-21 17:07:49 +03:00
|
|
|
while (aio_poll(ctx, false));
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
g_assert_cmpint(data.active, ==, 10);
|
|
|
|
|
|
|
|
event_notifier_set(&data.e);
|
|
|
|
g_assert(aio_poll(ctx, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
g_assert_cmpint(data.active, ==, 9);
|
|
|
|
g_assert(aio_poll(ctx, false));
|
|
|
|
|
2013-04-17 13:01:02 +04:00
|
|
|
wait_until_inactive(&data);
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 10);
|
|
|
|
g_assert_cmpint(data.active, ==, 0);
|
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, NULL);
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert(!aio_poll(ctx, false));
|
|
|
|
event_notifier_cleanup(&data.e);
|
|
|
|
}
|
|
|
|
|
2015-10-23 06:08:14 +03:00
|
|
|
/* Verify that aio_disable_external() nests: an external notifier stays
 * masked until every disable has been matched by an enable.
 */
static void test_aio_external_client(void)
{
    int i, j;

    /* Repeat with nesting depths 1 and 2. */
    for (i = 1; i < 3; i++) {
        EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
        event_notifier_init(&data.e, false);
        /* Register as an *external* client (is_external = true). */
        aio_set_event_notifier(ctx, &data.e, true, event_ready_cb);
        event_notifier_set(&data.e);
        for (j = 0; j < i; j++) {
            aio_disable_external(ctx);
        }
        for (j = 0; j < i; j++) {
            /* While any disable is outstanding, the event is not dispatched
             * by aio_poll(); clear and re-arm it by hand each round.
             */
            assert(!aio_poll(ctx, false));
            assert(event_notifier_test_and_clear(&data.e));
            event_notifier_set(&data.e);
            aio_enable_external(ctx);
        }
        /* Fully re-enabled: the pending event is finally dispatched. */
        assert(aio_poll(ctx, false));
        set_event_notifier(ctx, &data.e, NULL);
        event_notifier_cleanup(&data.e);
    }
}
|
|
|
|
|
2012-11-23 19:13:23 +04:00
|
|
|
/* With no active descriptor, aio_poll() may or may not dispatch a pending
 * notifier; once a second, active notifier exists, dispatch is guaranteed.
 */
static void test_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, aio_poll may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, true));
    /* Reset the counter: whether the callback ran above is unspecified. */
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    /* Now each set of data.e is reliably dispatched exactly once. */
    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    /* Fire the dummy and drain it; data.n must be unaffected. */
    event_notifier_set(&dummy.e);
    wait_until_inactive(&dummy);
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}
|
|
|
|
|
2013-08-21 19:03:06 +04:00
|
|
|
/* A 750 ms realtime timer fires only after its deadline, re-arms itself once
 * (max = 2), and then stays quiet.  Wall-clock dependent by design.
 */
static void test_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    aio_poll(ctx, false);

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    timer_mod(&data.timer,
              qemu_clock_get_ns(data.clock_type) +
              data.ns);

    g_assert_cmpint(data.n, ==, 0);

    /* timer_mod may well cause an event notifer to have gone off,
     * so clear that
     */
    do {} while (aio_poll(ctx, false));

    /* Deadline not reached yet: no dispatch. */
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Sleep past the 750 ms deadline; the callback only runs inside poll. */
    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    /* timer_mod called by our callback */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    /* Second expiration arrives via a blocking poll. */
    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    /* As max is now 2, an event notifier should not have gone off */

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}
|
|
|
|
|
2012-11-23 19:13:23 +04:00
|
|
|
/* Now the same tests, using the context as a GSource. They are
|
|
|
|
* very similar to the ones above, with g_main_context_iteration
|
|
|
|
* replacing aio_poll. However:
|
|
|
|
* - sometimes both the AioContext and the glib main loop wake
|
|
|
|
* themselves up. Hence, some "g_assert(!aio_poll(ctx, false));"
|
|
|
|
* are replaced by "while (g_main_context_iteration(NULL, false));".
|
2012-12-04 19:12:18 +04:00
|
|
|
* - there is no exact replacement for a blocking wait.
|
2012-11-23 19:13:23 +04:00
|
|
|
* "while (g_main_context_iteration(NULL, true)" seems to work,
|
|
|
|
* but it is not documented _why_ it works. For these tests a
|
|
|
|
* non-blocking loop like "while (g_main_context_iteration(NULL, false)"
|
|
|
|
* works well, and that's what I am using.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void test_source_flush(void)
|
|
|
|
{
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
aio_notify(ctx);
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_bh_schedule(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(g_main_context_iteration(NULL, true));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_bh_schedule10(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0, .max = 10 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
g_assert(g_main_context_iteration(NULL, true));
|
|
|
|
g_assert_cmpint(data.n, ==, 2);
|
|
|
|
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 10);
|
|
|
|
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 10);
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_bh_cancel(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
qemu_bh_cancel(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_bh_delete(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_bh_delete_from_cb(void)
|
|
|
|
{
|
|
|
|
BHTestData data1 = { .n = 0, .max = 1 };
|
|
|
|
|
|
|
|
data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data1.bh);
|
|
|
|
g_assert_cmpint(data1.n, ==, 0);
|
|
|
|
|
|
|
|
g_main_context_iteration(NULL, true);
|
|
|
|
g_assert_cmpint(data1.n, ==, data1.max);
|
|
|
|
g_assert(data1.bh == NULL);
|
|
|
|
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_bh_delete_from_cb_many(void)
|
|
|
|
{
|
|
|
|
BHTestData data1 = { .n = 0, .max = 1 };
|
|
|
|
BHTestData data2 = { .n = 0, .max = 3 };
|
|
|
|
BHTestData data3 = { .n = 0, .max = 2 };
|
|
|
|
BHTestData data4 = { .n = 0, .max = 4 };
|
|
|
|
|
|
|
|
data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
|
|
|
|
data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
|
|
|
|
data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
|
|
|
|
data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data1.bh);
|
|
|
|
qemu_bh_schedule(data2.bh);
|
|
|
|
qemu_bh_schedule(data3.bh);
|
|
|
|
qemu_bh_schedule(data4.bh);
|
|
|
|
g_assert_cmpint(data1.n, ==, 0);
|
|
|
|
g_assert_cmpint(data2.n, ==, 0);
|
|
|
|
g_assert_cmpint(data3.n, ==, 0);
|
|
|
|
g_assert_cmpint(data4.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data1.n, ==, 1);
|
|
|
|
g_assert_cmpint(data2.n, ==, 1);
|
|
|
|
g_assert_cmpint(data3.n, ==, 1);
|
|
|
|
g_assert_cmpint(data4.n, ==, 1);
|
|
|
|
g_assert(data1.bh == NULL);
|
|
|
|
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data1.n, ==, data1.max);
|
|
|
|
g_assert_cmpint(data2.n, ==, data2.max);
|
|
|
|
g_assert_cmpint(data3.n, ==, data3.max);
|
|
|
|
g_assert_cmpint(data4.n, ==, data4.max);
|
|
|
|
g_assert(data1.bh == NULL);
|
|
|
|
g_assert(data2.bh == NULL);
|
|
|
|
g_assert(data3.bh == NULL);
|
|
|
|
g_assert(data4.bh == NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_bh_flush(void)
|
|
|
|
{
|
|
|
|
BHTestData data = { .n = 0 };
|
|
|
|
data.bh = aio_bh_new(ctx, bh_test_cb, &data);
|
|
|
|
|
|
|
|
qemu_bh_schedule(data.bh);
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
g_assert(g_main_context_iteration(NULL, true));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
qemu_bh_delete(data.bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_set_event_notifier(void)
|
|
|
|
{
|
|
|
|
EventNotifierTestData data = { .n = 0, .active = 0 };
|
|
|
|
event_notifier_init(&data.e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, event_ready_cb);
|
2012-11-23 19:13:23 +04:00
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, NULL);
|
2012-11-23 19:13:23 +04:00
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
event_notifier_cleanup(&data.e);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_wait_event_notifier(void)
|
|
|
|
{
|
|
|
|
EventNotifierTestData data = { .n = 0, .active = 1 };
|
|
|
|
event_notifier_init(&data.e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, event_ready_cb);
|
2015-07-21 17:07:49 +03:00
|
|
|
while (g_main_context_iteration(NULL, false));
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
g_assert_cmpint(data.active, ==, 1);
|
|
|
|
|
|
|
|
event_notifier_set(&data.e);
|
|
|
|
g_assert(g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
g_assert_cmpint(data.active, ==, 0);
|
|
|
|
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
g_assert_cmpint(data.active, ==, 0);
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, NULL);
|
2012-11-23 19:13:23 +04:00
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
event_notifier_cleanup(&data.e);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_flush_event_notifier(void)
|
|
|
|
{
|
|
|
|
EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
|
|
|
|
event_notifier_init(&data.e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, event_ready_cb);
|
2015-07-21 17:07:49 +03:00
|
|
|
while (g_main_context_iteration(NULL, false));
|
2012-11-23 19:13:23 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
g_assert_cmpint(data.active, ==, 10);
|
|
|
|
|
|
|
|
event_notifier_set(&data.e);
|
|
|
|
g_assert(g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
g_assert_cmpint(data.active, ==, 9);
|
|
|
|
g_assert(g_main_context_iteration(NULL, false));
|
|
|
|
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 10);
|
|
|
|
g_assert_cmpint(data.active, ==, 0);
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, NULL);
|
2012-11-23 19:13:23 +04:00
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
event_notifier_cleanup(&data.e);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_source_wait_event_notifier_noflush(void)
|
|
|
|
{
|
|
|
|
EventNotifierTestData data = { .n = 0 };
|
|
|
|
EventNotifierTestData dummy = { .n = 0, .active = 1 };
|
|
|
|
|
|
|
|
event_notifier_init(&data.e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, event_ready_cb);
|
2012-11-23 19:13:23 +04:00
|
|
|
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
|
|
|
/* Until there is an active descriptor, glib may or may not call
|
|
|
|
* event_ready_cb. Still, it must not block. */
|
|
|
|
event_notifier_set(&data.e);
|
|
|
|
g_main_context_iteration(NULL, true);
|
|
|
|
data.n = 0;
|
|
|
|
|
|
|
|
/* An active event notifier forces aio_poll to look at EventNotifiers. */
|
|
|
|
event_notifier_init(&dummy.e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &dummy.e, event_ready_cb);
|
2012-11-23 19:13:23 +04:00
|
|
|
|
|
|
|
event_notifier_set(&data.e);
|
|
|
|
g_assert(g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
|
|
|
|
|
|
|
event_notifier_set(&data.e);
|
|
|
|
g_assert(g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 2);
|
|
|
|
g_assert(!g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 2);
|
|
|
|
|
|
|
|
event_notifier_set(&dummy.e);
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 2);
|
|
|
|
g_assert_cmpint(dummy.n, ==, 1);
|
|
|
|
g_assert_cmpint(dummy.active, ==, 0);
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &dummy.e, NULL);
|
2012-11-23 19:13:23 +04:00
|
|
|
event_notifier_cleanup(&dummy.e);
|
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &data.e, NULL);
|
2012-11-23 19:13:23 +04:00
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
g_assert_cmpint(data.n, ==, 2);
|
|
|
|
|
|
|
|
event_notifier_cleanup(&data.e);
|
|
|
|
}
|
|
|
|
|
2013-08-21 19:03:06 +04:00
|
|
|
static void test_source_timer_schedule(void)
|
|
|
|
{
|
|
|
|
TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
|
|
|
|
.max = 2,
|
timer: set vm_clock disabled default
(commit 80dcfb8532ae76343109a48f12ba8ca1c505c179)
Upon migration, the code use a timer based on vm_clock for 1ns
in the future from post_load to do the event send in case host_connected
differs between migration source and target.
However, it's not guaranteed that the apic is ready to inject irqs into
the guest, and the irq line remained high, resulting in any future interrupts
going unnoticed by the guest as well.
That's because 1) the migration coroutine is not blocked when it get EAGAIN
while reading QEMUFile. 2) The vm_clock is enabled default currently, it doesn't
rely on the calling of vm_start(), that means vm_clock timers can run before
VCPUs are running.
So, let's set the vm_clock disabled default, keep the initial intention of
design for vm_clock timers.
Meanwhile, change the test-aio usecase, using QEMU_CLOCK_REALTIME instead of
QEMU_CLOCK_VIRTUAL as the block code does.
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Dr. David Alan Gilbert <dgilbert@redhat.com>
CC: qemu-stable@nongnu.org
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
Message-Id: <1470728955-90600-1-git-send-email-arei.gonglei@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-08-09 10:49:15 +03:00
|
|
|
.clock_type = QEMU_CLOCK_REALTIME };
|
2014-07-09 13:53:06 +04:00
|
|
|
EventNotifier e;
|
2013-08-21 19:03:06 +04:00
|
|
|
int64_t expiry;
|
|
|
|
|
|
|
|
/* aio_poll will not block to wait for timers to complete unless it has
|
|
|
|
* an fd to wait on. Fixing this breaks other tests. So create a dummy one.
|
|
|
|
*/
|
2014-07-09 13:53:06 +04:00
|
|
|
event_notifier_init(&e, false);
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &e, dummy_io_handler_read);
|
2013-08-21 19:03:06 +04:00
|
|
|
do {} while (g_main_context_iteration(NULL, false));
|
|
|
|
|
|
|
|
aio_timer_init(ctx, &data.timer, data.clock_type,
|
|
|
|
SCALE_NS, timer_test_cb, &data);
|
|
|
|
expiry = qemu_clock_get_ns(data.clock_type) +
|
|
|
|
data.ns;
|
|
|
|
timer_mod(&data.timer, expiry);
|
|
|
|
|
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
2013-08-30 02:32:14 +04:00
|
|
|
g_usleep(1 * G_USEC_PER_SEC);
|
2013-08-21 19:03:06 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 0);
|
|
|
|
|
2014-07-07 17:18:03 +04:00
|
|
|
g_assert(g_main_context_iteration(NULL, true));
|
2013-08-21 19:03:06 +04:00
|
|
|
g_assert_cmpint(data.n, ==, 1);
|
2014-07-07 17:18:03 +04:00
|
|
|
expiry += data.ns;
|
2013-08-21 19:03:06 +04:00
|
|
|
|
2014-07-07 17:18:03 +04:00
|
|
|
while (data.n < 2) {
|
|
|
|
g_main_context_iteration(NULL, true);
|
|
|
|
}
|
2013-08-21 19:03:06 +04:00
|
|
|
|
|
|
|
g_assert_cmpint(data.n, ==, 2);
|
2014-07-07 17:18:03 +04:00
|
|
|
g_assert(qemu_clock_get_ns(data.clock_type) > expiry);
|
2013-08-21 19:03:06 +04:00
|
|
|
|
2015-10-23 06:08:05 +03:00
|
|
|
set_event_notifier(ctx, &e, NULL);
|
2014-07-09 13:53:06 +04:00
|
|
|
event_notifier_cleanup(&e);
|
2013-08-21 19:03:06 +04:00
|
|
|
|
|
|
|
timer_del(&data.timer);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-11-23 19:13:23 +04:00
|
|
|
/* End of tests. */
|
|
|
|
|
|
|
|
int main(int argc, char **argv)
|
|
|
|
{
|
2014-09-18 15:30:49 +04:00
|
|
|
Error *local_error = NULL;
|
2012-11-23 19:13:23 +04:00
|
|
|
GSource *src;
|
|
|
|
|
2013-08-21 19:02:49 +04:00
|
|
|
init_clocks();
|
|
|
|
|
2014-09-18 15:30:49 +04:00
|
|
|
ctx = aio_context_new(&local_error);
|
|
|
|
if (!ctx) {
|
2015-12-18 18:35:14 +03:00
|
|
|
error_reportf_err(local_error, "Failed to create AIO Context: ");
|
2014-09-18 15:30:49 +04:00
|
|
|
exit(1);
|
|
|
|
}
|
2012-11-23 19:13:23 +04:00
|
|
|
src = aio_get_g_source(ctx);
|
|
|
|
g_source_attach(src, NULL);
|
|
|
|
g_source_unref(src);
|
|
|
|
|
|
|
|
while (g_main_context_iteration(NULL, false));
|
|
|
|
|
|
|
|
g_test_init(&argc, &argv, NULL);
|
2014-03-03 14:30:04 +04:00
|
|
|
g_test_add_func("/aio/acquire", test_acquire);
|
2012-11-23 19:13:23 +04:00
|
|
|
g_test_add_func("/aio/bh/schedule", test_bh_schedule);
|
|
|
|
g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
|
|
|
|
g_test_add_func("/aio/bh/cancel", test_bh_cancel);
|
|
|
|
g_test_add_func("/aio/bh/delete", test_bh_delete);
|
|
|
|
g_test_add_func("/aio/bh/callback-delete/one", test_bh_delete_from_cb);
|
|
|
|
g_test_add_func("/aio/bh/callback-delete/many", test_bh_delete_from_cb_many);
|
|
|
|
g_test_add_func("/aio/bh/flush", test_bh_flush);
|
|
|
|
g_test_add_func("/aio/event/add-remove", test_set_event_notifier);
|
|
|
|
g_test_add_func("/aio/event/wait", test_wait_event_notifier);
|
|
|
|
g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
|
|
|
|
g_test_add_func("/aio/event/flush", test_flush_event_notifier);
|
2015-10-23 06:08:14 +03:00
|
|
|
g_test_add_func("/aio/external-client", test_aio_external_client);
|
2013-08-21 19:03:06 +04:00
|
|
|
g_test_add_func("/aio/timer/schedule", test_timer_schedule);
|
2012-11-23 19:13:23 +04:00
|
|
|
|
|
|
|
g_test_add_func("/aio-gsource/flush", test_source_flush);
|
|
|
|
g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
|
|
|
|
g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
|
|
|
|
g_test_add_func("/aio-gsource/bh/cancel", test_source_bh_cancel);
|
|
|
|
g_test_add_func("/aio-gsource/bh/delete", test_source_bh_delete);
|
|
|
|
g_test_add_func("/aio-gsource/bh/callback-delete/one", test_source_bh_delete_from_cb);
|
|
|
|
g_test_add_func("/aio-gsource/bh/callback-delete/many", test_source_bh_delete_from_cb_many);
|
|
|
|
g_test_add_func("/aio-gsource/bh/flush", test_source_bh_flush);
|
|
|
|
g_test_add_func("/aio-gsource/event/add-remove", test_source_set_event_notifier);
|
|
|
|
g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
|
|
|
|
g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
|
|
|
|
g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
|
2013-08-21 19:03:06 +04:00
|
|
|
g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
|
2012-11-23 19:13:23 +04:00
|
|
|
return g_test_run();
|
|
|
|
}
|