/*
 * Blockjob transactions tests
 *
 * Copyright Red Hat, Inc. 2015
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"

typedef struct {
    BlockJob common;
    unsigned int iterations;
    bool use_timer;
    int rc;
    int *result;
} TestBlockJob;

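/* Clean callback: drop the reference to the BlockDriverState that
 * test_block_job_start() opened for this job.
 */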
static void test_block_job_clean(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    BlockDriverState *bs = blk_bs(bjob->blk);

    bdrv_unref(bs);
}

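/* Job main loop: sleep or yield once per requested iteration, stop early if
 * the job is cancelled, then finish with the preconfigured return code.
 */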
static int coroutine_fn test_block_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    while (s->iterations--) {
        if (s->use_timer) {
            job_sleep_ns(job, 0);
        } else {
            job_yield(job);
        }

        if (job_is_cancelled(job)) {
            break;
        }
    }

    return s->rc;
}

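/* Completion callback state: lets test_block_job_cb() report the job's final
 * return code back to the test through the result pointer.
 */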
typedef struct {
    TestBlockJob *job;
    int *result;
} TestBlockJobCBData;

static void test_block_job_cb(void *opaque, int ret)
{
    TestBlockJobCBData *data = opaque;

    if (!ret && job_is_cancelled(&data->job->common.job)) {
        ret = -ECANCELED;
    }
    *data->result = ret;
    g_free(data);
}

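/* Plug the test implementations of .run and .clean into the generic job
 * driver; the remaining callbacks use the stock block job helpers.
 */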
static const BlockJobDriver test_block_job_driver = {
    .job_driver = {
        .instance_size = sizeof(TestBlockJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .drain         = block_job_drain,
        .run           = test_block_job_run,
        .clean         = test_block_job_clean,
    },
};

/* Create a block job that completes with a given return code after a given
 * number of event loop iterations.  The return code is stored in the given
 * result pointer.
 *
 * The event loop iterations can either be handled automatically with a 0 delay
 * timer, or they can be stepped manually by entering the coroutine.
 */
static BlockJob *test_block_job_start(unsigned int iterations,
                                      bool use_timer,
                                      int rc, int *result, JobTxn *txn)
{
    BlockDriverState *bs;
    TestBlockJob *s;
    TestBlockJobCBData *data;
    static unsigned counter;
    char job_id[24];

    data = g_new0(TestBlockJobCBData, 1);

    bs = bdrv_open("null-co://", NULL, NULL, 0, &error_abort);
    g_assert_nonnull(bs);

    snprintf(job_id, sizeof(job_id), "job%u", counter++);
    s = block_job_create(job_id, &test_block_job_driver, txn, bs,
                         0, BLK_PERM_ALL, 0, JOB_DEFAULT,
                         test_block_job_cb, data, &error_abort);
    s->iterations = iterations;
    s->use_timer = use_timer;
    s->rc = rc;
    s->result = result;
    data->job = s;
    data->result = result;
    return &s->common;
}

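/* Run a single job in its own transaction and check that it completes with
 * the expected return code (0, -EIO or -ECANCELED).
 */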
static void test_single_job(int expected)
{
    BlockJob *job;
    JobTxn *txn;
    int result = -EINPROGRESS;

    txn = job_txn_new();
    job = test_block_job_start(1, true, expected, &result, txn);
    job_start(&job->job);

    if (expected == -ECANCELED) {
        job_cancel(&job->job, false);
    }

    while (result == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }
    g_assert_cmpint(result, ==, expected);

    job_txn_unref(txn);
}

static void test_single_job_success(void)
{
    test_single_job(0);
}

static void test_single_job_failure(void)
{
    test_single_job(-EIO);
}

static void test_single_job_cancel(void)
{
    test_single_job(-ECANCELED);
}

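/* Run two jobs in the same transaction.  Failure or cancellation of either
 * job is expected to cancel its partner, so the expected results are
 * adjusted accordingly before the assertions.
 */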
static void test_pair_jobs(int expected1, int expected2)
{
    BlockJob *job1;
    BlockJob *job2;
    JobTxn *txn;
    int result1 = -EINPROGRESS;
    int result2 = -EINPROGRESS;

    txn = job_txn_new();
    job1 = test_block_job_start(1, true, expected1, &result1, txn);
    job2 = test_block_job_start(2, true, expected2, &result2, txn);
    job_start(&job1->job);
    job_start(&job2->job);

    /* Release our reference now to trigger as many nice
     * use-after-free bugs as possible.
     */
    job_txn_unref(txn);

    if (expected1 == -ECANCELED) {
        job_cancel(&job1->job, false);
    }
    if (expected2 == -ECANCELED) {
        job_cancel(&job2->job, false);
    }

    while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }

    /* Failure or cancellation of one job cancels the other job */
    if (expected1 != 0) {
        expected2 = -ECANCELED;
    } else if (expected2 != 0) {
        expected1 = -ECANCELED;
    }

    g_assert_cmpint(result1, ==, expected1);
    g_assert_cmpint(result2, ==, expected2);
}

static void test_pair_jobs_success(void)
{
    test_pair_jobs(0, 0);
}

static void test_pair_jobs_failure(void)
{
    /* Test both orderings.  The two jobs run for a different number of
     * iterations so the code path is different depending on which job fails
     * first.
     */
    test_pair_jobs(-EIO, 0);
    test_pair_jobs(0, -EIO);
}

static void test_pair_jobs_cancel(void)
{
    test_pair_jobs(-ECANCELED, 0);
    test_pair_jobs(0, -ECANCELED);
}

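/* Race a cancellation of job1 against job2 finishing its work on its own;
 * both jobs are expected to end up cancelled.
 */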
static void test_pair_jobs_fail_cancel_race(void)
{
    BlockJob *job1;
    BlockJob *job2;
    JobTxn *txn;
    int result1 = -EINPROGRESS;
    int result2 = -EINPROGRESS;

    txn = job_txn_new();
    job1 = test_block_job_start(1, true, -ECANCELED, &result1, txn);
    job2 = test_block_job_start(2, false, 0, &result2, txn);
    job_start(&job1->job);
    job_start(&job2->job);

    job_cancel(&job1->job, false);

    /* Now make job2 finish before the main loop kicks jobs.  This simulates
     * the race between a pending kick and another job completing.
     */
    job_enter(&job2->job);
    job_enter(&job2->job);

    while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }

    g_assert_cmpint(result1, ==, -ECANCELED);
    g_assert_cmpint(result2, ==, -ECANCELED);

    job_txn_unref(txn);
}

int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_abort);
    bdrv_init();

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/single/success", test_single_job_success);
    g_test_add_func("/single/failure", test_single_job_failure);
    g_test_add_func("/single/cancel", test_single_job_cancel);
    g_test_add_func("/pair/success", test_pair_jobs_success);
    g_test_add_func("/pair/failure", test_pair_jobs_failure);
    g_test_add_func("/pair/cancel", test_pair_jobs_cancel);
    g_test_add_func("/pair/fail-cancel-race", test_pair_jobs_fail_cancel_race);
    return g_test_run();
}