/*
 * Blockjob tests
 *
 * Copyright Igalia, S.L. 2016
 *
 * Authors:
 *  Alberto Garcia   <berto@igalia.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"

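/*
 * A minimal job driver for the ID tests below: those jobs are created and
 * failed early without ever being started, so no .run callback is needed.
 */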
static const BlockJobDriver test_block_job_driver = {
    .job_driver = {
        .instance_size = sizeof(BlockJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
    },
};

static void block_job_cb(void *opaque, int ret)
{
}

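/*
 * Creates a job with the given ID and checks that creation succeeds or
 * fails as expected. The (0, BLK_PERM_ALL, 0) arguments correspond to the
 * perm, shared_perm and speed parameters of the block_job_create()
 * prototype.
 */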
static BlockJob *mk_job(BlockBackend *blk, const char *id,
                        const BlockJobDriver *drv, bool should_succeed,
                        int flags)
{
    BlockJob *job;
    Error *err = NULL;

    job = block_job_create(id, drv, NULL, blk_bs(blk),
                           0, BLK_PERM_ALL, 0, flags, block_job_cb,
                           NULL, &err);
    if (should_succeed) {
        g_assert_null(err);
        g_assert_nonnull(job);
        if (id) {
            g_assert_cmpstr(job->job.id, ==, id);
        } else {
            g_assert_cmpstr(job->job.id, ==, blk_name(blk));
        }
    } else {
        error_free_or_abort(&err);
        g_assert_null(job);
    }

    return job;
}

static BlockJob *do_test_id(BlockBackend *blk, const char *id,
                            bool should_succeed)
{
    return mk_job(blk, id, &test_block_job_driver,
                  should_succeed, JOB_DEFAULT);
}

/* This creates a BlockBackend (optionally with a name) with a
 * BlockDriverState inserted. */
static BlockBackend *create_blk(const char *name)
{
    /* No I/O is performed on this device */
    BlockBackend *blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    BlockDriverState *bs;

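    /* Open a null block device; "read-zeroes" makes its reads return
     * zeroes instead of leaving the buffer uninitialized. */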
    QDict *opt = qdict_new();
    qdict_put_str(opt, "file.read-zeroes", "on");
    bs = bdrv_open("null-co://", NULL, opt, 0, &error_abort);
    g_assert_nonnull(bs);

    blk_insert_bs(blk, bs, &error_abort);
    bdrv_unref(bs);

    if (name) {
        Error *err = NULL;
        monitor_add_blk(blk, name, &err);
        g_assert_null(err);
    }

    return blk;
}

/* This destroys the backend */
static void destroy_blk(BlockBackend *blk)
{
    if (blk_name(blk)[0] != '\0') {
        monitor_remove_blk(blk);
    }

    blk_remove_bs(blk);
    blk_unref(blk);
}

static void test_job_ids(void)
{
    BlockBackend *blk[3];
    BlockJob *job[3];

    blk[0] = create_blk(NULL);
    blk[1] = create_blk("drive1");
    blk[2] = create_blk("drive2");

    /* No job ID provided and the block backend has no name */
    job[0] = do_test_id(blk[0], NULL, false);

    /* These are all invalid job IDs */
    job[0] = do_test_id(blk[0], "0id", false);
    job[0] = do_test_id(blk[0], "", false);
    job[0] = do_test_id(blk[0], " ", false);
    job[0] = do_test_id(blk[0], "123", false);
    job[0] = do_test_id(blk[0], "_id", false);
    job[0] = do_test_id(blk[0], "-id", false);
    job[0] = do_test_id(blk[0], ".id", false);
    job[0] = do_test_id(blk[0], "#id", false);

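    /* (A well-formed ID starts with a letter and otherwise contains only
     * letters, digits, '-', '.' and '_'; see id_wellformed().) */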
    /* This one is valid */
    job[0] = do_test_id(blk[0], "id0", true);

    /* We can have two jobs in the same BDS */
    job[1] = do_test_id(blk[0], "id1", true);
    job_early_fail(&job[1]->job);

    /* Duplicate job IDs are not allowed */
    job[1] = do_test_id(blk[1], "id0", false);

    /* But once job[0] finishes we can reuse its ID */
    job_early_fail(&job[0]->job);
    job[1] = do_test_id(blk[1], "id0", true);

    /* No job ID specified, defaults to the backend name ('drive1') */
    job_early_fail(&job[1]->job);
    job[1] = do_test_id(blk[1], NULL, true);

    /* Duplicate job ID */
    job[2] = do_test_id(blk[2], "drive1", false);

    /* The ID of job[2] would default to 'drive2' but it is already in use */
    job[0] = do_test_id(blk[0], "drive2", true);
    job[2] = do_test_id(blk[2], NULL, false);

    /* This one is valid */
    job[2] = do_test_id(blk[2], "id_2", true);

    job_early_fail(&job[0]->job);
    job_early_fail(&job[1]->job);
    job_early_fail(&job[2]->job);

    destroy_blk(blk[0]);
    destroy_blk(blk[1]);
    destroy_blk(blk[2]);
}

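/*
 * CancelJob is steered from the test code via two flags: should_converge
 * makes the job transition to READY, should_complete makes its run loop
 * return (see cancel_job_run() below).
 */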
typedef struct CancelJob {
    BlockJob common;
    BlockBackend *blk;
    bool should_converge;
    bool should_complete;
} CancelJob;

static void cancel_job_complete(Job *job, Error **errp)
{
    CancelJob *s = container_of(job, CancelJob, common.job);
    s->should_complete = true;
}

static int coroutine_fn cancel_job_run(Job *job, Error **errp)
{
    CancelJob *s = container_of(job, CancelJob, common.job);

    while (!s->should_complete) {
        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        if (!job_is_ready(&s->common.job) && s->should_converge) {
            job_transition_to_ready(&s->common.job);
        }

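        /* Sleep 100 us per iteration; job_sleep_ns() also serves as a
         * pause point, which is what lets job_user_pause() in the tests
         * below take effect. */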
        job_sleep_ns(&s->common.job, 100000);
    }

    return 0;
}

static const BlockJobDriver test_cancel_driver = {
    .job_driver = {
        .instance_size = sizeof(CancelJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .run           = cancel_job_run,
        .complete      = cancel_job_complete,
    },
};

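/*
 * Creates a job that has to be finalized and dismissed by hand
 * (JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS). The extra job_ref() keeps
 * the Job alive so its status can still be inspected after it concludes;
 * cancel_common() drops that reference.
 */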
static CancelJob *create_common(Job **pjob)
{
    BlockBackend *blk;
    Job *job;
    BlockJob *bjob;
    CancelJob *s;

    blk = create_blk(NULL);
    bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
                  JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
    job = &bjob->job;
    job_ref(job);
    assert(job->status == JOB_STATUS_CREATED);
    s = container_of(bjob, CancelJob, common);
    s->blk = blk;

    *pjob = job;
    return s;
}

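/*
 * Cancels the job and tears everything down. A job that never ran (or one
 * that already reached CONCLUDED) goes straight to NULL when cancelled;
 * from any other state the cancelled job ends up CONCLUDED and, having
 * been created with JOB_MANUAL_DISMISS, must be dismissed explicitly.
 */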
static void cancel_common(CancelJob *s)
{
    BlockJob *job = &s->common;
    BlockBackend *blk = s->blk;
    JobStatus sts = job->job.status;
    AioContext *ctx;

    ctx = job->job.aio_context;
    aio_context_acquire(ctx);

    job_cancel_sync(&job->job);
    if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
        Job *dummy = &job->job;
        job_dismiss(&dummy, &error_abort);
    }
    assert(job->job.status == JOB_STATUS_NULL);
    job_unref(&job->job);
    destroy_blk(blk);

    aio_context_release(ctx);
}

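/*
 * Each of the tests below drives a CancelJob into one specific state and
 * then checks that cancelling it from there works.
 */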
static void test_cancel_created(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);
    cancel_common(s);
}

static void test_cancel_running(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    cancel_common(s);
}

static void test_cancel_paused(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    job_user_pause(job, &error_abort);
    job_enter(job);
    assert(job->status == JOB_STATUS_PAUSED);

    cancel_common(s);
}

static void test_cancel_ready(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert(job->status == JOB_STATUS_READY);

    cancel_common(s);
}

static void test_cancel_standby(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert(job->status == JOB_STATUS_READY);

    job_user_pause(job, &error_abort);
    job_enter(job);
    assert(job->status == JOB_STATUS_STANDBY);

    cancel_common(s);
}

static void test_cancel_pending(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert(job->status == JOB_STATUS_READY);

    job_complete(job, &error_abort);
    job_enter(job);
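    /* The job's coroutine returns on its next iteration and defers its
     * completion to the main loop; the job stays READY until one more
     * aio_poll() runs that deferred work and moves it to PENDING, where
     * manual finalization keeps it. */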
    while (!job->deferred_to_main_loop) {
        aio_poll(qemu_get_aio_context(), true);
    }
    assert(job->status == JOB_STATUS_READY);
    aio_poll(qemu_get_aio_context(), true);
    assert(job->status == JOB_STATUS_PENDING);

    cancel_common(s);
}

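/*
 * Like test_cancel_pending, but the job is additionally finalized. Since
 * job_finalize() is called directly here rather than through QMP, the
 * test itself has to take the job's AioContext lock around the call.
 */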
static void test_cancel_concluded(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert(job->status == JOB_STATUS_READY);

    job_complete(job, &error_abort);
    job_enter(job);
    while (!job->deferred_to_main_loop) {
        aio_poll(qemu_get_aio_context(), true);
    }
    assert(job->status == JOB_STATUS_READY);
    aio_poll(qemu_get_aio_context(), true);
    assert(job->status == JOB_STATUS_PENDING);

    aio_context_acquire(job->aio_context);
    job_finalize(job, &error_abort);
    aio_context_release(job->aio_context);
    assert(job->status == JOB_STATUS_CONCLUDED);

    cancel_common(s);
}

int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_abort);
    bdrv_init();

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/blockjob/ids", test_job_ids);
    g_test_add_func("/blockjob/cancel/created", test_cancel_created);
    g_test_add_func("/blockjob/cancel/running", test_cancel_running);
    g_test_add_func("/blockjob/cancel/paused", test_cancel_paused);
    g_test_add_func("/blockjob/cancel/ready", test_cancel_ready);
    g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
    g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
    g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
    return g_test_run();
}