/*
 * Blockjob tests
 *
 * Copyright Igalia, S.L. 2016
 *
 * Authors:
 *  Alberto Garcia   <berto@igalia.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "qemu/main-loop.h"
|
2016-10-27 19:07:00 +03:00
|
|
|
#include "block/blockjob_int.h"
|
2016-07-29 17:31:41 +03:00
|
|
|
#include "sysemu/block-backend.h"
|
2019-07-29 15:46:00 +03:00
|
|
|
#include "qapi/qmp/qdict.h"
|
2021-04-09 15:04:21 +03:00
|
|
|
#include "iothread.h"
|
2016-07-29 17:31:41 +03:00
|
|
|
|
|
|
|
/* Minimal job driver used by the ID tests: it has no .run callback, so jobs
 * created with it are never started; they only exercise creation/teardown. */
static const BlockJobDriver test_block_job_driver = {
    .job_driver = {
        .instance_size = sizeof(BlockJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
    },
};
|
|
|
|
|
|
|
|
/* Completion callback passed to block_job_create(); the tests do not care
 * about the job's result, so this intentionally does nothing. */
static void block_job_cb(void *opaque, int ret)
{
}
|
|
|
|
|
2018-03-10 11:27:46 +03:00
|
|
|
static BlockJob *mk_job(BlockBackend *blk, const char *id,
|
|
|
|
const BlockJobDriver *drv, bool should_succeed,
|
|
|
|
int flags)
|
2016-07-29 17:31:41 +03:00
|
|
|
{
|
|
|
|
BlockJob *job;
|
2019-12-04 12:36:23 +03:00
|
|
|
Error *err = NULL;
|
2016-07-29 17:31:41 +03:00
|
|
|
|
2018-03-10 11:27:46 +03:00
|
|
|
job = block_job_create(id, drv, NULL, blk_bs(blk),
|
|
|
|
0, BLK_PERM_ALL, 0, flags, block_job_cb,
|
2019-12-04 12:36:23 +03:00
|
|
|
NULL, &err);
|
2016-07-29 17:31:41 +03:00
|
|
|
if (should_succeed) {
|
2019-12-04 12:36:23 +03:00
|
|
|
g_assert_null(err);
|
2016-07-29 17:31:41 +03:00
|
|
|
g_assert_nonnull(job);
|
|
|
|
if (id) {
|
2018-04-12 18:29:59 +03:00
|
|
|
g_assert_cmpstr(job->job.id, ==, id);
|
2016-07-29 17:31:41 +03:00
|
|
|
} else {
|
2018-04-12 18:29:59 +03:00
|
|
|
g_assert_cmpstr(job->job.id, ==, blk_name(blk));
|
2016-07-29 17:31:41 +03:00
|
|
|
}
|
|
|
|
} else {
|
2019-12-04 12:36:25 +03:00
|
|
|
error_free_or_abort(&err);
|
2016-07-29 17:31:41 +03:00
|
|
|
g_assert_null(job);
|
|
|
|
}
|
|
|
|
|
|
|
|
return job;
|
|
|
|
}
|
|
|
|
|
2018-03-10 11:27:46 +03:00
|
|
|
static BlockJob *do_test_id(BlockBackend *blk, const char *id,
|
|
|
|
bool should_succeed)
|
|
|
|
{
|
|
|
|
return mk_job(blk, id, &test_block_job_driver,
|
2018-04-19 18:54:56 +03:00
|
|
|
should_succeed, JOB_DEFAULT);
|
2018-03-10 11:27:46 +03:00
|
|
|
}
|
|
|
|
|
2016-07-29 17:31:41 +03:00
|
|
|
/* This creates a BlockBackend (optionally with a name) with a
|
|
|
|
* BlockDriverState inserted. */
|
|
|
|
static BlockBackend *create_blk(const char *name)
|
|
|
|
{
|
2017-02-09 17:48:04 +03:00
|
|
|
/* No I/O is performed on this device */
|
2019-04-25 15:25:10 +03:00
|
|
|
BlockBackend *blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
|
2017-01-16 19:17:38 +03:00
|
|
|
BlockDriverState *bs;
|
|
|
|
|
2019-07-29 15:46:00 +03:00
|
|
|
QDict *opt = qdict_new();
|
|
|
|
qdict_put_str(opt, "file.read-zeroes", "on");
|
|
|
|
bs = bdrv_open("null-co://", NULL, opt, 0, &error_abort);
|
2017-01-16 19:17:38 +03:00
|
|
|
g_assert_nonnull(bs);
|
2016-07-29 17:31:41 +03:00
|
|
|
|
2017-01-13 21:02:32 +03:00
|
|
|
blk_insert_bs(blk, bs, &error_abort);
|
2016-07-29 17:31:41 +03:00
|
|
|
bdrv_unref(bs);
|
|
|
|
|
|
|
|
if (name) {
|
2019-12-04 12:36:23 +03:00
|
|
|
Error *err = NULL;
|
|
|
|
monitor_add_blk(blk, name, &err);
|
|
|
|
g_assert_null(err);
|
2016-07-29 17:31:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return blk;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This destroys the backend */
|
|
|
|
static void destroy_blk(BlockBackend *blk)
|
|
|
|
{
|
|
|
|
if (blk_name(blk)[0] != '\0') {
|
|
|
|
monitor_remove_blk(blk);
|
|
|
|
}
|
|
|
|
|
|
|
|
blk_remove_bs(blk);
|
|
|
|
blk_unref(blk);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Exercise the job-ID validation rules: IDs must be well-formed
 * (non-empty, starting with a letter), must be unique among live jobs,
 * and default to the backend's name when omitted.
 */
static void test_job_ids(void)
{
    BlockBackend *blk[3];
    BlockJob *job[3];

    blk[0] = create_blk(NULL);
    blk[1] = create_blk("drive1");
    blk[2] = create_blk("drive2");

    /* No job ID provided and the block backend has no name */
    job[0] = do_test_id(blk[0], NULL, false);

    /* These are all invalid job IDs */
    job[0] = do_test_id(blk[0], "0id", false);
    job[0] = do_test_id(blk[0], "", false);
    job[0] = do_test_id(blk[0], " ", false);
    job[0] = do_test_id(blk[0], "123", false);
    job[0] = do_test_id(blk[0], "_id", false);
    job[0] = do_test_id(blk[0], "-id", false);
    job[0] = do_test_id(blk[0], ".id", false);
    job[0] = do_test_id(blk[0], "#id", false);

    /* This one is valid */
    job[0] = do_test_id(blk[0], "id0", true);

    /* We can have two jobs in the same BDS */
    job[1] = do_test_id(blk[0], "id1", true);
    job_early_fail(&job[1]->job);

    /* Duplicate job IDs are not allowed */
    job[1] = do_test_id(blk[1], "id0", false);

    /* But once job[0] finishes we can reuse its ID */
    job_early_fail(&job[0]->job);
    job[1] = do_test_id(blk[1], "id0", true);

    /* No job ID specified, defaults to the backend name ('drive1') */
    job_early_fail(&job[1]->job);
    job[1] = do_test_id(blk[1], NULL, true);

    /* Duplicate job ID */
    job[2] = do_test_id(blk[2], "drive1", false);

    /* The ID of job[2] would default to 'drive2' but it is already in use */
    job[0] = do_test_id(blk[0], "drive2", true);
    job[2] = do_test_id(blk[2], NULL, false);

    /* This one is valid */
    job[2] = do_test_id(blk[2], "id_2", true);

    /* Dispose of the remaining jobs before tearing down the backends */
    job_early_fail(&job[0]->job);
    job_early_fail(&job[1]->job);
    job_early_fail(&job[2]->job);

    destroy_blk(blk[0]);
    destroy_blk(blk[1]);
    destroy_blk(blk[2]);
}
|
|
|
|
|
2018-03-10 11:27:46 +03:00
|
|
|
/* A job driven by the tests below: its .run loop polls these flags so the
 * test can steer the job through the various JobStatus states. */
typedef struct CancelJob {
    BlockJob common;
    BlockBackend *blk;
    bool should_converge;   /* when set, the job transitions to READY */
    bool should_complete;   /* when set, the .run loop terminates */
} CancelJob;
|
|
|
|
|
2018-04-23 13:24:16 +03:00
|
|
|
/* .complete callback: tell the running job's main loop to finish. */
static void cancel_job_complete(Job *job, Error **errp)
{
    CancelJob *cj = container_of(job, CancelJob, common.job);

    cj->should_complete = true;
}
|
|
|
|
|
2018-08-30 04:57:26 +03:00
|
|
|
/*
 * .run callback: spin until the test sets should_complete, bailing out
 * early on cancellation and transitioning to READY once should_converge
 * is set.  Sleeps briefly each iteration to yield to the main loop.
 */
static int coroutine_fn cancel_job_run(Job *job, Error **errp)
{
    CancelJob *s = container_of(job, CancelJob, common.job);
    Job *j = &s->common.job;

    for (;;) {
        if (s->should_complete) {
            break;
        }

        if (job_is_cancelled(j)) {
            return 0;
        }

        if (s->should_converge && !job_is_ready(j)) {
            job_transition_to_ready(j);
        }

        job_sleep_ns(j, 100000);
    }

    return 0;
}
|
|
|
|
|
|
|
|
/* Driver for CancelJob: runs cancel_job_run() and completes on demand via
 * cancel_job_complete(). */
static const BlockJobDriver test_cancel_driver = {
    .job_driver = {
        .instance_size = sizeof(CancelJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .run           = cancel_job_run,
        .complete      = cancel_job_complete,
    },
};
|
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
/*
 * Build the fixture shared by the cancel tests: an anonymous backend with
 * a manual-finalize/manual-dismiss CancelJob on top.  An extra job
 * reference is taken so the test can inspect the job after it concludes.
 * Stores the Job pointer in *pjob and returns the CancelJob.
 */
static CancelJob *create_common(Job **pjob)
{
    BlockBackend *blk = create_blk(NULL);
    BlockJob *bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
                            JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
    Job *job = &bjob->job;
    CancelJob *s = container_of(bjob, CancelJob, common);

    job_ref(job);
    assert(job->status == JOB_STATUS_CREATED);
    s->blk = blk;
    *pjob = job;

    return s;
}
|
|
|
|
|
|
|
|
/*
 * Cancel the job and tear the fixture down, whatever state the job is in.
 * Takes the job's AioContext lock around the whole sequence, as required
 * by job_cancel_sync()/job_dismiss()/destroy_blk().
 */
static void cancel_common(CancelJob *s)
{
    BlockJob *job = &s->common;
    BlockBackend *blk = s->blk;
    JobStatus sts = job->job.status;
    AioContext *ctx;

    ctx = job->job.aio_context;
    aio_context_acquire(ctx);

    job_cancel_sync(&job->job, true);
    /* Jobs that ever ran (or already concluded) must be dismissed manually
     * because they were created with JOB_MANUAL_DISMISS */
    if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
        Job *dummy = &job->job;
        job_dismiss(&dummy, &error_abort);
    }
    assert(job->job.status == JOB_STATUS_NULL);
    /* Drop the reference taken in create_common() */
    job_unref(&job->job);
    destroy_blk(blk);

    aio_context_release(ctx);
}
|
|
|
|
|
|
|
|
static void test_cancel_created(void)
|
|
|
|
{
|
2018-09-06 16:02:17 +03:00
|
|
|
Job *job;
|
2018-03-10 11:27:46 +03:00
|
|
|
CancelJob *s;
|
|
|
|
|
|
|
|
s = create_common(&job);
|
|
|
|
cancel_common(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_cancel_running(void)
|
|
|
|
{
|
2018-09-06 16:02:17 +03:00
|
|
|
Job *job;
|
2018-03-10 11:27:46 +03:00
|
|
|
CancelJob *s;
|
|
|
|
|
|
|
|
s = create_common(&job);
|
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_start(job);
|
|
|
|
assert(job->status == JOB_STATUS_RUNNING);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
cancel_common(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_cancel_paused(void)
|
|
|
|
{
|
2018-09-06 16:02:17 +03:00
|
|
|
Job *job;
|
2018-03-10 11:27:46 +03:00
|
|
|
CancelJob *s;
|
|
|
|
|
|
|
|
s = create_common(&job);
|
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_start(job);
|
|
|
|
assert(job->status == JOB_STATUS_RUNNING);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_user_pause(job, &error_abort);
|
|
|
|
job_enter(job);
|
|
|
|
assert(job->status == JOB_STATUS_PAUSED);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
cancel_common(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_cancel_ready(void)
|
|
|
|
{
|
2018-09-06 16:02:17 +03:00
|
|
|
Job *job;
|
2018-03-10 11:27:46 +03:00
|
|
|
CancelJob *s;
|
|
|
|
|
|
|
|
s = create_common(&job);
|
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_start(job);
|
|
|
|
assert(job->status == JOB_STATUS_RUNNING);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
s->should_converge = true;
|
2018-09-06 16:02:17 +03:00
|
|
|
job_enter(job);
|
|
|
|
assert(job->status == JOB_STATUS_READY);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
cancel_common(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_cancel_standby(void)
|
|
|
|
{
|
2018-09-06 16:02:17 +03:00
|
|
|
Job *job;
|
2018-03-10 11:27:46 +03:00
|
|
|
CancelJob *s;
|
|
|
|
|
|
|
|
s = create_common(&job);
|
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_start(job);
|
|
|
|
assert(job->status == JOB_STATUS_RUNNING);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
s->should_converge = true;
|
2018-09-06 16:02:17 +03:00
|
|
|
job_enter(job);
|
|
|
|
assert(job->status == JOB_STATUS_READY);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_user_pause(job, &error_abort);
|
|
|
|
job_enter(job);
|
|
|
|
assert(job->status == JOB_STATUS_STANDBY);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
cancel_common(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_cancel_pending(void)
|
|
|
|
{
|
2018-09-06 16:02:17 +03:00
|
|
|
Job *job;
|
2018-03-10 11:27:46 +03:00
|
|
|
CancelJob *s;
|
|
|
|
|
|
|
|
s = create_common(&job);
|
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_start(job);
|
|
|
|
assert(job->status == JOB_STATUS_RUNNING);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
s->should_converge = true;
|
2018-09-06 16:02:17 +03:00
|
|
|
job_enter(job);
|
|
|
|
assert(job->status == JOB_STATUS_READY);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_complete(job, &error_abort);
|
|
|
|
job_enter(job);
|
2018-09-06 16:02:18 +03:00
|
|
|
while (!job->deferred_to_main_loop) {
|
2018-03-10 11:27:46 +03:00
|
|
|
aio_poll(qemu_get_aio_context(), true);
|
|
|
|
}
|
2018-09-06 16:02:18 +03:00
|
|
|
assert(job->status == JOB_STATUS_READY);
|
|
|
|
aio_poll(qemu_get_aio_context(), true);
|
2018-09-06 16:02:17 +03:00
|
|
|
assert(job->status == JOB_STATUS_PENDING);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
cancel_common(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void test_cancel_concluded(void)
|
|
|
|
{
|
2018-09-06 16:02:17 +03:00
|
|
|
Job *job;
|
2018-03-10 11:27:46 +03:00
|
|
|
CancelJob *s;
|
|
|
|
|
|
|
|
s = create_common(&job);
|
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_start(job);
|
|
|
|
assert(job->status == JOB_STATUS_RUNNING);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
s->should_converge = true;
|
2018-09-06 16:02:17 +03:00
|
|
|
job_enter(job);
|
|
|
|
assert(job->status == JOB_STATUS_READY);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
2018-09-06 16:02:17 +03:00
|
|
|
job_complete(job, &error_abort);
|
|
|
|
job_enter(job);
|
2018-09-06 16:02:18 +03:00
|
|
|
while (!job->deferred_to_main_loop) {
|
2018-03-10 11:27:46 +03:00
|
|
|
aio_poll(qemu_get_aio_context(), true);
|
|
|
|
}
|
2018-09-06 16:02:18 +03:00
|
|
|
assert(job->status == JOB_STATUS_READY);
|
|
|
|
aio_poll(qemu_get_aio_context(), true);
|
2018-09-06 16:02:17 +03:00
|
|
|
assert(job->status == JOB_STATUS_PENDING);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
job: take each job's lock individually in job_txn_apply
All callers of job_txn_apply hold a single job's lock, but different
jobs within a transaction can have different contexts, thus we need to
lock each one individually before applying the callback function.
Similar to job_completed_txn_abort this also requires releasing the
caller's context before and reacquiring it after to avoid recursive
locks which might break AIO_WAIT_WHILE in the callback. This is safe, since
existing code would already have to take this into account, lest
job_completed_txn_abort might have broken.
This also brings to light a different issue: When a callback function in
job_txn_apply moves it's job to a different AIO context, callers will
try to release the wrong lock (now that we re-acquire the lock
correctly, previously it would just continue with the old lock, leaving
the job unlocked for the rest of the return path). Fix this by not caching
the job's context.
This is only necessary for qmp_block_job_finalize, qmp_job_finalize and
job_exit, since everyone else calls through job_exit.
One test needed adapting, since it calls job_finalize directly, so it
manually needs to acquire the correct context.
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Message-Id: <20200407115651.69472-2-s.reiter@proxmox.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2020-04-07 14:56:49 +03:00
|
|
|
aio_context_acquire(job->aio_context);
|
2018-09-06 16:02:17 +03:00
|
|
|
job_finalize(job, &error_abort);
|
job: take each job's lock individually in job_txn_apply
All callers of job_txn_apply hold a single job's lock, but different
jobs within a transaction can have different contexts, thus we need to
lock each one individually before applying the callback function.
Similar to job_completed_txn_abort this also requires releasing the
caller's context before and reacquiring it after to avoid recursive
locks which might break AIO_WAIT_WHILE in the callback. This is safe, since
existing code would already have to take this into account, lest
job_completed_txn_abort might have broken.
This also brings to light a different issue: When a callback function in
job_txn_apply moves it's job to a different AIO context, callers will
try to release the wrong lock (now that we re-acquire the lock
correctly, previously it would just continue with the old lock, leaving
the job unlocked for the rest of the return path). Fix this by not caching
the job's context.
This is only necessary for qmp_block_job_finalize, qmp_job_finalize and
job_exit, since everyone else calls through job_exit.
One test needed adapting, since it calls job_finalize directly, so it
manually needs to acquire the correct context.
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Message-Id: <20200407115651.69472-2-s.reiter@proxmox.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2020-04-07 14:56:49 +03:00
|
|
|
aio_context_release(job->aio_context);
|
2018-09-06 16:02:17 +03:00
|
|
|
assert(job->status == JOB_STATUS_CONCLUDED);
|
2018-03-10 11:27:46 +03:00
|
|
|
|
|
|
|
cancel_common(s);
|
|
|
|
}
|
|
|
|
|
2021-04-09 15:04:21 +03:00
|
|
|
/* (See test_yielding_driver for the job description) */
typedef struct YieldingJob {
    BlockJob common;
    bool should_complete;   /* set by yielding_job_complete() to end .run */
} YieldingJob;
|
|
|
|
|
|
|
|
/* .complete callback: flag completion and kick the yielded coroutine. */
static void yielding_job_complete(Job *job, Error **errp)
{
    YieldingJob *yj = container_of(job, YieldingJob, common.job);

    yj->should_complete = true;
    job_enter(job);
}
|
|
|
|
|
|
|
|
/* .run callback: go READY immediately, then yield until told to finish. */
static int coroutine_fn yielding_job_run(Job *job, Error **errp)
{
    YieldingJob *yj = container_of(job, YieldingJob, common.job);

    job_transition_to_ready(job);

    while (!yj->should_complete) {
        job_yield(job);
    }

    return 0;
}
|
|
|
|
|
|
|
|
/*
 * This job transitions immediately to the READY state, and then
 * yields until it is to complete.
 */
static const BlockJobDriver test_yielding_driver = {
    .job_driver = {
        .instance_size  = sizeof(YieldingJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = yielding_job_run,
        .complete       = yielding_job_complete,
    },
};
|
|
|
|
|
|
|
|
/*
 * Test that job_complete() works even on jobs that are in a paused
 * state (i.e., STANDBY).
 *
 * To do this, run YieldingJob in an IO thread, get it into the READY
 * state, then have a drained section.  Before ending the section,
 * acquire the context so the job will not be entered and will thus
 * remain on STANDBY.
 *
 * job_complete() should still work without error.
 *
 * Note that on the QMP interface, it is impossible to lock an IO
 * thread before a drained section ends.  In practice, the
 * bdrv_drain_all_end() and the aio_context_acquire() will be
 * reversed.  However, that makes for worse reproducibility here:
 * Sometimes, the job would no longer be in STANDBY then but already
 * be started.  We cannot prevent that, because the IO thread runs
 * concurrently.  We can only prevent it by taking the lock before
 * ending the drained section, so we do that.
 *
 * (You can reverse the order of operations and most of the time the
 * test will pass, but sometimes the assert(status == STANDBY) will
 * fail.)
 */
static void test_complete_in_standby(void)
{
    BlockBackend *blk;
    IOThread *iothread;
    AioContext *ctx;
    Job *job;
    BlockJob *bjob;

    /* Create a test drive, move it to an IO thread */
    blk = create_blk(NULL);
    iothread = iothread_new();

    ctx = iothread_get_aio_context(iothread);
    blk_set_aio_context(blk, ctx, &error_abort);

    /* Create our test job */
    bjob = mk_job(blk, "job", &test_yielding_driver, true,
                  JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
    job = &bjob->job;
    assert(job->status == JOB_STATUS_CREATED);

    /* Wait for the job to become READY */
    job_start(job);
    aio_context_acquire(ctx);
    /* AIO_WAIT_WHILE() polls the context until the condition clears */
    AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
    aio_context_release(ctx);

    /* Begin the drained section, pausing the job */
    bdrv_drain_all_begin();
    assert(job->status == JOB_STATUS_STANDBY);
    /* Lock the IO thread to prevent the job from being run */
    aio_context_acquire(ctx);
    /* This will schedule the job to resume it */
    bdrv_drain_all_end();

    /* But the job cannot run, so it will remain on standby */
    assert(job->status == JOB_STATUS_STANDBY);

    /* Even though the job is on standby, this should work */
    job_complete(job, &error_abort);

    /* The test is done now, clean up. */
    job_finish_sync(job, NULL, &error_abort);
    assert(job->status == JOB_STATUS_PENDING);

    job_finalize(job, &error_abort);
    assert(job->status == JOB_STATUS_CONCLUDED);

    job_dismiss(&job, &error_abort);

    destroy_blk(blk);
    aio_context_release(ctx);
    iothread_join(iothread);
}
|
|
|
|
|
2016-07-29 17:31:41 +03:00
|
|
|
/* Register all blockjob test cases with the GLib test harness and run
 * them.  A main loop and the block layer must be set up first. */
int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_abort);
    bdrv_init();

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/blockjob/ids", test_job_ids);
    g_test_add_func("/blockjob/cancel/created", test_cancel_created);
    g_test_add_func("/blockjob/cancel/running", test_cancel_running);
    g_test_add_func("/blockjob/cancel/paused", test_cancel_paused);
    g_test_add_func("/blockjob/cancel/ready", test_cancel_ready);
    g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
    g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
    g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
    g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
    return g_test_run();
}
|