2016-07-27 10:01:50 +03:00
|
|
|
/*
|
|
|
|
* Replication Block filter
|
|
|
|
*
|
|
|
|
* Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
|
|
|
|
* Copyright (c) 2016 Intel Corporation
|
|
|
|
* Copyright (c) 2016 FUJITSU LIMITED
|
|
|
|
*
|
|
|
|
* Author:
|
|
|
|
* Wen Congyang <wency@cn.fujitsu.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
2019-05-23 17:35:07 +03:00
|
|
|
#include "qemu/module.h"
|
2018-02-01 14:18:46 +03:00
|
|
|
#include "qemu/option.h"
|
2016-07-27 10:01:50 +03:00
|
|
|
#include "block/nbd.h"
|
|
|
|
#include "block/blockjob.h"
|
|
|
|
#include "block/block_int.h"
|
|
|
|
#include "block/block_backup.h"
|
|
|
|
#include "sysemu/block-backend.h"
|
|
|
|
#include "qapi/error.h"
|
2018-11-12 17:00:43 +03:00
|
|
|
#include "qapi/qmp/qdict.h"
|
2021-05-17 14:36:56 +03:00
|
|
|
#include "block/replication.h"
|
2016-07-27 10:01:50 +03:00
|
|
|
|
2017-03-17 05:17:39 +03:00
|
|
|
/*
 * Stage of the replication workflow; stored in BDRVReplicationState.stage
 * and consulted by the I/O path (replication_get_io_status) to decide
 * whether requests are allowed and where they go.
 */
typedef enum {
    BLOCK_REPLICATION_NONE,             /* block replication is not started */
    BLOCK_REPLICATION_RUNNING,          /* block replication is running */
    BLOCK_REPLICATION_FAILOVER,         /* failover is running in background */
    BLOCK_REPLICATION_FAILOVER_FAILED,  /* failover failed */
    BLOCK_REPLICATION_DONE,             /* block replication is done */
} ReplicationStage;
|
|
|
|
|
2016-07-27 10:01:50 +03:00
|
|
|
/* Per-BDS state of the replication filter driver (bs->opaque). */
typedef struct BDRVReplicationState {
    ReplicationMode mode;           /* primary or secondary; fixed at open */
    ReplicationStage stage;         /* current step of the workflow */
    BlockJob *commit_job;           /* commit job cancelled on close during
                                     * failover */
    BdrvChild *hidden_disk;         /* secondary only; emptied at checkpoint */
    BdrvChild *secondary_disk;      /* secondary only; base for writes after
                                     * failed failover */
    BlockJob *backup_job;           /* secondary only; checkpointed in
                                     * secondary_do_checkpoint() */
    char *top_id;                   /* node name of the top BDS (secondary
                                     * only); owned, freed in close */
    ReplicationState *rs;           /* handle from replication_new() */
    Error *blocker;                 /* op blocker set on the top BDS while the
                                     * backup job runs */
    bool orig_hidden_read_only;     /* read-only state saved/restored by */
    bool orig_secondary_read_only;  /* reopen_backing_file() */
    int error;                      /* sticky I/O error recorded by
                                     * replication_return_value() */
} BDRVReplicationState;
|
|
|
|
|
|
|
|
/* ReplicationOps callbacks (defined below, wired up in replication_ops). */
static void replication_start(ReplicationState *rs, ReplicationMode mode,
                              Error **errp);
static void replication_do_checkpoint(ReplicationState *rs, Error **errp);
static void replication_get_error(ReplicationState *rs, Error **errp);
static void replication_stop(ReplicationState *rs, bool failover,
                             Error **errp);
|
|
|
|
|
|
|
|
/* Runtime option names accepted by replication_open(). */
#define REPLICATION_MODE   "mode"
#define REPLICATION_TOP_ID "top-id"
static QemuOptsList replication_runtime_opts = {
    .name = "replication",
    .head = QTAILQ_HEAD_INITIALIZER(replication_runtime_opts.head),
    .desc = {
        {
            .name = REPLICATION_MODE,     /* "primary" or "secondary" */
            .type = QEMU_OPT_STRING,
        },
        {
            .name = REPLICATION_TOP_ID,   /* top node name; secondary only */
            .type = QEMU_OPT_STRING,
        },
        { /* end of list */ }
    },
};
|
|
|
|
|
|
|
|
/* Callback table registered with replication_new() in replication_open(). */
static ReplicationOps replication_ops = {
    .start = replication_start,
    .checkpoint = replication_do_checkpoint,
    .get_error = replication_get_error,
    .stop = replication_stop,
};
|
|
|
|
|
|
|
|
/*
 * Open the replication filter: attach the "file" child and parse the
 * driver-specific options "mode" and "top-id".
 *
 * "mode" is mandatory ("primary" or "secondary").  "top-id" is mandatory
 * for the secondary side and rejected on the primary side.
 *
 * Returns 0 on success, a negative errno on failure (errp set).
 */
static int replication_open(BlockDriverState *bs, QDict *options,
                            int flags, Error **errp)
{
    int ret;
    BDRVReplicationState *s = bs->opaque;
    QemuOpts *opts = NULL;
    const char *mode;
    const char *top_id;

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    /* Any parse failure below reports -EINVAL */
    ret = -EINVAL;
    opts = qemu_opts_create(&replication_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        goto fail;
    }

    mode = qemu_opt_get(opts, REPLICATION_MODE);
    if (!mode) {
        error_setg(errp, "Missing the option mode");
        goto fail;
    }

    if (!strcmp(mode, "primary")) {
        s->mode = REPLICATION_MODE_PRIMARY;
        top_id = qemu_opt_get(opts, REPLICATION_TOP_ID);
        if (top_id) {
            error_setg(errp,
                       "The primary side does not support option top-id");
            goto fail;
        }
    } else if (!strcmp(mode, "secondary")) {
        s->mode = REPLICATION_MODE_SECONDARY;
        top_id = qemu_opt_get(opts, REPLICATION_TOP_ID);
        /* g_strdup(NULL) yields NULL, so a missing option is caught here */
        s->top_id = g_strdup(top_id);
        if (!s->top_id) {
            error_setg(errp, "Missing the option top-id");
            goto fail;
        }
    } else {
        error_setg(errp,
                   "The option mode's value should be primary or secondary");
        goto fail;
    }

    s->rs = replication_new(bs, &replication_ops);

    ret = 0;

fail:
    qemu_opts_del(opts);
    return ret;
}
|
|
|
|
|
|
|
|
/*
 * Close the replication filter: stop a still-running replication, cancel
 * an in-flight failover commit job, and release per-BDS resources.
 * Runs under the BQL (GLOBAL_STATE_CODE).
 */
static void replication_close(BlockDriverState *bs)
{
    BDRVReplicationState *s = bs->opaque;
    Job *commit_job;
    GLOBAL_STATE_CODE();

    if (s->stage == BLOCK_REPLICATION_RUNNING) {
        replication_stop(s->rs, false, NULL);
    }
    /* replication_stop() may have moved us into FAILOVER; wait it out */
    if (s->stage == BLOCK_REPLICATION_FAILOVER) {
        commit_job = &s->commit_job->job;
        assert(commit_job->aio_context == qemu_get_current_aio_context());
        job_cancel_sync(commit_job, false);
    }

    /* top_id is only allocated on the secondary side (see replication_open) */
    if (s->mode == REPLICATION_MODE_SECONDARY) {
        g_free(s->top_id);
    }

    replication_remove(s->rs);
}
|
|
|
|
|
2017-03-14 14:46:52 +03:00
|
|
|
static void replication_child_perm(BlockDriverState *bs, BdrvChild *c,
|
2020-05-13 14:05:16 +03:00
|
|
|
BdrvChildRole role,
|
2017-09-14 13:47:11 +03:00
|
|
|
BlockReopenQueue *reopen_queue,
|
2017-03-14 14:46:52 +03:00
|
|
|
uint64_t perm, uint64_t shared,
|
|
|
|
uint64_t *nperm, uint64_t *nshared)
|
|
|
|
{
|
2021-07-18 17:48:33 +03:00
|
|
|
if (role & BDRV_CHILD_PRIMARY) {
|
|
|
|
*nperm = BLK_PERM_CONSISTENT_READ;
|
|
|
|
} else {
|
|
|
|
*nperm = 0;
|
|
|
|
}
|
|
|
|
|
2017-10-25 09:51:23 +03:00
|
|
|
if ((bs->open_flags & (BDRV_O_INACTIVE | BDRV_O_RDWR)) == BDRV_O_RDWR) {
|
|
|
|
*nperm |= BLK_PERM_WRITE;
|
|
|
|
}
|
2020-04-13 01:35:56 +03:00
|
|
|
*nshared = BLK_PERM_CONSISTENT_READ
|
|
|
|
| BLK_PERM_WRITE
|
2017-10-25 09:51:23 +03:00
|
|
|
| BLK_PERM_WRITE_UNCHANGED;
|
2017-03-14 14:46:52 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-02-03 18:22:02 +03:00
|
|
|
/* .bdrv_co_getlength: the filter has the same length as its file child. */
static int64_t coroutine_fn GRAPH_RDLOCK
replication_co_getlength(BlockDriverState *bs)
{
    return bdrv_co_getlength(bs->file->bs);
}
|
|
|
|
|
|
|
|
/*
 * Map the current replication stage onto an I/O status code:
 *   -EIO  -- requests must be rejected,
 *      0  -- requests go to bs->file as usual,
 *      1  -- failover failed: writes need the allocated-above treatment
 *            (see replication_co_writev).
 * The primary side never accepts I/O outside the RUNNING stage.
 */
static int replication_get_io_status(BDRVReplicationState *s)
{
    bool primary = s->mode == REPLICATION_MODE_PRIMARY;

    switch (s->stage) {
    case BLOCK_REPLICATION_NONE:
        return -EIO;
    case BLOCK_REPLICATION_RUNNING:
        return 0;
    case BLOCK_REPLICATION_FAILOVER:
        return primary ? -EIO : 0;
    case BLOCK_REPLICATION_FAILOVER_FAILED:
        return primary ? -EIO : 1;
    case BLOCK_REPLICATION_DONE:
        /*
         * active commit job completes, and active disk and secondary_disk
         * is swapped, so we can operate bs->file directly
         */
        return primary ? -EIO : 0;
    default:
        abort();
    }
}
|
|
|
|
|
|
|
|
static int replication_return_value(BDRVReplicationState *s, int ret)
|
|
|
|
{
|
|
|
|
if (s->mode == REPLICATION_MODE_SECONDARY) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
s->error = ret;
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
/*
 * .bdrv_co_readv: serve guest reads from the file child.
 * Only valid on the secondary side; the primary filter exists solely to
 * forward write requests, so reads there fail with -EIO.
 */
static int coroutine_fn GRAPH_RDLOCK
replication_co_readv(BlockDriverState *bs, int64_t sector_num,
                     int remaining_sectors, QEMUIOVector *qiov)
{
    BDRVReplicationState *s = bs->opaque;
    int ret;

    if (s->mode == REPLICATION_MODE_PRIMARY) {
        /* We only use it to forward primary write requests */
        return -EIO;
    }

    ret = replication_get_io_status(s);
    if (ret < 0) {
        return ret;
    }

    /* sector-based interface: scale to bytes for bdrv_co_preadv() */
    ret = bdrv_co_preadv(bs->file, sector_num * BDRV_SECTOR_SIZE,
                         remaining_sectors * BDRV_SECTOR_SIZE, qiov, 0);

    return replication_return_value(s, ret);
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
static int coroutine_fn GRAPH_RDLOCK
|
|
|
|
replication_co_writev(BlockDriverState *bs, int64_t sector_num,
|
|
|
|
int remaining_sectors, QEMUIOVector *qiov, int flags)
|
2016-07-27 10:01:50 +03:00
|
|
|
{
|
|
|
|
BDRVReplicationState *s = bs->opaque;
|
|
|
|
QEMUIOVector hd_qiov;
|
|
|
|
uint64_t bytes_done = 0;
|
|
|
|
BdrvChild *top = bs->file;
|
|
|
|
BdrvChild *base = s->secondary_disk;
|
|
|
|
BdrvChild *target;
|
block: Make bdrv_is_allocated_above() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, for the most part this patch is just the
addition of scaling at the callers followed by inverse scaling at
bdrv_is_allocated(). But some code, particularly stream_run(),
gets a lot simpler because it no longer has to mess with sectors.
Leave comments where we can further simplify by switching to
byte-based iterations, once later patches eliminate the need for
sector-aligned operations.
For ease of review, bdrv_is_allocated() was tackled separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 15:44:59 +03:00
|
|
|
int ret;
|
|
|
|
int64_t n;
|
2016-07-27 10:01:50 +03:00
|
|
|
|
|
|
|
ret = replication_get_io_status(s);
|
|
|
|
if (ret < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret == 0) {
|
2018-06-28 23:15:23 +03:00
|
|
|
ret = bdrv_co_pwritev(top, sector_num * BDRV_SECTOR_SIZE,
|
|
|
|
remaining_sectors * BDRV_SECTOR_SIZE, qiov, 0);
|
2016-07-27 10:01:50 +03:00
|
|
|
return replication_return_value(s, ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Failover failed, only write to active disk if the sectors
|
|
|
|
* have already been allocated in active disk/hidden disk.
|
|
|
|
*/
|
|
|
|
qemu_iovec_init(&hd_qiov, qiov->niov);
|
|
|
|
while (remaining_sectors > 0) {
|
block: Make bdrv_is_allocated_above() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, for the most part this patch is just the
addition of scaling at the callers followed by inverse scaling at
bdrv_is_allocated(). But some code, particularly stream_run(),
gets a lot simpler because it no longer has to mess with sectors.
Leave comments where we can further simplify by switching to
byte-based iterations, once later patches eliminate the need for
sector-aligned operations.
For ease of review, bdrv_is_allocated() was tackled separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 15:44:59 +03:00
|
|
|
int64_t count;
|
|
|
|
|
2023-09-04 13:03:06 +03:00
|
|
|
ret = bdrv_co_is_allocated_above(top->bs, base->bs, false,
|
|
|
|
sector_num * BDRV_SECTOR_SIZE,
|
|
|
|
remaining_sectors * BDRV_SECTOR_SIZE,
|
|
|
|
&count);
|
2016-07-27 10:01:50 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
goto out1;
|
|
|
|
}
|
|
|
|
|
block: Make bdrv_is_allocated_above() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, for the most part this patch is just the
addition of scaling at the callers followed by inverse scaling at
bdrv_is_allocated(). But some code, particularly stream_run(),
gets a lot simpler because it no longer has to mess with sectors.
Leave comments where we can further simplify by switching to
byte-based iterations, once later patches eliminate the need for
sector-aligned operations.
For ease of review, bdrv_is_allocated() was tackled separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 15:44:59 +03:00
|
|
|
assert(QEMU_IS_ALIGNED(count, BDRV_SECTOR_SIZE));
|
|
|
|
n = count >> BDRV_SECTOR_BITS;
|
2016-07-27 10:01:50 +03:00
|
|
|
qemu_iovec_reset(&hd_qiov);
|
block: Make bdrv_is_allocated_above() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, for the most part this patch is just the
addition of scaling at the callers followed by inverse scaling at
bdrv_is_allocated(). But some code, particularly stream_run(),
gets a lot simpler because it no longer has to mess with sectors.
Leave comments where we can further simplify by switching to
byte-based iterations, once later patches eliminate the need for
sector-aligned operations.
For ease of review, bdrv_is_allocated() was tackled separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 15:44:59 +03:00
|
|
|
qemu_iovec_concat(&hd_qiov, qiov, bytes_done, count);
|
2016-07-27 10:01:50 +03:00
|
|
|
|
|
|
|
target = ret ? top : base;
|
2018-06-28 23:15:23 +03:00
|
|
|
ret = bdrv_co_pwritev(target, sector_num * BDRV_SECTOR_SIZE,
|
|
|
|
n * BDRV_SECTOR_SIZE, &hd_qiov, 0);
|
2016-07-27 10:01:50 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
goto out1;
|
|
|
|
}
|
|
|
|
|
|
|
|
remaining_sectors -= n;
|
|
|
|
sector_num += n;
|
block: Make bdrv_is_allocated_above() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, for the most part this patch is just the
addition of scaling at the callers followed by inverse scaling at
bdrv_is_allocated(). But some code, particularly stream_run(),
gets a lot simpler because it no longer has to mess with sectors.
Leave comments where we can further simplify by switching to
byte-based iterations, once later patches eliminate the need for
sector-aligned operations.
For ease of review, bdrv_is_allocated() was tackled separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 15:44:59 +03:00
|
|
|
bytes_done += count;
|
2016-07-27 10:01:50 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
out1:
|
|
|
|
qemu_iovec_destroy(&hd_qiov);
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-09-29 17:51:49 +03:00
|
|
|
/*
 * Perform a checkpoint on the secondary side: flush the backup job's
 * state, then empty the active disk and the hidden disk so the next
 * epoch starts from a clean overlay.  Errors are reported via @errp and
 * abort the remaining steps.
 */
static void GRAPH_UNLOCKED
secondary_do_checkpoint(BlockDriverState *bs, Error **errp)
{
    BDRVReplicationState *s = bs->opaque;
    BdrvChild *active_disk;
    Error *local_err = NULL;
    int ret;

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (!s->backup_job) {
        error_setg(errp, "Backup job was cancelled unexpectedly");
        return;
    }

    backup_do_checkpoint(s->backup_job, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* A NULL drv means the medium was ejected under us */
    active_disk = bs->file;
    if (!active_disk->bs->drv) {
        error_setg(errp, "Active disk %s is ejected",
                   active_disk->bs->node_name);
        return;
    }

    ret = bdrv_make_empty(active_disk, errp);
    if (ret < 0) {
        return;
    }

    if (!s->hidden_disk->bs->drv) {
        error_setg(errp, "Hidden disk %s is ejected",
                   s->hidden_disk->bs->node_name);
        return;
    }

    ret = bdrv_make_empty(s->hidden_disk, errp);
    if (ret < 0) {
        return;
    }
}
|
|
|
|
|
2018-11-12 17:00:43 +03:00
|
|
|
/* This function is supposed to be called twice:
 * first with writable = true, then with writable = false.
 * The first call puts s->hidden_disk and s->secondary_disk in
 * r/w mode, and the second puts them back in their original state.
 */
static void reopen_backing_file(BlockDriverState *bs, bool writable,
                                Error **errp)
{
    BDRVReplicationState *s = bs->opaque;
    BdrvChild *hidden_disk, *secondary_disk;
    BlockReopenQueue *reopen_queue = NULL;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /*
     * s->hidden_disk and s->secondary_disk may not be set yet, as they will
     * only be set after the children are writable.
     */
    hidden_disk = bs->file->bs->backing;
    secondary_disk = hidden_disk->bs->backing;

    if (writable) {
        /* Remember the original state so the second call can restore it */
        s->orig_hidden_read_only = bdrv_is_read_only(hidden_disk->bs);
        s->orig_secondary_read_only = bdrv_is_read_only(secondary_disk->bs);
    }

    /* Only queue a reopen for children that were originally read-only */
    if (s->orig_hidden_read_only) {
        QDict *opts = qdict_new();
        qdict_put_bool(opts, BDRV_OPT_READ_ONLY, !writable);
        reopen_queue = bdrv_reopen_queue(reopen_queue, hidden_disk->bs,
                                         opts, true);
    }

    if (s->orig_secondary_read_only) {
        QDict *opts = qdict_new();
        qdict_put_bool(opts, BDRV_OPT_READ_ONLY, !writable);
        reopen_queue = bdrv_reopen_queue(reopen_queue, secondary_disk->bs,
                                         opts, true);
    }

    if (reopen_queue) {
        bdrv_reopen_multiple(reopen_queue, errp);
    }
}
|
|
|
|
|
2016-10-27 13:49:01 +03:00
|
|
|
/*
 * Tear down after the backup job has finished: clear the job reference,
 * lift the op blocker from the top BDS and restore the backing files'
 * original read-only state.  If the top BDS is already gone, there is
 * nothing left to unblock.
 */
static void backup_job_cleanup(BlockDriverState *bs)
{
    BDRVReplicationState *s = bs->opaque;
    BlockDriverState *top_bs;

    s->backup_job = NULL;

    top_bs = bdrv_lookup_bs(s->top_id, s->top_id, NULL);
    if (!top_bs) {
        return;
    }
    bdrv_op_unblock_all(top_bs, s->blocker);
    error_free(s->blocker);
    reopen_backing_file(bs, false, NULL);
}
|
|
|
|
|
|
|
|
static void backup_job_completed(void *opaque, int ret)
|
|
|
|
{
|
2016-10-27 13:49:01 +03:00
|
|
|
BlockDriverState *bs = opaque;
|
|
|
|
BDRVReplicationState *s = bs->opaque;
|
2016-07-27 10:01:50 +03:00
|
|
|
|
2017-03-17 05:17:39 +03:00
|
|
|
if (s->stage != BLOCK_REPLICATION_FAILOVER) {
|
2016-07-27 10:01:50 +03:00
|
|
|
/* The backup job is cancelled unexpectedly */
|
|
|
|
s->error = -EIO;
|
|
|
|
}
|
|
|
|
|
2016-10-27 13:49:01 +03:00
|
|
|
backup_job_cleanup(bs);
|
2016-07-27 10:01:50 +03:00
|
|
|
}
|
|
|
|
|
2023-09-29 17:51:56 +03:00
|
|
|
static bool GRAPH_RDLOCK
|
|
|
|
check_top_bs(BlockDriverState *top_bs, BlockDriverState *bs)
|
2016-07-27 10:01:50 +03:00
|
|
|
{
|
|
|
|
BdrvChild *child;
|
|
|
|
|
|
|
|
/* The bs itself is the top_bs */
|
|
|
|
if (top_bs == bs) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Iterate over top_bs's children */
|
|
|
|
QLIST_FOREACH(child, &top_bs->children, next) {
|
|
|
|
if (child->bs == bs || check_top_bs(child->bs, bs)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ReplicationOps start callback: move this node from
 * BLOCK_REPLICATION_NONE to BLOCK_REPLICATION_RUNNING.
 *
 * For the primary there is nothing to set up.  For the secondary, the
 * expected graph is active <- hidden <- secondary; this function
 * validates that chain, reopens the backing files read-write, attaches
 * the hidden and secondary disks as children of this filter node, blocks
 * most operations on the top node and starts an internal backup job from
 * the secondary disk into the hidden disk.
 *
 * @rs:   replication state whose opaque is this BlockDriverState
 * @mode: requested mode; must match the mode the node was opened with
 * @errp: error out-parameter
 */
static void replication_start(ReplicationState *rs, ReplicationMode mode,
                              Error **errp)
{
    BlockDriverState *bs = rs->opaque;
    BDRVReplicationState *s;
    BlockDriverState *top_bs;
    BdrvChild *active_disk, *hidden_disk, *secondary_disk;
    int64_t active_length, hidden_length, disk_length;
    Error *local_err = NULL;
    BackupPerf perf = { .use_copy_range = true, .max_workers = 1 };

    GLOBAL_STATE_CODE();

    s = bs->opaque;

    if (s->stage == BLOCK_REPLICATION_DONE ||
        s->stage == BLOCK_REPLICATION_FAILOVER) {
        /*
         * This case happens when a secondary is promoted to primary.
         * Ignore the request because the secondary side of replication
         * doesn't have to do anything anymore.
         */
        return;
    }

    if (s->stage != BLOCK_REPLICATION_NONE) {
        error_setg(errp, "Block replication is running or done");
        return;
    }

    if (s->mode != mode) {
        error_setg(errp, "The parameter mode's value is invalid, needs %d,"
                   " but got %d", s->mode, mode);
        return;
    }

    switch (s->mode) {
    case REPLICATION_MODE_PRIMARY:
        break;
    case REPLICATION_MODE_SECONDARY:
        /* Graph is inspected under the reader lock while validating it */
        bdrv_graph_rdlock_main_loop();
        active_disk = bs->file;
        if (!active_disk || !active_disk->bs || !active_disk->bs->backing) {
            error_setg(errp, "Active disk doesn't have backing file");
            bdrv_graph_rdunlock_main_loop();
            return;
        }

        hidden_disk = active_disk->bs->backing;
        if (!hidden_disk->bs || !hidden_disk->bs->backing) {
            error_setg(errp, "Hidden disk doesn't have backing file");
            bdrv_graph_rdunlock_main_loop();
            return;
        }

        secondary_disk = hidden_disk->bs->backing;
        if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) {
            error_setg(errp, "The secondary disk doesn't have block backend");
            bdrv_graph_rdunlock_main_loop();
            return;
        }
        bdrv_graph_rdunlock_main_loop();

        /* verify the length */
        active_length = bdrv_getlength(active_disk->bs);
        hidden_length = bdrv_getlength(hidden_disk->bs);
        disk_length = bdrv_getlength(secondary_disk->bs);
        if (active_length < 0 || hidden_length < 0 || disk_length < 0 ||
            active_length != hidden_length || hidden_length != disk_length) {
            error_setg(errp, "Active disk, hidden disk, secondary disk's length"
                       " are not the same");
            return;
        }

        /* Must be true, or the bdrv_getlength() calls would have failed */
        assert(active_disk->bs->drv && hidden_disk->bs->drv);

        /*
         * Both drivers must support make_empty: presumably needed so the
         * disks can be cleared at each checkpoint -- see
         * secondary_do_checkpoint() (not visible here).
         */
        bdrv_graph_rdlock_main_loop();
        if (!active_disk->bs->drv->bdrv_make_empty ||
            !hidden_disk->bs->drv->bdrv_make_empty) {
            error_setg(errp,
                       "Active disk or hidden disk doesn't support make_empty");
            bdrv_graph_rdunlock_main_loop();
            return;
        }
        bdrv_graph_rdunlock_main_loop();

        /* reopen the backing file in r/w mode */
        reopen_backing_file(bs, true, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* Graph changes below (attach children, op blockers) need the
         * writer lock */
        bdrv_graph_wrlock();

        bdrv_ref(hidden_disk->bs);
        s->hidden_disk = bdrv_attach_child(bs, hidden_disk->bs, "hidden disk",
                                           &child_of_bds, BDRV_CHILD_DATA,
                                           &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            bdrv_graph_wrunlock();
            return;
        }

        bdrv_ref(secondary_disk->bs);
        s->secondary_disk = bdrv_attach_child(bs, secondary_disk->bs,
                                              "secondary disk", &child_of_bds,
                                              BDRV_CHILD_DATA, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            bdrv_graph_wrunlock();
            return;
        }

        /* start backup job now */
        error_setg(&s->blocker,
                   "Block device is in use by internal backup job");

        /* top_id must name a root node that has this filter in its subtree */
        top_bs = bdrv_lookup_bs(s->top_id, s->top_id, NULL);
        if (!top_bs || !bdrv_is_root_node(top_bs) ||
            !check_top_bs(top_bs, bs)) {
            error_setg(errp, "No top_bs or it is invalid");
            bdrv_graph_wrunlock();
            reopen_backing_file(bs, false, NULL);
            return;
        }
        bdrv_op_block_all(top_bs, s->blocker);
        bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);

        bdrv_graph_wrunlock();

        /* Sync-mode NONE: only copy-on-write of blocks about to change */
        s->backup_job = backup_job_create(
                                NULL, s->secondary_disk->bs, s->hidden_disk->bs,
                                0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, false,
                                NULL, &perf,
                                BLOCKDEV_ON_ERROR_REPORT,
                                BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
                                backup_job_completed, bs, NULL, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            backup_job_cleanup(bs);
            return;
        }
        job_start(&s->backup_job->job);
        break;
    default:
        abort();
    }

    s->stage = BLOCK_REPLICATION_RUNNING;

    if (s->mode == REPLICATION_MODE_SECONDARY) {
        /* Establish the initial checkpoint right away */
        secondary_do_checkpoint(bs, errp);
    }

    s->error = 0;
}
|
|
|
|
|
|
|
|
/*
 * ReplicationOps checkpoint callback: take a new checkpoint on the
 * secondary.  A no-op on the primary, and ignored entirely once the
 * node has been promoted (DONE/FAILOVER stages).
 */
static void replication_do_checkpoint(ReplicationState *rs, Error **errp)
{
    BlockDriverState *bs = rs->opaque;
    BDRVReplicationState *s = bs->opaque;

    switch (s->stage) {
    case BLOCK_REPLICATION_DONE:
    case BLOCK_REPLICATION_FAILOVER:
        /*
         * This case happens when a secondary was promoted to primary.
         * Ignore the request because the secondary side of replication
         * doesn't have to do anything anymore.
         */
        return;
    default:
        break;
    }

    if (s->mode == REPLICATION_MODE_SECONDARY) {
        secondary_do_checkpoint(bs, errp);
    }
}
|
|
|
|
|
|
|
|
/*
 * ReplicationOps get_error callback: report whether replication is
 * healthy.  Sets @errp if replication was never started or if an I/O
 * error has been recorded in s->error.
 */
static void replication_get_error(ReplicationState *rs, Error **errp)
{
    BlockDriverState *bs = rs->opaque;
    BDRVReplicationState *s = bs->opaque;

    if (s->stage == BLOCK_REPLICATION_NONE) {
        error_setg(errp, "Block replication is not running");
    } else if (s->error) {
        error_setg(errp, "I/O error occurred");
    }
}
|
|
|
|
|
|
|
|
/*
 * Completion callback of the active-commit job started by
 * replication_stop() for failover.
 *
 * @opaque: the replication BlockDriverState
 * @ret:    0 on success; any other value marks failover as failed
 */
static void replication_done(void *opaque, int ret)
{
    BlockDriverState *bs = opaque;
    BDRVReplicationState *s = bs->opaque;

    if (ret == 0) {
        s->stage = BLOCK_REPLICATION_DONE;

        /* Detaching children is a graph change: take the writer lock */
        bdrv_graph_wrlock();
        bdrv_unref_child(bs, s->secondary_disk);
        s->secondary_disk = NULL;
        bdrv_unref_child(bs, s->hidden_disk);
        s->hidden_disk = NULL;
        bdrv_graph_wrunlock();

        s->error = 0;
    } else {
        s->stage = BLOCK_REPLICATION_FAILOVER_FAILED;
        s->error = -EIO;
    }
}
|
|
|
|
|
|
|
|
/*
 * ReplicationOps stop callback: leave the RUNNING stage.
 *
 * On the primary this just marks replication done.  On the secondary the
 * internal backup job is cancelled first; then, without @failover, a final
 * checkpoint is taken and replication is done, while with @failover an
 * active-commit job is started to merge the active/hidden data back into
 * the secondary disk, completing asynchronously via replication_done().
 *
 * @rs:       replication state whose opaque is this BlockDriverState
 * @failover: true to fail over to the secondary disk
 * @errp:     error out-parameter
 */
static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
{
    BlockDriverState *bs = rs->opaque;
    BDRVReplicationState *s = bs->opaque;

    if (s->stage == BLOCK_REPLICATION_DONE ||
        s->stage == BLOCK_REPLICATION_FAILOVER) {
        /*
         * This case happens when a secondary was promoted to primary.
         * Ignore the request because the secondary side of replication
         * doesn't have to do anything anymore.
         */
        return;
    }

    if (s->stage != BLOCK_REPLICATION_RUNNING) {
        error_setg(errp, "Block replication is not running");
        return;
    }

    switch (s->mode) {
    case REPLICATION_MODE_PRIMARY:
        s->stage = BLOCK_REPLICATION_DONE;
        s->error = 0;
        break;
    case REPLICATION_MODE_SECONDARY:
        /*
         * This BDS will be closed, and the job should be completed
         * before the BDS is closed, because we will access hidden
         * disk, secondary disk in backup_job_completed().
         */
        if (s->backup_job) {
            /* Synchronous cancel: backup_job_completed() runs before we
             * continue */
            job_cancel_sync(&s->backup_job->job, true);
        }

        if (!failover) {
            secondary_do_checkpoint(bs, errp);
            s->stage = BLOCK_REPLICATION_DONE;
            return;
        }

        bdrv_graph_rdlock_main_loop();
        s->stage = BLOCK_REPLICATION_FAILOVER;
        /* Commit active/hidden data into the secondary disk; finishes in
         * replication_done() */
        s->commit_job = commit_active_start(
                            NULL, bs->file->bs, s->secondary_disk->bs,
                            JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT,
                            NULL, replication_done, bs, true, errp);
        bdrv_graph_rdunlock_main_loop();
        break;
    default:
        abort();
    }
}
|
|
|
|
|
2019-02-01 22:29:25 +03:00
|
|
|
/*
 * Runtime options that are essential to the driver's behavior (see
 * BlockDriver.strong_runtime_opts); NULL-terminated.
 */
static const char *const replication_strong_runtime_opts[] = {
    REPLICATION_MODE,
    REPLICATION_TOP_ID,

    NULL
};
|
|
|
|
|
2019-03-18 18:48:01 +03:00
|
|
|
/* Block driver vtable for the "replication" filter node */
static BlockDriver bdrv_replication = {
    .format_name                = "replication",
    .instance_size              = sizeof(BDRVReplicationState),

    .bdrv_open                  = replication_open,
    .bdrv_close                 = replication_close,
    .bdrv_child_perm            = replication_child_perm,

    .bdrv_co_getlength          = replication_co_getlength,
    .bdrv_co_readv              = replication_co_readv,
    .bdrv_co_writev             = replication_co_writev,

    .is_filter                  = true,

    .strong_runtime_opts        = replication_strong_runtime_opts,
};
|
|
|
|
|
|
|
|
/* Register the replication driver with the block layer at startup */
static void bdrv_replication_init(void)
{
    bdrv_register(&bdrv_replication);
}

block_init(bdrv_replication_init);
|