2014-02-22 01:21:10 +04:00
|
|
|
/*
|
|
|
|
* Quorum Block filter
|
|
|
|
*
|
|
|
|
* Copyright (C) 2012-2014 Nodalink, EURL.
|
|
|
|
*
|
|
|
|
* Author:
|
|
|
|
* Benoît Canet <benoit.canet@irqsave.net>
|
|
|
|
*
|
|
|
|
* Based on the design and code of blkverify.c (Copyright (C) 2010 IBM, Corp)
|
|
|
|
* and blkmirror.c (Copyright (C) 2011 Red Hat, Inc).
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*/
|
|
|
|
|
2016-01-18 21:01:42 +03:00
|
|
|
#include "qemu/osdep.h"
|
2016-05-10 10:36:38 +03:00
|
|
|
#include "qemu/cutils.h"
|
2019-05-23 17:35:07 +03:00
|
|
|
#include "qemu/module.h"
|
2018-02-01 14:18:46 +03:00
|
|
|
#include "qemu/option.h"
|
2022-02-26 21:07:23 +03:00
|
|
|
#include "qemu/memalign.h"
|
2014-02-22 01:21:10 +04:00
|
|
|
#include "block/block_int.h"
|
2020-11-13 19:52:31 +03:00
|
|
|
#include "block/coroutines.h"
|
2018-06-14 22:14:28 +03:00
|
|
|
#include "block/qdict.h"
|
2018-02-01 14:18:31 +03:00
|
|
|
#include "qapi/error.h"
|
2018-02-11 12:36:01 +03:00
|
|
|
#include "qapi/qapi-events-block.h"
|
2014-07-18 22:25:00 +04:00
|
|
|
#include "qapi/qmp/qdict.h"
|
2015-03-17 19:22:46 +03:00
|
|
|
#include "qapi/qmp/qerror.h"
|
2014-07-18 22:25:00 +04:00
|
|
|
#include "qapi/qmp/qlist.h"
|
|
|
|
#include "qapi/qmp/qstring.h"
|
2015-07-01 20:10:35 +03:00
|
|
|
#include "crypto/hash.h"
|
2014-02-22 01:21:15 +04:00
|
|
|
|
|
|
|
#define HASH_LENGTH 32
|
|
|
|
|
2020-08-04 13:46:42 +03:00
|
|
|
#define INDEXSTR_LEN 32
|
|
|
|
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
#define QUORUM_OPT_VOTE_THRESHOLD "vote-threshold"
|
|
|
|
#define QUORUM_OPT_BLKVERIFY "blkverify"
|
2014-06-11 17:24:10 +04:00
|
|
|
#define QUORUM_OPT_REWRITE "rewrite-corrupted"
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
#define QUORUM_OPT_READ_PATTERN "read-pattern"
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
|
2014-02-22 01:21:15 +04:00
|
|
|
/* This union holds a vote hash value */
typedef union QuorumVoteValue {
    uint8_t h[HASH_LENGTH];    /* SHA-256 hash */
    int64_t l;                 /* simpler 64 bits hash */
} QuorumVoteValue;
|
|
|
|
|
|
|
|
/* A vote item: records which child (by index) cast a given vote */
typedef struct QuorumVoteItem {
    int index;                       /* index of the voting child */
    QLIST_ENTRY(QuorumVoteItem) next;
} QuorumVoteItem;
|
|
|
|
|
|
|
|
/* this structure is a vote version. A version is the set of votes sharing the
 * same vote value.
 * The set of votes will be tracked with the items field and its cardinality is
 * vote_count.
 */
typedef struct QuorumVoteVersion {
    QuorumVoteValue value;              /* the shared vote value */
    int index;                          /* index of the first child voting this value */
    int vote_count;                     /* number of children voting this value */
    QLIST_HEAD(, QuorumVoteItem) items; /* the individual votes */
    QLIST_ENTRY(QuorumVoteVersion) next;
} QuorumVoteVersion;
|
|
|
|
|
|
|
|
/* this structure holds a group of vote versions together */
typedef struct QuorumVotes {
    QLIST_HEAD(, QuorumVoteVersion) vote_list;
    /* equality predicate used to match two vote values */
    bool (*compare)(QuorumVoteValue *a, QuorumVoteValue *b);
} QuorumVotes;
|
2014-02-22 01:21:10 +04:00
|
|
|
|
2014-02-22 01:21:11 +04:00
|
|
|
/* the following structure holds the state of one quorum instance */
typedef struct BDRVQuorumState {
    BdrvChild **children;  /* children BlockDriverStates */
    int num_children;      /* children count */
    unsigned next_child_index;  /* the index of the next child that should
                                 * be added
                                 */
    int threshold;         /* if less than threshold children reads gave the
                            * same result a quorum error occurs.
                            */
    bool is_blkverify;     /* true if the driver is in blkverify mode
                            * Writes are mirrored on two children devices.
                            * On reads the two children devices' contents are
                            * compared and if a difference is spotted its
                            * location is printed and the code aborts.
                            * It is useful to debug other block drivers by
                            * comparing them with a reference one.
                            */
    bool rewrite_corrupted;/* true if the driver must rewrite-on-read corrupted
                            * block if Quorum is reached.
                            */

    QuorumReadPattern read_pattern; /* how reads are served: vote across all
                                     * children, or a single read tried in
                                     * FIFO order
                                     */
} BDRVQuorumState;
|
|
|
|
|
2014-02-22 01:21:10 +04:00
|
|
|
typedef struct QuorumAIOCB QuorumAIOCB;

/* Quorum will create one instance of the following structure per operation it
 * performs on its children.
 * So for each read/write operation coming from the upper layer there will be
 * $children_count QuorumChildRequest.
 */
typedef struct QuorumChildRequest {
    BlockDriverState *bs;   /* the child this request is issued to */
    QEMUIOVector qiov;      /* child-local I/O vector */
    uint8_t *buf;           /* backing buffer for qiov */
    int ret;                /* completion status of the child request */
    QuorumAIOCB *parent;    /* the quorum request this belongs to */
} QuorumChildRequest;
|
|
|
|
|
|
|
|
/* Quorum will use the following structure to track progress of each read/write
 * operation received by the upper layer.
 * This structure hold pointers to the QuorumChildRequest structures instances
 * used to do operations on each children and track overall progress.
 */
struct QuorumAIOCB {
    BlockDriverState *bs;
    Coroutine *co;              /* issuing coroutine; re-entered when the last
                                 * child request (or rewrite) completes
                                 */

    /* Request metadata */
    uint64_t offset;
    uint64_t bytes;
    int flags;

    QEMUIOVector *qiov;         /* calling IOV */

    QuorumChildRequest *qcrs;   /* individual child requests */
    int count;                  /* number of completed AIOCB */
    int success_count;          /* number of successfully completed AIOCB */

    int rewrite_count;          /* number of replica to rewrite: count down to
                                 * zero once writes are fired
                                 */

    QuorumVotes votes;          /* per-request vote bookkeeping */

    bool is_read;               /* true for reads, false for writes */
    int vote_ret;               /* error selected by vote when quorum fails */

    int children_read;          /* how many children have been read from */
};
|
2014-02-22 01:21:11 +04:00
|
|
|
|
2016-11-08 13:10:14 +03:00
|
|
|
/* Argument package handed to each per-child coroutine entry point */
typedef struct QuorumCo {
    QuorumAIOCB *acb;   /* the parent quorum request */
    int idx;            /* index of the child to operate on */
} QuorumCo;
|
2014-02-22 01:21:12 +04:00
|
|
|
|
|
|
|
/* Free a QuorumAIOCB and its per-child request array */
static void quorum_aio_finalize(QuorumAIOCB *acb)
{
    /* qcrs must be released before the acb that owns it */
    g_free(acb->qcrs);
    g_free(acb);
}
|
|
|
|
|
2014-02-22 01:21:15 +04:00
|
|
|
/* Return true if the two SHA-256 vote hashes are identical */
static bool quorum_sha256_compare(QuorumVoteValue *a, QuorumVoteValue *b)
{
    return memcmp(a->h, b->h, HASH_LENGTH) == 0;
}
|
|
|
|
|
|
|
|
/* Compare two vote values by their 64 bit hash (used for error-code votes) */
static bool quorum_64bits_compare(QuorumVoteValue *a, QuorumVoteValue *b)
{
    return a->l == b->l;
}
|
|
|
|
|
2022-09-22 11:49:16 +03:00
|
|
|
/*
 * Allocate and initialise the tracking structure for one read/write
 * operation, including one QuorumChildRequest slot per child.
 * The caller owns the result and releases it with quorum_aio_finalize().
 */
static QuorumAIOCB *coroutine_fn quorum_aio_get(BlockDriverState *bs,
                                                QEMUIOVector *qiov,
                                                uint64_t offset, uint64_t bytes,
                                                int flags)
{
    BDRVQuorumState *s = bs->opaque;
    QuorumAIOCB *acb = g_new(QuorumAIOCB, 1);
    int i;

    *acb = (QuorumAIOCB) {
        .co                 = qemu_coroutine_self(),
        .bs                 = bs,
        .offset             = offset,
        .bytes              = bytes,
        .flags              = flags,
        .qiov               = qiov,
        .votes.compare      = quorum_sha256_compare,
        .votes.vote_list    = QLIST_HEAD_INITIALIZER(acb.votes.vote_list),
    };

    /*
     * g_new0() already zero-initialises every field (buf == NULL, ret == 0);
     * only the back-pointer to the parent needs to be set explicitly.
     */
    acb->qcrs = g_new0(QuorumChildRequest, s->num_children);
    for (i = 0; i < s->num_children; i++) {
        acb->qcrs[i].parent = acb;
    }

    return acb;
}
|
|
|
|
|
2016-11-10 19:22:07 +03:00
|
|
|
/*
 * Emit a QUORUM_REPORT_BAD QMP event for one child.
 * ret < 0: the child request failed with that errno (reported as a message).
 * ret == 0: the child completed but disagreed with the vote winner.
 */
static void quorum_report_bad(QuorumOpType type, uint64_t offset,
                              uint64_t bytes, char *node_name, int ret)
{
    const char *msg = NULL;
    /* The event uses sector granularity covering [offset, offset + bytes) */
    int64_t start_sector = offset / BDRV_SECTOR_SIZE;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);

    if (ret < 0) {
        msg = strerror(-ret);
    }

    qapi_event_send_quorum_report_bad(type, msg, node_name, start_sector,
                                      end_sector - start_sector);
}
|
|
|
|
|
2023-09-29 17:51:47 +03:00
|
|
|
/* Emit a QUORUM_FAILURE QMP event: quorum could not be reached for acb */
static void GRAPH_RDLOCK quorum_report_failure(QuorumAIOCB *acb)
{
    const char *reference = bdrv_get_device_or_node_name(acb->bs);
    /* Report sector-granularity bounds covering the whole request */
    int64_t start_sector = acb->offset / BDRV_SECTOR_SIZE;
    int64_t end_sector = DIV_ROUND_UP(acb->offset + acb->bytes,
                                      BDRV_SECTOR_SIZE);

    qapi_event_send_quorum_failure(reference, start_sector,
                                   end_sector - start_sector);
}
|
|
|
|
|
|
|
|
static int quorum_vote_error(QuorumAIOCB *acb);
|
|
|
|
|
2023-09-29 17:51:47 +03:00
|
|
|
/*
 * Check whether too few children succeeded for quorum to be possible.
 * On failure, pick the error code by vote, emit a QUORUM_FAILURE event
 * and return true; otherwise return false.
 */
static bool GRAPH_RDLOCK quorum_has_too_much_io_failed(QuorumAIOCB *acb)
{
    BDRVQuorumState *s = acb->bs->opaque;

    if (acb->success_count >= s->threshold) {
        /* Enough children completed successfully */
        return false;
    }

    acb->vote_ret = quorum_vote_error(acb);
    quorum_report_failure(acb);
    return true;
}
|
|
|
|
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
/*
 * Copy the data of one I/O vector into another.
 * Both vectors must have identical layout (same niov, same total size and
 * matching per-element lengths); this is asserted.
 */
static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source)
{
    int idx;

    assert(dest->niov == source->niov);
    assert(dest->size == source->size);

    for (idx = 0; idx < source->niov; idx++) {
        const struct iovec *src_elem = &source->iov[idx];

        assert(dest->iov[idx].iov_len == src_elem->iov_len);
        memcpy(dest->iov[idx].iov_base, src_elem->iov_base, src_elem->iov_len);
    }
}
|
|
|
|
|
2016-10-05 19:35:27 +03:00
|
|
|
/* Report a failed child request with the error code it returned */
static void quorum_report_bad_acb(QuorumChildRequest *sacb, int ret)
{
    QuorumAIOCB *acb = sacb->parent;
    QuorumOpType type = acb->is_read ? QUORUM_OP_TYPE_READ : QUORUM_OP_TYPE_WRITE;
    quorum_report_bad(type, acb->offset, acb->bytes, sacb->bs->node_name, ret);
}
|
|
|
|
|
2014-02-22 01:21:15 +04:00
|
|
|
/* Report every child whose vote differs from the winning value */
static void quorum_report_bad_versions(BDRVQuorumState *s,
                                       QuorumAIOCB *acb,
                                       QuorumVoteValue *value)
{
    QuorumVoteVersion *version;
    QuorumVoteItem *item;

    QLIST_FOREACH(version, &acb->votes.vote_list, next) {
        /* skip the winning version itself */
        if (acb->votes.compare(&version->value, value)) {
            continue;
        }
        QLIST_FOREACH(item, &version->items, next) {
            /* ret == 0: the child answered, it just disagreed */
            quorum_report_bad(QUORUM_OP_TYPE_READ, acb->offset, acb->bytes,
                              s->children[item->index]->bs->node_name, 0);
        }
    }
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
/*
 * Rewrite the winning data onto one corrupted child.
 *
 * This function can count as GRAPH_RDLOCK because read_quorum_children() holds
 * the graph lock and keeps it until this coroutine has terminated.
 */
static void coroutine_fn GRAPH_RDLOCK quorum_rewrite_entry(void *opaque)
{
    QuorumCo *co = opaque;
    QuorumAIOCB *acb = co->acb;
    BDRVQuorumState *s = acb->bs->opaque;

    /* Ignore any errors, it's just a correction attempt for already
     * corrupted data.
     * Mask out BDRV_REQ_WRITE_UNCHANGED because this overwrites the
     * area with different data from the other children. */
    bdrv_co_pwritev(s->children[co->idx], acb->offset, acb->bytes,
                    acb->qiov, acb->flags & ~BDRV_REQ_WRITE_UNCHANGED);

    /* Wake up the caller after the last rewrite */
    acb->rewrite_count--;
    if (!acb->rewrite_count) {
        qemu_coroutine_enter_if_inactive(acb->co);
    }
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
/*
 * Fire a correcting rewrite (quorum_rewrite_entry) for every child that
 * voted for a version other than the winning one.
 * Returns true if at least one rewrite was started, false otherwise.
 */
static bool coroutine_fn GRAPH_RDLOCK
quorum_rewrite_bad_versions(QuorumAIOCB *acb, QuorumVoteValue *value)
{
    QuorumVoteVersion *version;
    QuorumVoteItem *item;
    int count = 0;

    /* first count the number of bad versions: done first to avoid concurrency
     * issues.
     */
    QLIST_FOREACH(version, &acb->votes.vote_list, next) {
        if (acb->votes.compare(&version->value, value)) {
            continue;
        }
        QLIST_FOREACH(item, &version->items, next) {
            count++;
        }
    }

    /* quorum_rewrite_entry will count down this to zero */
    acb->rewrite_count = count;

    /* now fire the correcting rewrites */
    QLIST_FOREACH(version, &acb->votes.vote_list, next) {
        if (acb->votes.compare(&version->value, value)) {
            continue;
        }
        QLIST_FOREACH(item, &version->items, next) {
            Coroutine *co;
            QuorumCo data = {
                .acb = acb,
                .idx = item->index,
            };

            co = qemu_coroutine_create(quorum_rewrite_entry, &data);
            qemu_coroutine_enter(co);
        }
    }

    /* return true if any rewrite is done else false */
    return count;
}
|
|
|
|
|
2014-02-22 01:21:15 +04:00
|
|
|
static void quorum_count_vote(QuorumVotes *votes,
|
|
|
|
QuorumVoteValue *value,
|
|
|
|
int index)
|
|
|
|
{
|
|
|
|
QuorumVoteVersion *v = NULL, *version = NULL;
|
|
|
|
QuorumVoteItem *item;
|
|
|
|
|
|
|
|
/* look if we have something with this hash */
|
|
|
|
QLIST_FOREACH(v, &votes->vote_list, next) {
|
|
|
|
if (votes->compare(&v->value, value)) {
|
|
|
|
version = v;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* It's a version not yet in the list add it */
|
|
|
|
if (!version) {
|
|
|
|
version = g_new0(QuorumVoteVersion, 1);
|
|
|
|
QLIST_INIT(&version->items);
|
|
|
|
memcpy(&version->value, value, sizeof(version->value));
|
|
|
|
version->index = index;
|
|
|
|
version->vote_count = 0;
|
|
|
|
QLIST_INSERT_HEAD(&votes->vote_list, version, next);
|
|
|
|
}
|
|
|
|
|
|
|
|
version->vote_count++;
|
|
|
|
|
|
|
|
item = g_new0(QuorumVoteItem, 1);
|
|
|
|
item->index = index;
|
|
|
|
QLIST_INSERT_HEAD(&version->items, item, next);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Free every version and every vote item held in a vote list */
static void quorum_free_vote_list(QuorumVotes *votes)
{
    QuorumVoteVersion *version, *next_version;
    QuorumVoteItem *item, *next_item;

    /* _SAFE variants: entries are removed while iterating */
    QLIST_FOREACH_SAFE(version, &votes->vote_list, next, next_version) {
        QLIST_REMOVE(version, next);
        QLIST_FOREACH_SAFE(item, &version->items, next, next_item) {
            QLIST_REMOVE(item, next);
            g_free(item);
        }
        g_free(version);
    }
}
|
|
|
|
|
|
|
|
/*
 * Compute the SHA-256 hash of the data read by child @i into @hash.
 * Returns 0 on success, -EINVAL if the hash computation fails.
 */
static int quorum_compute_hash(QuorumAIOCB *acb, int i, QuorumVoteValue *hash)
{
    QEMUIOVector *qiov = &acb->qcrs[i].qiov;
    size_t len = sizeof(hash->h);
    uint8_t *data = hash->h;

    /* XXX - would be nice if we could pass in the Error **
     * and propagate that back, but this quorum code is
     * restricted to just errno values currently */
    if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA256,
                            qiov->iov, qiov->niov,
                            &data, &len,
                            NULL) < 0) {
        return -EINVAL;
    }

    return 0;
}
|
|
|
|
|
|
|
|
/*
 * Return the version with the largest vote count, or NULL if the
 * vote list is empty.
 */
static QuorumVoteVersion *quorum_get_vote_winner(QuorumVotes *votes)
{
    QuorumVoteVersion *candidate;
    QuorumVoteVersion *best = NULL;
    int best_count = 0;

    QLIST_FOREACH(candidate, &votes->vote_list, next) {
        if (candidate->vote_count > best_count) {
            best_count = candidate->vote_count;
            best = candidate;
        }
    }

    return best;
}
|
|
|
|
|
|
|
|
/* qemu_iovec_compare is handy for blkverify mode because it returns the first
|
|
|
|
* differing byte location. Yet it is handcoded to compare vectors one byte
|
|
|
|
* after another so it does not benefit from the libc SIMD optimizations.
|
|
|
|
* quorum_iovec_compare is written for speed and should be used in the non
|
|
|
|
* blkverify mode of quorum.
|
|
|
|
*/
|
|
|
|
static bool quorum_iovec_compare(QEMUIOVector *a, QEMUIOVector *b)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int result;
|
|
|
|
|
|
|
|
assert(a->niov == b->niov);
|
|
|
|
for (i = 0; i < a->niov; i++) {
|
|
|
|
assert(a->iov[i].iov_len == b->iov[i].iov_len);
|
|
|
|
result = memcmp(a->iov[i].iov_base,
|
|
|
|
b->iov[i].iov_base,
|
|
|
|
a->iov[i].iov_len);
|
|
|
|
if (result) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-10-17 17:33:49 +03:00
|
|
|
/*
 * Compare the data of two child reads.
 * In blkverify mode a mismatch is fatal: the location of the first differing
 * byte is printed and the process exits. Otherwise the faster whole-vector
 * comparison is used and the result returned to the caller.
 */
static bool quorum_compare(QuorumAIOCB *acb, QEMUIOVector *a, QEMUIOVector *b)
{
    BDRVQuorumState *s = acb->bs->opaque;
    ssize_t offset;

    /* This driver will replace blkverify in this particular case */
    if (s->is_blkverify) {
        offset = qemu_iovec_compare(a, b);
        if (offset != -1) {
            fprintf(stderr, "quorum: offset=%" PRIu64 " bytes=%" PRIu64
                    " contents mismatch at offset %" PRIu64 "\n",
                    acb->offset, acb->bytes, acb->offset + offset);
            exit(1);
        }
        return true;
    }

    return quorum_iovec_compare(a, b);
}
|
|
|
|
|
|
|
|
/* Do a vote to get the error code: each failing child votes with its errno
 * (as a 64-bit value) and the most common error code wins.
 * Returns 0 when no child failed.
 */
static int quorum_vote_error(QuorumAIOCB *acb)
{
    BDRVQuorumState *s = acb->bs->opaque;
    QuorumVoteVersion *winner = NULL;
    QuorumVotes error_votes;
    QuorumVoteValue result_value;
    int i, ret = 0;
    bool error = false;

    QLIST_INIT(&error_votes.vote_list);
    error_votes.compare = quorum_64bits_compare;

    for (i = 0; i < s->num_children; i++) {
        ret = acb->qcrs[i].ret;
        if (ret) {
            error = true;
            result_value.l = ret;
            quorum_count_vote(&error_votes, &result_value, i);
        }
    }

    if (error) {
        winner = quorum_get_vote_winner(&error_votes);
        ret = winner->value.l;
    }

    quorum_free_vote_list(&error_votes);

    return ret;
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
/*
 * Vote on the result of a quorum read.
 * Fast path: if every successful read returned the same data, copy it to the
 * caller's qiov. Otherwise hash each successful read, vote on the hashes,
 * copy the winner's data (if the winner reaches the threshold), report the
 * disagreeing children and optionally rewrite them.
 * On failure acb->vote_ret is set to a negative errno.
 */
static void coroutine_fn GRAPH_RDLOCK quorum_vote(QuorumAIOCB *acb)
{
    bool quorum = true;
    int i, j, ret;
    QuorumVoteValue hash;
    BDRVQuorumState *s = acb->bs->opaque;
    QuorumVoteVersion *winner;

    if (quorum_has_too_much_io_failed(acb)) {
        return;
    }

    /* get the index of the first successful read */
    for (i = 0; i < s->num_children; i++) {
        if (!acb->qcrs[i].ret) {
            break;
        }
    }

    /* quorum_has_too_much_io_failed() guarantees at least one success */
    assert(i < s->num_children);

    /* compare this read with all other successful reads stopping at quorum
     * failure
     */
    for (j = i + 1; j < s->num_children; j++) {
        if (acb->qcrs[j].ret) {
            continue;
        }
        quorum = quorum_compare(acb, &acb->qcrs[i].qiov, &acb->qcrs[j].qiov);
        if (!quorum) {
            break;
        }
    }

    /* Every successful read agrees */
    if (quorum) {
        quorum_copy_qiov(acb->qiov, &acb->qcrs[i].qiov);
        return;
    }

    /* compute hashes for each successful read, also store indexes */
    for (i = 0; i < s->num_children; i++) {
        if (acb->qcrs[i].ret) {
            continue;
        }
        ret = quorum_compute_hash(acb, i, &hash);
        /* if ever the hash computation failed */
        if (ret < 0) {
            acb->vote_ret = ret;
            goto free_exit;
        }
        quorum_count_vote(&acb->votes, &hash, i);
    }

    /* vote to select the most represented version */
    winner = quorum_get_vote_winner(&acb->votes);

    /* if the winner count is smaller than threshold the read fails */
    if (winner->vote_count < s->threshold) {
        quorum_report_failure(acb);
        acb->vote_ret = -EIO;
        goto free_exit;
    }

    /* we have a winner: copy it */
    quorum_copy_qiov(acb->qiov, &acb->qcrs[winner->index].qiov);

    /* some versions are bad print them */
    quorum_report_bad_versions(s, acb, &winner->value);

    /* corruption correction is enabled */
    if (s->rewrite_corrupted) {
        quorum_rewrite_bad_versions(acb, &winner->value);
    }

free_exit:
    /* free lists */
    quorum_free_vote_list(&acb->votes);
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
/*
 * Per-child read coroutine: issue the read to one child and record its
 * outcome in the parent QuorumAIOCB.
 *
 * This function can count as GRAPH_RDLOCK because read_quorum_children() holds
 * the graph lock and keeps it until this coroutine has terminated.
 */
static void coroutine_fn GRAPH_RDLOCK read_quorum_children_entry(void *opaque)
{
    QuorumCo *co = opaque;
    QuorumAIOCB *acb = co->acb;
    BDRVQuorumState *s = acb->bs->opaque;
    int i = co->idx;
    QuorumChildRequest *sacb = &acb->qcrs[i];

    sacb->bs = s->children[i]->bs;
    sacb->ret = bdrv_co_preadv(s->children[i], acb->offset, acb->bytes,
                               &acb->qcrs[i].qiov, 0);

    if (sacb->ret == 0) {
        acb->success_count++;
    } else {
        /* emit a QUORUM_REPORT_BAD event for this child */
        quorum_report_bad_acb(sacb, sacb->ret);
    }

    acb->count++;
    assert(acb->count <= s->num_children);
    assert(acb->success_count <= s->num_children);

    /* Wake up the caller after the last read */
    if (acb->count == s->num_children) {
        qemu_coroutine_enter_if_inactive(acb->co);
    }
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
/*
 * Issue one read per child, wait for all of them to finish, then vote on
 * the results.  Returns the vote outcome (acb->vote_ret).
 */
static int coroutine_fn GRAPH_RDLOCK read_quorum_children(QuorumAIOCB *acb)
{
    BDRVQuorumState *s = acb->bs->opaque;
    int idx;

    acb->children_read = s->num_children;

    /* Give each child its own buffer, cloning the caller's iovec layout */
    for (idx = 0; idx < s->num_children; idx++) {
        QuorumChildRequest *req = &acb->qcrs[idx];

        req->buf = qemu_blockalign(s->children[idx]->bs, acb->qiov->size);
        qemu_iovec_init(&req->qiov, acb->qiov->niov);
        qemu_iovec_clone(&req->qiov, acb->qiov, req->buf);
    }

    /* Spawn one read coroutine per child */
    for (idx = 0; idx < s->num_children; idx++) {
        QuorumCo data = {
            .acb = acb,
            .idx = idx,
        };
        Coroutine *co =
            qemu_coroutine_create(read_quorum_children_entry, &data);

        qemu_coroutine_enter(co);
    }

    /* Wait until every child read has completed */
    while (acb->count < s->num_children) {
        qemu_coroutine_yield();
    }

    /* Do the vote on read */
    quorum_vote(acb);

    for (idx = 0; idx < s->num_children; idx++) {
        qemu_vfree(acb->qcrs[idx].buf);
        qemu_iovec_destroy(&acb->qcrs[idx].qiov);
    }

    /* Wait for any pending rewrite requests before returning */
    while (acb->rewrite_count) {
        qemu_coroutine_yield();
    }

    return acb->vote_ret;
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
/*
 * Read from a single child; on failure, fall through to the next child
 * in FIFO order until one succeeds or all children have been tried.
 * Returns the result of the last attempted read.
 */
static int coroutine_fn GRAPH_RDLOCK read_fifo_child(QuorumAIOCB *acb)
{
    BDRVQuorumState *s = acb->bs->opaque;
    int ret;

    for (;;) {
        int child = acb->children_read++;

        acb->qcrs[child].bs = s->children[child]->bs;
        ret = bdrv_co_preadv(s->children[child], acb->offset, acb->bytes,
                             acb->qiov, 0);
        if (ret >= 0) {
            break;
        }
        quorum_report_bad_acb(&acb->qcrs[child], ret);
        if (acb->children_read >= s->num_children) {
            break;
        }
    }

    /* FIXME: rewrite failed children if acb->children_read > 1? */

    return ret;
}
|
|
|
|
|
2023-02-03 18:21:50 +03:00
|
|
|
static int coroutine_fn GRAPH_RDLOCK
|
|
|
|
quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
|
|
|
QEMUIOVector *qiov, BdrvRequestFlags flags)
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
{
|
|
|
|
BDRVQuorumState *s = bs->opaque;
|
2018-04-21 16:29:25 +03:00
|
|
|
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
|
2016-11-08 13:10:14 +03:00
|
|
|
int ret;
|
|
|
|
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
acb->is_read = true;
|
2016-10-05 19:35:26 +03:00
|
|
|
acb->children_read = 0;
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
|
|
|
|
if (s->read_pattern == QUORUM_READ_PATTERN_QUORUM) {
|
2016-11-08 13:10:14 +03:00
|
|
|
ret = read_quorum_children(acb);
|
|
|
|
} else {
|
|
|
|
ret = read_fifo_child(acb);
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
}
|
2016-11-10 16:24:27 +03:00
|
|
|
quorum_aio_finalize(acb);
|
|
|
|
|
2016-11-08 13:10:14 +03:00
|
|
|
return ret;
|
|
|
|
}
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
|
2023-02-03 18:21:48 +03:00
|
|
|
/*
|
|
|
|
* This function can count as GRAPH_RDLOCK because quorum_co_pwritev() holds the
|
|
|
|
* graph lock and keeps it until this coroutine has terminated.
|
|
|
|
*/
|
|
|
|
static void coroutine_fn GRAPH_RDLOCK write_quorum_entry(void *opaque)
|
2016-11-08 13:10:14 +03:00
|
|
|
{
|
|
|
|
QuorumCo *co = opaque;
|
|
|
|
QuorumAIOCB *acb = co->acb;
|
|
|
|
BDRVQuorumState *s = acb->bs->opaque;
|
|
|
|
int i = co->idx;
|
2016-11-10 18:13:15 +03:00
|
|
|
QuorumChildRequest *sacb = &acb->qcrs[i];
|
|
|
|
|
|
|
|
sacb->bs = s->children[i]->bs;
|
2020-11-13 19:52:32 +03:00
|
|
|
if (acb->flags & BDRV_REQ_ZERO_WRITE) {
|
|
|
|
sacb->ret = bdrv_co_pwrite_zeroes(s->children[i], acb->offset,
|
|
|
|
acb->bytes, acb->flags);
|
|
|
|
} else {
|
|
|
|
sacb->ret = bdrv_co_pwritev(s->children[i], acb->offset, acb->bytes,
|
|
|
|
acb->qiov, acb->flags);
|
|
|
|
}
|
2016-11-10 18:13:15 +03:00
|
|
|
if (sacb->ret == 0) {
|
|
|
|
acb->success_count++;
|
|
|
|
} else {
|
|
|
|
quorum_report_bad_acb(sacb, sacb->ret);
|
|
|
|
}
|
|
|
|
acb->count++;
|
|
|
|
assert(acb->count <= s->num_children);
|
|
|
|
assert(acb->success_count <= s->num_children);
|
2016-11-08 13:10:14 +03:00
|
|
|
|
2016-11-10 18:13:15 +03:00
|
|
|
/* Wake up the caller after the last write */
|
|
|
|
if (acb->count == s->num_children) {
|
|
|
|
qemu_coroutine_enter_if_inactive(acb->co);
|
|
|
|
}
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
}
|
|
|
|
|
2023-02-03 18:21:49 +03:00
|
|
|
static int coroutine_fn GRAPH_RDLOCK
|
|
|
|
quorum_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
|
|
|
QEMUIOVector *qiov, BdrvRequestFlags flags)
|
2014-02-22 01:21:12 +04:00
|
|
|
{
|
|
|
|
BDRVQuorumState *s = bs->opaque;
|
2018-04-21 16:29:25 +03:00
|
|
|
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
|
2016-11-08 13:10:14 +03:00
|
|
|
int i, ret;
|
2014-02-22 01:21:12 +04:00
|
|
|
|
|
|
|
for (i = 0; i < s->num_children; i++) {
|
2016-11-08 13:10:14 +03:00
|
|
|
Coroutine *co;
|
|
|
|
QuorumCo data = {
|
|
|
|
.acb = acb,
|
|
|
|
.idx = i,
|
|
|
|
};
|
|
|
|
|
|
|
|
co = qemu_coroutine_create(write_quorum_entry, &data);
|
|
|
|
qemu_coroutine_enter(co);
|
2014-02-22 01:21:12 +04:00
|
|
|
}
|
|
|
|
|
2016-11-10 18:13:15 +03:00
|
|
|
while (acb->count < s->num_children) {
|
2016-11-08 13:10:14 +03:00
|
|
|
qemu_coroutine_yield();
|
|
|
|
}
|
|
|
|
|
2016-11-10 18:13:15 +03:00
|
|
|
quorum_has_too_much_io_failed(acb);
|
|
|
|
|
2016-11-08 13:10:14 +03:00
|
|
|
ret = acb->vote_ret;
|
2016-11-10 16:24:27 +03:00
|
|
|
quorum_aio_finalize(acb);
|
2016-11-08 13:10:14 +03:00
|
|
|
|
|
|
|
return ret;
|
2014-02-22 01:21:12 +04:00
|
|
|
}
|
|
|
|
|
2023-02-03 18:21:48 +03:00
|
|
|
static int coroutine_fn GRAPH_RDLOCK
|
|
|
|
quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
|
|
|
BdrvRequestFlags flags)
|
2020-11-13 19:52:32 +03:00
|
|
|
{
|
|
|
|
return quorum_co_pwritev(bs, offset, bytes, NULL,
|
|
|
|
flags | BDRV_REQ_ZERO_WRITE);
|
|
|
|
}
|
|
|
|
|
2023-02-03 18:22:02 +03:00
|
|
|
static int64_t coroutine_fn GRAPH_RDLOCK
|
|
|
|
quorum_co_getlength(BlockDriverState *bs)
|
2014-02-22 01:21:16 +04:00
|
|
|
{
|
|
|
|
BDRVQuorumState *s = bs->opaque;
|
|
|
|
int64_t result;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* check that all file have the same length */
|
2023-01-13 23:42:04 +03:00
|
|
|
result = bdrv_co_getlength(s->children[0]->bs);
|
2014-02-22 01:21:16 +04:00
|
|
|
if (result < 0) {
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
for (i = 1; i < s->num_children; i++) {
|
2023-01-13 23:42:04 +03:00
|
|
|
int64_t value = bdrv_co_getlength(s->children[i]->bs);
|
2014-02-22 01:21:16 +04:00
|
|
|
if (value < 0) {
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
if (value != result) {
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2023-02-03 18:21:46 +03:00
|
|
|
/*
 * Flush every child.  Succeeds if at least 'threshold' children flushed
 * successfully; otherwise the error codes are voted on and the winning
 * error is returned.
 */
static coroutine_fn GRAPH_RDLOCK int quorum_co_flush(BlockDriverState *bs)
{
    BDRVQuorumState *s = bs->opaque;
    QuorumVotes error_votes;
    QuorumVoteValue error_value;
    int idx;
    int ret = 0;
    int success_count = 0;

    QLIST_INIT(&error_votes.vote_list);
    error_votes.compare = quorum_64bits_compare;

    for (idx = 0; idx < s->num_children; idx++) {
        ret = bdrv_co_flush(s->children[idx]->bs);
        if (ret) {
            /* Record the failure both for the event and for the vote */
            quorum_report_bad(QUORUM_OP_TYPE_FLUSH, 0, 0,
                              s->children[idx]->bs->node_name, ret);
            error_value.l = ret;
            quorum_count_vote(&error_votes, &error_value, idx);
        } else {
            success_count++;
        }
    }

    if (success_count >= s->threshold) {
        ret = 0;
    } else {
        QuorumVoteVersion *winner = quorum_get_vote_winner(&error_votes);

        ret = winner->value.l;
    }
    quorum_free_vote_list(&error_votes);

    return ret;
}
|
|
|
|
|
2023-05-04 14:57:49 +03:00
|
|
|
static bool GRAPH_RDLOCK
|
|
|
|
quorum_recurse_can_replace(BlockDriverState *bs, BlockDriverState *to_replace)
|
2020-02-18 13:34:43 +03:00
|
|
|
{
|
|
|
|
BDRVQuorumState *s = bs->opaque;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < s->num_children; i++) {
|
|
|
|
/*
|
|
|
|
* We have no idea whether our children show the same data as
|
|
|
|
* this node (@bs). It is actually highly likely that
|
|
|
|
* @to_replace does not, because replacing a broken child is
|
|
|
|
* one of the main use cases here.
|
|
|
|
*
|
|
|
|
* We do know that the new BDS will match @bs, so replacing
|
|
|
|
* any of our children by it will be safe. It cannot change
|
|
|
|
* the data this quorum node presents to its parents.
|
|
|
|
*
|
|
|
|
* However, replacing @to_replace by @bs in any of our
|
|
|
|
* children's chains may change visible data somewhere in
|
|
|
|
* there. We therefore cannot recurse down those chains with
|
|
|
|
* bdrv_recurse_can_replace().
|
|
|
|
* (More formally, bdrv_recurse_can_replace() requires that
|
|
|
|
* @to_replace will be replaced by something matching the @bs
|
|
|
|
* passed to it. We cannot guarantee that.)
|
|
|
|
*
|
|
|
|
* Thus, we can only check whether any of our immediate
|
|
|
|
* children matches @to_replace.
|
|
|
|
*
|
|
|
|
* (In the future, we might add a function to recurse down a
|
|
|
|
* chain that checks that nothing there cares about a change
|
|
|
|
* in data from the respective child in question. For
|
|
|
|
* example, most filters do not care when their child's data
|
|
|
|
* suddenly changes, as long as their parents do not care.)
|
|
|
|
*/
|
|
|
|
if (s->children[i]->bs == to_replace) {
|
|
|
|
/*
|
|
|
|
* We now have to ensure that there is no other parent
|
|
|
|
* that cares about replacing this child by a node with
|
|
|
|
* potentially different data.
|
|
|
|
* We do so by checking whether there are any other parents
|
|
|
|
* at all, which is stricter than necessary, but also very
|
|
|
|
* simple. (We may decide to implement something more
|
|
|
|
* complex and permissive when there is an actual need for
|
|
|
|
* it.)
|
|
|
|
*/
|
|
|
|
return QLIST_FIRST(&to_replace->parents) == s->children[i] &&
|
|
|
|
QLIST_NEXT(s->children[i], next_parent) == NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
/*
 * Validate the vote-threshold option against the number of children.
 * The threshold must lie in [1, num_children].  Returns 0 when valid;
 * otherwise sets @errp and returns -ERANGE.
 */
static int quorum_valid_threshold(int threshold, int num_children, Error **errp)
{
    if (threshold >= 1) {
        if (threshold <= num_children) {
            return 0;
        }
        error_setg(errp, "threshold may not exceed children count");
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "vote-threshold", "a value >= 1");
    }

    return -ERANGE;
}
|
|
|
|
|
|
|
|
/* Runtime options accepted by the quorum driver (parsed in quorum_open()) */
static QemuOptsList quorum_runtime_opts = {
    .name = "quorum",
    .head = QTAILQ_HEAD_INITIALIZER(quorum_runtime_opts.head),
    .desc = {
        {
            /* Minimum number of agreeing children for a successful vote */
            .name = QUORUM_OPT_VOTE_THRESHOLD,
            .type = QEMU_OPT_NUMBER,
            .help = "The number of vote needed for reaching quorum",
        },
        {
            /* Emulate blkverify (requires exactly 2 children, threshold 2) */
            .name = QUORUM_OPT_BLKVERIFY,
            .type = QEMU_OPT_BOOL,
            .help = "Trigger block verify mode if set",
        },
        {
            /* Rewrite children that lost a read vote with the winning data */
            .name = QUORUM_OPT_REWRITE,
            .type = QEMU_OPT_BOOL,
            .help = "Rewrite corrupted block on read quorum",
        },
        {
            /* Read strategy: vote on all children, or first-success FIFO */
            .name = QUORUM_OPT_READ_PATTERN,
            .type = QEMU_OPT_STRING,
            .help = "Allowed pattern: quorum, fifo. Quorum is default",
        },
        { /* end of list */ }
    },
};
|
|
|
|
|
2020-11-13 19:52:32 +03:00
|
|
|
/*
 * Recompute bs->supported_zero_flags: start from the full set this
 * driver can pass through, keep only the flags supported by every
 * child, and unconditionally add BDRV_REQ_WRITE_UNCHANGED.
 */
static void quorum_refresh_flags(BlockDriverState *bs)
{
    BDRVQuorumState *s = bs->opaque;
    unsigned int zero_flags =
        BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
    int i;

    /* Intersect with what each child supports */
    for (i = 0; i < s->num_children; i++) {
        zero_flags &= s->children[i]->bs->supported_zero_flags;
    }

    bs->supported_zero_flags = zero_flags | BDRV_REQ_WRITE_UNCHANGED;
}
|
|
|
|
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
BDRVQuorumState *s = bs->opaque;
|
2014-08-28 09:56:12 +04:00
|
|
|
QemuOpts *opts = NULL;
|
2017-08-24 11:46:03 +03:00
|
|
|
const char *pattern_str;
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
bool *opened;
|
|
|
|
int i;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
qdict_flatten(options);
|
|
|
|
|
2015-01-21 20:49:28 +03:00
|
|
|
/* count how many different children are present */
|
|
|
|
s->num_children = qdict_array_entries(options, "children.");
|
|
|
|
if (s->num_children < 0) {
|
error: Avoid unnecessary error_propagate() after error_setg()
Replace
error_setg(&err, ...);
error_propagate(errp, err);
by
error_setg(errp, ...);
Related pattern:
if (...) {
error_setg(&err, ...);
goto out;
}
...
out:
error_propagate(errp, err);
return;
When all paths to label out are that way, replace by
if (...) {
error_setg(errp, ...);
return;
}
and delete the label along with the error_propagate().
When we have at most one other path that actually needs to propagate,
and maybe one at the end that where propagation is unnecessary, e.g.
foo(..., &err);
if (err) {
goto out;
}
...
bar(..., &err);
out:
error_propagate(errp, err);
return;
move the error_propagate() to where it's needed, like
if (...) {
foo(..., &err);
error_propagate(errp, err);
return;
}
...
bar(..., errp);
return;
and transform the error_setg() as above.
In some places, the transformation results in obviously unnecessary
error_propagate(). The next few commits will eliminate them.
Bonus: the elimination of gotos will make later patches in this series
easier to review.
Candidates for conversion tracked down with this Coccinelle script:
@@
identifier err, errp;
expression list args;
@@
- error_setg(&err, args);
+ error_setg(errp, args);
... when != err
error_propagate(errp, err);
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-34-armbru@redhat.com>
2020-07-07 19:06:01 +03:00
|
|
|
error_setg(errp, "Option children is not a valid array");
|
2014-02-22 01:30:37 +04:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto exit;
|
|
|
|
}
|
2016-05-10 10:36:38 +03:00
|
|
|
if (s->num_children < 1) {
|
error: Avoid unnecessary error_propagate() after error_setg()
Replace
error_setg(&err, ...);
error_propagate(errp, err);
by
error_setg(errp, ...);
Related pattern:
if (...) {
error_setg(&err, ...);
goto out;
}
...
out:
error_propagate(errp, err);
return;
When all paths to label out are that way, replace by
if (...) {
error_setg(errp, ...);
return;
}
and delete the label along with the error_propagate().
When we have at most one other path that actually needs to propagate,
and maybe one at the end that where propagation is unnecessary, e.g.
foo(..., &err);
if (err) {
goto out;
}
...
bar(..., &err);
out:
error_propagate(errp, err);
return;
move the error_propagate() to where it's needed, like
if (...) {
foo(..., &err);
error_propagate(errp, err);
return;
}
...
bar(..., errp);
return;
and transform the error_setg() as above.
In some places, the transformation results in obviously unnecessary
error_propagate(). The next few commits will eliminate them.
Bonus: the elimination of gotos will make later patches in this series
easier to review.
Candidates for conversion tracked down with this Coccinelle script:
@@
identifier err, errp;
expression list args;
@@
- error_setg(&err, args);
+ error_setg(errp, args);
... when != err
error_propagate(errp, err);
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-34-armbru@redhat.com>
2020-07-07 19:06:01 +03:00
|
|
|
error_setg(errp, "Number of provided children must be 1 or more");
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
opts = qemu_opts_create(&quorum_runtime_opts, NULL, 0, &error_abort);
|
2020-07-07 19:06:05 +03:00
|
|
|
if (!qemu_opts_absorb_qdict(opts, options, errp)) {
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->threshold = qemu_opt_get_number(opts, QUORUM_OPT_VOTE_THRESHOLD, 0);
|
2015-07-03 09:45:06 +03:00
|
|
|
/* and validate it against s->num_children */
|
error: Avoid unnecessary error_propagate() after error_setg()
Replace
error_setg(&err, ...);
error_propagate(errp, err);
by
error_setg(errp, ...);
Related pattern:
if (...) {
error_setg(&err, ...);
goto out;
}
...
out:
error_propagate(errp, err);
return;
When all paths to label out are that way, replace by
if (...) {
error_setg(errp, ...);
return;
}
and delete the label along with the error_propagate().
When we have at most one other path that actually needs to propagate,
and maybe one at the end that where propagation is unnecessary, e.g.
foo(..., &err);
if (err) {
goto out;
}
...
bar(..., &err);
out:
error_propagate(errp, err);
return;
move the error_propagate() to where it's needed, like
if (...) {
foo(..., &err);
error_propagate(errp, err);
return;
}
...
bar(..., errp);
return;
and transform the error_setg() as above.
In some places, the transformation results in obviously unnecessary
error_propagate(). The next few commits will eliminate them.
Bonus: the elimination of gotos will make later patches in this series
easier to review.
Candidates for conversion tracked down with this Coccinelle script:
@@
identifier err, errp;
expression list args;
@@
- error_setg(&err, args);
+ error_setg(errp, args);
... when != err
error_propagate(errp, err);
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-34-armbru@redhat.com>
2020-07-07 19:06:01 +03:00
|
|
|
ret = quorum_valid_threshold(s->threshold, s->num_children, errp);
|
2015-07-03 09:45:06 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
|
2017-08-24 11:46:03 +03:00
|
|
|
pattern_str = qemu_opt_get(opts, QUORUM_OPT_READ_PATTERN);
|
|
|
|
if (!pattern_str) {
|
|
|
|
ret = QUORUM_READ_PATTERN_QUORUM;
|
|
|
|
} else {
|
2017-08-24 11:46:10 +03:00
|
|
|
ret = qapi_enum_parse(&QuorumReadPattern_lookup, pattern_str,
|
2017-08-24 11:46:03 +03:00
|
|
|
-EINVAL, NULL);
|
|
|
|
}
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
if (ret < 0) {
|
error: Avoid unnecessary error_propagate() after error_setg()
Replace
error_setg(&err, ...);
error_propagate(errp, err);
by
error_setg(errp, ...);
Related pattern:
if (...) {
error_setg(&err, ...);
goto out;
}
...
out:
error_propagate(errp, err);
return;
When all paths to label out are that way, replace by
if (...) {
error_setg(errp, ...);
return;
}
and delete the label along with the error_propagate().
When we have at most one other path that actually needs to propagate,
and maybe one at the end that where propagation is unnecessary, e.g.
foo(..., &err);
if (err) {
goto out;
}
...
bar(..., &err);
out:
error_propagate(errp, err);
return;
move the error_propagate() to where it's needed, like
if (...) {
foo(..., &err);
error_propagate(errp, err);
return;
}
...
bar(..., errp);
return;
and transform the error_setg() as above.
In some places, the transformation results in obviously unnecessary
error_propagate(). The next few commits will eliminate them.
Bonus: the elimination of gotos will make later patches in this series
easier to review.
Candidates for conversion tracked down with this Coccinelle script:
@@
identifier err, errp;
expression list args;
@@
- error_setg(&err, args);
+ error_setg(errp, args);
... when != err
error_propagate(errp, err);
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-34-armbru@redhat.com>
2020-07-07 19:06:01 +03:00
|
|
|
error_setg(errp, "Please set read-pattern as fifo or quorum");
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
goto exit;
|
|
|
|
}
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
s->read_pattern = ret;
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
if (s->read_pattern == QUORUM_READ_PATTERN_QUORUM) {
|
2018-10-17 17:33:50 +03:00
|
|
|
s->is_blkverify = qemu_opt_get_bool(opts, QUORUM_OPT_BLKVERIFY, false);
|
|
|
|
if (s->is_blkverify && (s->num_children != 2 || s->threshold != 2)) {
|
error: Avoid unnecessary error_propagate() after error_setg()
Replace
error_setg(&err, ...);
error_propagate(errp, err);
by
error_setg(errp, ...);
Related pattern:
if (...) {
error_setg(&err, ...);
goto out;
}
...
out:
error_propagate(errp, err);
return;
When all paths to label out are that way, replace by
if (...) {
error_setg(errp, ...);
return;
}
and delete the label along with the error_propagate().
When we have at most one other path that actually needs to propagate,
and maybe one at the end that where propagation is unnecessary, e.g.
foo(..., &err);
if (err) {
goto out;
}
...
bar(..., &err);
out:
error_propagate(errp, err);
return;
move the error_propagate() to where it's needed, like
if (...) {
foo(..., &err);
error_propagate(errp, err);
return;
}
...
bar(..., errp);
return;
and transform the error_setg() as above.
In some places, the transformation results in obviously unnecessary
error_propagate(). The next few commits will eliminate them.
Bonus: the elimination of gotos will make later patches in this series
easier to review.
Candidates for conversion tracked down with this Coccinelle script:
@@
identifier err, errp;
expression list args;
@@
- error_setg(&err, args);
+ error_setg(errp, args);
... when != err
error_propagate(errp, err);
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-34-armbru@redhat.com>
2020-07-07 19:06:01 +03:00
|
|
|
error_setg(errp, "blkverify=on can only be set if there are "
|
2018-10-17 17:33:50 +03:00
|
|
|
"exactly two files and vote-threshold is 2");
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto exit;
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs(block mirror) to meet various High Availibility needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
s->rewrite_corrupted = qemu_opt_get_bool(opts, QUORUM_OPT_REWRITE,
|
|
|
|
false);
|
|
|
|
if (s->rewrite_corrupted && s->is_blkverify) {
|
error: Avoid unnecessary error_propagate() after error_setg()
Replace
error_setg(&err, ...);
error_propagate(errp, err);
by
error_setg(errp, ...);
Related pattern:
if (...) {
error_setg(&err, ...);
goto out;
}
...
out:
error_propagate(errp, err);
return;
When all paths to label out are that way, replace by
if (...) {
error_setg(errp, ...);
return;
}
and delete the label along with the error_propagate().
When we have at most one other path that actually needs to propagate,
and maybe one at the end that where propagation is unnecessary, e.g.
foo(..., &err);
if (err) {
goto out;
}
...
bar(..., &err);
out:
error_propagate(errp, err);
return;
move the error_propagate() to where it's needed, like
if (...) {
foo(..., &err);
error_propagate(errp, err);
return;
}
...
bar(..., errp);
return;
and transform the error_setg() as above.
In some places, the transformation results in obviously unnecessary
error_propagate(). The next few commits will eliminate them.
Bonus: the elimination of gotos will make later patches in this series
easier to review.
Candidates for conversion tracked down with this Coccinelle script:
@@
identifier err, errp;
expression list args;
@@
- error_setg(&err, args);
+ error_setg(errp, args);
... when != err
error_propagate(errp, err);
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-34-armbru@redhat.com>
2020-07-07 19:06:01 +03:00
|
|
|
error_setg(errp,
|
block/quorum: add simple read pattern support
This patch adds single read pattern to quorum driver and quorum vote is default
pattern.
For now we do a quorum vote on all the reads, it is designed for unreliable
underlying storage such as non-redundant NFS to make sure data integrity at the
cost of the read performance.
For some use cases as following:
VM
--------------
| |
v v
A B
Both A and B has hardware raid storage to justify the data integrity on its own.
So it would help performance if we do a single read instead of on all the nodes.
Further, if we run VM on either of the storage node, we can make a local read
request for better performance.
This patch generalize the above 2 nodes case in the N nodes. That is,
vm -> write to all the N nodes, read just one of them. If single read fails, we
try to read next node in FIFO order specified by the startup command.
The 2 nodes case is very similar to DRBD[1] though lack of auto-sync
functionality in the single device/node failure for now. But compared with DRBD
we still have some advantages over it:
- Suppose we have 20 VMs running on one(assume A) of 2 nodes' DRBD backed
storage. And if A crashes, we need to restart all the VMs on node B. But for
practice case, we can't because B might not have enough resources to setup 20 VMs
at once. So if we run our 20 VMs with quorum driver, and scatter the replicated
images over the data center, we can very likely restart 20 VMs without any
resource problem.
After all, I think we can build a more powerful replicated image functionality
on quorum and block jobs (block mirror) to meet various High Availability needs.
E.g, Enable single read pattern on 2 children,
-drive driver=quorum,children.0.file.filename=0.qcow2,\
children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
[1] http://en.wikipedia.org/wiki/Distributed_Replicated_Block_Device
[Dropped \n from an error_setg() error message
--Stefan]
Cc: Benoit Canet <benoit@irqsave.net>
Cc: Eric Blake <eblake@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2014-08-18 13:41:05 +04:00
|
|
|
"rewrite-corrupted=on cannot be used with blkverify=on");
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto exit;
|
|
|
|
}
|
2014-06-11 17:24:10 +04:00
|
|
|
}
|
|
|
|
|
2015-06-16 12:29:22 +03:00
|
|
|
/* allocate the children array */
|
|
|
|
s->children = g_new0(BdrvChild *, s->num_children);
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
opened = g_new0(bool, s->num_children);
|
|
|
|
|
2015-01-21 20:49:28 +03:00
|
|
|
for (i = 0; i < s->num_children; i++) {
|
2020-08-04 13:46:42 +03:00
|
|
|
char indexstr[INDEXSTR_LEN];
|
|
|
|
ret = snprintf(indexstr, INDEXSTR_LEN, "children.%d", i);
|
|
|
|
assert(ret < INDEXSTR_LEN);
|
2014-02-22 01:30:37 +04:00
|
|
|
|
2015-06-16 12:29:22 +03:00
|
|
|
s->children[i] = bdrv_open_child(NULL, options, indexstr, bs,
|
2020-05-13 14:05:31 +03:00
|
|
|
&child_of_bds, BDRV_CHILD_DATA, false,
|
2021-02-02 15:49:45 +03:00
|
|
|
errp);
|
|
|
|
if (!s->children[i]) {
|
2015-06-16 12:29:22 +03:00
|
|
|
ret = -EINVAL;
|
2014-02-22 01:30:37 +04:00
|
|
|
goto close_exit;
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
}
|
2015-01-21 20:49:28 +03:00
|
|
|
|
2014-02-22 01:30:37 +04:00
|
|
|
opened[i] = true;
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
}
|
2016-05-10 10:36:38 +03:00
|
|
|
s->next_child_index = s->num_children;
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
|
2018-04-21 16:29:25 +03:00
|
|
|
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
|
2020-11-13 19:52:32 +03:00
|
|
|
quorum_refresh_flags(bs);
|
2018-04-21 16:29:25 +03:00
|
|
|
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
g_free(opened);
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
close_exit:
|
|
|
|
/* cleanup on error */
|
2023-12-05 21:20:02 +03:00
|
|
|
bdrv_graph_wrlock();
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
for (i = 0; i < s->num_children; i++) {
|
|
|
|
if (!opened[i]) {
|
|
|
|
continue;
|
|
|
|
}
|
2015-06-16 12:29:22 +03:00
|
|
|
bdrv_unref_child(bs, s->children[i]);
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
}
|
2023-12-05 21:20:02 +03:00
|
|
|
bdrv_graph_wrunlock();
|
2015-06-16 12:29:22 +03:00
|
|
|
g_free(s->children);
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
g_free(opened);
|
|
|
|
exit:
|
2014-08-28 09:56:12 +04:00
|
|
|
qemu_opts_del(opts);
|
quorum: Add quorum_open() and quorum_close().
Example of command line:
-drive if=virtio,driver=quorum,\
children.0.file.filename=1.raw,\
children.0.node-name=1.raw,\
children.0.driver=raw,\
children.1.file.filename=2.raw,\
children.1.node-name=2.raw,\
children.1.driver=raw,\
children.2.file.filename=3.raw,\
children.2.node-name=3.raw,\
children.2.driver=raw,\
vote-threshold=2
blkverify=on with vote-threshold=2 and two files can be passed to
emulate blkverify.
Signed-off-by: Benoit Canet <benoit@irqsave.net>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-02-22 01:21:20 +04:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Drop every quorum child and release the child array. */
static void quorum_close(BlockDriverState *bs)
{
    BDRVQuorumState *s = bs->opaque;
    int child_idx;

    /*
     * Unreferencing children detaches them from the block graph, so
     * the whole loop runs under the graph writer lock.
     */
    bdrv_graph_wrlock();
    for (child_idx = 0; child_idx < s->num_children; child_idx++) {
        bdrv_unref_child(bs, s->children[child_idx]);
    }
    bdrv_graph_wrunlock();

    g_free(s->children);
}
|
|
|
|
|
2023-09-11 12:46:20 +03:00
|
|
|
/*
 * Hot-add @child_bs as a new quorum child of @bs.
 *
 * The child is attached under the name "children.%u", where %u is
 * s->next_child_index (monotonically increasing, so names of deleted
 * children are not reused).  On failure, an error is set in @errp and
 * the quorum state is left unchanged.
 */
static void GRAPH_WRLOCK
quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs, Error **errp)
{
    BDRVQuorumState *s = bs->opaque;
    BdrvChild *child;
    char indexstr[INDEXSTR_LEN];
    int ret;

    /* blkverify mode is hard-wired to exactly two children */
    if (s->is_blkverify) {
        error_setg(errp, "Cannot add a child to a quorum in blkverify mode");
        return;
    }

    /* Guard against overflow of the children array and of the name index */
    assert(s->num_children <= INT_MAX / sizeof(BdrvChild *));
    if (s->num_children == INT_MAX / sizeof(BdrvChild *) ||
        s->next_child_index == UINT_MAX) {
        error_setg(errp, "Too many children");
        return;
    }

    ret = snprintf(indexstr, INDEXSTR_LEN, "children.%u", s->next_child_index);
    if (ret < 0 || ret >= INDEXSTR_LEN) {
        error_setg(errp, "cannot generate child name");
        return;
    }
    /* Reserve the index now; rolled back below if the attach fails */
    s->next_child_index++;

    /* We can safely add the child now */
    bdrv_ref(child_bs);

    child = bdrv_attach_child(bs, child_bs, indexstr, &child_of_bds,
                              BDRV_CHILD_DATA, errp);
    if (child == NULL) {
        /* bdrv_attach_child() dropped the reference taken above */
        s->next_child_index--;
        return;
    }
    s->children = g_renew(BdrvChild *, s->children, s->num_children + 1);
    s->children[s->num_children++] = child;
    /* The supported request flags may change with the new child set */
    quorum_refresh_flags(bs);
}
|
|
|
|
|
2023-09-11 12:46:20 +03:00
|
|
|
/*
 * Hot-remove @child from quorum node @bs.
 *
 * Fails if removing the child would leave fewer children than the vote
 * threshold.  bdrv_del_child() has already verified that @child really
 * belongs to @bs.
 */
static void GRAPH_WRLOCK
quorum_del_child(BlockDriverState *bs, BdrvChild *child, Error **errp)
{
    BDRVQuorumState *s = bs->opaque;
    char indexstr[INDEXSTR_LEN];
    int i;

    /* Find the child's slot in s->children */
    for (i = 0; i < s->num_children; i++) {
        if (s->children[i] == child) {
            break;
        }
    }

    /* we have checked it in bdrv_del_child() */
    assert(i < s->num_children);

    if (s->num_children <= s->threshold) {
        error_setg(errp,
            "The number of children cannot be lower than the vote threshold %d",
            s->threshold);
        return;
    }

    /* We know now that num_children > threshold, so blkverify must be false */
    assert(!s->is_blkverify);

    /*
     * If this child happens to be the most recently added one, reuse
     * its index for the next hot-add instead of leaving a gap.
     */
    snprintf(indexstr, INDEXSTR_LEN, "children.%u", s->next_child_index - 1);
    if (!strncmp(child->name, indexstr, INDEXSTR_LEN)) {
        s->next_child_index--;
    }

    /* We can safely remove this child now */
    memmove(&s->children[i], &s->children[i + 1],
            (s->num_children - i - 1) * sizeof(BdrvChild *));
    s->children = g_renew(BdrvChild *, s->children, --s->num_children);

    bdrv_unref_child(bs, child);

    /* The supported request flags may change with the new child set */
    quorum_refresh_flags(bs);
}
|
|
|
|
|
2019-02-01 22:29:26 +03:00
|
|
|
static void quorum_gather_child_options(BlockDriverState *bs, QDict *target,
|
|
|
|
bool backing_overridden)
|
|
|
|
{
|
|
|
|
BDRVQuorumState *s = bs->opaque;
|
|
|
|
QList *children_list;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The generic implementation for gathering child options in
|
|
|
|
* bdrv_refresh_filename() would use the names of the children
|
|
|
|
* as specified for bdrv_open_child() or bdrv_attach_child(),
|
|
|
|
* which is "children.%u" with %u being a value
|
|
|
|
* (s->next_child_index) that is incremented each time a new child
|
|
|
|
* is added (and never decremented). Since children can be
|
|
|
|
* deleted at runtime, there may be gaps in that enumeration.
|
|
|
|
* When creating a new quorum BDS and specifying the children for
|
|
|
|
* it through runtime options, the enumeration used there may not
|
|
|
|
* have any gaps, though.
|
|
|
|
*
|
|
|
|
* Therefore, we have to create a new gap-less enumeration here
|
|
|
|
* (which we can achieve by simply putting all of the children's
|
|
|
|
* full_open_options into a QList).
|
|
|
|
*
|
|
|
|
* XXX: Note that there are issues with the current child option
|
|
|
|
* structure quorum uses (such as the fact that children do
|
|
|
|
* not really have unique permanent names). Therefore, this
|
|
|
|
* is going to have to change in the future and ideally we
|
|
|
|
* want quorum to be covered by the generic implementation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
children_list = qlist_new();
|
|
|
|
qdict_put(target, "children", children_list);
|
|
|
|
|
|
|
|
for (i = 0; i < s->num_children; i++) {
|
|
|
|
qlist_append(children_list,
|
|
|
|
qobject_ref(s->children[i]->bs->full_open_options));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-01 22:29:20 +03:00
|
|
|
static char *quorum_dirname(BlockDriverState *bs, Error **errp)
{
    /*
     * The children generally live below different directories, so no
     * unique dirname exists for this node.  For consistency, refuse
     * even when the children's dirnames happen to coincide or there is
     * only one child.
     */
    error_setg(errp, "Cannot generate a base directory for quorum nodes");
    return NULL;
}
|
|
|
|
|
2020-02-18 13:34:40 +03:00
|
|
|
static void quorum_child_perm(BlockDriverState *bs, BdrvChild *c,
|
2020-05-13 14:05:16 +03:00
|
|
|
BdrvChildRole role,
|
2020-02-18 13:34:40 +03:00
|
|
|
BlockReopenQueue *reopen_queue,
|
|
|
|
uint64_t perm, uint64_t shared,
|
|
|
|
uint64_t *nperm, uint64_t *nshared)
|
|
|
|
{
|
2020-11-14 00:17:16 +03:00
|
|
|
BDRVQuorumState *s = bs->opaque;
|
|
|
|
|
2020-02-18 13:34:40 +03:00
|
|
|
*nperm = perm & DEFAULT_PERM_PASSTHROUGH;
|
2020-11-14 00:17:16 +03:00
|
|
|
if (s->rewrite_corrupted) {
|
|
|
|
*nperm |= BLK_PERM_WRITE;
|
|
|
|
}
|
2020-02-18 13:34:40 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We cannot share RESIZE or WRITE, as this would make the
|
|
|
|
* children differ from each other.
|
|
|
|
*/
|
|
|
|
*nshared = (shared & (BLK_PERM_CONSISTENT_READ |
|
|
|
|
BLK_PERM_WRITE_UNCHANGED))
|
|
|
|
| DEFAULT_PERM_UNCHANGED;
|
|
|
|
}
|
|
|
|
|
2020-11-13 19:52:31 +03:00
|
|
|
/*
 * Each one of the children can report different status flags even
 * when they contain the same data, so what this function does is
 * return BDRV_BLOCK_ZERO if *all* children agree that a certain
 * region contains zeroes, and BDRV_BLOCK_DATA otherwise.
 */
static int coroutine_fn GRAPH_RDLOCK
quorum_co_block_status(BlockDriverState *bs, bool want_zero,
                       int64_t offset, int64_t count,
                       int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    BDRVQuorumState *s = bs->opaque;
    int i, ret;
    /* Smallest region all children agree is zero, largest known data */
    int64_t pnum_zero = count;
    int64_t pnum_data = 0;

    for (i = 0; i < s->num_children; i++) {
        int64_t bytes;
        ret = bdrv_co_common_block_status_above(s->children[i]->bs, NULL, false,
                                                want_zero, offset, count,
                                                &bytes, NULL, NULL, NULL);
        if (ret < 0) {
            /*
             * A failing child is reported as a quorum error; fall back
             * to the conservative answer (all data) and stop querying.
             */
            quorum_report_bad(QUORUM_OP_TYPE_READ, offset, count,
                              s->children[i]->bs->node_name, ret);
            pnum_data = count;
            break;
        }
        /*
         * Even if all children agree about whether there are zeroes
         * or not at @offset they might disagree on the size, so use
         * the smallest when reporting BDRV_BLOCK_ZERO and the largest
         * when reporting BDRV_BLOCK_DATA.
         */
        if (ret & BDRV_BLOCK_ZERO) {
            pnum_zero = MIN(pnum_zero, bytes);
        } else {
            pnum_data = MAX(pnum_data, bytes);
        }
    }

    /* Any non-zero (or failed) child makes the whole region "data" */
    if (pnum_data) {
        *pnum = pnum_data;
        return BDRV_BLOCK_DATA;
    } else {
        *pnum = pnum_zero;
        return BDRV_BLOCK_ZERO;
    }
}
|
|
|
|
|
2019-02-01 22:29:25 +03:00
|
|
|
/*
 * Runtime options that influence the data this node presents; used by
 * the block layer when deciding whether filenames can describe a node.
 */
static const char *const quorum_strong_runtime_opts[] = {
    QUORUM_OPT_VOTE_THRESHOLD,
    QUORUM_OPT_BLKVERIFY,
    QUORUM_OPT_REWRITE,
    QUORUM_OPT_READ_PATTERN,

    NULL
};
|
|
|
|
|
2014-02-22 01:21:11 +04:00
|
|
|
/* Driver table for the quorum block filter. */
static BlockDriver bdrv_quorum = {
    .format_name                        = "quorum",

    .instance_size                      = sizeof(BDRVQuorumState),

    .bdrv_open                          = quorum_open,
    .bdrv_close                         = quorum_close,
    .bdrv_gather_child_options          = quorum_gather_child_options,
    .bdrv_dirname                       = quorum_dirname,
    .bdrv_co_block_status               = quorum_co_block_status,

    .bdrv_co_flush                      = quorum_co_flush,

    .bdrv_co_getlength                  = quorum_co_getlength,

    /* Reads vote (or use the FIFO pattern); writes go to all children */
    .bdrv_co_preadv                     = quorum_co_preadv,
    .bdrv_co_pwritev                    = quorum_co_pwritev,
    .bdrv_co_pwrite_zeroes              = quorum_co_pwrite_zeroes,

    /* Children can be hot-added/removed via x-blockdev-change */
    .bdrv_add_child                     = quorum_add_child,
    .bdrv_del_child                     = quorum_del_child,

    .bdrv_child_perm                    = quorum_child_perm,

    .bdrv_recurse_can_replace           = quorum_recurse_can_replace,

    .strong_runtime_opts                = quorum_strong_runtime_opts,
};
|
|
|
|
|
|
|
|
static void bdrv_quorum_init(void)
|
|
|
|
{
|
2024-09-04 14:18:28 +03:00
|
|
|
if (!qcrypto_hash_supports(QCRYPTO_HASH_ALGO_SHA256)) {
|
2015-08-04 17:48:25 +03:00
|
|
|
/* SHA256 hash support is required for quorum device */
|
|
|
|
return;
|
|
|
|
}
|
2014-02-22 01:21:11 +04:00
|
|
|
bdrv_register(&bdrv_quorum);
|
|
|
|
}
|
|
|
|
|
|
|
|
block_init(bdrv_quorum_init);
|