backup: add minimum cluster size to performance options
In the context of backup fleecing, discarding the source will not work when the fleecing image has a larger granularity than the one used for block-copy operations (which can happen if the backup target has a smaller cluster size), because cbw_co_pdiscard_snapshot() will align down the discard requests and thus effectively ignore them.

To make @discard-source work in such a scenario, allow specifying the minimum cluster size used for block-copy operations and thus, in particular, also the granularity for discard requests to the source.

Suggested-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Acked-by: Markus Armbruster <armbru@redhat.com> (QAPI schema)
Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
Message-Id: <20240711120915.310243-3-f.ebner@proxmox.com>
[vsementsov: switch version to 9.2 in QAPI doc]
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
parent: 9484ad6c17
commit: 6252deb244
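To illustrate the failure mode described in the commit message, here is a minimal, self-contained C sketch; it is not code from QEMU, and the cluster and request sizes are made-up example values. It mimics a copy-before-write filter that shrinks a discard request inwards to its own cluster boundaries: a request smaller than that cluster size collapses to nothing and is effectively ignored.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical granularities: the fleecing image uses 1 MiB clusters,
         * while the backup job issues discards at 64 KiB granularity. */
        uint64_t cluster_size = 1024 * 1024;
        uint64_t req_offset = 64 * 1024;
        uint64_t req_bytes = 64 * 1024;

        /* Shrink the request inwards to whole clusters ("align down"). */
        uint64_t start = ((req_offset + cluster_size - 1) / cluster_size) * cluster_size;
        uint64_t end = ((req_offset + req_bytes) / cluster_size) * cluster_size;

        if (end <= start) {
            /* The 64 KiB request falls inside a single 1 MiB cluster and
             * vanishes, so nothing is ever discarded. */
            printf("discard request dropped\n");
        } else {
            printf("discarding %" PRIu64 " bytes\n", end - start);
        }
        return 0;
    }

With a sufficiently large minimum cluster size, the block-copy granularity matches the fleecing granularity and the discard requests survive the alignment.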
block/backup.c
@@ -458,7 +458,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     }
 
     cbw = bdrv_cbw_append(bs, target, filter_node_name, discard_source,
-                          &bcs, errp);
+                          perf->min_cluster_size, &bcs, errp);
     if (!cbw) {
         goto error;
     }
block/copy-before-write.c
@@ -548,6 +548,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
                                   BlockDriverState *target,
                                   const char *filter_node_name,
                                   bool discard_source,
+                                  uint64_t min_cluster_size,
                                   BlockCopyState **bcs,
                                   Error **errp)
 {
@@ -567,6 +568,14 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
     qdict_put_str(opts, "file", bdrv_get_node_name(source));
     qdict_put_str(opts, "target", bdrv_get_node_name(target));
 
+    if (min_cluster_size > INT64_MAX) {
+        error_setg(errp, "min-cluster-size too large: %" PRIu64 " > %" PRIi64,
+                   min_cluster_size, INT64_MAX);
+        qobject_unref(opts);
+        return NULL;
+    }
+    qdict_put_int(opts, "min-cluster-size", (int64_t)min_cluster_size);
+
     top = bdrv_insert_node(source, opts, flags, errp);
     if (!top) {
         return NULL;
block/copy-before-write.h
@@ -40,6 +40,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
                                   BlockDriverState *target,
                                   const char *filter_node_name,
                                   bool discard_source,
+                                  uint64_t min_cluster_size,
                                   BlockCopyState **bcs,
                                   Error **errp);
 void bdrv_cbw_drop(BlockDriverState *bs);
blockdev.c
@@ -2655,6 +2655,9 @@ static BlockJob *do_backup_common(BackupCommon *backup,
         if (backup->x_perf->has_max_chunk) {
             perf.max_chunk = backup->x_perf->max_chunk;
         }
+        if (backup->x_perf->has_min_cluster_size) {
+            perf.min_cluster_size = backup->x_perf->min_cluster_size;
+        }
     }
 
     if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
qapi/block-core.json
@@ -1551,11 +1551,16 @@
 #     it should not be less than job cluster size which is calculated
 #     as maximum of target image cluster size and 64k.  Default 0.
 #
+# @min-cluster-size: Minimum size of blocks used by copy-before-write
+#     and background copy operations.  Has to be a power of 2.  No
+#     effect if smaller than the maximum of the target's cluster size
+#     and 64 KiB.  Default 0.  (Since 9.2)
+#
 # Since: 6.0
 ##
 { 'struct': 'BackupPerf',
-  'data': { '*use-copy-range': 'bool',
-            '*max-workers': 'int', '*max-chunk': 'int64' } }
+  'data': { '*use-copy-range': 'bool', '*max-workers': 'int',
+            '*max-chunk': 'int64', '*min-cluster-size': 'size' } }
 
 ##
 # @BackupCommon:
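A hedged usage sketch of the new option: the job id, node names, and the 1 MiB value below are hypothetical, and the fleecing topology (backup job reading via a snapshot-access node) is an assumption; only the argument names follow the schema above. If the fleecing image uses 1 MiB clusters, raising the block-copy granularity to match keeps @discard-source effective:

    { "execute": "blockdev-backup",
      "arguments": { "job-id": "backup0",
                     "device": "fleecing-access",
                     "target": "backup-target",
                     "sync": "full",
                     "discard-source": true,
                     "x-perf": { "min-cluster-size": 1048576 } } }

Here min-cluster-size is set to the fleecing image's granularity so that the discard requests issued after each copied area are cluster-aligned and not dropped by the alignment in cbw_co_pdiscard_snapshot().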