block/backup: move to copy_bitmap with granularity

We are going to share this bitmap between backup and the backup-top filter
driver, so let's share something more meaningful. It also simplifies
some calculations.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20190429090842.57910-3-vsementsov@virtuozzo.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
Author:    Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Date:      2019-04-29 12:08:39 +03:00
Committer: Max Reitz <mreitz@redhat.com>
commit a8389e315e
parent c2da3413c0
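
Background for the diff below (an editorial illustration, not part of the original
patch): job->copy_bitmap becomes an HBitmap allocated with
hbitmap_alloc(len, ctz32(job->cluster_size)), so hbitmap_set()/hbitmap_reset()/
hbitmap_get() now take byte offsets and byte counts and the bitmap performs the
cluster conversion internally. The standalone toy model below (ToyBitmap and the
toy_* helpers are invented names, not QEMU's util/hbitmap.c) sketches why the
explicit start / job->cluster_size arithmetic disappears from the call sites.

/*
 * Toy model of a bitmap with granularity -- for illustration only, this is
 * not QEMU's util/hbitmap.c.  With granularity = ctz32(cluster_size), one
 * bit covers cluster_size bytes, and callers pass byte offsets/counts.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLUSTER_SIZE 65536          /* e.g. BACKUP_CLUSTER_SIZE_DEFAULT */

typedef struct ToyBitmap {
    unsigned granularity;           /* log2 of bytes covered per bit */
    uint64_t size;                  /* length in bytes, like job->len */
    unsigned char bits[1024];       /* plenty for this demo */
} ToyBitmap;

static void toy_set(ToyBitmap *b, uint64_t start, uint64_t count)
{
    assert(count > 0);
    for (uint64_t i = start >> b->granularity;
         i <= (start + count - 1) >> b->granularity; i++) {
        b->bits[i / 8] |= 1u << (i % 8);
    }
}

static void toy_reset(ToyBitmap *b, uint64_t start, uint64_t count)
{
    assert(count > 0);
    for (uint64_t i = start >> b->granularity;
         i <= (start + count - 1) >> b->granularity; i++) {
        b->bits[i / 8] &= ~(1u << (i % 8));
    }
}

static bool toy_get(const ToyBitmap *b, uint64_t start)
{
    uint64_t i = start >> b->granularity;
    return b->bits[i / 8] & (1u << (i % 8));
}

int main(void)
{
    ToyBitmap copy_bitmap = {
        .granularity = __builtin_ctz(CLUSTER_SIZE),   /* like ctz32() */
        .size = 16 * (uint64_t)CLUSTER_SIZE,
    };

    /* Mark the whole disk, then clear one cluster by byte offset alone. */
    toy_set(&copy_bitmap, 0, copy_bitmap.size);
    toy_reset(&copy_bitmap, 3 * CLUSTER_SIZE, CLUSTER_SIZE);

    assert(!toy_get(&copy_bitmap, 3 * CLUSTER_SIZE));
    assert(toy_get(&copy_bitmap, 4 * CLUSTER_SIZE));
    printf("byte-granularity bitmap behaves as expected\n");
    return 0;
}

Because cluster_size is a power of two, an aligned byte range of cluster_size
bytes maps onto the same bit the old code addressed as start / cluster_size,
and a count kept in bytes can be subtracted from job->len directly, as the
job_progress_update() hunk below does.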

block/backup.c

@@ -112,7 +112,8 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
     int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
     int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
 
-    hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
+    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
+    hbitmap_reset(job->copy_bitmap, start, job->cluster_size);
     nbytes = MIN(job->cluster_size, job->len - start);
     if (!*bounce_buffer) {
         *bounce_buffer = blk_blockalign(blk, job->cluster_size);
@@ -145,7 +146,7 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
 
     return nbytes;
 fail:
-    hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
+    hbitmap_set(job->copy_bitmap, start, job->cluster_size);
     return ret;
 
 }
@@ -165,16 +166,15 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
     int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
 
     assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
+    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
     nbytes = MIN(job->copy_range_size, end - start);
     nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
-    hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
-                  nr_clusters);
+    hbitmap_reset(job->copy_bitmap, start, job->cluster_size * nr_clusters);
     ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                             read_flags, write_flags);
     if (ret < 0) {
         trace_backup_do_cow_copy_range_fail(job, start, ret);
-        hbitmap_set(job->copy_bitmap, start / job->cluster_size,
-                    nr_clusters);
+        hbitmap_set(job->copy_bitmap, start, job->cluster_size * nr_clusters);
         return ret;
     }
@@ -202,7 +202,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     cow_request_begin(&cow_request, job, start, end);
 
     while (start < end) {
-        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
+        if (!hbitmap_get(job->copy_bitmap, start)) {
             trace_backup_do_cow_skip(job, start);
             start += job->cluster_size;
             continue; /* already copied */
@@ -298,12 +298,16 @@ static void backup_clean(Job *job)
     assert(s->target);
     blk_unref(s->target);
     s->target = NULL;
+
+    if (s->copy_bitmap) {
+        hbitmap_free(s->copy_bitmap);
+        s->copy_bitmap = NULL;
+    }
 }
 
 void backup_do_checkpoint(BlockJob *job, Error **errp)
 {
     BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
-    int64_t len;
 
     assert(block_job_driver(job) == &backup_job_driver);
 
@@ -313,8 +317,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
         return;
     }
 
-    len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
-    hbitmap_set(backup_job->copy_bitmap, 0, len);
+    hbitmap_set(backup_job->copy_bitmap, 0, backup_job->len);
 }
 
 static void backup_drain(BlockJob *job)
@@ -369,16 +372,16 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 {
     int ret;
     bool error_is_read;
-    int64_t cluster;
+    int64_t offset;
     HBitmapIter hbi;
 
     hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
-    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+    while ((offset = hbitmap_iter_next(&hbi)) != -1) {
         do {
             if (yield_and_check(job)) {
                 return 0;
             }
-            ret = backup_do_cow(job, cluster * job->cluster_size,
+            ret = backup_do_cow(job, offset,
                                 job->cluster_size, &error_is_read, false);
             if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                            BLOCK_ERROR_ACTION_REPORT)
@@ -400,12 +403,9 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
     while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
                                              &offset, &bytes))
     {
-        uint64_t cluster = offset / job->cluster_size;
-        uint64_t end_cluster = DIV_ROUND_UP(offset + bytes, job->cluster_size);
-
-        hbitmap_set(job->copy_bitmap, cluster, end_cluster - cluster);
-
-        offset = end_cluster * job->cluster_size;
+        hbitmap_set(job->copy_bitmap, offset, bytes);
+
+        offset += bytes;
         if (offset >= job->len) {
             break;
         }
@@ -414,30 +414,27 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
     /* TODO job_progress_set_remaining() would make more sense */
     job_progress_update(&job->common.job,
-        job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
+                        job->len - hbitmap_count(job->copy_bitmap));
 }
 
 static int coroutine_fn backup_run(Job *job, Error **errp)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     BlockDriverState *bs = blk_bs(s->common.blk);
-    int64_t offset, nb_clusters;
+    int64_t offset;
     int ret = 0;
 
     QLIST_INIT(&s->inflight_reqs);
     qemu_co_rwlock_init(&s->flush_rwlock);
 
-    nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
     job_progress_set_remaining(job, s->len);
 
-    s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
     if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
         backup_incremental_init_copy_bitmap(s);
     } else {
-        hbitmap_set(s->copy_bitmap, 0, nb_clusters);
+        hbitmap_set(s->copy_bitmap, 0, s->len);
     }
 
     s->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &s->before_write);
@@ -518,7 +515,6 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
     /* wait until pending backup_do_cow() calls have completed */
     qemu_co_rwlock_wrlock(&s->flush_rwlock);
     qemu_co_rwlock_unlock(&s->flush_rwlock);
-    hbitmap_free(s->copy_bitmap);
 
     return ret;
 }
@@ -668,6 +664,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     } else {
         job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
     }
+
+    job->copy_bitmap = hbitmap_alloc(len, ctz32(job->cluster_size));
     job->use_copy_range = true;
     job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                         blk_get_max_transfer(job->target));