diff --git a/block/backup.c b/block/backup.c
index 715e1d3be8..b26c22c4b8 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -202,22 +202,31 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     cow_request_begin(&cow_request, job, start, end);
 
     while (start < end) {
+        int64_t dirty_end;
+
         if (!hbitmap_get(job->copy_bitmap, start)) {
             trace_backup_do_cow_skip(job, start);
             start += job->cluster_size;
             continue; /* already copied */
         }
 
+        dirty_end = hbitmap_next_zero(job->copy_bitmap, start, (end - start));
+        if (dirty_end < 0) {
+            dirty_end = end;
+        }
+
         trace_backup_do_cow_process(job, start);
 
         if (job->use_copy_range) {
-            ret = backup_cow_with_offload(job, start, end, is_write_notifier);
+            ret = backup_cow_with_offload(job, start, dirty_end,
+                                          is_write_notifier);
             if (ret < 0) {
                 job->use_copy_range = false;
             }
         }
         if (!job->use_copy_range) {
-            ret = backup_cow_with_bounce_buffer(job, start, end, is_write_notifier,
+            ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
+                                                is_write_notifier,
                                                 error_is_read, &bounce_buffer);
         }
         if (ret < 0) {
@@ -648,7 +657,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     job->cluster_size = cluster_size;
     job->copy_bitmap = copy_bitmap;
     copy_bitmap = NULL;
-    job->use_copy_range = true;
+    job->use_copy_range = !compress; /* compression isn't supported for it */
     job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                         blk_get_max_transfer(job->target));
     job->copy_range_size = MAX(job->cluster_size,
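The loop above now clamps each copy to the contiguous dirty extent instead of handing backup_cow_with_offload()/backup_cow_with_bounce_buffer() the whole remaining range. Below is a minimal standalone sketch of that clamping; the flat copy_bitmap[] array and the next_zero() helper are invented for this example, standing in for QEMU's HBitmap and hbitmap_next_zero().

/*
 * Toy sketch (not QEMU code) of the dirty-extent clamping in
 * backup_do_cow(): copy runs stop at the first clean cluster.
 */
#include <inttypes.h>
#include <stdio.h>

#define CLUSTER_SIZE 65536LL

/* One flag per cluster: 1 = dirty (still to be copied), 0 = clean. */
static int copy_bitmap[8] = {0, 1, 1, 0, 1, 0, 0, 1};

/* First clean offset in [start, start + count), or -1 if all dirty. */
static int64_t next_zero(int64_t start, int64_t count)
{
    for (int64_t ofs = start; ofs < start + count; ofs += CLUSTER_SIZE) {
        if (!copy_bitmap[ofs / CLUSTER_SIZE]) {
            return ofs;
        }
    }
    return -1;
}

int main(void)
{
    int64_t start = 0;
    int64_t end = 8 * CLUSTER_SIZE;

    while (start < end) {
        if (!copy_bitmap[start / CLUSTER_SIZE]) {
            start += CLUSTER_SIZE; /* already copied, skip */
            continue;
        }

        /* Copy only up to the first clean cluster, not all of [start, end). */
        int64_t dirty_end = next_zero(start, end - start);
        if (dirty_end < 0) {
            dirty_end = end;
        }

        printf("copy [%" PRId64 ", %" PRId64 ")\n", start, dirty_end);
        start = dirty_end;
    }
    return 0;
}

With the bitmap above this prints copy [65536, 196608), copy [262144, 327680) and copy [458752, 524288): exactly the three contiguous dirty extents, and nothing in between.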
diff --git a/block/mirror.c b/block/mirror.c
index 8cb75fb409..9f5c59ece1 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -1481,6 +1481,15 @@ static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
     *nshared = BLK_PERM_ALL;
 }
 
+static void bdrv_mirror_top_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+    MirrorBDSOpaque *s = bs->opaque;
+
+    if (s && s->job && s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
+        bs->bl.request_alignment = s->job->granularity;
+    }
+}
+
 /* Dummy node that provides consistent read to its users without requiring it
  * from its backing file and that allows writes on the backing file chain. */
 static BlockDriver bdrv_mirror_top = {
@@ -1493,6 +1502,7 @@ static BlockDriver bdrv_mirror_top = {
     .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
     .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
     .bdrv_child_perm            = bdrv_mirror_top_child_perm,
+    .bdrv_refresh_limits        = bdrv_mirror_top_refresh_limits,
 };
 
 static BlockJob *mirror_start_job(
@@ -1637,6 +1647,25 @@ static BlockJob *mirror_start_job(
         s->should_complete = true;
     }
 
+    /*
+     * Must be called before we start tracking writes, but after
+     *
+     * ((MirrorBlockJob *)
+     *     ((MirrorBDSOpaque *)
+     *         mirror_top_bs->opaque
+     *     )->job
+     * )->copy_mode
+     *
+     * has the correct value.
+     * (We start tracking writes as of the following
+     * bdrv_create_dirty_bitmap() call.)
+     */
+    bdrv_refresh_limits(mirror_top_bs, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        goto fail;
+    }
+
     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
     if (!s->dirty_bitmap) {
         goto fail;
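Advertising bs->bl.request_alignment = granularity means the generic block layer widens any unaligned write into an aligned read-modify-write before it reaches the mirror filter, so active mirror only ever copies whole granularity chunks that line up with its dirty bitmap. A rough standalone illustration of that padding follows; the align_down()/align_up() helpers and the concrete numbers are invented for this sketch (QEMU's I/O path does the equivalent internally).

/*
 * Toy illustration (not QEMU code): with request_alignment set to
 * the mirror granularity, an unaligned write is widened to aligned
 * bounds before the driver sees it.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t align_down(uint64_t x, uint64_t align)
{
    return x - (x % align);
}

static uint64_t align_up(uint64_t x, uint64_t align)
{
    return align_down(x + align - 1, align);
}

int main(void)
{
    uint64_t granularity = 65536;   /* the job's granularity */
    uint64_t offset = 1048576 + 42; /* unaligned write, as in test 151 */
    uint64_t bytes = 1;

    uint64_t head = align_down(offset, granularity);
    uint64_t tail = align_up(offset + bytes, granularity);

    printf("guest write:  [%" PRIu64 ", %" PRIu64 ")\n", offset, offset + bytes);
    printf("padded R-M-W: [%" PRIu64 ", %" PRIu64 ")\n", head, tail);
    return 0;
}

The one-byte write at 1M + 42 issued by the new test case in 151 below is thus expanded to the surrounding 64k chunk before the write-blocking mirror path has to copy it.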
diff --git a/tests/qemu-iotests/056 b/tests/qemu-iotests/056
index f40fc11a09..e761e465ae 100755
--- a/tests/qemu-iotests/056
+++ b/tests/qemu-iotests/056
@@ -133,6 +133,7 @@ class BackupTest(iotests.QMPTestCase):
         self.vm = iotests.VM()
         self.test_img = img_create('test')
         self.dest_img = img_create('dest')
+        self.ref_img = img_create('ref')
         self.vm.add_drive(self.test_img)
         self.vm.launch()
 
@@ -140,6 +141,7 @@ class BackupTest(iotests.QMPTestCase):
         self.vm.shutdown()
         try_remove(self.test_img)
         try_remove(self.dest_img)
+        try_remove(self.ref_img)
 
     def hmp_io_writes(self, drive, patterns):
         for pattern in patterns:
@@ -177,6 +179,43 @@ class BackupTest(iotests.QMPTestCase):
             self.assert_qmp(event, 'data/error', qerror)
             return False
 
+    def test_overlapping_writes(self):
+        # Write something to back up
+        self.hmp_io_writes('drive0', [('42', '0M', '2M')])
+
+        # Create a reference backup
+        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
+                                 sync='full', target=self.ref_img,
+                                 auto_dismiss=False)
+        res = self.vm.qmp('block-job-dismiss', id='drive0')
+        self.assert_qmp(res, 'return', {})
+
+        # Now to the test backup: We simulate the following guest
+        # writes:
+        # (1) [1M + 64k, 1M + 128k): Afterwards, everything in that
+        #     area should be in the target image, and we must not copy
+        #     it again (because the source image has changed now)
+        #     (64k is the job's cluster size)
+        # (2) [1M, 2M): The backup job must not get overeager. It
+        #     must copy [1M, 1M + 64k) and [1M + 128k, 2M) separately,
+        #     but not the area in between.
+
+        self.qmp_backup(device='drive0', format=iotests.imgfmt, sync='full',
+                        target=self.dest_img, speed=1, auto_dismiss=False)
+
+        self.hmp_io_writes('drive0', [('23', '%ik' % (1024 + 64), '64k'),
+                                      ('66', '1M', '1M')])
+
+        # Let the job complete
+        res = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
+        self.assert_qmp(res, 'return', {})
+        self.qmp_backup_wait('drive0')
+        res = self.vm.qmp('block-job-dismiss', id='drive0')
+        self.assert_qmp(res, 'return', {})
+
+        self.assertTrue(iotests.compare_images(self.ref_img, self.dest_img),
+                        'target image does not match reference image')
+
     def test_dismiss_false(self):
         res = self.vm.qmp('query-block-jobs')
         self.assert_qmp(res, 'return', [])
diff --git a/tests/qemu-iotests/056.out b/tests/qemu-iotests/056.out
index dae404e278..36376bed87 100644
--- a/tests/qemu-iotests/056.out
+++ b/tests/qemu-iotests/056.out
@@ -1,5 +1,5 @@
-.........
+..........
 ----------------------------------------------------------------------
-Ran 9 tests
+Ran 10 tests
 
 OK
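The comments in test_overlapping_writes describe the interplay this series fixes: a guest write forces the old contents of its own clusters into the target (copy-before-write) and clears their dirty bits, and a later overlapping write must copy around the resulting hole rather than re-copy it. The toy model below captures that expectation; the cow_before_write() helper and cluster layout are invented for this sketch, standing in for QEMU's write notifier, and the background copying the real job also performs is omitted.

/*
 * Toy model (not QEMU code) of the copy-before-write behavior that
 * test_overlapping_writes checks: write (1) leaves a clean hole
 * that write (2) must skip.
 */
#include <inttypes.h>
#include <stdio.h>

#define CLUSTER 65536LL
#define MIB 1048576LL
#define NB_CLUSTERS 32 /* 2 MiB of data */

static int dirty[NB_CLUSTERS]; /* 1 = old data still to be copied */

/* Copy-before-write: flush the dirty clusters touched by [offset, end). */
static void cow_before_write(int64_t offset, int64_t end)
{
    int64_t start = offset;

    while (start < end) {
        if (!dirty[start / CLUSTER]) {
            start += CLUSTER; /* already in the target */
            continue;
        }

        /*
         * Extend over the contiguous dirty extent, clearing bits as we
         * go (QEMU finds the extent with hbitmap_next_zero() and resets
         * the bits as it copies).
         */
        int64_t dirty_end = start;
        while (dirty_end < end && dirty[dirty_end / CLUSTER]) {
            dirty[dirty_end / CLUSTER] = 0;
            dirty_end += CLUSTER;
        }

        printf("copy old data [%" PRId64 ", %" PRId64 ")\n", start, dirty_end);
        start = dirty_end;
    }
}

int main(void)
{
    for (int i = 0; i < NB_CLUSTERS; i++) {
        dirty[i] = 1; /* full backup: everything starts dirty */
    }

    /* Guest write (1): [1M + 64k, 1M + 128k) */
    cow_before_write(MIB + CLUSTER, MIB + 2 * CLUSTER);

    /* Guest write (2): [1M, 2M) -- must skip the hole left by (1) */
    cow_before_write(MIB, 2 * MIB);
    return 0;
}

This prints copy old data [1114112, 1179648) for write (1), then [1048576, 1114112) and [1179648, 2097152) for write (2), matching the two separate copies the test demands.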
diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124
index 80b356f7bb..3440f54781 100755
--- a/tests/qemu-iotests/124
+++ b/tests/qemu-iotests/124
@@ -212,25 +212,28 @@ class TestIncrementalBackupBase(iotests.QMPTestCase):
         return bitmap
 
 
-    def prepare_backup(self, bitmap=None, parent=None):
+    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
         if bitmap is None:
             bitmap = self.bitmaps[-1]
         if parent is None:
             parent, _ = bitmap.last_target()
 
         target, _ = bitmap.new_target()
-        self.img_create(target, bitmap.drive['fmt'], parent=parent)
+        self.img_create(target, bitmap.drive['fmt'], parent=parent,
+                        **kwargs)
         return target
 
     def create_incremental(self, bitmap=None, parent=None,
-                           parentFormat=None, validate=True):
+                           parentFormat=None, validate=True,
+                           target=None):
         if bitmap is None:
             bitmap = self.bitmaps[-1]
         if parent is None:
             parent, _ = bitmap.last_target()
 
-        target = self.prepare_backup(bitmap, parent)
+        if target is None:
+            target = self.prepare_backup(bitmap, parent)
 
         res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                  device=bitmap.drive['id'],
                                  sync='incremental', bitmap=bitmap.name,
@@ -572,6 +575,33 @@ class TestIncrementalBackup(TestIncrementalBackupBase):
                           'bitmap0', self.drives[0],
                           granularity=64000)
 
+    def test_growing_before_backup(self):
+        '''
+        Test: Add a bitmap, truncate the image, write past the old
+        end, do a backup.
+
+        Incremental backup should not ignore dirty bits past the old
+        image end.
+        '''
+        self.assert_no_active_block_jobs()
+
+        self.create_anchor_backup()
+
+        self.add_bitmap('bitmap0', self.drives[0])
+
+        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
+                          size=(65 * 1048576))
+        self.assert_qmp(res, 'return', {})
+
+        # Dirty the image past the old end
+        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')
+
+        target = self.prepare_backup(size='65M')
+        self.create_incremental(target=target)
+
+        self.vm.shutdown()
+        self.check_backups()
+
 
 class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
     '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''
diff --git a/tests/qemu-iotests/124.out b/tests/qemu-iotests/124.out
index 281b69efea..fa16b5ccef 100644
--- a/tests/qemu-iotests/124.out
+++ b/tests/qemu-iotests/124.out
@@ -1,5 +1,5 @@
-............
+.............
 ----------------------------------------------------------------------
-Ran 12 tests
+Ran 13 tests
 
 OK
diff --git a/tests/qemu-iotests/151 b/tests/qemu-iotests/151
index 1bb74d67c4..ad7359fc8d 100755
--- a/tests/qemu-iotests/151
+++ b/tests/qemu-iotests/151
@@ -114,6 +114,31 @@ class TestActiveMirror(iotests.QMPTestCase):
     def testActiveIOFlushed(self):
         self.doActiveIO(True)
 
+    def testUnalignedActiveIO(self):
+        # Fill the source image
+        result = self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')
+
+        # Start the block job (very slowly)
+        result = self.vm.qmp('blockdev-mirror',
+                             job_id='mirror',
+                             filter_node_name='mirror-node',
+                             device='source-node',
+                             target='target-node',
+                             sync='full',
+                             copy_mode='write-blocking',
+                             buf_size=(1048576 // 4),
+                             speed=1)
+        self.assert_qmp(result, 'return', {})
+
+        # Start an unaligned request to a dirty area
+        result = self.vm.hmp_qemu_io('source', 'write -P 2 %i 1' % (1048576 + 42))
+
+        # Let the job finish
+        result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
+        self.assert_qmp(result, 'return', {})
+        self.complete_and_wait(drive='mirror')
+
+        self.potential_writes_in_flight = False
 
 
 if __name__ == '__main__':
diff --git a/tests/qemu-iotests/151.out b/tests/qemu-iotests/151.out
index fbc63e62f8..8d7e996700 100644
--- a/tests/qemu-iotests/151.out
+++ b/tests/qemu-iotests/151.out
@@ -1,5 +1,5 @@
-..
+...
 ----------------------------------------------------------------------
-Ran 2 tests
+Ran 3 tests
 
 OK
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 7905212a8b..bcc0acdc6a 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -53,7 +53,9 @@
  */
 
 struct HBitmap {
-    /* Size of the bitmap, as requested in hbitmap_alloc. */
+    /*
+     * Size of the bitmap, as requested in hbitmap_alloc or in hbitmap_truncate.
+     */
     uint64_t orig_size;
 
     /* Number of total bits in the bottom level. */
@@ -732,6 +734,8 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size)
     uint64_t num_elements = size;
     uint64_t old;
 
+    hb->orig_size = size;
+
     /* Size comes in as logical elements, adjust for granularity. */
     size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
     assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
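The one-line hb->orig_size update matters because HBitmap queries such as hbitmap_next_zero() and hbitmap_next_dirty_area() bound their search by the recorded size. Before this fix, growing a bitmap through hbitmap_truncate() left orig_size at the old value, so dirty bits past the old image end were never reported, which is exactly what test 124's test_growing_before_backup exercises. A toy model of the failure mode follows; the flat toy_bitmap structure and its helpers are invented for this sketch, standing in for the real hierarchical HBitmap.

/*
 * Toy model (not QEMU code) of the orig_size bug fixed above: if
 * growing the bitmap does not update the recorded size, queries
 * that clamp to it never see dirty bits past the old end.
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_bitmap {
    uint64_t orig_size; /* size the queries are clamped to */
    uint64_t nb_bits;   /* actual allocation */
    unsigned char *bits;
};

static int64_t toy_next_dirty(struct toy_bitmap *b, uint64_t start)
{
    for (uint64_t i = start; i < b->orig_size; i++) { /* note the clamp */
        if (b->bits[i]) {
            return i;
        }
    }
    return -1;
}

static void toy_truncate(struct toy_bitmap *b, uint64_t size, int fixed)
{
    if (fixed) {
        b->orig_size = size; /* the one-line fix from hbitmap_truncate() */
    }
    b->bits = realloc(b->bits, size);
    memset(b->bits + b->nb_bits, 0, size - b->nb_bits); /* assumes growth */
    b->nb_bits = size;
}

static void run(int fixed)
{
    struct toy_bitmap b = {.orig_size = 64, .nb_bits = 64};

    b.bits = calloc(64, 1);
    toy_truncate(&b, 65, fixed);
    b.bits[64] = 1; /* dirty "cluster" past the old end, as in test 124 */

    printf("%s: next dirty = %" PRId64 "\n",
           fixed ? "fixed " : "broken", toy_next_dirty(&b, 0));
    free(b.bits);
}

int main(void)
{
    run(0); /* broken: next dirty = -1, the new data is invisible */
    run(1); /* fixed:  next dirty = 64 */
    return 0;
}

run(0) models the pre-fix behavior, where the bit set past the old end is never found; run(1) models the fixed hbitmap_truncate(), after which the incremental backup sees and copies the grown area.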