#!/usr/bin/env python3
# group: rw
#
# Test bitmap-sync backups (incremental, differential, and partials)
#
# Copyright (c) 2019 John Snow for Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# owner=jsnow@redhat.com

import math
import os

import iotests
from iotests import log, qemu_img

SIZE = 64 * 1024 * 1024
GRANULARITY = 64 * 1024


class Pattern:
    def __init__(self, byte, offset, size=GRANULARITY):
        self.byte = byte
        self.offset = offset
        self.size = size

    def bits(self, granularity):
        lower = self.offset // granularity
        upper = (self.offset + self.size - 1) // granularity
        return set(range(lower, upper + 1))


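# Illustrative note (not part of the original test): Pattern.bits() maps a
# byte range onto dirty-bitmap bit indices at the given granularity. With the
# default 64 KiB granularity, for example:
#   Pattern('0x49', 0x0000000).bits(GRANULARITY) == {0}    (first cluster)
#   Pattern('0x6c', 0x0100000).bits(GRANULARITY) == {16}   (1 MiB / 64 KiB)
#   Pattern('0x67', 0x3fe0000, 2 * GRANULARITY).bits(GRANULARITY) == {1022, 1023}
# A write that straddles a cluster boundary dirties every cluster it touches.

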
class PatternGroup:
    """Grouping of Pattern objects. Initialize with an iterable of Patterns."""
    def __init__(self, patterns):
        self.patterns = patterns

    def bits(self, granularity):
        """Calculate the unique bits dirtied by this pattern grouping"""
        res = set()
        for pattern in self.patterns:
            res |= pattern.bits(granularity)
        return res


GROUPS = [
    PatternGroup([
        # Batch 0: 4 clusters
        Pattern('0x49', 0x0000000),
        Pattern('0x6c', 0x0100000),   # 1M
        Pattern('0x6f', 0x2000000),   # 32M
        Pattern('0x76', 0x3ff0000)]), # 64M - 64K
    PatternGroup([
        # Batch 1: 6 clusters (3 new)
        Pattern('0x65', 0x0000000),   # Full overwrite
        Pattern('0x77', 0x00f8000),   # Partial-left (1M-32K)
        Pattern('0x72', 0x2008000),   # Partial-right (32M+32K)
        Pattern('0x69', 0x3fe0000)]), # Adjacent-left (64M - 128K)
    PatternGroup([
        # Batch 2: 7 clusters (3 new)
        Pattern('0x74', 0x0010000),   # Adjacent-right
        Pattern('0x69', 0x00e8000),   # Partial-left (1M-96K)
        Pattern('0x6e', 0x2018000),   # Partial-right (32M+96K)
        Pattern('0x67', 0x3fe0000,
                2*GRANULARITY)]),     # Overwrite [(64M-128K)-64M)
    PatternGroup([
        # Batch 3: 8 clusters (5 new)
        # Carefully chosen such that nothing re-dirties the one cluster
        # that copies out successfully before failure in Group #1.
        Pattern('0xaa', 0x0010000,
                3*GRANULARITY),       # Overwrite and 2x Adjacent-right
        Pattern('0xbb', 0x00d8000),   # Partial-left (1M-160K)
        Pattern('0xcc', 0x2028000),   # Partial-right (32M+160K)
        Pattern('0xdd', 0x3fc0000)]), # New; leaving a gap to the right
]


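# Illustrative note (not part of the original test): at 64 KiB granularity the
# four groups above dirty the following bit sets (worked out by hand here):
#   GROUPS[0].bits(GRANULARITY) == {0, 16, 512, 1023}                 # 4 clusters
#   GROUPS[1].bits(GRANULARITY) == {0, 15, 16, 512, 513, 1022}        # 6 (3 new)
#   GROUPS[2].bits(GRANULARITY) == {1, 14, 15, 513, 514, 1022, 1023}  # 7 (3 new)
#   GROUPS[3].bits(GRANULARITY) == {1, 2, 3, 13, 14, 514, 515, 1020}  # 8 (5 new)

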
class EmulatedBitmap:
    def __init__(self, granularity=GRANULARITY):
        self._bits = set()
        self.granularity = granularity

    def dirty_bits(self, bits):
        self._bits |= set(bits)

    def dirty_group(self, n):
        self.dirty_bits(GROUPS[n].bits(self.granularity))

    def clear(self):
        self._bits = set()

    def clear_bits(self, bits):
        self._bits -= set(bits)

    def clear_bit(self, bit):
        self.clear_bits({bit})

    def clear_group(self, n):
        self.clear_bits(GROUPS[n].bits(self.granularity))

    @property
    def first_bit(self):
        return sorted(self.bits)[0]

    @property
    def bits(self):
        return self._bits

    @property
    def count(self):
        return len(self.bits)

    def compare(self, qmp_bitmap):
        """
        Print a nice human-readable message checking that a bitmap as reported
        by the QMP interface has as many bits set as we expect it to.
        """

        name = qmp_bitmap.get('name', '(anonymous)')
        log("= Checking Bitmap {:s} =".format(name))

        want = self.count
        have = qmp_bitmap['count'] // qmp_bitmap['granularity']

        log("expecting {:d} dirty sectors; have {:d}. {:s}".format(
            want, have, "OK!" if want == have else "ERROR!"))
        log('')


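# Illustrative note (not part of the original test): EmulatedBitmap mirrors, in
# pure Python, what the guest-visible dirty bitmap should contain, so the test
# can diff it against QMP's view. A minimal sketch of the pattern used below:
#
#   ebitmap = EmulatedBitmap()
#   ebitmap.dirty_group(1)                            # model writes just issued
#   ebitmap.compare(vm.get_bitmap(node, 'bitmap0'))   # check against QMP's count
#
# (vm and node here are stand-ins for the objects created in test_bitmap_sync.)

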
class Drive:
    """Represents, vaguely, a drive attached to a VM.
    Includes format, graph, and device information."""

    def __init__(self, path, vm=None):
        self.path = path
        self.vm = vm
        self.fmt = None
        self.size = None
        self.node = None

    def img_create(self, fmt, size):
        self.fmt = fmt
        self.size = size
        iotests.qemu_img_create('-f', self.fmt, self.path, str(self.size))

    def create_target(self, name, fmt, size):
        basename = os.path.basename(self.path)
        file_node_name = "file_{}".format(basename)
        vm = self.vm

        log(vm.command('blockdev-create', job_id='bdc-file-job',
                       options={
                           'driver': 'file',
                           'filename': self.path,
                           'size': 0,
                       }))
        vm.run_job('bdc-file-job')
        log(vm.command('blockdev-add', driver='file',
                       node_name=file_node_name, filename=self.path))

        log(vm.command('blockdev-create', job_id='bdc-fmt-job',
                       options={
                           'driver': fmt,
                           'file': file_node_name,
                           'size': size,
                       }))
        vm.run_job('bdc-fmt-job')
        log(vm.command('blockdev-add', driver=fmt,
                       node_name=name,
                       file=file_node_name))
        self.fmt = fmt
        self.size = size
        self.node = name


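# Illustrative note (not part of the original test): create_target() builds a
# backup destination in two layers, roughly this QMP sequence (PATH, IMG, FMT,
# SIZE and NAME stand for the values the helper derives from its arguments):
#
#   blockdev-create {'driver': 'file', 'filename': PATH, 'size': 0}
#   blockdev-add    {'driver': 'file', 'node-name': 'file_IMG', 'filename': PATH}
#   blockdev-create {'driver': FMT, 'file': 'file_IMG', 'size': SIZE}
#   blockdev-add    {'driver': FMT, 'node-name': NAME, 'file': 'file_IMG'}
#
# i.e. the protocol (file) node is created and added first, then the format
# node is created on top of it and added under the caller-supplied node name.

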
def blockdev_backup(vm, device, target, sync, **kwargs):
    # Strip any arguments explicitly nulled by the caller:
    kwargs = {key: val for key, val in kwargs.items() if val is not None}
    result = vm.qmp_log('blockdev-backup',
                        device=device,
                        target=target,
                        sync=sync,
                        filter_node_name='backup-top',
                        x_perf={'max-workers': 1},
                        **kwargs)
    return result

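# Illustrative note (not part of the original test): for a typical call such as
# blockdev_backup(vm, 'drive0', 'backup_target_1', 'bitmap', job_id='backup_1',
# bitmap='bitmap0', bitmap_mode='on-success'), the logged QMP command looks
# roughly like this (qmp_log converts underscores in keyword names to dashes):
#
#   {"execute": "blockdev-backup",
#    "arguments": {"device": "drive0", "target": "backup_target_1",
#                  "sync": "bitmap", "bitmap": "bitmap0",
#                  "bitmap-mode": "on-success", "job-id": "backup_1",
#                  "filter-node-name": "backup-top",
#                  "x-perf": {"max-workers": 1}}}
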
def blockdev_backup_mktarget(drive, target_id, filepath, sync, **kwargs):
    target_drive = Drive(filepath, vm=drive.vm)
    target_drive.create_target(target_id, drive.fmt, drive.size)
    blockdev_backup(drive.vm, drive.node, target_id, sync, **kwargs)

def reference_backup(drive, n, filepath):
    log("--- Reference Backup #{:d} ---\n".format(n))
    target_id = "ref_target_{:d}".format(n)
    job_id = "ref_backup_{:d}".format(n)
    blockdev_backup_mktarget(drive, target_id, filepath, "full",
                             job_id=job_id)
    drive.vm.run_job(job_id, auto_dismiss=True)
    log('')

def backup(drive, n, filepath, sync, **kwargs):
    log("--- Test Backup #{:d} ---\n".format(n))
    target_id = "backup_target_{:d}".format(n)
    job_id = "backup_{:d}".format(n)
    kwargs.setdefault('auto-finalize', False)
    blockdev_backup_mktarget(drive, target_id, filepath, sync,
                             job_id=job_id, **kwargs)
    return job_id

def perform_writes(drive, n, filter_node_name=None):
    log("--- Write #{:d} ---\n".format(n))
    for pattern in GROUPS[n].patterns:
        cmd = "write -P{:s} 0x{:07x} 0x{:x}".format(
            pattern.byte,
            pattern.offset,
            pattern.size)
        log(cmd)
        log(drive.vm.hmp_qemu_io(filter_node_name or drive.node, cmd))
    bitmaps = drive.vm.query_bitmaps()
    log({'bitmaps': bitmaps}, indent=2)
    log('')
    return bitmaps

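# Illustrative note (not part of the original test): for GROUPS[0] the loop in
# perform_writes() emits qemu-io commands of the following shape (the offset is
# zero-padded to seven hex digits; the size defaults to one 64 KiB cluster):
#
#   write -P0x49 0x0000000 0x10000
#   write -P0x6c 0x0100000 0x10000
#   write -P0x6f 0x2000000 0x10000
#   write -P0x76 0x3ff0000 0x10000
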
def compare_images(image, reference, baseimg=None, expected_match=True):
    """
    Print a nice human-readable message comparing these images.
    """
    expected_ret = 0 if expected_match else 1
    if baseimg:
        assert qemu_img("rebase", "-u", "-b", baseimg, '-F', iotests.imgfmt,
                        image) == 0
    ret = qemu_img("compare", image, reference)
    log('qemu_img compare "{:s}" "{:s}" ==> {:s}, {:s}'.format(
        image, reference,
        "Identical" if ret == 0 else "Mismatch",
        "OK!" if ret == expected_ret else "ERROR!"),
        filters=[iotests.filter_testfiles])

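# Illustrative note (not part of the original test): compare_images() leans on
# qemu-img compare's documented exit codes -- 0 when the images hold identical
# data, 1 when they differ -- so expected_match simply selects which of the two
# codes counts as "OK!" in the logged verdict.
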
def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
    """
    Test bitmap backup routines.

    :param bsync_mode: Is the Bitmap Sync mode, and can be any of:
        - on-success: This is the "incremental" style mode. Bitmaps are
                      synchronized to what was copied out only on success.
                      (Partial images must be discarded.)
        - never: This is the "differential" style mode.
                 Bitmaps are never synchronized.
        - always: This is a "best effort" style mode.
                  Bitmaps are always synchronized, regardless of failure.
                  (Partial images must be kept.)

    :param msync_mode: The mirror sync mode to use for the first backup.
                       Can be any one of:
        - bitmap: Backups based on bitmap manifest.
        - full: Full backups.
        - top: Full backups of the top layer only.

    :param failure: Is the (optional) failure mode, and can be any of:
        - None: No failure. Test the normative path. Default.
        - simulated: Cancel the job right before it completes.
                     This also tests writes "during" the job.
        - intermediate: This tests a job that fails mid-process and produces
                        an incomplete backup. Testing limitations prevent
                        testing competing writes.
    """
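    # Illustrative note (not part of the original test): after the test backup
    # below, bitmap0 is expected to contain
    #   - on-success: only the writes issued while the job ran (on failure the
    #     bitmap is left untouched),
    #   - never: every write since the bitmap was added,
    #   - always: whatever the job failed to copy out, plus the new writes,
    # which is what the EmulatedBitmap bookkeeping in this function models.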
    with iotests.FilePath(
            'img', 'bsync1', 'bsync2', 'fbackup0', 'fbackup1', 'fbackup2') as \
            (img_path, bsync1, bsync2, fbackup0, fbackup1, fbackup2), \
         iotests.VM() as vm:

        mode = "Mode {:s}; Bitmap Sync {:s}".format(msync_mode, bsync_mode)
        preposition = "with" if failure else "without"
        cond = "{:s} {:s}".format(preposition,
                                  "{:s} failure".format(failure) if failure
                                  else "failure")
        log("\n=== {:s} {:s} ===\n".format(mode, cond))

        log('--- Preparing image & VM ---\n')
        drive0 = Drive(img_path, vm=vm)
        drive0.img_create(iotests.imgfmt, SIZE)
        vm.add_device("{},id=scsi0".format('virtio-scsi'))
        vm.launch()

        file_config = {
            'driver': 'file',
            'filename': drive0.path
        }

        if failure == 'intermediate':
            file_config = {
                'driver': 'blkdebug',
                'image': file_config,
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }, {
                    'event': 'read_aio',
                    'state': 2,
                    'new_state': 3
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 3,
                    'immediately': False,
                    'once': True
                }]
            }

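        # Illustrative note (not part of the original test): the blkdebug rules
        # above arm a one-shot EIO (errno 5). The node starts in state 1; the
        # first flush moves it to state 2, the next read_aio moves it to state
        # 3, and only then does a read_aio fail, once. The explicit 'flush'
        # issued later in this function is what arms the failure, so the backup
        # job's own reads trip it mid-copy.
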
        drive0.node = 'drive0'
        vm.qmp_log('blockdev-add',
                   filters=[iotests.filter_qmp_testfiles],
                   node_name=drive0.node,
                   driver=drive0.fmt,
                   file=file_config)
        log('')

        # 0 - Writes and Reference Backup
        perform_writes(drive0, 0)
        reference_backup(drive0, 0, fbackup0)
        log('--- Add Bitmap ---\n')
        vm.qmp_log("block-dirty-bitmap-add", node=drive0.node,
                   name="bitmap0", granularity=GRANULARITY)
        log('')
        ebitmap = EmulatedBitmap()

        # 1 - Writes and Reference Backup
        bitmaps = perform_writes(drive0, 1)
        ebitmap.dirty_group(1)
        bitmap = vm.get_bitmap(drive0.node, 'bitmap0', bitmaps=bitmaps)
        ebitmap.compare(bitmap)
        reference_backup(drive0, 1, fbackup1)

        # 1 - Test Backup (w/ Optional induced failure)
        if failure == 'intermediate':
            # Activate blkdebug induced failure for second-to-next read
            log(vm.hmp_qemu_io(drive0.node, 'flush'))
            log('')
        job = backup(drive0, 1, bsync1, msync_mode,
                     bitmap="bitmap0", bitmap_mode=bsync_mode)

        def _callback():
            """Issue writes while the job is open to test bitmap divergence."""
            # Note: when `failure` is 'intermediate', this isn't called.
            log('')
            bitmaps = perform_writes(drive0, 2, filter_node_name='backup-top')
            # Named bitmap (static, should be unchanged)
            ebitmap.compare(vm.get_bitmap(drive0.node, 'bitmap0',
                                          bitmaps=bitmaps))
            # Anonymous bitmap (dynamic, shows new writes)
            anonymous = EmulatedBitmap()
            anonymous.dirty_group(2)
            anonymous.compare(vm.get_bitmap(drive0.node, '', recording=True,
                                            bitmaps=bitmaps))

            # Simulate the order in which this will happen:
            # group 1 gets cleared first, then group two gets written.
            if ((bsync_mode == 'on-success' and not failure) or
                (bsync_mode == 'always')):
                ebitmap.clear()
            ebitmap.dirty_group(2)

        vm.run_job(job, auto_dismiss=True, auto_finalize=False,
                   pre_finalize=_callback,
                   cancel=(failure == 'simulated'))
        bitmaps = vm.query_bitmaps()
        log({'bitmaps': bitmaps}, indent=2)
        log('')

        if bsync_mode == 'always' and failure == 'intermediate':
            # TOP treats anything allocated as dirty, expect to see:
            if msync_mode == 'top':
                ebitmap.dirty_group(0)

            # We manage to copy one sector (one bit) before the error.
            ebitmap.clear_bit(ebitmap.first_bit)

            # Full returns all bits set except what was copied/skipped
            if msync_mode == 'full':
                fail_bit = ebitmap.first_bit
                ebitmap.clear()
                ebitmap.dirty_bits(range(fail_bit, SIZE // GRANULARITY))

        ebitmap.compare(vm.get_bitmap(drive0.node, 'bitmap0', bitmaps=bitmaps))

        # 2 - Writes and Reference Backup
        bitmaps = perform_writes(drive0, 3)
        ebitmap.dirty_group(3)
        ebitmap.compare(vm.get_bitmap(drive0.node, 'bitmap0', bitmaps=bitmaps))
        reference_backup(drive0, 2, fbackup2)

        # 2 - Bitmap Backup (In failure modes, this is a recovery.)
        job = backup(drive0, 2, bsync2, "bitmap",
                     bitmap="bitmap0", bitmap_mode=bsync_mode)
        vm.run_job(job, auto_dismiss=True, auto_finalize=False)
        bitmaps = vm.query_bitmaps()
        log({'bitmaps': bitmaps}, indent=2)
        log('')
        if bsync_mode != 'never':
            ebitmap.clear()
        ebitmap.compare(vm.get_bitmap(drive0.node, 'bitmap0', bitmaps=bitmaps))

        log('--- Cleanup ---\n')
        vm.qmp_log("block-dirty-bitmap-remove",
                   node=drive0.node, name="bitmap0")
        bitmaps = vm.query_bitmaps()
        log({'bitmaps': bitmaps}, indent=2)
        vm.shutdown()
        log('')

        log('--- Verification ---\n')
        # 'simulated' failures will actually all pass here because we canceled
        # while "pending". This is actually undefined behavior,
        # don't rely on this to be true!
        compare_images(bsync1, fbackup1, baseimg=fbackup0,
                       expected_match=failure != 'intermediate')
        if not failure or bsync_mode == 'always':
            # Always keep the last backup on success or when using 'always'
            base = bsync1
        else:
            base = fbackup0
        compare_images(bsync2, fbackup2, baseimg=base)
        compare_images(img_path, fbackup2)
        log('')


def test_backup_api():
    """
    Test malformed and prohibited invocations of the backup API.
    """
    with iotests.FilePath('img', 'bsync1') as (img_path, backup_path), \
         iotests.VM() as vm:

        log("\n=== API failure tests ===\n")
        log('--- Preparing image & VM ---\n')
        drive0 = Drive(img_path, vm=vm)
        drive0.img_create(iotests.imgfmt, SIZE)
        vm.add_device("{},id=scsi0".format('virtio-scsi'))
        vm.launch()

        file_config = {
            'driver': 'file',
            'filename': drive0.path
        }

        drive0.node = 'drive0'
        vm.qmp_log('blockdev-add',
                   filters=[iotests.filter_qmp_testfiles],
                   node_name=drive0.node,
                   driver=drive0.fmt,
                   file=file_config)
        log('')

        target0 = Drive(backup_path, vm=vm)
        target0.create_target("backup_target", drive0.fmt, drive0.size)
        log('')

        vm.qmp_log("block-dirty-bitmap-add", node=drive0.node,
                   name="bitmap0", granularity=GRANULARITY)
        log('')

        log('-- Testing invalid QMP commands --\n')

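        # Illustrative note (not part of the original test): error_cases maps
        # sync mode -> bitmap argument -> the bitmap-mode values expected to be
        # rejected for that combination. For example, under sync=full with a
        # valid bitmap, 'never' and an omitted bitmap-mode (None) are invalid,
        # while 'on-success' and 'always' (clear-the-bitmap-after-backup
        # semantics) are accepted and therefore absent from the list.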
        error_cases = {
            'incremental': {
                None:        ['on-success', 'always', 'never', None],
                'bitmap404': ['on-success', 'always', 'never', None],
                'bitmap0':   ['always', 'never']
            },
            'bitmap': {
                None:        ['on-success', 'always', 'never', None],
                'bitmap404': ['on-success', 'always', 'never', None],
                'bitmap0':   [None],
            },
            'full': {
                None:        ['on-success', 'always', 'never'],
                'bitmap404': ['on-success', 'always', 'never', None],
                'bitmap0':   ['never', None],
            },
            'top': {
                None:        ['on-success', 'always', 'never'],
                'bitmap404': ['on-success', 'always', 'never', None],
                'bitmap0':   ['never', None],
            },
            'none': {
                None:        ['on-success', 'always', 'never'],
                'bitmap404': ['on-success', 'always', 'never', None],
                'bitmap0':   ['on-success', 'always', 'never', None],
            }
        }

        # Dicts, as always, are not stably-ordered prior to 3.7, so use tuples:
        for sync_mode in ('incremental', 'bitmap', 'full', 'top', 'none'):
            log("-- Sync mode {:s} tests --\n".format(sync_mode))
            for bitmap in (None, 'bitmap404', 'bitmap0'):
                for policy in error_cases[sync_mode][bitmap]:
                    blockdev_backup(drive0.vm, drive0.node, "backup_target",
                                    sync_mode, job_id='api_job',
                                    bitmap=bitmap, bitmap_mode=policy)
                    log('')


def main():
    for bsync_mode in ("never", "on-success", "always"):
        for failure in ("simulated", "intermediate", None):
            test_bitmap_sync(bsync_mode, "bitmap", failure)

    for sync_mode in ('full', 'top'):
        for bsync_mode in ('on-success', 'always'):
            for failure in ('simulated', 'intermediate', None):
                test_bitmap_sync(bsync_mode, sync_mode, failure)

    test_backup_api()


if __name__ == '__main__':
    iotests.script_main(main, supported_fmts=['qcow2'],
                        supported_protocols=['file'])