#!/usr/bin/env python3
# group: rw backing
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import iotests
from iotests import try_remove


def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)
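
# For illustration: a pattern tuple such as ('0x41', 0, 512) expands to the
# qemu-io command "write -P0x41 0 512", i.e. write 512 bytes of pattern 0x41
# at offset 0 of the image.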


def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }
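
# A sketch of what transaction_action builds (illustrative values):
#   transaction_action('block-dirty-bitmap-clear', node='drive0', name='bm0')
#   => {'type': 'block-dirty-bitmap-clear',
#       'data': {'node': 'drive0', 'name': 'bm0'}}
# Underscores in keyword names become hyphens to match QMP member names.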


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)
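
# The job_id keyword becomes 'job-id' on the wire, so each drive-backup job
# in a transaction is named after its device. Illustrative sketch:
#   transaction_drive_backup('drive0', '/tmp/t.qcow2', sync='full')
#   => {'type': 'drive-backup',
#       'data': {'job-id': 'drive0', 'device': 'drive0',
#                'target': '/tmp/t.qcow2', 'sync': 'full'}}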


class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)
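
    # Naming sketch: for drive 'drive0', bitmap 'bitmap0', format 'qcow2',
    # the first call returns ('.../drive0.bitmap0.inc0.qcow2',
    # '.../drive0.bitmap0.ref0.qcow2'): the incremental backup under test
    # and the full reference image it will be compared against.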

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)

    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'], opts='node-name=node0')
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))

    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt})
        return self.drives[-1]

    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)
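
    # Sketch of the resulting invocation (illustrative values only):
    #   self.img_create('t.qcow2', 'qcow2', parent='base.qcow2',
    #                   cluster_size='128k')
    # builds roughly: qemu-img create -f qcow2 -o cluster_size=128k \
    #                 t.qcow2 64M -b base.qcow2 -F qcow2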

    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)
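
    # Returns True if the job completed cleanly, False if it failed with the
    # expected error string; any other outcome fails the test case inside
    # wait_qmp_backup.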

    def ignore_job_status_change_events(self):
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False
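
    # A BLOCK_JOB_COMPLETED event looks roughly like this (sketch, fields
    # abridged):
    #   {'data': {'device': 'drive0', 'type': 'backup',
    #             'len': 67108864, 'offset': 67108864}}
    # with an 'error' string present only on failure, which is what the
    # try/except around dictpath distinguishes.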

    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']

    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)

    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap
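
    # On the wire this issues a QMP command of roughly this shape (sketch):
    #   {'execute': 'block-dirty-bitmap-add',
    #    'arguments': {'node': 'drive0', 'name': 'bitmap0'}}
    # plus any extra arguments, such as 'granularity', passed by callers.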

    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
                        **kwargs)
        return target

    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
                           target=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        if target is None:
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res

    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))

    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')
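
    # Unlike io_write_patterns, these writes go through the guest's live
    # qemu-io HMP interface, so the dirty bitmap observes them; the trailing
    # 'flush' also drives the blkdebug flush_to_disk trigger used below.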

    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)


class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()

    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)

    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)

    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        target with a larger cluster size of 128KiB, and without a backing
        file, work correctly.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create a bitmap and dirty it with some new writes: overwriting
        # KiB range [32736, 32799] dirties the bitmap clusters at 32M-64K
        # and 32M; 32M+64K is left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        self.assertTrue(self.vm.check_bitmap_status(
            'node0', bitmap0.name, {
                'name': 'bitmap0',
                'count': 458752,
                'granularity': 65536,
                'status': 'active',
                'persistent': False
            }))
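
        # (The expected count of 458752 bytes is seven dirty 64KiB clusters:
        # one for the 512-byte write at offset 0, four for the 256k write at
        # 16M, and two for the 64k write straddling a cluster boundary at
        # 32736k.)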

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()

    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
                             node_name=drive1['id'],
                             driver=drive1['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive1['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})
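
        # The blkdebug rules above form a small state machine: the node
        # starts in state 1, the first flush_to_disk event advances it to
        # state 2, and the first read_aio in state 2 then fails exactly once
        # with errno 5 (EIO). The guest flush in hmp_io_writes arms the
        # error; the backup job's read trips it.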

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup per drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()
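
        # With completion-mode 'grouped', a failure in any one job cancels
        # every other job in the transaction, which is why drive0's otherwise
        # healthy backup is cancelled when drive1's read fails.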

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Re-run the exact same transaction.
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to
        each drive. Use blkdebug to interfere with the backup on just one
        drive and attempt to create a coherent incremental backup across
        both drives.

        Verify a failure in one but not both, then delete the failed stubs
        and re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the
        entire transaction job group.
        '''
        self.do_transaction_failure_test(race=True)

    def test_sync_dirty_bitmap_missing(self):
        '''Test: sync=incremental without a bitmap argument must error out.'''
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_not_found(self):
        '''Test: sync=incremental with an unknown bitmap name must error out.'''
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)

    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
        end, do a backup.

        Incremental backup should not ignore dirty bits past the old
        image end.
        '''
        self.assert_no_active_block_jobs()

        self.create_anchor_backup()

        self.add_bitmap('bitmap0', self.drives[0])

        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
                          size=(65 * 1048576))
        self.assert_qmp(res, 'return', {})

        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')

        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)

        self.vm.shutdown()
        self.check_backups()


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point during a normal execution, assume that the VM
        # resumes and begins issuing IO requests here.
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_pause(self):
        """
        Test an incremental backup that errors into a pause and is resumed.
        """

        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read" pattern.
        # The flush occurs in hmp_io_writes, and the read during the block job.
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)

        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'status': 'active',
                'busy': False,
                'recording': True
            }))

        # Start backup
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        res = self.vm.qmp('drive-backup',
                          job_id=bitmap.drive['id'],
                          device=bitmap.drive['id'],
                          sync='incremental',
                          bitmap=bitmap.name,
                          format=bitmap.drive['fmt'],
                          target=target,
                          mode='existing',
                          on_source_error='stop')
        self.assert_qmp(res, 'return', {})

        # Wait for the error
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data": {"device":
                                                   bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'action': 'stop',
                                        'operation': 'read'})

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'status': 'frozen',
                'busy': True,
                'recording': True
            }))
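
        # While the job is stopped on error, the bitmap remains frozen and
        # busy: it is owned by the paused job and cannot be cleared or
        # reused until the job resumes and completes.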

        # Resume and check incremental backup for consistency
        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
        self.assert_qmp(res, 'return', {})
        self.wait_qmp_backup(bitmap.drive['id'])

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 0,
                'granularity': 65536,
                'status': 'active',
                'busy': False,
                'recording': True
            }))

        # Finalize / Cleanup
        self.make_reference_backup(bitmap)
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'],
                 supported_protocols=['file'])