#!/usr/bin/env python3
# group: rw
#
# Tests for active mirroring
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import math
import os
import subprocess
import time
from typing import List, Optional

import iotests
from iotests import qemu_img

source_img = os.path.join(iotests.test_dir, 'source.' + iotests.imgfmt)
target_img = os.path.join(iotests.test_dir, 'target.' + iotests.imgfmt)


class TestActiveMirror(iotests.QMPTestCase):
    image_len = 128 * 1024 * 1024  # 128 MiB
    potential_writes_in_flight = True

    def setUp(self):
        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')

        blk_source = {'id': 'source',
                      'if': 'none',
                      'node-name': 'source-node',
                      'driver': iotests.imgfmt,
                      'file': {'driver': 'blkdebug',
                               'image': {'driver': 'file',
                                         'filename': source_img}}}
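
        # The source's file layer is wrapped in blkdebug above so that
        # tests (see testIntersectingActiveIO below) can install
        # write_aio breakpoints and suspend guest writes at
        # well-defined points.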

        blk_target = {'node-name': 'target-node',
                      'driver': iotests.imgfmt,
                      'file': {'driver': 'file',
                               'filename': target_img}}

        self.vm = iotests.VM()
        self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source))
        self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target))
        self.vm.add_device('virtio-blk,id=vblk,drive=source')
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
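
        # Only verify the mirror result if the test quiesced all I/O;
        # with writes potentially still in flight, source and target
        # may legitimately differ.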
        if not self.potential_writes_in_flight:
            self.assertTrue(iotests.compare_images(source_img, target_img),
                            'mirror target does not match source')

        os.remove(source_img)
        os.remove(target_img)

    def doActiveIO(self, sync_source_and_target):
        # Fill the source image
        self.vm.hmp_qemu_io('source',
                            'write -P 1 0 %i' % self.image_len)

        # Start some background requests
        for offset in range(1 * self.image_len // 8,
                            3 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset)
        for offset in range(2 * self.image_len // 8,
                            3 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        # Start the block job
        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    filter_node_name='mirror-node',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking')
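
        # copy_mode='write-blocking' makes this an active mirror: guest
        # writes are synchronously mirrored to the target, so once the
        # job is READY the source's dirty bitmap stays clean.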

        # Start some more requests
        for offset in range(3 * self.image_len // 8,
                            5 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(4 * self.image_len // 8,
                            5 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        # Wait for the READY event
        self.wait_ready(drive='mirror')

        # Now start some final requests; all of these (which land on
        # the source) should be settled using the active mechanism.
        # The mirror code itself asserts that the source BDS's dirty
        # bitmap will stay clean between READY and COMPLETED.
        for offset in range(5 * self.image_len // 8,
                            7 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(6 * self.image_len // 8,
                            7 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        if sync_source_and_target:
            # If source and target should be in sync after the mirror,
            # we have to flush before completion
            self.vm.hmp_qemu_io('source', 'aio_flush')
            self.potential_writes_in_flight = False

        self.complete_and_wait(drive='mirror', wait_ready=False)

    def testActiveIO(self):
        self.doActiveIO(False)

    def testActiveIOFlushed(self):
        self.doActiveIO(True)

    def testUnalignedActiveIO(self):
        # Fill the source image
        self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    filter_node_name='mirror-node',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking',
                    buf_size=(1048576 // 4),
                    speed=1)
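
        # speed=1 (one byte per second) effectively stalls background
        # copying, so the area written below is still dirty when the
        # unaligned active request lands on it.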

        # Start an unaligned request to a dirty area
        self.vm.hmp_qemu_io('source', 'write -P 2 %i 1' % (1048576 + 42))

        # Let the job finish
        self.vm.cmd('block-job-set-speed', device='mirror', speed=0)
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False

    def testIntersectingActiveIO(self):
        # Fill the source image
        self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    filter_node_name='mirror-node',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking',
                    speed=1)

        self.vm.hmp_qemu_io('source', 'break write_aio A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 1M') # 1
        self.vm.hmp_qemu_io('source', 'wait_break A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M') # 2
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M') # 3

        # Now 2 and 3 are in mirror_wait_on_conflicts, waiting for 1

        self.vm.hmp_qemu_io('source', 'break write_aio B')
        self.vm.hmp_qemu_io('source', 'aio_write 1M 2M') # 4
        self.vm.hmp_qemu_io('source', 'wait_break B')

        # 4 does not wait for 2 and 3 because they have not yet set
        # their bits in in_flight_bitmap.  So nothing prevents 4 from
        # proceeding except our breakpoint B.

        self.vm.hmp_qemu_io('source', 'resume A')

        # Now that 1 is resumed, 2 and 3 move on to the next iteration
        # of the while loop in mirror_wait_on_conflicts().  They do not
        # exit, as the bitmap is still dirty due to request 4.
        # In the past, 2 and 3 would at this point wait on each other,
        # producing a deadlock.  Now this is fixed and they wait on
        # request 4 instead.

        self.vm.hmp_qemu_io('source', 'resume B')

        # After 4 is resumed, one of 2 and 3 goes first and sets its
        # bits in in_flight_bitmap, so the other waits for it.

        self.vm.cmd('block-job-set-speed', device='mirror', speed=0)
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False


class TestThrottledWithNbdExportBase(iotests.QMPTestCase):
    image_len = 128 * 1024 * 1024  # 128 MiB
    iops: Optional[int] = None
    background_processes: List['subprocess.Popen[str]'] = []

    def setUp(self):
        # Must be set by subclasses
        self.assertIsNotNone(self.iops)

        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')

        self.vm = iotests.VM()
        self.vm.launch()

        self.vm.cmd('object-add', **{
            'qom-type': 'throttle-group',
            'id': 'thrgr',
            'limits': {
                'iops-total': self.iops,
                'iops-total-max': self.iops
            }
        })
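
        # Setting iops-total-max equal to iops-total leaves no burst
        # allowance, so the node is limited to exactly self.iops
        # requests per (virtual) second.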

        self.vm.cmd('blockdev-add', **{
            'node-name': 'source-node',
            'driver': 'throttle',
            'throttle-group': 'thrgr',
            'file': {
                'driver': iotests.imgfmt,
                'file': {
                    'driver': 'file',
                    'filename': source_img
                }
            }
        })

        self.vm.cmd('blockdev-add', **{
            'node-name': 'target-node',
            'driver': iotests.imgfmt,
            'file': {
                'driver': 'file',
                'filename': target_img
            }
        })

        self.nbd_sock = iotests.file_path('nbd.sock',
                                          base_dir=iotests.sock_dir)
        self.nbd_url = f'nbd+unix:///source-node?socket={self.nbd_sock}'

        self.vm.cmd('nbd-server-start', addr={
            'type': 'unix',
            'data': {
                'path': self.nbd_sock
            }
        })

        self.vm.cmd('block-export-add', id='exp0', type='nbd',
                    node_name='source-node', writable=True)
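
        # External qemu-io/qemu-img processes can now issue writes to
        # source-node through this NBD export while a mirror job runs.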

    def tearDown(self):
        # Wait for background requests to settle
        try:
            while True:
                p = self.background_processes.pop()
                while True:
                    try:
                        p.wait(timeout=0.0)
                        break
                    except subprocess.TimeoutExpired:
                        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
        except IndexError:
            pass
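
        # (Each clock_step call above advances the virtual clock by one
        # second, which lets the throttle group admit more of the
        # queued requests.)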

        # Cancel ongoing block jobs
        for job in self.vm.qmp('query-jobs')['return']:
            self.vm.qmp('block-job-cancel', device=job['id'], force=True)

        while True:
            self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
            if len(self.vm.qmp('query-jobs')['return']) == 0:
                break

        self.vm.shutdown()
        os.remove(source_img)
        os.remove(target_img)


class TestLowThrottledWithNbdExport(TestThrottledWithNbdExportBase):
    iops = 16

    def testUnderLoad(self):
        '''
        Throttle the source node, then issue a whole bunch of external requests
        while the mirror job (in write-blocking mode) is running.  We want to
        see background requests being issued even while the source is under
        full load by active writes, so that progress can be made towards READY.
        '''

        # Fill the first half of the source image; do not fill the second
        # half, which is where the active requests will occur.  This ensures
        # that active mirroring itself does not directly contribute to the
        # job's progress: those areas were not intended to be copied when
        # the job started, so actively mirroring writes to them keeps the
        # job from losing progress without making any either.
        self.vm.hmp_qemu_io('source-node',
                            f'aio_write -P 1 0 {self.image_len // 2}')
        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')

        # Launch the mirror job
        mirror_buf_size = 65536
        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    filter_node_name='mirror-node',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking',
                    buf_size=mirror_buf_size)
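
        # buf_size is reduced to 64 kiB so the background copy proceeds
        # in small per-operation chunks.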

        # We create the external requests via qemu-io processes on the NBD
        # server.  Have their offset start in the middle of the image so they
        # do not overlap with the background requests (which start from the
        # beginning).
        active_request_offset = self.image_len // 2
        active_request_len = 4096

        # Create enough requests to saturate the node for 5 seconds
        for _ in range(0, 5 * self.iops):
            req = f'write -P 42 {active_request_offset} {active_request_len}'
            active_request_offset += active_request_len
            p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req)
            self.background_processes += [p]

        # Now advance the clock one I/O operation at a time for four seconds
        # (i.e. one less than the five for which the node is saturated).  We
        # expect the mirror job to issue background operations here, even
        # though active requests are still in flight.  The active requests
        # take precedence, however, because they were issued earlier than
        # mirror's background requests.
        # Once the active requests started above are done (i.e. after five
        # virtual seconds), we expect those background requests to be worked
        # on.  We only advance four seconds here to avoid race conditions.
        for _ in range(0, 4 * self.iops):
            step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops)
            self.vm.qtest(f'clock_step {step}')
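
        # (step is the virtual time per throttled request in
        # nanoseconds: one second divided by the IOPS limit, rounded up
        # to whole nanoseconds.)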

        # Note how much remains to be done until the mirror job is finished
        job_status = self.vm.qmp('query-jobs')['return'][0]
        start_remaining = job_status['total-progress'] - \
            job_status['current-progress']

        # Create a whole bunch more active requests
        for _ in range(0, 10 * self.iops):
            req = f'write -P 42 {active_request_offset} {active_request_len}'
            active_request_offset += active_request_len
            p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req)
            self.background_processes += [p]

        # Let the clock advance more.  After 1 second, as noted above, we
        # expect the background requests to be worked on.  Give them a couple
        # of seconds (specifically 4) to see their impact.
        for _ in range(0, 5 * self.iops):
            step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops)
            self.vm.qtest(f'clock_step {step}')

        # Note how much remains to be done now.  We expect this number to be
        # reduced thanks to those background requests.
        job_status = self.vm.qmp('query-jobs')['return'][0]
        end_remaining = job_status['total-progress'] - \
            job_status['current-progress']

        # See that progress was indeed being made on the job, even while the
        # node was saturated with active requests
        self.assertGreater(start_remaining - end_remaining, 0)


class TestHighThrottledWithNbdExport(TestThrottledWithNbdExportBase):
    iops = 1024

    def testActiveOnCreation(self):
        '''
        Issue requests on the mirror source node right as the mirror is
        instated.  It's possible that requests occur before the actual job is
        created, but after the node has been put into the graph.  Write
        requests across the node must in that case be forwarded to the source
        node without attempting to mirror them (there is no job object yet,
        so attempting to access it would cause a segfault).

        We do this with a lightly throttled node (i.e. quite high IOPS
        limit).  Using throttling seems to increase reproducibility, but if
        the limit is too low, all requests allowed per second will be
        submitted before mirror_start_job() gets to the problematic point.
        '''

        # Let qemu-img bench create write requests (enough for two seconds on
        # the virtual clock)
        bench_args = ['bench', '-w', '-d', '1024', '-f', 'nbd',
                      '-c', str(self.iops * 2), self.nbd_url]
        p = iotests.qemu_tool_popen(iotests.qemu_img_args + bench_args)
        self.background_processes += [p]
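
        # ('-w' makes bench issue writes, '-d 1024' allows up to 1024
        # requests in flight at once, and '-c' caps the total request
        # count at two (virtual) seconds' worth of the throttle limit.)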

        # Give qemu-img bench time to start up and issue requests
        time.sleep(1.0)
        # Flush the request queue, so new requests can come in right as we
        # start blockdev-mirror
        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')

        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking')
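
        # Surviving this command without a crash is the actual test
        # here; tearDown drains the outstanding requests and cancels
        # the job.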


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'raw'],
                 supported_protocols=['file'])