@@ -19,7 +19,10 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
+import math
import os
+import subprocess
+from typing import List
import iotests
from iotests import qemu_img
@@ -50,7 +53,7 @@ class TestActiveMirror(iotests.QMPTestCase):
self.vm = iotests.VM()
self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source))
self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target))
- self.vm.add_device('virtio-blk,drive=source')
+ self.vm.add_device('virtio-blk,id=vblk,drive=source')
self.vm.launch()
def tearDown(self):
@@ -192,6 +195,181 @@ class TestActiveMirror(iotests.QMPTestCase):
self.potential_writes_in_flight = False
+class TestThrottledWithNbdExport(iotests.QMPTestCase):
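+    '''
+    Test that a mirror job in write-blocking mode makes progress even
+    while the (throttled) source node is kept busy by active writes
+    coming in over an NBD export.
+    '''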
+    image_len = 128 * 1024 * 1024  # 128 MiB
+ iops = 16
+ background_processes: List['subprocess.Popen[str]'] = []
+
+ def setUp(self):
+ qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
+ qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')
+
+ self.vm = iotests.VM()
+ self.vm.launch()
+
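+        # Create a throttle group that limits the source node to
+        # self.iops I/O operations per second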
+ result = self.vm.qmp('object-add', **{
+ 'qom-type': 'throttle-group',
+ 'id': 'thrgr',
+ 'limits': {
+ 'iops-total': self.iops,
+ 'iops-total-max': self.iops
+ }
+ })
+ self.assert_qmp(result, 'return', {})
+
+ result = self.vm.qmp('blockdev-add', **{
+ 'node-name': 'source-node',
+ 'driver': 'throttle',
+ 'throttle-group': 'thrgr',
+ 'file': {
+ 'driver': iotests.imgfmt,
+ 'file': {
+ 'driver': 'file',
+ 'filename': source_img
+ }
+ }
+ })
+ self.assert_qmp(result, 'return', {})
+
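+        # The source: the image file, wrapped in a throttle filter node
+        # that is a member of the throttle group created above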
+ result = self.vm.qmp('blockdev-add', **{
+ 'node-name': 'target-node',
+ 'driver': iotests.imgfmt,
+ 'file': {
+ 'driver': 'file',
+ 'filename': target_img
+ }
+ })
+ self.assert_qmp(result, 'return', {})
+
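+        # Export the source node over NBD so we can later issue external
+        # ("active") write requests against it via qemu-io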
+ self.nbd_sock = iotests.file_path('nbd.sock',
+ base_dir=iotests.sock_dir)
+ self.nbd_url = f'nbd+unix:///source-node?socket={self.nbd_sock}'
+
+ result = self.vm.qmp('nbd-server-start', addr={
+ 'type': 'unix',
+ 'data': {
+ 'path': self.nbd_sock
+ }
+ })
+ self.assert_qmp(result, 'return', {})
+
+ result = self.vm.qmp('block-export-add', id='exp0', type='nbd',
+ node_name='source-node', writable=True)
+ self.assert_qmp(result, 'return', {})
+
+ def tearDown(self):
+        # Wait for background requests to settle
+        while self.background_processes:
+            p = self.background_processes.pop()
+            while True:
+                try:
+                    p.wait(timeout=0.0)
+                    break
+                except subprocess.TimeoutExpired:
+                    self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
+
+ # Cancel ongoing block jobs
+ for job in self.vm.qmp('query-jobs')['return']:
+ self.vm.qmp('block-job-cancel', device=job['id'], force=True)
+
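+        # Keep stepping the virtual clock until all jobs are gone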
+ while True:
+ self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
+ if len(self.vm.qmp('query-jobs')['return']) == 0:
+ break
+
+ self.vm.shutdown()
+ os.remove(source_img)
+ os.remove(target_img)
+
+ def testUnderLoad(self):
+ '''
+ Throttle the source node, then issue a whole bunch of external requests
+ while the mirror job (in write-blocking mode) is running. We want to
+ see background requests being issued even while the source is under
+ full load by active writes, so that progress can be made towards READY.
+ '''
+
+        # Fill the first half of the source image; do not fill the second
+        # half, which is where the active requests will occur.  This ensures
+        # that active mirroring itself will not directly contribute to the
+        # job's progress (because when the job was started, those areas were
+        # not intended to be copied, so active mirroring can only prevent
+        # losing progress there, not make any).
+ self.vm.hmp_qemu_io('source-node',
+ f'aio_write -P 1 0 {self.image_len // 2}')
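+        # Advance the virtual clock so the above write can pass the
+        # throttle node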
+ self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
+
+ # Launch the mirror job
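+        # (the small buffer size limits how much background data the job
+        # can have in flight at any one time)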
+ mirror_buf_size = 65536
+ result = self.vm.qmp('blockdev-mirror',
+ job_id='mirror',
+ filter_node_name='mirror-node',
+ device='source-node',
+ target='target-node',
+ sync='full',
+ copy_mode='write-blocking',
+ buf_size=mirror_buf_size)
+ self.assert_qmp(result, 'return', {})
+
+ # We create the external requests via qemu-io processes on the NBD
+ # server. Have their offset start in the middle of the image so they
+ # do not overlap with the background requests (which start from the
+ # beginning).
+ active_request_offset = self.image_len // 2
+ active_request_len = 4096
+
+ # Create enough requests to saturate the node for 5 seconds
+ for _ in range(0, 5 * self.iops):
+ req = f'write -P 42 {active_request_offset} {active_request_len}'
+ active_request_offset += active_request_len
+ p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req)
+ self.background_processes += [p]
+
+        # Now advance the clock by 4 seconds (i.e. one second less than the
+        # 5 seconds of active requests), one I/O operation at a time.  We
+        # expect the mirror job to issue background operations here, even
+        # though active requests are still in flight.
+        # The active requests will take precedence, however, because they
+        # were issued earlier than the mirror's background requests.
+        # Once the active requests started above are done (i.e. after 5
+        # virtual seconds), we expect those background requests to be worked
+        # on.  We only advance 4 seconds here to avoid race conditions.
+ for _ in range(0, 4 * self.iops):
+ step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops)
+ self.vm.qtest(f'clock_step {step}')
+
+ # Note how much remains to be done until the mirror job is finished
+ job_status = self.vm.qmp('query-jobs')['return'][0]
+ start_remaining = job_status['total-progress'] - \
+ job_status['current-progress']
+
+        # Create many more active requests
+ for _ in range(0, 10 * self.iops):
+ req = f'write -P 42 {active_request_offset} {active_request_len}'
+ active_request_offset += active_request_len
+ p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req)
+ self.background_processes += [p]
+
+        # Let the clock advance further.  After 1 second, as noted above, we
+        # expect the background requests to be worked on.  Give them a few
+        # seconds (specifically four) to show their impact.
+ for _ in range(0, 5 * self.iops):
+ step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops)
+ self.vm.qtest(f'clock_step {step}')
+
+ # Note how much remains to be done now. We expect this number to be
+ # reduced thanks to those background requests.
+ job_status = self.vm.qmp('query-jobs')['return'][0]
+ end_remaining = job_status['total-progress'] - \
+ job_status['current-progress']
+
+        # Verify that progress was indeed made on the job, even while the
+        # node was saturated with active requests
+        self.assertGreater(start_remaining - end_remaining, 0)
+
+
if __name__ == '__main__':
iotests.main(supported_fmts=['qcow2', 'raw'],
supported_protocols=['file'])
@@ -1,5 +1,5 @@
-....
+.....
----------------------------------------------------------------------
-Ran 4 tests
+Ran 5 tests
OK
Before this series, a mirror job in write-blocking mode would pause
issuing background requests while active requests are in flight.  Thus,
if the source is constantly in use by active requests, no actual
progress can be made.

This series should have fixed that, making the mirror job issue
background requests even while active requests are in flight.

Have a new test case in 151 verify this.

Signed-off-by: Hanna Reitz <hreitz@redhat.com>
---
 tests/qemu-iotests/151     | 180 ++++++++++++++++++++++++++++++++++++-
 tests/qemu-iotests/151.out |   4 +-
 2 files changed, 181 insertions(+), 3 deletions(-)
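As a usage note (assuming a configured build tree; adjust paths as
needed), the new case should be runnable through the usual iotests
harness, e.g.:

    $ cd tests/qemu-iotests
    $ ./check -qcow2 151

with the fifth dot in 151.out accounting for the added test.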