Message ID: d40c9bcf9754f7da7203fbb7eaa117dd2ddb42c8.1431523498.git.berto@igalia.com
State: New
On Wed, 05/13 16:27, Alberto Garcia wrote:
> This test case checks that it's possible to launch several stream
> operations in parallel in the same snapshot chain, each one involving
> a different set of nodes.
>
> Signed-off-by: Alberto Garcia <berto@igalia.com>
> Reviewed-by: Max Reitz <mreitz@redhat.com>

Reviewed-by: Fam Zheng <famz@redhat.com>

> ---
>  tests/qemu-iotests/030     | 80 ++++++++++++++++++++++++++++++++++++++++++++++
>  tests/qemu-iotests/030.out |  4 +--
>  2 files changed, 82 insertions(+), 2 deletions(-)
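As the commit message says, each stream operation involves a different set of nodes of the same backing chain. As a minimal sketch (not part of the patch, and with placeholder image names instead of the absolute paths the test builds from iotests.test_dir), these are roughly the four QMP commands that test_stream_parallel() in the patch below issues for num_imgs = 9:

# Sketch of the parallel jobs started by the test below (assumed num_imgs = 9).
# Each block-stream job streams the image directly below its target node and
# stops at the base two levels down, so the jobs act on different nodes of the
# same chain and can run concurrently.
num_imgs = 9
imgs = ['img-%d.img' % i for i in range(num_imgs)]   # placeholder file names
for i in range(2, num_imgs, 2):
    print({'execute': 'block-stream',
           'arguments': {'device': 'node%d' % i, 'base': imgs[i - 2], 'speed': 32768}})

The job started on the topmost node (node8) reports its completion under the device name "drive0", which is why the test replaces the last entry of pending_jobs with the device name before waiting for the BLOCK_JOB_COMPLETED events.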
diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030
index 0927457..c199cef 100755
--- a/tests/qemu-iotests/030
+++ b/tests/qemu-iotests/030
@@ -145,6 +145,86 @@ class TestSingleDrive(iotests.QMPTestCase):
         self.assert_qmp(result, 'error/class', 'GenericError')
 
 
+class TestParallelOps(iotests.QMPTestCase):
+    image_len = 2 * 1024 * 1024 # MB
+    num_ops = 4 # Number of parallel block-stream operations
+    num_imgs = num_ops * 2 + 1
+    imgs = []
+
+    def setUp(self):
+        opts = []
+        self.imgs = []
+
+        # Initialize file names and command-line options
+        for i in range(self.num_imgs):
+            img_depth = self.num_imgs - i - 1
+            opts.append("backing." * img_depth + "node-name=node%d" % i)
+            self.imgs.append(os.path.join(iotests.test_dir, 'img-%d.img' % i))
+
+        # Create all images
+        iotests.create_image(self.imgs[0], self.image_len)
+        for i in range(1, self.num_imgs):
+            qemu_img('create', '-f', iotests.imgfmt,
+                     '-o', 'backing_file=%s' % self.imgs[i-1], self.imgs[i])
+
+        # Put data into the images we are copying data from
+        for i in range(1, self.num_imgs, 2):
+            qemu_io('-f', iotests.imgfmt,
+                    '-c', 'write -P %d %d 128K' % (i, i*128*1024), self.imgs[i])
+
+        # Attach the drive to the VM
+        self.vm = iotests.VM()
+        self.vm.add_drive("blkdebug::" + self.imgs[-1], ','.join(opts))
+        self.vm.launch()
+
+    def tearDown(self):
+        self.vm.shutdown()
+        for img in self.imgs:
+            os.remove(img)
+
+    # Test that it's possible to run several block-stream operations
+    # in parallel in the same snapshot chain
+    def test_stream_parallel(self):
+        self.assert_no_active_block_jobs()
+
+        # Check that the maps don't match before the streaming operations
+        for i in range(2, self.num_imgs, 2):
+            self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]),
+                                qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]),
+                                'image file map matches backing file before streaming')
+
+        # Create all streaming jobs
+        pending_jobs = []
+        for i in range(2, self.num_imgs, 2):
+            node_name = 'node%d' % i
+            pending_jobs.append(node_name)
+            result = self.vm.qmp('block-stream', device=node_name, base=self.imgs[i-2], speed=32768)
+            self.assert_qmp(result, 'return', {})
+
+        # The block job on the active image is always referenced by
+        # its device name. Therefore we have to replace the node name
+        # with the device name in the list of pending jobs
+        pending_jobs.pop()
+        pending_jobs.append("drive0")
+
+        # Wait for all jobs to be finished.
+        while len(pending_jobs) > 0:
+            for event in self.vm.get_qmp_events(wait=True):
+                if event['event'] == 'BLOCK_JOB_COMPLETED':
+                    node_name = self.dictpath(event, 'data/device')
+                    self.assertTrue(node_name in pending_jobs)
+                    self.assert_qmp_absent(event, 'data/error')
+                    pending_jobs.remove(node_name)
+
+        self.assert_no_active_block_jobs()
+        self.vm.shutdown()
+
+        # Check that all maps match now
+        for i in range(2, self.num_imgs, 2):
+            self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]),
+                             qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]),
+                             'image file map does not match backing file after streaming')
+
 class TestSmallerBackingFile(iotests.QMPTestCase):
     backing_len = 1 * 1024 * 1024 # MB
     image_len = 2 * backing_len
diff --git a/tests/qemu-iotests/030.out b/tests/qemu-iotests/030.out
index 96961ed..b6f2576 100644
--- a/tests/qemu-iotests/030.out
+++ b/tests/qemu-iotests/030.out
@@ -1,5 +1,5 @@
-...............
+................
 ----------------------------------------------------------------------
-Ran 15 tests
+Ran 16 tests
 
 OK
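For reference, a small standalone sketch (same construction as setUp() above, assuming num_imgs = 9) of how the nested "backing." prefixes give every layer of the chain its own node name, which is what allows block-stream to target the intermediate nodes individually:

# Reproduce the option list that setUp() joins with ',' and hands to add_drive().
num_imgs = 9
opts = []
for i in range(num_imgs):
    img_depth = num_imgs - i - 1
    opts.append("backing." * img_depth + "node-name=node%d" % i)

print(opts[-1])  # node-name=node8: the active image, i.e. drive0 itself
print(opts[-2])  # backing.node-name=node7: its immediate backing file
print(opts[0])   # eight "backing." prefixes, naming the deepest image node0

The new case should run like any other part of qemu-iotest 030, e.g. with ./check -qcow2 030 from tests/qemu-iotests.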