@@ -3091,6 +3091,166 @@ static void test_vcpu_dirty_limit(void)
dirtylimit_stop_vm(vm);
}
+static void migrate_dirty_limit_wait_showup(QTestState *from,
+ const int64_t period,
+ const int64_t value)
+{
+ /* Enable dirty limit capability */
+ migrate_set_capability(from, "dirty-limit", true);
+
+ /* Set dirty limit parameters */
+ migrate_set_parameter_int(from, "x-vcpu-dirty-limit-period", period);
+ migrate_set_parameter_int(from, "vcpu-dirty-limit", value);
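+    /*
+     * The two calls above wrap QMP commands roughly equivalent to:
+     *   { "execute": "migrate-set-capabilities", "arguments":
+     *     { "capabilities": [{ "capability": "dirty-limit",
+     *                          "state": true }] } }
+     *   { "execute": "migrate-set-parameters", "arguments":
+     *     { "x-vcpu-dirty-limit-period": period,
+     *       "vcpu-dirty-limit": value } }
+     */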
+
+    /* Make sure the migration can't converge on its own */
+ migrate_ensure_non_converge(from);
+
+    /* Pause before switchover so the limit rate can be checked after precopy */
+ migrate_set_capability(from, "pause-before-switchover", true);
+
+ /* Wait for the serial output from the source */
+ wait_for_serial("src_serial");
+}
+
+/*
+ * This test does:
+ *  source                          destination
+ *  start vm
+ *                                  start incoming vm
+ *  migrate
+ *  wait for dirty limit to begin
+ *  cancel migrate
+ *  cancellation check
+ *                                  restart incoming vm
+ *  migrate
+ *  wait for dirty limit to begin
+ *  wait for pre-switchover event
+ *  convergence condition check
+ *
+ * And see if dirty limit migration works correctly.
+ * This test case involves many passes, so it runs in slow mode only.
+ */
+static void test_migrate_dirty_limit(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ QTestState *from, *to;
+ int64_t remaining;
+ uint64_t throttle_us_per_full;
+ /*
+ * We want the test to be stable and as fast as possible.
+     * E.g., at 1Gb/s bandwidth the migration may converge before the
+     * dirty limit ever engages, so we need to decrease the bandwidth.
+ */
+ const int64_t dirtylimit_period = 1000, dirtylimit_value = 50;
+    const int64_t max_bandwidth = 400000000; /* bytes/s, ~400MB/s */
+ const int64_t downtime_limit = 250; /* 250ms */
+ /*
+     * We migrate through a unix socket, which locally sustains far more
+     * than max_bandwidth. Thus the expected migration speed is roughly
+     * the bandwidth limit, so we can predict expected_threshold.
+ */
+ const int64_t expected_threshold = max_bandwidth * downtime_limit / 1000;
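+    /* i.e. 400000000 bytes/s * 250ms / 1000 = 100000000 bytes (~100MB) */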
+ int max_try_count = 10;
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ .use_dirty_ring = true,
+ },
+ .listen_uri = uri,
+ .connect_uri = uri,
+ };
+
+ /* Start src, dst vm */
+ if (test_migrate_start(&from, &to, args.listen_uri, &args.start)) {
+ return;
+ }
+
+    /* Prepare for dirty limit migration and wait for the src VM to show up */
+ migrate_dirty_limit_wait_showup(from, dirtylimit_period, dirtylimit_value);
+
+    /* Start the migration */
+ migrate_qmp(from, uri, "{}");
+
+    /* Wait for the dirty limit throttle to begin */
+ throttle_us_per_full = 0;
+ while (throttle_us_per_full == 0) {
+ throttle_us_per_full =
+ read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
+ usleep(100);
+ g_assert_false(got_src_stop);
+ }
+
+    /* Now cancel the migration and wait for the throttle to switch off */
+ migrate_cancel(from);
+ wait_for_migration_status(from, "cancelled", NULL);
+
+    /* Check that the throttle has switched off, polling for up to ~1ms */
+ do {
+ throttle_us_per_full =
+ read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
+ usleep(100);
+ g_assert_false(got_src_stop);
+ } while (throttle_us_per_full != 0 && --max_try_count);
+
+ /* Assert dirty limit is not in service */
+ g_assert_cmpint(throttle_us_per_full, ==, 0);
+
+ args = (MigrateCommon) {
+ .start = {
+ .only_target = true,
+ .use_dirty_ring = true,
+ },
+ .listen_uri = uri,
+ .connect_uri = uri,
+ };
+
+    /* Restart the dst vm; the src vm is already up, no need to wait again */
+ if (test_migrate_start(&from, &to, args.listen_uri, &args.start)) {
+ return;
+ }
+
+    /* Start the migration */
+ migrate_qmp(from, uri, "{}");
+
+    /* Wait for the dirty limit throttle to begin */
+ throttle_us_per_full = 0;
+ while (throttle_us_per_full == 0) {
+ throttle_us_per_full =
+ read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
+ usleep(100);
+ g_assert_false(got_src_stop);
+ }
+
+ /*
+     * The dirty limit rate should equal the return value of
+     * query-vcpu-dirty-limit when the dirty-limit capability is set.
+ */
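+    /*
+     * A query-vcpu-dirty-limit reply looks roughly like (one entry per
+     * vCPU, rates in MB/s):
+     *   { "return": [{ "cpu-index": 0,
+     *                  "limit-rate": 50, "current-rate": 50 }] }
+     */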
+ g_assert_cmpint(dirtylimit_value, ==, get_limit_rate(from));
+
+    /* Dirty limit verified; now let the migration converge */
+ migrate_set_parameter_int(from, "downtime-limit", downtime_limit);
+ migrate_set_parameter_int(from, "max-bandwidth", max_bandwidth);
+
+ /*
+     * Wait for the pre-switchover status to check whether the
+     * migration satisfies the convergence condition.
+ */
+ wait_for_migration_status(from, "pre-switchover", NULL);
+
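+    /*
+     * Migration can converge only once the remaining data fits within
+     * downtime_limit at max_bandwidth; allow a 1% margin over the
+     * predicted threshold.
+     */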
+ remaining = read_ram_property_int(from, "remaining");
+ g_assert_cmpint(remaining, <,
+ (expected_threshold + expected_threshold / 100));
+
+ migrate_continue(from, "pre-switchover");
+
+ qtest_qmp_eventwait(to, "RESUME");
+
+ wait_for_serial("dest_serial");
+ wait_for_migration_complete(from);
+
+ test_migrate_end(from, to, true);
+}
+
static bool kvm_dirty_ring_supported(void)
{
#if defined(__linux__) && defined(HOST_X86_64)
@@ -3301,6 +3461,10 @@ int main(int argc, char **argv)
*/
if (g_test_slow()) {
qtest_add_func("/migration/auto_converge", test_migrate_auto_converge);
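+        /*
+         * The dirty limit test relies on the KVM dirty ring, which
+         * kvm_dirty_ring_supported() only reports on x86_64 Linux hosts.
+         */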
+ if (g_str_equal(arch, "x86_64") &&
+ has_kvm && kvm_dirty_ring_supported()) {
+ qtest_add_func("/migration/dirty_limit", test_migrate_dirty_limit);
+ }
}
qtest_add_func("/migration/multifd/tcp/plain/none",
test_multifd_tcp_none);