@@ -1043,7 +1043,8 @@ static void *migration_thread(void *opaque)
if (!qemu_file_rate_limit(s->file)) {
pending_size = qemu_savevm_state_pending(s->file, max_size);
trace_migrate_pending(pending_size, max_size);
- if (pending_size && pending_size >= max_size) {
+ if ((pending_size && pending_size >= max_size)
+ || (migrate_is_test())) {
qemu_savevm_state_iterate(s->file);
} else {
trace_migration_thread_low_pending(pending_size);
@@ -1120,6 +1120,25 @@ static void ram_migration_cancel(void *opaque)
migration_end();
}
+static uint64_t ram_migration_bitmap_reset(void)
+{
+ uint64_t dirty_pages_remaining;
+ int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
+ /* Clear the whole migration dirty bitmap and return how many dirty
+ * pages it held. Locking TODO: taken under migration_bitmap_mutex;
+ * the only other writer is migration_bitmap_sync_range() -- revisit
+ * if this is ever used beyond test-mode prediction. */
+ qemu_mutex_lock(&migration_bitmap_mutex);
+ rcu_read_lock();
+ ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
+ dirty_pages_remaining = migration_dirty_pages;
+ bitmap_zero(migration_bitmap, ram_bitmap_pages);
+ migration_dirty_pages = 0;
+ rcu_read_unlock();
+ qemu_mutex_unlock(&migration_bitmap_mutex);
+ return dirty_pages_remaining;
+}
+
static void reset_ram_globals(void)
{
last_seen_block = NULL;
@@ -1249,6 +1268,10 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
int64_t t0;
int pages_sent = 0;
+ if (migrate_is_test()) {
+ return ram_migration_bitmap_reset();
+ }
+
rcu_read_lock();
if (ram_list.version != last_version) {
reset_ram_globals();
@@ -1346,13 +1369,14 @@ static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
- if (remaining_size < max_size) {
+ if ((remaining_size < max_size) || (migrate_is_test())) {
qemu_mutex_lock_iothread();
rcu_read_lock();
migration_bitmap_sync();
rcu_read_unlock();
qemu_mutex_unlock_iothread();
remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
+ ram_control_sync_hook(f, RAM_CONTROL_HOOK, &remaining_size);
}
return remaining_size;
}