@@ -1362,14 +1362,18 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
pss->block = QLIST_NEXT_RCU(pss->block, next);
if (!pss->block) {
if (migrate_multifd() &&
- !migrate_multifd_flush_after_each_section()) {
+ (!migrate_multifd_flush_after_each_section() ||
+ migrate_mapped_ram())) {
QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
int ret = multifd_send_sync_main();
if (ret < 0) {
return ret;
}
- qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
- qemu_fflush(f);
+
+ if (!migrate_mapped_ram()) {
+ qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+ qemu_fflush(f);
+ }
}
/*
* If memory migration starts over, we will meet a dirtied page
@@ -3111,7 +3115,8 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
return ret;
}
- if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
+ if (migrate_multifd() && !migrate_multifd_flush_after_each_section()
+ && !migrate_mapped_ram()) {
qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
}
@@ -3242,7 +3247,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
out:
if (ret >= 0
&& migration_is_setup_or_active(migrate_get_current()->state)) {
- if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
+ if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
+ !migrate_mapped_ram()) {
ret = multifd_send_sync_main();
if (ret < 0) {
return ret;
@@ -3334,7 +3340,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
}
}
- if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
+ if (migrate_multifd() && !migrate_multifd_flush_after_each_section() &&
+ !migrate_mapped_ram()) {
qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
}
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
@@ -4137,6 +4144,12 @@ static int ram_load_precopy(QEMUFile *f)
invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
}
+ if (migrate_mapped_ram()) {
+ invalid_flags |= (RAM_SAVE_FLAG_HOOK | RAM_SAVE_FLAG_MULTIFD_FLUSH |
+ RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_XBZRLE |
+ RAM_SAVE_FLAG_ZERO);
+ }
+
while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
ram_addr_t addr;
void *host = NULL, *host_bak = NULL;
@@ -4158,6 +4171,8 @@ static int ram_load_precopy(QEMUFile *f)
addr &= TARGET_PAGE_MASK;
if (flags & invalid_flags) {
+ error_report("Unexpected RAM flags: %d", flags & invalid_flags);
+
if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
error_report("Received an unexpected compressed page");
}
@@ -4210,6 +4225,10 @@ static int ram_load_precopy(QEMUFile *f)
switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
case RAM_SAVE_FLAG_MEM_SIZE:
ret = parse_ramblocks(f, addr);
+
+ if (migrate_mapped_ram()) {
+ multifd_recv_sync_main();
+ }
break;
case RAM_SAVE_FLAG_ZERO:
@@ -4250,7 +4269,12 @@ static int ram_load_precopy(QEMUFile *f)
case RAM_SAVE_FLAG_EOS:
/* normal exit */
if (migrate_multifd() &&
- migrate_multifd_flush_after_each_section()) {
+ migrate_multifd_flush_after_each_section() &&
+ /*
+ * Mapped-ram migration flushes once and for all after
+ * parsing ramblocks. Always ignore EOS for it.
+ */
+ !migrate_mapped_ram()) {
multifd_recv_sync_main();
}
break;
The mapped-ram migration can be performed live or non-live, but it is
always asynchronous, i.e. the source machine and the destination
machine are not migrating at the same time. We only need some pieces
of the multifd sync operations.

multifd_send_sync_main()
------------------------
Issued by the ram migration code on the migration thread, causes the
multifd send channels to synchronize with the migration thread and
makes the sending side emit a packet with the MULTIFD_FLUSH flag.

With mapped-ram we want to maintain the sync on the sending side
because that provides ordering between the rounds of dirty pages when
migrating live.

MULTIFD_FLUSH
-------------
On the receiving side, the presence of the MULTIFD_FLUSH flag on a
packet causes the receiving channels to start synchronizing with the
main thread.

We're not using packets with mapped-ram, so there's no MULTIFD_FLUSH
flag and therefore no channel sync on the receiving side.

multifd_recv_sync_main()
------------------------
Issued by the migration thread when the ram migration flag
RAM_SAVE_FLAG_MULTIFD_FLUSH is received, causes the migration thread
on the receiving side to start synchronizing with the recv
channels. Due to compatibility, this is also issued when
RAM_SAVE_FLAG_EOS is received.

For mapped-ram we only need to synchronize the channels at the end of
migration to avoid doing cleanup before the channels have finished
their IO.

Make sure the multifd syncs are only issued at the appropriate times.
Note that due to pre-existing backward compatibility issues, we have
the multifd_flush_after_each_section property that can cause a sync
to happen at EOS. Since the EOS flag is needed on the stream, allow
mapped-ram to just ignore it.

Also emit an error if any other unexpected flags are found on the
stream.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
- skipped the flush at ram_save_iterate
- added a mapped-ram guard at find_dirty_block
- moved EOS skip to the switch statement
- added flush after parse_ramblocks
---
 migration/ram.c | 38 +++++++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 7 deletions(-)
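
For readers outside the QEMU tree, here is a minimal, self-contained
sketch (not part of the patch) that models the send-side decisions from
the hunks above as plain booleans, so the combinations can be checked
without building QEMU. The struct, helper names and main() driver are
invented for illustration only; the comments name the real capability
checks they stand in for:

/*
 * Illustrative sketch only, not part of the patch: the send-side
 * decisions from the hunks above, reduced to plain booleans.  The
 * struct and helper names are hypothetical; the comments name the
 * real QEMU checks.
 */
#include <stdbool.h>
#include <stdio.h>

struct caps {
    bool multifd;                  /* migrate_multifd() */
    bool flush_after_each_section; /* migrate_multifd_flush_after_each_section() */
    bool mapped_ram;               /* migrate_mapped_ram() */
};

/* find_dirty_block(): sync the send channels when wrapping to a new
 * round of dirty pages?  Mapped-ram keeps this for ordering. */
static bool sync_on_new_round(struct caps c)
{
    return c.multifd && (!c.flush_after_each_section || c.mapped_ram);
}

/* find_dirty_block()/ram_save_setup()/ram_save_complete(): put
 * RAM_SAVE_FLAG_MULTIFD_FLUSH on the stream?  Never for mapped-ram,
 * which transfers no pages over the stream. */
static bool emit_multifd_flush_flag(struct caps c)
{
    return c.multifd && !c.flush_after_each_section && !c.mapped_ram;
}

/* ram_save_iterate(): per-section sync, kept only for the legacy
 * flush-after-each-section behaviour. */
static bool sync_per_section(struct caps c)
{
    return c.multifd && c.flush_after_each_section && !c.mapped_ram;
}

int main(void)
{
    struct caps c = { .multifd = true, .flush_after_each_section = false,
                      .mapped_ram = true };

    printf("sync on new round:       %d\n", sync_on_new_round(c));
    printf("emit MULTIFD_FLUSH flag: %d\n", emit_multifd_flush_flag(c));
    printf("sync per section:        %d\n", sync_per_section(c));
    return 0;
}

With the mapped-ram combination shown in main(), the per-round send
sync is kept, while neither the per-section sync nor the
RAM_SAVE_FLAG_MULTIFD_FLUSH stream flag is emitted.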
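
Similarly, a sketch (again illustrative only, not QEMU code) of the
load-side rules from the ram_load_precopy() hunks: the FLAG_* values
are arbitrary placeholders for the real RAM_SAVE_FLAG_* bits, and the
migrate_multifd() check is elided for brevity:

/*
 * Illustrative sketch only, not part of the patch: the load-side
 * rules from ram_load_precopy() as changed above.  FLAG_* values are
 * placeholders for the real RAM_SAVE_FLAG_* bits.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
    FLAG_ZERO          = 1 << 0,
    FLAG_MEM_SIZE      = 1 << 1,
    FLAG_PAGE          = 1 << 2,
    FLAG_EOS           = 1 << 3,
    FLAG_XBZRLE        = 1 << 4,
    FLAG_HOOK          = 1 << 5,
    FLAG_MULTIFD_FLUSH = 1 << 6,
};

/* With mapped-ram no pages travel on the main stream, so any
 * page-carrying or flush flag is unexpected and gets reported. */
static int mapped_ram_invalid_flags(void)
{
    return FLAG_HOOK | FLAG_MULTIFD_FLUSH | FLAG_PAGE |
           FLAG_XBZRLE | FLAG_ZERO;
}

/* When does the destination call multifd_recv_sync_main()? */
static bool recv_sync_after(int flag, bool mapped_ram,
                            bool flush_after_each_section)
{
    if (mapped_ram) {
        /* Once, right after the ramblock list has been parsed. */
        return flag == FLAG_MEM_SIZE;
    }
    /* Stream migration: on the explicit flush flag, or at EOS when
     * the compatibility property is set. */
    return flag == FLAG_MULTIFD_FLUSH ||
           (flag == FLAG_EOS && flush_after_each_section);
}

int main(void)
{
    printf("mapped-ram invalid flags: 0x%x\n", mapped_ram_invalid_flags());
    printf("sync at MEM_SIZE (mapped-ram): %d\n",
           recv_sync_after(FLAG_MEM_SIZE, true, false));
    printf("sync at EOS (mapped-ram):      %d\n",
           recv_sync_after(FLAG_EOS, true, false));
    return 0;
}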