@@ -115,6 +115,7 @@ const uint32_t arch_type = QEMU_ARCH;
#define RAM_SAVE_FLAG_EOS 0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE 0x40
+/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
static struct defconfig_file {
@@ -454,8 +455,15 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
p = memory_region_get_ram_ptr(mr) + offset;
/* When in doubt, send the page as normal */
- bytes_sent = -1;
- if (is_zero_page(p)) {
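+ /*
+ * Give the transport a first chance at the page: a registered hook
+ * (e.g. RDMA) may send it out of band. A return of -1 means "not
+ * handled", so we fall through to the normal save paths below;
+ * otherwise account it as a duplicate (0) or normal (>0) page.
+ */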
+ bytes_sent = ram_control_save_page(f, block->offset,
+ offset, TARGET_PAGE_SIZE);
+ if (bytes_sent != -1) {
+ if (bytes_sent > 0) {
+ acct_info.norm_pages++;
+ } else if (bytes_sent == 0) {
+ acct_info.dup_pages++;
+ }
+ } else if (is_zero_page(p)) {
acct_info.dup_pages++;
if (!ram_bulk_stage) {
bytes_sent = save_block_hdr(f, block, offset, cont,
@@ -598,6 +606,15 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
}
qemu_mutex_unlock_ramlist();
+
+ /*
+ * Please leave in place. These calls generate reserved messages in
+ * the RDMA protocol in order to pre-register RDMA memory in the
+ * future before the bulk round begins.
+ */
+ ram_control_before_iterate(f, RAM_CONTROL_SETUP);
+ ram_control_after_iterate(f, RAM_CONTROL_SETUP);
+
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
return 0;
@@ -616,6 +633,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
reset_ram_globals();
}
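+ /* Hook: let the transport prepare before this round's pages go out. */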
+ ram_control_before_iterate(f, RAM_CONTROL_ROUND);
+
t0 = qemu_get_clock_ns(rt_clock);
i = 0;
while ((ret = qemu_file_rate_limit(f)) == 0) {
@@ -646,6 +665,12 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
qemu_mutex_unlock_ramlist();
+ /*
+ * Must occur before EOS (or any QEMUFile operation)
+ * because of the RDMA protocol.
+ */
+ ram_control_after_iterate(f, RAM_CONTROL_ROUND);
+
if (ret < 0) {
bytes_transferred += total_sent;
return ret;
@@ -663,6 +688,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
qemu_mutex_lock_ramlist();
migration_bitmap_sync();
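+ /* Hook: final chance for the transport to act before the flush below. */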
+ ram_control_before_iterate(f, RAM_CONTROL_FINISH);
+
/* try transferring iterative blocks of memory */
/* flush all remaining blocks regardless of rate limiting */
@@ -676,6 +703,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
}
bytes_transferred += bytes_sent;
}
+
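+ /* Hook: transport finalizes; must precede any further QEMUFile output, as above. */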
+ ram_control_after_iterate(f, RAM_CONTROL_FINISH);
migration_end();
qemu_mutex_unlock_ramlist();
@@ -770,6 +799,21 @@ static inline void *host_from_stream_offset(QEMUFile *f,
return NULL;
}
+/*
+ * If a page (or a whole RDMA chunk) has been
+ * determined to be zero, then zap it.
+ */
+void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
+{
+ memset(host, ch, size);
+#ifndef _WIN32
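+ /*
+ * A zero fill need not keep its backing pages: MADV_DONTNEED lets
+ * the kernel drop them, and they read back as zeros on next touch.
+ * Only done when KVM is off or has a synchronous MMU, and when the
+ * host page size does not exceed the target page size.
+ */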
+ if (ch == 0 && (!kvm_enabled() || kvm_has_sync_mmu()) &&
+ getpagesize() <= TARGET_PAGE_SIZE) {
+ qemu_madvise(host, size, QEMU_MADV_DONTNEED);
+ }
+#endif
+}
+
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
ram_addr_t addr;
@@ -837,14 +881,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
ch = qemu_get_byte(f);
- memset(host, ch, TARGET_PAGE_SIZE);
-#ifndef _WIN32
- if (ch == 0 &&
- (!kvm_enabled() || kvm_has_sync_mmu()) &&
- getpagesize() <= TARGET_PAGE_SIZE) {
- qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
- }
-#endif
+ ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
} else if (flags & RAM_SAVE_FLAG_PAGE) {
void *host;
@@ -864,6 +901,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
goto done;
}
+ } else if (flags & RAM_SAVE_FLAG_HOOK) {
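+ /*
+ * Transport-specific data in the stream (e.g. RDMA dynamic page
+ * registration); the hook consumes its own portion of it.
+ */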
+ ram_control_load_hook(f, flags);
}
error = qemu_file_get_error(f);
if (error) {