@@ -795,13 +795,13 @@ static void ram_migration_cancel(void *opaque)
migration_end();
}
-static void reset_ram_globals(void)
+static void reset_ram_globals(bool reset_bulk_stage)
{
last_seen_block = NULL;
last_sent_block = NULL;
last_offset = 0;
last_version = ram_list.version;
- ram_bulk_stage = true;
+ ram_bulk_stage = reset_bulk_stage;
}
#define MAX_WAIT 50 /* ms, half buffered_file limit */
@@ -811,6 +811,15 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
RAMBlock *block;
int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
+ /*
+ * RAM stays open during micro-checkpointing for the next transaction.
+ */
+ if (migration_is_mc(migrate_get_current())) {
+ qemu_mutex_lock_ramlist();
+ reset_ram_globals(false);
+ goto skip_setup;
+ }
+
migration_bitmap = bitmap_new(ram_pages);
bitmap_set(migration_bitmap, 0, ram_pages);
migration_dirty_pages = ram_pages;
@@ -833,12 +842,14 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
qemu_mutex_lock_iothread();
qemu_mutex_lock_ramlist();
bytes_transferred = 0;
- reset_ram_globals();
+ reset_ram_globals(true);
memory_global_dirty_log_start();
migration_bitmap_sync();
qemu_mutex_unlock_iothread();
+skip_setup:
+
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
@@ -867,7 +878,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
qemu_mutex_lock_ramlist();
if (ram_list.version != last_version) {
- reset_ram_globals();
+ reset_ram_globals(true);
}
ram_control_before_iterate(f, RAM_CONTROL_ROUND);
@@ -948,7 +959,15 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
}
ram_control_after_iterate(f, RAM_CONTROL_FINISH);
- migration_end();
+
+ /*
+ * Only clean up at the end of normal migrations
+ * or if the MC destination failed and we got an error.
+ * Otherwise, we are (or will be soon) in MIG_STATE_MC.
+ */
+ if (!migrate_use_mc() || migration_has_failed(migrate_get_current())) {
+ migration_end();
+ }
qemu_mutex_unlock_ramlist();
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
@@ -95,6 +95,8 @@ int migrate_fd_close(MigrationState *s);
void add_migration_state_change_notifier(Notifier *notify);
void remove_migration_state_change_notifier(Notifier *notify);
bool migration_in_setup(MigrationState *);
+bool migration_is_active(MigrationState *);
+bool migration_is_mc(MigrationState *s);
bool migration_has_finished(MigrationState *);
bool migration_has_failed(MigrationState *);
MigrationState *migrate_get_current(void);
@@ -126,6 +128,15 @@ void migration_bitmap_worker_start(MigrationState *s);
void migration_bitmap_worker_stop(MigrationState *s);
void migrate_set_state(MigrationState *s, int old_state, int new_state);
+enum {
+ MIG_STATE_ERROR = -1,
+ MIG_STATE_NONE,
+ MIG_STATE_SETUP,
+ MIG_STATE_CANCELLED,
+ MIG_STATE_ACTIVE,
+ MIG_STATE_MC,
+ MIG_STATE_COMPLETED,
+};
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
/**
@@ -194,4 +205,12 @@ int ram_control_copy_page(QEMUFile *f,
ram_addr_t block_offset_source,
ram_addr_t offset_source,
long size);
+
+int migrate_use_mc(void);
+int migrate_use_mc_rdma_copy(void);
+
+#define MC_VERSION 1
+
+void qemu_rdma_info_save(QEMUFile *f, void *opaque);
+int qemu_rdma_info_load(QEMUFile *f, void *opaque, int version_id);
#endif
@@ -71,6 +71,7 @@ typedef int (QEMURamHookFunc)(QEMUFile *f, void *opaque, uint64_t flags);
#define RAM_CONTROL_ROUND 1
#define RAM_CONTROL_HOOK 2
#define RAM_CONTROL_FINISH 3
+#define RAM_CONTROL_FLUSH 4
/*
* This function allows override of where the RAM page
@@ -36,15 +36,6 @@
do { } while (0)
#endif
-enum {
- MIG_STATE_ERROR = -1,
- MIG_STATE_NONE,
- MIG_STATE_SETUP,
- MIG_STATE_CANCELLED,
- MIG_STATE_ACTIVE,
- MIG_STATE_COMPLETED,
-};
-
#define MAX_THROTTLE (32 << 20) /* Migration speed throttling */
/* Amount of time to allocate to each "chunk" of bandwidth-throttled
@@ -270,7 +261,7 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
MigrationState *s = migrate_get_current();
MigrationCapabilityStatusList *cap;
- if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP) {
+ if (migration_is_active(s)) {
error_set(errp, QERR_MIGRATION_ACTIVE);
return;
}
@@ -282,6 +273,17 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
/* shared migration helpers */
+bool migration_is_mc(MigrationState *s)
+{
+ return s->state == MIG_STATE_MC;
+}
+
+bool migration_is_active(MigrationState *s)
+{
+ return (s->state == MIG_STATE_ACTIVE) || migration_in_setup(s)
+ || migration_is_mc(s);
+}
+
static void migrate_fd_cleanup(void *opaque)
{
MigrationState *s = opaque;
@@ -299,7 +301,7 @@ static void migrate_fd_cleanup(void *opaque)
s->file = NULL;
}
- assert(s->state != MIG_STATE_ACTIVE);
+ assert(!migration_is_active(s));
if (s->state != MIG_STATE_COMPLETED) {
qemu_savevm_state_cancel();
@@ -308,7 +310,7 @@ static void migrate_fd_cleanup(void *opaque)
notifier_list_notify(&migration_state_notifiers, s);
}
-static void migrate_set_state(MigrationState *s, int old_state, int new_state)
+void migrate_set_state(MigrationState *s, int old_state, int new_state)
{
if (atomic_cmpxchg(&s->state, old_state, new_state) == new_state) {
trace_migrate_set_state(new_state);
@@ -405,7 +407,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
params.blk = has_blk && blk;
params.shared = has_inc && inc;
- if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP) {
+ if (migration_is_active(s)) {
error_set(errp, QERR_MIGRATION_ACTIVE);
return;
}
@@ -594,7 +596,10 @@ static void *migration_thread(void *opaque)
}
if (!qemu_file_get_error(s->file)) {
- migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_COMPLETED);
+ if (!migrate_use_mc()) {
+ migrate_set_state(s,
+ MIG_STATE_ACTIVE, MIG_STATE_COMPLETED);
+ }
break;
}
}