@@ -67,6 +67,19 @@ static inline void clear_bit(long nr, unsigned long *addr)
*p &= ~mask;
}
+/**
+ * clear_bit_atomic - Clears a bit in memory atomically
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
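+ *
+ * Atomic counterpart of clear_bit(); safe when other threads may be
+ * concurrently setting or clearing bits in the same word, e.g. via
+ * set_bit_atomic().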
+ */
+static inline void clear_bit_atomic(long nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+
+ qatomic_and(p, ~mask);
+}
+
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
@@ -150,3 +150,57 @@ void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp)
}
} while (++i < channels);
}
+
+int file_write_ramblock_iov(QIOChannel *ioc, const struct iovec *iov,
+ int niov, RAMBlock *block, Error **errp)
+{
+ ssize_t ret = -1;
+ int i, slice_idx, slice_num;
+ uintptr_t base, next, offset;
+ size_t len;
+
+ slice_idx = 0;
+ slice_num = 1;
+
+ /*
+ * If the iov array doesn't have contiguous elements, we need to
+ * split it into slices because we only have one file offset for the
+ * whole iov. Do this here so callers don't need to break the iov
+ * array themselves.
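+ *
+ * slice_idx marks the first element of the slice being built and
+ * slice_num counts how many contiguous elements it spans. For
+ * example, with elements A (4k), A+4k (4k) and B (4k), where B is
+ * not contiguous with the first two, the loop issues one pwritev()
+ * for [A, A+8k) and a second for B at its own file offset.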
+ */
+ for (i = 0; i < niov; i++, slice_num++) {
+ base = (uintptr_t) iov[i].iov_base;
+
+ if (i != niov - 1) {
+ len = iov[i].iov_len;
+ next = (uintptr_t) iov[i + 1].iov_base;
+
+ if (base + len == next) {
+ continue;
+ }
+ }
+
+ /*
+ * Use the offset of the first element of the segment that
+ * we're sending.
+ */
+ offset = (uintptr_t) iov[slice_idx].iov_base - (uintptr_t) block->host;
+ if (offset >= block->used_length) {
+ error_setg(errp, "offset " RAM_ADDR_FMT
+ "outside of ramblock %s range", offset, block->idstr);
+ ret = -1;
+ break;
+ }
+
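+ /*
+ * block->pages_offset is where this ramblock's pages start in the
+ * migration file; each page is written at a fixed offset derived
+ * from its position in the ramblock, so channels can write
+ * independently and in any order.
+ */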
+ ret = qio_channel_pwritev(ioc, &iov[slice_idx], slice_num,
+ block->pages_offset + offset, errp);
+ if (ret < 0) {
+ break;
+ }
+
+ slice_idx += slice_num;
+ slice_num = 0;
+ }
+
+ return (ret < 0) ? ret : 0;
+}
@@ -19,4 +19,6 @@ void file_start_outgoing_migration(MigrationState *s,
int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp);
void file_cleanup_outgoing_migration(void);
bool file_send_channel_create(gpointer opaque, Error **errp);
+int file_write_ramblock_iov(QIOChannel *ioc, const struct iovec *iov,
+ int niov, RAMBlock *block, Error **errp);
#endif
@@ -139,12 +139,14 @@ static bool transport_supports_multi_channels(MigrationAddress *addr)
if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
SocketAddress *saddr = &addr->u.socket;
- return saddr->type == SOCKET_ADDRESS_TYPE_INET ||
- saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
- saddr->type == SOCKET_ADDRESS_TYPE_VSOCK;
+ return (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
+ saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
+ saddr->type == SOCKET_ADDRESS_TYPE_VSOCK);
+ } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
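+ /*
+ * The file transport only supports multiple channels with
+ * mapped-ram, where each channel writes pages at its own fixed
+ * file offsets rather than to a shared sequential stream.
+ */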
+ return migrate_mapped_ram();
+ } else {
+ return false;
}
-
- return false;
}
static bool migration_needs_seekable_channel(void)
@@ -1988,6 +1990,11 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
error_setg(errp, "Cannot use TLS with mapped-ram");
return false;
}
+
+ if (migrate_multifd_compression()) {
+ error_setg(errp, "Cannot use compression with mapped-ram");
+ return false;
+ }
}
if (migrate_mode_is_cpr(s)) {
@@ -108,6 +108,17 @@ void multifd_send_channel_created(void)
qemu_sem_post(&multifd_send_state->channels_created);
}
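+/*
+ * Record the pages in this batch in the ramblock's file bitmap.
+ * Several send threads may update the same ramblock's bitmap
+ * concurrently, hence the atomic helper.
+ */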
+static void multifd_set_file_bitmap(MultiFDSendParams *p)
+{
+ MultiFDPages_t *pages = p->pages;
+
+ assert(pages->block);
+
+ for (int i = 0; i < pages->num; i++) {
+ ramblock_set_file_bmap_atomic(pages->block, pages->offset[i]);
+ }
+}
+
/* Multifd without compression */
/**
@@ -169,6 +180,8 @@ static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
if (!multifd_use_packets()) {
multifd_send_prepare_iovs(p);
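+ /*
+ * Note which pages this channel is about to write; the bitmap is
+ * only written to the file after all channels have finished
+ * sending (see ram_save_file_bmap()).
+ */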
+ multifd_set_file_bitmap(p);
+
return 0;
}
@@ -867,8 +880,15 @@ static void *multifd_send_thread(void *opaque)
break;
}
- ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
- 0, p->write_flags, &local_err);
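+ /*
+ * With mapped-ram, pages go to fixed offsets in the migration
+ * file rather than into a sequential stream, so use positioned
+ * writes.
+ */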
+ if (migrate_mapped_ram()) {
+ ret = file_write_ramblock_iov(p->c, p->iov, p->iovs_num,
+ p->pages->block, &local_err);
+ } else {
+ ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num,
+ NULL, 0, p->write_flags,
+ &local_err);
+ }
+
if (ret != 0) {
break;
}
@@ -654,12 +654,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
}
if (new_caps[MIGRATION_CAPABILITY_MAPPED_RAM]) {
- if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
- error_setg(errp,
- "Mapped-ram migration is incompatible with multifd");
- return false;
- }
-
if (new_caps[MIGRATION_CAPABILITY_XBZRLE]) {
error_setg(errp,
"Mapped-ram migration is incompatible with xbzrle");
@@ -1252,6 +1246,13 @@ bool migrate_params_check(MigrationParameters *params, Error **errp)
}
#endif
+ if (migrate_mapped_ram() &&
+ (migrate_multifd_compression() || migrate_tls())) {
+ error_setg(errp,
+ "Mapped-ram only available for non-compressed non-TLS multifd migration");
+ return false;
+ }
+
if (params->has_x_vcpu_dirty_limit_period &&
(params->x_vcpu_dirty_limit_period < 1 ||
params->x_vcpu_dirty_limit_period > 1000)) {
@@ -1148,7 +1148,7 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
if (migrate_mapped_ram()) {
/* zero pages are not transferred with mapped-ram */
- clear_bit(offset >> TARGET_PAGE_BITS, pss->block->file_bmap);
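+ /*
+ * Multifd send threads may be setting bits in this bitmap
+ * concurrently, so the clear must be atomic.
+ */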
+ clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap);
return 1;
}
@@ -2445,8 +2445,6 @@ static void ram_save_cleanup(void *opaque)
block->clear_bmap = NULL;
g_free(block->bmap);
block->bmap = NULL;
- g_free(block->file_bmap);
- block->file_bmap = NULL;
}
xbzrle_cleanup();
@@ -3135,9 +3133,22 @@ static void ram_save_file_bmap(QEMUFile *f)
qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size,
block->bitmap_offset);
ram_transferred_add(bitmap_size);
+
+ /*
+ * Free the bitmap here to catch any synchronization issues
+ * with multifd channels. No channels should be sending pages
+ * after we've written the bitmap to file.
+ */
+ g_free(block->file_bmap);
+ block->file_bmap = NULL;
}
}
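+/*
+ * Called from multifd send threads, which can race with each other
+ * and with the migration thread clearing zero-page bits in the same
+ * bitmap, hence the atomic set.
+ */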
+void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset)
+{
+ set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
+}
+
/**
* ram_save_iterate: iterative stage for migration
*
@@ -75,6 +75,7 @@ bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb, Error **errp);
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start);
void postcopy_preempt_shutdown_file(MigrationState *s);
void *postcopy_preempt_thread(void *opaque);
+void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset);
/* ram cache */
int colo_init_ram_cache(void);