--- a/migration/migration-stats.h
+++ b/migration/migration-stats.h
@@ -50,6 +50,10 @@ typedef struct {
* Number of times we have synchronized guest bitmaps.
*/
Stat64 dirty_sync_count;
+ /*
+ * Number of migration iterations processed.
+ */
+ Stat64 iteration_count;
/*
* Number of times zero copy failed to send any page using zero
* copy.
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1197,6 +1197,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
info->ram->mbps = s->mbps;
info->ram->dirty_sync_count =
stat64_get(&mig_stats.dirty_sync_count);
+ info->ram->iteration_count = stat64_get(&mig_stats.iteration_count);
info->ram->dirty_sync_missed_zero_copy =
stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
info->ram->postcopy_requests =
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -594,7 +594,7 @@ static void xbzrle_cache_zero_page(ram_addr_t current_addr)
/* We don't care if this fails to allocate a new cache page
* as long as it updated an old one */
cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
- stat64_get(&mig_stats.dirty_sync_count));
+ stat64_get(&mig_stats.iteration_count));
}
#define ENCODING_FLAG_XBZRLE 0x1
@@ -620,7 +620,7 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
int encoded_len = 0, bytes_xbzrle;
uint8_t *prev_cached_page;
QEMUFile *file = pss->pss_channel;
- uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
+ uint64_t generation = stat64_get(&mig_stats.iteration_count);
if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
xbzrle_counters.cache_miss++;
@@ -1075,6 +1075,10 @@ static void migration_bitmap_sync(RAMState *rs,
RAMBlock *block;
int64_t end_time;
+ if (!periodic) {
+ stat64_add(&mig_stats.iteration_count, 1);
+ }
+
stat64_add(&mig_stats.dirty_sync_count, 1);
if (!rs->time_last_bitmap_sync) {
@@ -1111,8 +1115,8 @@ static void migration_bitmap_sync(RAMState *rs,
rs->num_dirty_pages_period = 0;
rs->bytes_xfer_prev = migration_transferred_bytes();
}
- if (migrate_events()) {
- uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
+ if (!periodic && migrate_events()) {
+ uint64_t generation = stat64_get(&mig_stats.iteration_count);
qapi_event_send_migration_pass(generation);
}
}
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -60,6 +60,9 @@
# between 0 and @dirty-sync-count * @multifd-channels. (since
# 7.1)
#
+# @iteration-count: The number of iterations since migration started.
+# (since 9.2)
+#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
@@ -72,7 +75,8 @@
'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
'postcopy-bytes': 'uint64',
- 'dirty-sync-missed-zero-copy': 'uint64' } }
+ 'dirty-sync-missed-zero-copy': 'uint64',
+ 'iteration-count': 'uint64' } }
##
# @XBZRLECacheStats:
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -278,7 +278,7 @@ static int64_t read_migrate_property_int(QTestState *who, const char *property)
static uint64_t get_migration_pass(QTestState *who)
{
- return read_ram_property_int(who, "dirty-sync-count");
+ return read_ram_property_int(who, "iteration-count");
}
static void read_blocktime(QTestState *who)
The original migration information dirty-sync-count can no longer
reflect the iteration count once the next commit introduces periodic
synchronization; add a dedicated iteration count to compensate.

Signed-off-by: Hyman Huang <yong.huang@smartx.com>
---
 migration/migration-stats.h  |  4 ++++
 migration/migration.c        |  1 +
 migration/ram.c              | 12 ++++++++----
 qapi/migration.json          |  6 +++++-
 tests/qtest/migration-test.c |  2 +-
 5 files changed, 19 insertions(+), 6 deletions(-)
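As a purely illustrative sketch (the field values below are made up;
only the presence of the new "iteration-count" member next to
"dirty-sync-count" under "ram" in the query-migrate reply is the
point), the counter would show up via QMP roughly like this:

    -> { "execute": "query-migrate" }
    <- { "return": { "status": "active",
                     "ram": { "dirty-sync-count": 7,
                              "iteration-count": 5,
                              ... } } }

Once the next commit introduces periodic synchronization,
iteration-count is expected to stay at or below dirty-sync-count,
since only non-periodic bitmap syncs bump it.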