@@ -71,5 +71,6 @@ void decompress_data_with_multi_threads(QEMUFile *f, void *host, int len);
void populate_compress(MigrationInfo *info);
uint64_t ram_compressed_pages(void);
void update_compress_thread_counts(const CompressParam *param, int bytes_xmit);
+void compress_update_rates(uint64_t page_count);
#endif
@@ -34,7 +34,6 @@
#include "io/channel.h"
extern XBZRLECacheStats xbzrle_counters;
-extern CompressionStats compression_counters;
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_NOT_IGNORED(block) \
@@ -41,7 +41,20 @@
#include "ram.h"
#include "migration-stats.h"
-CompressionStats compression_counters;
+static struct {
+ int64_t pages;
+ int64_t busy;
+ double busy_rate;
+ int64_t compressed_size;
+ double compression_rate;
+ /* compression statistics since the beginning of the period */
+ /* number of times no free thread was available to compress data */
+ uint64_t compress_thread_busy_prev;
+ /* number of bytes after compression */
+ uint64_t compressed_size_prev;
+ /* number of compressed pages */
+ uint64_t compress_pages_prev;
+} compression_counters;
static CompressParam *comp_param;
static QemuThread *compress_threads;
@@ -518,3 +531,30 @@ void update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
compression_counters.pages++;
}
+void compress_update_rates(uint64_t page_count)
+{
+ if (!migrate_compress()) {
+ return;
+ }
+ compression_counters.busy_rate = (double)(compression_counters.busy -
+ compression_counters.compress_thread_busy_prev) / page_count;
+ compression_counters.compress_thread_busy_prev =
+ compression_counters.busy;
+
+ double compressed_size = compression_counters.compressed_size -
+ compression_counters.compressed_size_prev;
+ if (compressed_size) {
+ double uncompressed_size = (compression_counters.pages -
+ compression_counters.compress_pages_prev) *
+ qemu_target_page_size();
+
+ /* Compression-Ratio = Uncompressed-size / Compressed-size */
+ compression_counters.compression_rate =
+ uncompressed_size / compressed_size;
+
+ compression_counters.compress_pages_prev =
+ compression_counters.pages;
+ compression_counters.compressed_size_prev =
+ compression_counters.compressed_size;
+ }
+}
@@ -369,13 +369,6 @@ struct RAMState {
bool xbzrle_started;
/* Are we on the last stage of migration */
bool last_stage;
- /* compression statistics since the beginning of the period */
- /* amount of count that no free thread to compress data */
- uint64_t compress_thread_busy_prev;
- /* amount bytes after compression */
- uint64_t compressed_size_prev;
- /* amount of compressed pages */
- uint64_t compress_pages_prev;
/* total handled target pages at the beginning of period */
uint64_t target_page_count_prev;
@@ -945,7 +938,6 @@ uint64_t ram_get_total_transferred_pages(void)
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
- double compressed_size;
/* calculate period counters */
stat64_set(&mig_stats.dirty_pages_rate,
@@ -973,26 +965,7 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
rs->xbzrle_pages_prev = xbzrle_counters.pages;
rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
}
-
- if (migrate_compress()) {
- compression_counters.busy_rate = (double)(compression_counters.busy -
- rs->compress_thread_busy_prev) / page_count;
- rs->compress_thread_busy_prev = compression_counters.busy;
-
- compressed_size = compression_counters.compressed_size -
- rs->compressed_size_prev;
- if (compressed_size) {
- double uncompressed_size = (compression_counters.pages -
- rs->compress_pages_prev) * TARGET_PAGE_SIZE;
-
- /* Compression-Ratio = Uncompressed-size / Compressed-size */
- compression_counters.compression_rate =
- uncompressed_size / compressed_size;
-
- rs->compress_pages_prev = compression_counters.pages;
- rs->compressed_size_prev = compression_counters.compressed_size;
- }
- }
+ compress_update_rates(page_count);
}
/*