diff mbox series

[v2,1/8] migration: do not wait for free thread

Message ID 20180719121520.30026-2-xiaoguangrong@tencent.com
State New
Headers show
Series migration: compression optimization | expand

Commit Message

Xiao Guangrong July 19, 2018, 12:15 p.m. UTC
From: Xiao Guangrong <xiaoguangrong@tencent.com>

Instead of putting the main thread into a sleep state to wait for a
free compression thread, we can directly post the page out as a normal
page, which reduces the latency and uses CPUs more efficiently

A parameter, compress-wait-thread, is introduced; it can be
enabled if the user really wants the old behavior

Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
---
 hmp.c                 |  8 ++++++++
 migration/migration.c | 21 +++++++++++++++++++++
 migration/migration.h |  1 +
 migration/ram.c       | 45 ++++++++++++++++++++++++++-------------------
 qapi/migration.json   | 23 ++++++++++++++++++-----
 5 files changed, 74 insertions(+), 24 deletions(-)

Comments

Peter Xu July 23, 2018, 3:25 a.m. UTC | #1
On Thu, Jul 19, 2018 at 08:15:13PM +0800, guangrong.xiao@gmail.com wrote:
> @@ -3113,6 +3132,8 @@ static Property migration_properties[] = {
>      DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
>                        parameters.compress_threads,
>                        DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
> +    DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
> +                      parameters.compress_wait_thread, false),

This performance feature bit makes sense to me, but I would still
think it should be true by default to keep the old behavior:

- it might change the behavior drastically: we might be in a state
  between "normal" migration and "compressed" migration since we'll
  contain both of the pages.  Old compression users might not expect
  that.

- it might still even perform worse - an extreme case is that when
  network bandwidth is very very limited but instead we have plenty of
  CPU resources. [1]

So it's really a good tunable for me; people really need to
understand what it is before turning it on.

>      DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
>                        parameters.decompress_threads,
>                        DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
> diff --git a/migration/migration.h b/migration/migration.h
> index 64a7b33735..a46b9e6c8d 100644
> --- a/migration/migration.h
> +++ b/migration/migration.h
> @@ -271,6 +271,7 @@ bool migrate_use_return_path(void);
>  bool migrate_use_compression(void);
>  int migrate_compress_level(void);
>  int migrate_compress_threads(void);
> +int migrate_compress_wait_thread(void);
>  int migrate_decompress_threads(void);
>  bool migrate_use_events(void);
>  bool migrate_postcopy_blocktime(void);
> diff --git a/migration/ram.c b/migration/ram.c
> index 52dd678092..0ad234c692 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1889,30 +1889,34 @@ static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
>                                             ram_addr_t offset)
>  {
>      int idx, thread_count, bytes_xmit = -1, pages = -1;
> +    bool wait = migrate_compress_wait_thread();
>  
>      thread_count = migrate_compress_threads();
>      qemu_mutex_lock(&comp_done_lock);
> -    while (true) {
> -        for (idx = 0; idx < thread_count; idx++) {
> -            if (comp_param[idx].done) {
> -                comp_param[idx].done = false;
> -                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
> -                qemu_mutex_lock(&comp_param[idx].mutex);
> -                set_compress_params(&comp_param[idx], block, offset);
> -                qemu_cond_signal(&comp_param[idx].cond);
> -                qemu_mutex_unlock(&comp_param[idx].mutex);
> -                pages = 1;
> -                ram_counters.normal++;
> -                ram_counters.transferred += bytes_xmit;
> -                break;
> -            }
> -        }
> -        if (pages > 0) {
> +retry:
> +    for (idx = 0; idx < thread_count; idx++) {
> +        if (comp_param[idx].done) {
> +            comp_param[idx].done = false;
> +            bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
> +            qemu_mutex_lock(&comp_param[idx].mutex);
> +            set_compress_params(&comp_param[idx], block, offset);
> +            qemu_cond_signal(&comp_param[idx].cond);
> +            qemu_mutex_unlock(&comp_param[idx].mutex);
> +            pages = 1;
> +            ram_counters.normal++;
> +            ram_counters.transferred += bytes_xmit;
>              break;
> -        } else {
> -            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
>          }
>      }
> +
> +    /*
> +     * if there is no thread is free to compress the data and the user
> +     * really expects the slowdown, wait it.

Considering [1] above, IMHO it might not really be a slowdown, but it
depends.  Maybe only mention the fact that we're sending a
normal page instead of the compressed page if wait is not specified.

> +     */
> +    if (pages < 0 && wait) {
> +        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
> +        goto retry;
> +    }
>      qemu_mutex_unlock(&comp_done_lock);
>  
>      return pages;
> @@ -2226,7 +2230,10 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
>       * CPU resource.
>       */
>      if (block == rs->last_sent_block && save_page_use_compression(rs)) {
> -        return compress_page_with_multi_thread(rs, block, offset);
> +        res = compress_page_with_multi_thread(rs, block, offset);
> +        if (res > 0) {
> +            return res;
> +        }
>      } else if (migrate_use_multifd()) {
>          return ram_save_multifd_page(rs, block, offset);
>      }
> diff --git a/qapi/migration.json b/qapi/migration.json
> index 186e8a7303..b4f394844b 100644
> --- a/qapi/migration.json
> +++ b/qapi/migration.json
> @@ -462,6 +462,11 @@
>  # @compress-threads: Set compression thread count to be used in live migration,
>  #          the compression thread count is an integer between 1 and 255.
>  #
> +# @compress-wait-thread: Wait if no thread is free to compress the memory page
> +#          if it's enabled, otherwise, the page will be posted out immediately
> +#          in the main thread without compression. It's off on default.
> +#          (Since: 3.0)
> +#

Should "Since 3.1" in all the places.

We'll need to touch up the "by default" part depending on whether we'd
need to change it according to above comment.

Otherwise it looks good to me.

Thanks,
Xiao Guangrong July 23, 2018, 7:16 a.m. UTC | #2
On 07/23/2018 11:25 AM, Peter Xu wrote:
> On Thu, Jul 19, 2018 at 08:15:13PM +0800, guangrong.xiao@gmail.com wrote:
>> @@ -3113,6 +3132,8 @@ static Property migration_properties[] = {
>>       DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
>>                         parameters.compress_threads,
>>                         DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
>> +    DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
>> +                      parameters.compress_wait_thread, false),
> 
> This performance feature bit makes sense to me, but I would still
> think it should be true by default to keep the old behavior:
> 
> - it might change the behavior drastically: we might be in a state
>    between "normal" migration and "compressed" migration since we'll
>    contain both of the pages.  Old compression users might not expect
>    that.
> 
> - it might still even perform worse - an extreme case is that when
>    network bandwidth is very very limited but instead we have plenty of
>    CPU resources. [1]
> 
> So it's really a good tunable for me when people really needs to
> understand what's it before turning it on.

That looks good to me.

> 
>>       DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
>>                         parameters.decompress_threads,
>>                         DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
>> diff --git a/migration/migration.h b/migration/migration.h
>> index 64a7b33735..a46b9e6c8d 100644
>> --- a/migration/migration.h
>> +++ b/migration/migration.h
>> @@ -271,6 +271,7 @@ bool migrate_use_return_path(void);
>>   bool migrate_use_compression(void);
>>   int migrate_compress_level(void);
>>   int migrate_compress_threads(void);
>> +int migrate_compress_wait_thread(void);
>>   int migrate_decompress_threads(void);
>>   bool migrate_use_events(void);
>>   bool migrate_postcopy_blocktime(void);
>> diff --git a/migration/ram.c b/migration/ram.c
>> index 52dd678092..0ad234c692 100644
>> --- a/migration/ram.c
>> +++ b/migration/ram.c
>> @@ -1889,30 +1889,34 @@ static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
>>                                              ram_addr_t offset)
>>   {
>>       int idx, thread_count, bytes_xmit = -1, pages = -1;
>> +    bool wait = migrate_compress_wait_thread();
>>   
>>       thread_count = migrate_compress_threads();
>>       qemu_mutex_lock(&comp_done_lock);
>> -    while (true) {
>> -        for (idx = 0; idx < thread_count; idx++) {
>> -            if (comp_param[idx].done) {
>> -                comp_param[idx].done = false;
>> -                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
>> -                qemu_mutex_lock(&comp_param[idx].mutex);
>> -                set_compress_params(&comp_param[idx], block, offset);
>> -                qemu_cond_signal(&comp_param[idx].cond);
>> -                qemu_mutex_unlock(&comp_param[idx].mutex);
>> -                pages = 1;
>> -                ram_counters.normal++;
>> -                ram_counters.transferred += bytes_xmit;
>> -                break;
>> -            }
>> -        }
>> -        if (pages > 0) {
>> +retry:
>> +    for (idx = 0; idx < thread_count; idx++) {
>> +        if (comp_param[idx].done) {
>> +            comp_param[idx].done = false;
>> +            bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
>> +            qemu_mutex_lock(&comp_param[idx].mutex);
>> +            set_compress_params(&comp_param[idx], block, offset);
>> +            qemu_cond_signal(&comp_param[idx].cond);
>> +            qemu_mutex_unlock(&comp_param[idx].mutex);
>> +            pages = 1;
>> +            ram_counters.normal++;
>> +            ram_counters.transferred += bytes_xmit;
>>               break;
>> -        } else {
>> -            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
>>           }
>>       }
>> +
>> +    /*
>> +     * if there is no thread is free to compress the data and the user
>> +     * really expects the slowdown, wait it.
> 
> Considering [1] above, IMHO it might not really be a slow down but it
> depends.  Maybe only mentioning about the fact that we're sending a
> normal page instead of the compressed page if wait is not specified.
> 

Okay, will update the comments based on your suggestion.

>> +     */
>> +    if (pages < 0 && wait) {
>> +        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
>> +        goto retry;
>> +    }
>>       qemu_mutex_unlock(&comp_done_lock);
>>   
>>       return pages;
>> @@ -2226,7 +2230,10 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
>>        * CPU resource.
>>        */
>>       if (block == rs->last_sent_block && save_page_use_compression(rs)) {
>> -        return compress_page_with_multi_thread(rs, block, offset);
>> +        res = compress_page_with_multi_thread(rs, block, offset);
>> +        if (res > 0) {
>> +            return res;
>> +        }
>>       } else if (migrate_use_multifd()) {
>>           return ram_save_multifd_page(rs, block, offset);
>>       }
>> diff --git a/qapi/migration.json b/qapi/migration.json
>> index 186e8a7303..b4f394844b 100644
>> --- a/qapi/migration.json
>> +++ b/qapi/migration.json
>> @@ -462,6 +462,11 @@
>>   # @compress-threads: Set compression thread count to be used in live migration,
>>   #          the compression thread count is an integer between 1 and 255.
>>   #
>> +# @compress-wait-thread: Wait if no thread is free to compress the memory page
>> +#          if it's enabled, otherwise, the page will be posted out immediately
>> +#          in the main thread without compression. It's off on default.
>> +#          (Since: 3.0)
>> +#
> 
> Should "Since 3.1" in all the places.
> 

Oh... the thing goes faster than I realized :)

> We'll need to touch up the "by default" part depending on whether we'd
> need to change it according to above comment.
> 
> Otherwise it looks good to me.
> 

Okay, thank you, Peter.
Eric Blake July 23, 2018, 6:36 p.m. UTC | #3
On 07/19/2018 07:15 AM, guangrong.xiao@gmail.com wrote:
> From: Xiao Guangrong <xiaoguangrong@tencent.com>
> 
> Instead of putting the main thread to sleep state to wait for
> free compression thread, we can directly post it out as normal
> page that reduces the latency and uses CPUs more efficiently
> 
> A parameter, compress-wait-thread, is introduced, it can be
> enabled if the user really wants the old behavior
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
> ---
>   hmp.c                 |  8 ++++++++
>   migration/migration.c | 21 +++++++++++++++++++++
>   migration/migration.h |  1 +
>   migration/ram.c       | 45 ++++++++++++++++++++++++++-------------------
>   qapi/migration.json   | 23 ++++++++++++++++++-----
>   5 files changed, 74 insertions(+), 24 deletions(-)
> 

> +++ b/qapi/migration.json
> @@ -462,6 +462,11 @@
>   # @compress-threads: Set compression thread count to be used in live migration,
>   #          the compression thread count is an integer between 1 and 255.
>   #
> +# @compress-wait-thread: Wait if no thread is free to compress the memory page
> +#          if it's enabled, otherwise, the page will be posted out immediately
> +#          in the main thread without compression. It's off on default.
> +#          (Since: 3.0)

Is this a bug fix? It's awfully late in the release cycle to be adding 
new features; is this something that we can live without until 3.1?
Xiao Guangrong July 24, 2018, 7:40 a.m. UTC | #4
On 07/24/2018 02:36 AM, Eric Blake wrote:
> On 07/19/2018 07:15 AM, guangrong.xiao@gmail.com wrote:
>> From: Xiao Guangrong <xiaoguangrong@tencent.com>
>>
>> Instead of putting the main thread to sleep state to wait for
>> free compression thread, we can directly post it out as normal
>> page that reduces the latency and uses CPUs more efficiently
>>
>> A parameter, compress-wait-thread, is introduced, it can be
>> enabled if the user really wants the old behavior
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
>> ---
>>   hmp.c                 |  8 ++++++++
>>   migration/migration.c | 21 +++++++++++++++++++++
>>   migration/migration.h |  1 +
>>   migration/ram.c       | 45 ++++++++++++++++++++++++++-------------------
>>   qapi/migration.json   | 23 ++++++++++++++++++-----
>>   5 files changed, 74 insertions(+), 24 deletions(-)
>>
> 
>> +++ b/qapi/migration.json
>> @@ -462,6 +462,11 @@
>>   # @compress-threads: Set compression thread count to be used in live migration,
>>   #          the compression thread count is an integer between 1 and 255.
>>   #
>> +# @compress-wait-thread: Wait if no thread is free to compress the memory page
>> +#          if it's enabled, otherwise, the page will be posted out immediately
>> +#          in the main thread without compression. It's off on default.
>> +#          (Since: 3.0)
> 
> Is this a bug fix? It's awfully late in the release cycle to be adding new features; is this something that we can live without until 3.1?
> 

It's a performance improvement; I think it is not urgent. :)
diff mbox series

Patch

diff --git a/hmp.c b/hmp.c
index 2aafb50e8e..47d36e3ccf 100644
--- a/hmp.c
+++ b/hmp.c
@@ -327,6 +327,10 @@  void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "%s: %u\n",
             MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_THREADS),
             params->compress_threads);
+        assert(params->has_compress_wait_thread);
+        monitor_printf(mon, "%s: %s\n",
+            MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_WAIT_THREAD),
+            params->compress_wait_thread ? "on" : "off");
         assert(params->has_decompress_threads);
         monitor_printf(mon, "%s: %u\n",
             MigrationParameter_str(MIGRATION_PARAMETER_DECOMPRESS_THREADS),
@@ -1623,6 +1627,10 @@  void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
         p->has_compress_threads = true;
         visit_type_int(v, param, &p->compress_threads, &err);
         break;
+    case MIGRATION_PARAMETER_COMPRESS_WAIT_THREAD:
+        p->has_compress_wait_thread = true;
+        visit_type_bool(v, param, &p->compress_wait_thread, &err);
+        break;
     case MIGRATION_PARAMETER_DECOMPRESS_THREADS:
         p->has_decompress_threads = true;
         visit_type_int(v, param, &p->decompress_threads, &err);
diff --git a/migration/migration.c b/migration/migration.c
index 8d56d56930..0af75465b3 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -671,6 +671,8 @@  MigrationParameters *qmp_query_migrate_parameters(Error **errp)
     params->compress_level = s->parameters.compress_level;
     params->has_compress_threads = true;
     params->compress_threads = s->parameters.compress_threads;
+    params->has_compress_wait_thread = true;
+    params->compress_wait_thread = s->parameters.compress_wait_thread;
     params->has_decompress_threads = true;
     params->decompress_threads = s->parameters.decompress_threads;
     params->has_cpu_throttle_initial = true;
@@ -1061,6 +1063,10 @@  static void migrate_params_test_apply(MigrateSetParameters *params,
         dest->compress_threads = params->compress_threads;
     }
 
+    if (params->has_compress_wait_thread) {
+        dest->compress_wait_thread = params->compress_wait_thread;
+    }
+
     if (params->has_decompress_threads) {
         dest->decompress_threads = params->decompress_threads;
     }
@@ -1126,6 +1132,10 @@  static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
         s->parameters.compress_threads = params->compress_threads;
     }
 
+    if (params->has_compress_wait_thread) {
+        s->parameters.compress_wait_thread = params->compress_wait_thread;
+    }
+
     if (params->has_decompress_threads) {
         s->parameters.decompress_threads = params->decompress_threads;
     }
@@ -1852,6 +1862,15 @@  int migrate_compress_threads(void)
     return s->parameters.compress_threads;
 }
 
+int migrate_compress_wait_thread(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->parameters.compress_wait_thread;
+}
+
 int migrate_decompress_threads(void)
 {
     MigrationState *s;
@@ -3113,6 +3132,8 @@  static Property migration_properties[] = {
     DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
                       parameters.compress_threads,
                       DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
+    DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
+                      parameters.compress_wait_thread, false),
     DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
                       parameters.decompress_threads,
                       DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
diff --git a/migration/migration.h b/migration/migration.h
index 64a7b33735..a46b9e6c8d 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -271,6 +271,7 @@  bool migrate_use_return_path(void);
 bool migrate_use_compression(void);
 int migrate_compress_level(void);
 int migrate_compress_threads(void);
+int migrate_compress_wait_thread(void);
 int migrate_decompress_threads(void);
 bool migrate_use_events(void);
 bool migrate_postcopy_blocktime(void);
diff --git a/migration/ram.c b/migration/ram.c
index 52dd678092..0ad234c692 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1889,30 +1889,34 @@  static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                                            ram_addr_t offset)
 {
     int idx, thread_count, bytes_xmit = -1, pages = -1;
+    bool wait = migrate_compress_wait_thread();
 
     thread_count = migrate_compress_threads();
     qemu_mutex_lock(&comp_done_lock);
-    while (true) {
-        for (idx = 0; idx < thread_count; idx++) {
-            if (comp_param[idx].done) {
-                comp_param[idx].done = false;
-                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
-                qemu_mutex_lock(&comp_param[idx].mutex);
-                set_compress_params(&comp_param[idx], block, offset);
-                qemu_cond_signal(&comp_param[idx].cond);
-                qemu_mutex_unlock(&comp_param[idx].mutex);
-                pages = 1;
-                ram_counters.normal++;
-                ram_counters.transferred += bytes_xmit;
-                break;
-            }
-        }
-        if (pages > 0) {
+retry:
+    for (idx = 0; idx < thread_count; idx++) {
+        if (comp_param[idx].done) {
+            comp_param[idx].done = false;
+            bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
+            qemu_mutex_lock(&comp_param[idx].mutex);
+            set_compress_params(&comp_param[idx], block, offset);
+            qemu_cond_signal(&comp_param[idx].cond);
+            qemu_mutex_unlock(&comp_param[idx].mutex);
+            pages = 1;
+            ram_counters.normal++;
+            ram_counters.transferred += bytes_xmit;
             break;
-        } else {
-            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
         }
     }
+
+    /*
+     * if there is no thread is free to compress the data and the user
+     * really expects the slowdown, wait it.
+     */
+    if (pages < 0 && wait) {
+        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
+        goto retry;
+    }
     qemu_mutex_unlock(&comp_done_lock);
 
     return pages;
@@ -2226,7 +2230,10 @@  static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
      * CPU resource.
      */
     if (block == rs->last_sent_block && save_page_use_compression(rs)) {
-        return compress_page_with_multi_thread(rs, block, offset);
+        res = compress_page_with_multi_thread(rs, block, offset);
+        if (res > 0) {
+            return res;
+        }
     } else if (migrate_use_multifd()) {
         return ram_save_multifd_page(rs, block, offset);
     }
diff --git a/qapi/migration.json b/qapi/migration.json
index 186e8a7303..b4f394844b 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -462,6 +462,11 @@ 
 # @compress-threads: Set compression thread count to be used in live migration,
 #          the compression thread count is an integer between 1 and 255.
 #
+# @compress-wait-thread: Wait if no thread is free to compress the memory page
+#          if it's enabled, otherwise, the page will be posted out immediately
+#          in the main thread without compression. It's off on default.
+#          (Since: 3.0)
+#
 # @decompress-threads: Set decompression thread count to be used in live
 #          migration, the decompression thread count is an integer between 1
 #          and 255. Usually, decompression is at least 4 times as fast as
@@ -526,11 +531,11 @@ 
 # Since: 2.4
 ##
 { 'enum': 'MigrationParameter',
-  'data': ['compress-level', 'compress-threads', 'decompress-threads',
-           'cpu-throttle-initial', 'cpu-throttle-increment',
-           'tls-creds', 'tls-hostname', 'max-bandwidth',
-           'downtime-limit', 'x-checkpoint-delay', 'block-incremental',
-           'x-multifd-channels', 'x-multifd-page-count',
+  'data': ['compress-level', 'compress-threads', 'compress-wait-thread',
+           'decompress-threads', 'cpu-throttle-initial',
+           'cpu-throttle-increment', 'tls-creds', 'tls-hostname',
+           'max-bandwidth', 'downtime-limit', 'x-checkpoint-delay',
+           'block-incremental', 'x-multifd-channels', 'x-multifd-page-count',
            'xbzrle-cache-size', 'max-postcopy-bandwidth' ] }
 
 ##
@@ -540,6 +545,9 @@ 
 #
 # @compress-threads: compression thread count
 #
+# @compress-wait-thread: Wait if no thread is free to compress the memory page
+#                        (Since: 3.0)
+#
 # @decompress-threads: decompression thread count
 #
 # @cpu-throttle-initial: Initial percentage of time guest cpus are
@@ -610,6 +618,7 @@ 
 { 'struct': 'MigrateSetParameters',
   'data': { '*compress-level': 'int',
             '*compress-threads': 'int',
+            '*compress-wait-thread': 'bool',
             '*decompress-threads': 'int',
             '*cpu-throttle-initial': 'int',
             '*cpu-throttle-increment': 'int',
@@ -649,6 +658,9 @@ 
 #
 # @compress-threads: compression thread count
 #
+# @compress-wait-thread: Wait if no thread is free to compress the memory page
+#                        (Since: 3.0)
+#
 # @decompress-threads: decompression thread count
 #
 # @cpu-throttle-initial: Initial percentage of time guest cpus are
@@ -714,6 +726,7 @@ 
 { 'struct': 'MigrationParameters',
   'data': { '*compress-level': 'uint8',
             '*compress-threads': 'uint8',
+            '*compress-wait-thread': 'bool',
             '*decompress-threads': 'uint8',
             '*cpu-throttle-initial': 'uint8',
             '*cpu-throttle-increment': 'uint8',