Message ID: 20191018004850.9888-7-richardw.yang@linux.intel.com
State: New
Series: migration/postcopy: enable compress during postcopy
* Wei Yang (richardw.yang@linux.intel.com) wrote:
> Postcopy requires placing a whole host page, while the migration
> thread migrates memory in target page size. Postcopy therefore has to
> collect all target pages of one host page before placing it via
> userfaultfd.
>
> To enable compression during postcopy, two problems need to be solved:
>
> 1. Target pages arrive in random order
> 2. Target pages of one host page must arrive without being interleaved
>    with target pages from other host pages
>
> The first one is handled by the previous cleanup patch.
>
> This patch handles the second one by:
>
> 1. Flushing the compress threads for each host page
> 2. Waiting for the decompress threads before placing the host page
>
> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
> ---
>  migration/migration.c | 11 -----------
>  migration/ram.c       | 28 +++++++++++++++++++++++++++-
>  2 files changed, 27 insertions(+), 12 deletions(-)
>
> diff --git a/migration/migration.c b/migration/migration.c
> index 3febd0f8f3..72e53e2249 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1000,17 +1000,6 @@ static bool migrate_caps_check(bool *cap_list,
>  #endif
>
>      if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
> -        if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
> -            /* The decompression threads asynchronously write into RAM
> -             * rather than use the atomic copies needed to avoid
> -             * userfaulting. It should be possible to fix the decompression
> -             * threads for compatibility in future.
> -             */
> -            error_setg(errp, "Postcopy is not currently compatible "
> -                       "with compression");
> -            return false;
> -        }
> -

Yes, I think that's safe - as long as the 'compress' gets set on both
sides you should never get a combination of one side trying it and the
other not being capable.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

>          /* This check is reasonably expensive, so only when it's being
>           * set the first time, also it's only the destination that needs
>           * special support.
> diff --git a/migration/ram.c b/migration/ram.c
> index da0596411c..1403978d75 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -3449,6 +3449,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>
>      rs->target_page_count += pages;
>
> +    /*
> +     * During postcopy, it is necessary to make sure one whole host
> +     * page is sent in one chunk.
> +     */
> +    if (migrate_postcopy_ram()) {
> +        flush_compressed_data(rs);
> +    }
> +
>      /*
>       * we want to check in the 1st loop, just in case it was the 1st
>       * time and we had to sync the dirty bitmap.
> @@ -4025,6 +4033,7 @@ static int ram_load_postcopy(QEMUFile *f)
>      void *place_source = NULL;
>      RAMBlock *block = NULL;
>      uint8_t ch;
> +    int len;
>
>      addr = qemu_get_be64(f);
>
> @@ -4042,7 +4051,8 @@ static int ram_load_postcopy(QEMUFile *f)
>
>          trace_ram_load_postcopy_loop((uint64_t)addr, flags);
>          place_needed = false;
> -        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
> +        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
> +                     RAM_SAVE_FLAG_COMPRESS_PAGE)) {
>              block = ram_block_from_stream(f, flags);
>
>              host = host_from_ram_block_offset(block, addr);
> @@ -4114,6 +4124,17 @@ static int ram_load_postcopy(QEMUFile *f)
>                                    TARGET_PAGE_SIZE);
>              }
>              break;
> +        case RAM_SAVE_FLAG_COMPRESS_PAGE:
> +            all_zero = false;
> +            len = qemu_get_be32(f);
> +            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
> +                error_report("Invalid compressed data length: %d", len);
> +                ret = -EINVAL;
> +                break;
> +            }
> +            decompress_data_with_multi_threads(f, page_buffer, len);
> +            break;
> +
>          case RAM_SAVE_FLAG_EOS:
>              /* normal exit */
>              multifd_recv_sync_main();
> @@ -4125,6 +4146,11 @@ static int ram_load_postcopy(QEMUFile *f)
>              break;
>          }
>
> +        /* Got the whole host page, wait for decompress before placing. */
> +        if (place_needed) {
> +            ret |= wait_for_decompress_done();
> +        }
> +
>          /* Detect for any possible file errors */
>          if (!ret && qemu_file_get_error(f)) {
>              ret = qemu_file_get_error(f);
> --
> 2.17.1
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
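The "atomic copies" mentioned in the removed migrate_caps_check() comment
are the UFFDIO_COPY placements that postcopy relies on: the kernel maps a
fully populated page in a single step, so a faulting vCPU can never observe
a half-written page. That is exactly why the decompress threads must be
drained before placing. Below is a minimal fragment against the stock Linux
userfaultfd API, for illustration only; it is not QEMU's
postcopy_place_page(), and it assumes an already-registered userfaultfd
descriptor 'uffd'.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/*
 * Place one fully assembled host page at 'host_addr'. UFFDIO_COPY
 * copies 'len' bytes from 'src' and maps the page atomically, waking
 * any thread blocked on the fault; a partially written page is never
 * visible to the guest.
 */
static int place_host_page(int uffd, void *host_addr, void *src,
                           uint64_t len)
{
    struct uffdio_copy copy = {
        .dst = (uint64_t)(uintptr_t)host_addr,
        .src = (uint64_t)(uintptr_t)src,
        .len = len,
        .mode = 0,
    };

    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) {
        return -errno;
    }
    return 0;
}

If decompression were still writing into the source buffer when this ioctl
ran, the destination page would be mapped with stale bytes; hence the
wait_for_decompress_done() barrier the patch adds before placement.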
diff --git a/migration/migration.c b/migration/migration.c
index 3febd0f8f3..72e53e2249 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1000,17 +1000,6 @@ static bool migrate_caps_check(bool *cap_list,
 #endif

     if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
-        if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
-            /* The decompression threads asynchronously write into RAM
-             * rather than use the atomic copies needed to avoid
-             * userfaulting. It should be possible to fix the decompression
-             * threads for compatibility in future.
-             */
-            error_setg(errp, "Postcopy is not currently compatible "
-                       "with compression");
-            return false;
-        }
-
         /* This check is reasonably expensive, so only when it's being
          * set the first time, also it's only the destination that needs
          * special support.
diff --git a/migration/ram.c b/migration/ram.c
index da0596411c..1403978d75 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3449,6 +3449,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)

     rs->target_page_count += pages;

+    /*
+     * During postcopy, it is necessary to make sure one whole host
+     * page is sent in one chunk.
+     */
+    if (migrate_postcopy_ram()) {
+        flush_compressed_data(rs);
+    }
+
     /*
      * we want to check in the 1st loop, just in case it was the 1st
      * time and we had to sync the dirty bitmap.
@@ -4025,6 +4033,7 @@ static int ram_load_postcopy(QEMUFile *f)
     void *place_source = NULL;
     RAMBlock *block = NULL;
     uint8_t ch;
+    int len;

     addr = qemu_get_be64(f);

@@ -4042,7 +4051,8 @@ static int ram_load_postcopy(QEMUFile *f)

         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
         place_needed = false;
-        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
+        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
+                     RAM_SAVE_FLAG_COMPRESS_PAGE)) {
             block = ram_block_from_stream(f, flags);

             host = host_from_ram_block_offset(block, addr);
@@ -4114,6 +4124,17 @@ static int ram_load_postcopy(QEMUFile *f)
                                   TARGET_PAGE_SIZE);
             }
             break;
+        case RAM_SAVE_FLAG_COMPRESS_PAGE:
+            all_zero = false;
+            len = qemu_get_be32(f);
+            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
+                error_report("Invalid compressed data length: %d", len);
+                ret = -EINVAL;
+                break;
+            }
+            decompress_data_with_multi_threads(f, page_buffer, len);
+            break;
+
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
             multifd_recv_sync_main();
@@ -4125,6 +4146,11 @@ static int ram_load_postcopy(QEMUFile *f)
             break;
         }

+        /* Got the whole host page, wait for decompress before placing. */
+        if (place_needed) {
+            ret |= wait_for_decompress_done();
+        }
+
         /* Detect for any possible file errors */
         if (!ret && qemu_file_get_error(f)) {
             ret = qemu_file_get_error(f);
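The new RAM_SAVE_FLAG_COMPRESS_PAGE case bounds the on-wire length with
zlib's compressBound() before handing the payload to the decompress
threads: a stream longer than compressBound(TARGET_PAGE_SIZE) can never be
a valid compression of a single target page. A simplified, single-threaded
sketch of that validate-and-inflate step follows; decompress_target_page()
is a hypothetical stand-in for decompress_data_with_multi_threads(), and
the 4096-byte target page size is an assumption for the example.

#include <stdint.h>
#include <zlib.h>

#define TARGET_PAGE_SIZE 4096  /* assumed for this sketch */

/*
 * Validate and inflate one compressed target page from 'wire' into
 * 'page_buffer'. Returns 0 on success, -1 on malformed input.
 */
static int decompress_target_page(const uint8_t *wire, int len,
                                  uint8_t *page_buffer)
{
    uLongf out_len = TARGET_PAGE_SIZE;

    /* Mirror the patch's sanity check: reject lengths the sender could
     * never legitimately produce for one target page. */
    if (len <= 0 || (uLong)len > compressBound(TARGET_PAGE_SIZE)) {
        return -1;
    }

    /* A well-formed page must inflate to exactly one target page. */
    if (uncompress(page_buffer, &out_len, wire, (uLong)len) != Z_OK ||
        out_len != TARGET_PAGE_SIZE) {
        return -1;
    }
    return 0;
}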
Postcopy requires placing a whole host page, while the migration
thread migrates memory in target page size. Postcopy therefore has to
collect all target pages of one host page before placing it via
userfaultfd.

To enable compression during postcopy, two problems need to be solved:

1. Target pages arrive in random order
2. Target pages of one host page must arrive without being interleaved
   with target pages from other host pages

The first one is handled by the previous cleanup patch.

This patch handles the second one by:

1. Flushing the compress threads for each host page
2. Waiting for the decompress threads before placing the host page

Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
---
 migration/migration.c | 11 -----------
 migration/ram.c       | 28 +++++++++++++++++++++++++++-
 2 files changed, 27 insertions(+), 12 deletions(-)
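The save-side rule in point 1 can be pictured with the standalone sketch
below: flush the compression backlog at every host-page boundary so that
all target pages of one host page reach the stream as a single
uninterrupted chunk (the patch achieves this by calling
flush_compressed_data() from ram_save_iterate() while postcopy is active).
Here queue_target_page() and drain_compress_threads() are hypothetical
stand-ins for QEMU's compression path and its flush, and the page counts
are small demo values.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in: hand one target page to a compress thread. */
static void queue_target_page(size_t page)
{
    printf("compress+send target page %zu\n", page);
}

/* Hypothetical stand-in for flush_compressed_data(): wait for the
 * compress threads and push their output to the stream. */
static void drain_compress_threads(void)
{
    printf("-- flush: host page complete, chunk is contiguous --\n");
}

int main(void)
{
    /* Demo sizes: 4 target pages per host page, 2 host pages total. */
    const size_t pages_per_host = 4;
    const size_t npages = 8;

    for (size_t page = 0; page < npages; page++) {
        queue_target_page(page);
        /* Flushing at every host-page boundary guarantees the
         * destination sees all target pages of one host page before
         * any target page of the next one. */
        if ((page + 1) % pages_per_host == 0) {
            drain_compress_threads();
        }
    }
    return 0;
}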