Message ID | 20190720223939.11327-1-richardw.yang@linux.intel.com |
---|---|
State | New |
Series | migration/postcopy: use mis->bh instead of allocating a QEMUBH |
* Wei Yang (richardw.yang@linux.intel.com) wrote:
> For migration incoming side, it either quit in precopy or postcopy. It
> is save to use the mis->bh for both instead of allocating a dedicated

     ^^^^ 'safe' not 'save'

> QEMUBH for postcopy.
>
> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>

Yes, I think that's OK; it's a little subtle. Once postcopy is in 'run'
then the main incoming code will exit before running its bh, so you're
right that it's free for us to use.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

> ---
>  migration/savevm.c | 17 ++++-------------
>  1 file changed, 4 insertions(+), 13 deletions(-)
>
> diff --git a/migration/savevm.c b/migration/savevm.c
> index 25fe7ea05a..0105068579 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -1856,16 +1856,10 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
>      return 0;
>  }
>
> -
> -typedef struct {
> -    QEMUBH *bh;
> -} HandleRunBhData;
> -
>  static void loadvm_postcopy_handle_run_bh(void *opaque)
>  {
>      Error *local_err = NULL;
> -    HandleRunBhData *data = opaque;
> -    MigrationIncomingState *mis = migration_incoming_get_current();
> +    MigrationIncomingState *mis = opaque;
>
>      /* TODO we should move all of this lot into postcopy_ram.c or a shared code
>       * in migration.c
> @@ -1897,8 +1891,7 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
>          runstate_set(RUN_STATE_PAUSED);
>      }
>
> -    qemu_bh_delete(data->bh);
> -    g_free(data);
> +    qemu_bh_delete(mis->bh);
>  }
>
>  /* After all discards we can start running and asking for pages */
> @@ -1906,7 +1899,6 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
>  {
>      PostcopyState old_ps = POSTCOPY_INCOMING_LISTENING;
>      PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING, &old_ps);
> -    HandleRunBhData *data;
>
>      trace_loadvm_postcopy_handle_run();
>      if (ps != old_ps) {
> @@ -1914,9 +1906,8 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
>          return -1;
>      }
>
> -    data = g_new(HandleRunBhData, 1);
> -    data->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, data);
> -    qemu_bh_schedule(data->bh);
> +    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis);
> +    qemu_bh_schedule(mis->bh);
>
>      /* We need to finish reading the stream from the package
>       * and also stop reading anything more from the stream that loaded the
> --
> 2.17.1
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On Tue, Jul 23, 2019 at 06:23:53PM +0100, Dr. David Alan Gilbert wrote:
>* Wei Yang (richardw.yang@linux.intel.com) wrote:
>> For migration incoming side, it either quit in precopy or postcopy. It
>> is save to use the mis->bh for both instead of allocating a dedicated
>
>     ^^^^ 'safe' not 'save'
>

oops, thanks for pointing out.

>> QEMUBH for postcopy.
>>
>> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
>
>Yes, I think that's OK; it's a little subtle. Once postcopy is in 'run'
>then the main incoming code will exit before running its bh, so you're
>right that it's free for us to use.
>
>
>Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
>
>> ---
>>  migration/savevm.c | 17 ++++-------------
>>  1 file changed, 4 insertions(+), 13 deletions(-)
>>
>> diff --git a/migration/savevm.c b/migration/savevm.c
>> index 25fe7ea05a..0105068579 100644
>> --- a/migration/savevm.c
>> +++ b/migration/savevm.c
>> @@ -1856,16 +1856,10 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
>>      return 0;
>>  }
>>
>> -
>> -typedef struct {
>> -    QEMUBH *bh;
>> -} HandleRunBhData;
>> -
>>  static void loadvm_postcopy_handle_run_bh(void *opaque)
>>  {
>>      Error *local_err = NULL;
>> -    HandleRunBhData *data = opaque;
>> -    MigrationIncomingState *mis = migration_incoming_get_current();
>> +    MigrationIncomingState *mis = opaque;
>>
>>      /* TODO we should move all of this lot into postcopy_ram.c or a shared code
>>       * in migration.c
>> @@ -1897,8 +1891,7 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
>>          runstate_set(RUN_STATE_PAUSED);
>>      }
>>
>> -    qemu_bh_delete(data->bh);
>> -    g_free(data);
>> +    qemu_bh_delete(mis->bh);
>>  }
>>
>>  /* After all discards we can start running and asking for pages */
>> @@ -1906,7 +1899,6 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
>>  {
>>      PostcopyState old_ps = POSTCOPY_INCOMING_LISTENING;
>>      PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING, &old_ps);
>> -    HandleRunBhData *data;
>>
>>      trace_loadvm_postcopy_handle_run();
>>      if (ps != old_ps) {
>> @@ -1914,9 +1906,8 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
>>          return -1;
>>      }
>>
>> -    data = g_new(HandleRunBhData, 1);
>> -    data->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, data);
>> -    qemu_bh_schedule(data->bh);
>> +    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis);
>> +    qemu_bh_schedule(mis->bh);
>>
>>      /* We need to finish reading the stream from the package
>>       * and also stop reading anything more from the stream that loaded the
>> --
>> 2.17.1
>>
>--
>Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
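To make the ownership change discussed above concrete, here is a minimal standalone sketch of the pattern the patch adopts. It is not QEMU code: `BH`, `bh_new`, `bh_schedule`, `bh_delete` and `IncomingState` are hypothetical stand-ins for QEMU's QEMUBH API and `MigrationIncomingState`. The point it illustrates is that once the callback's opaque pointer is the long-lived incoming state itself, the bottom half only has to delete `mis->bh` instead of freeing a dedicated wrapper struct, and reusing the single `bh` field is fine because the precopy and postcopy completion paths never need it at the same time.

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy bottom-half, standing in for QEMUBH. */
typedef struct BH {
    void (*cb)(void *);
    void *opaque;
} BH;

static BH *bh_new(void (*cb)(void *), void *opaque)
{
    BH *bh = malloc(sizeof(*bh));
    bh->cb = cb;
    bh->opaque = opaque;
    return bh;
}

/* In QEMU the main loop runs a scheduled bh later; here we run it inline. */
static void bh_schedule(BH *bh)
{
    bh->cb(bh->opaque);
}

static void bh_delete(BH *bh)
{
    free(bh);
}

/* Stand-in for MigrationIncomingState: one long-lived struct per incoming
 * migration, with a single bh slot shared by the precopy and postcopy
 * completion paths (they are never pending at the same time). */
typedef struct IncomingState {
    BH *bh;
} IncomingState;

static void handle_run_bh(void *opaque)
{
    IncomingState *mis = opaque;   /* opaque is the state itself now */
    printf("postcopy 'run' bottom half\n");
    bh_delete(mis->bh);            /* no separate wrapper struct to free */
    mis->bh = NULL;
}

int main(void)
{
    IncomingState mis = { .bh = NULL };

    /* Reusing mis.bh is safe only because any earlier (precopy) user of the
     * field has already run and deleted its bh by the time postcopy reaches
     * the 'run' phase -- the subtlety noted in the review above. */
    mis.bh = bh_new(handle_run_bh, &mis);
    bh_schedule(mis.bh);
    return 0;
}
```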
diff --git a/migration/savevm.c b/migration/savevm.c
index 25fe7ea05a..0105068579 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1856,16 +1856,10 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
     return 0;
 }
 
-
-typedef struct {
-    QEMUBH *bh;
-} HandleRunBhData;
-
 static void loadvm_postcopy_handle_run_bh(void *opaque)
 {
     Error *local_err = NULL;
-    HandleRunBhData *data = opaque;
-    MigrationIncomingState *mis = migration_incoming_get_current();
+    MigrationIncomingState *mis = opaque;
 
     /* TODO we should move all of this lot into postcopy_ram.c or a shared code
      * in migration.c
@@ -1897,8 +1891,7 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
         runstate_set(RUN_STATE_PAUSED);
     }
 
-    qemu_bh_delete(data->bh);
-    g_free(data);
+    qemu_bh_delete(mis->bh);
 }
 
 /* After all discards we can start running and asking for pages */
@@ -1906,7 +1899,6 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
 {
     PostcopyState old_ps = POSTCOPY_INCOMING_LISTENING;
     PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING, &old_ps);
-    HandleRunBhData *data;
 
     trace_loadvm_postcopy_handle_run();
     if (ps != old_ps) {
@@ -1914,9 +1906,8 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
         return -1;
     }
 
-    data = g_new(HandleRunBhData, 1);
-    data->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, data);
-    qemu_bh_schedule(data->bh);
+    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis);
+    qemu_bh_schedule(mis->bh);
 
     /* We need to finish reading the stream from the package
      * and also stop reading anything more from the stream that loaded the
For migration incoming side, it either quit in precopy or postcopy. It
is save to use the mis->bh for both instead of allocating a dedicated
QEMUBH for postcopy.

Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
---
 migration/savevm.c | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)