
[V4,01/11] cpus: pass runstate to vm_prepare_start

Message ID: 1693333086-392798-2-git-send-email-steven.sistare@oracle.com
State: New
Series: fix migration of suspended runstate

Commit Message

Steve Sistare Aug. 29, 2023, 6:17 p.m. UTC
When a vm in the suspended state is migrated, we must call vm_prepare_start
on the destination, so a later system_wakeup properly resumes the guest,
when main_loop_should_exit calls resume_all_vcpus.  However, the runstate
should remain suspended until system_wakeup is called, so allow the caller
to pass the new state to vm_prepare_start, rather than assume the new state
is RUN_STATE_RUNNING.  Modify vm state change handlers that check
RUN_STATE_RUNNING to instead use the running parameter.

No functional change.

Suggested-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 backends/tpm/tpm_emulator.c | 2 +-
 gdbstub/softmmu.c           | 2 +-
 hw/usb/hcd-ehci.c           | 2 +-
 hw/usb/redirect.c           | 2 +-
 hw/xen/xen-hvm-common.c     | 2 +-
 include/sysemu/runstate.h   | 3 ++-
 softmmu/cpus.c              | 8 ++++----
 7 files changed, 11 insertions(+), 10 deletions(-)
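
For illustration only, here is a minimal sketch (not part of this patch) of how a
destination-side caller might use the new parameter so that the vCPUs are prepared
while the runstate remains suspended until system_wakeup.  The vm_resume() helper
name is hypothetical and is shown purely to make the intent concrete.

    #include "sysemu/runstate.h"

    /* Hypothetical helper: start or prepare the VM according to the incoming state. */
    static void vm_resume(RunState state)
    {
        if (state == RUN_STATE_RUNNING) {
            vm_start();                        /* prepares and resumes the vCPUs */
        } else {
            /*
             * Prepare to run but preserve the incoming runstate, e.g.
             * RUN_STATE_SUSPENDED; the vCPUs are resumed later, when a
             * system_wakeup request eventually leads to resume_all_vcpus.
             */
            vm_prepare_start(false, state);
        }
    }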

Comments

Peter Xu Aug. 30, 2023, 3:52 p.m. UTC | #1
On Tue, Aug 29, 2023 at 11:17:56AM -0700, Steve Sistare wrote:
> When a vm in the suspended state is migrated, we must call vm_prepare_start
> on the destination, so a later system_wakeup properly resumes the guest,
> when main_loop_should_exit calls resume_all_vcpus.  However, the runstate
> should remain suspended until system_wakeup is called, so allow the caller
> to pass the new state to vm_prepare_start, rather than assume the new state
> is RUN_STATE_RUNNING.  Modify vm state change handlers that check
> RUN_STATE_RUNNING to instead use the running parameter.
> 
> No functional change.
> 
> Suggested-by: Peter Xu <peterx@redhat.com>
> Signed-off-by: Steve Sistare <steven.sistare@oracle.com>

I think all the call sites should be covered indeed, via:

qemu_add_vm_change_state_handler_prio
    qdev_add_vm_change_state_handler
        virtio_blk_device_realize[1653] qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
        scsi_qdev_realize[289]         dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
        vfio_migration_init[796]       migration->vm_state = qdev_add_vm_change_state_handler(vbasedev->dev,
        virtio_init[3189]              vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
    qemu_add_vm_change_state_handler
        xen_init[106]                  qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
        audio_init[1827]               e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
        tpm_emulator_inst_init[978]    qemu_add_vm_change_state_handler(tpm_emulator_vm_state_change,
        blk_root_activate[223]         blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
        gdbserver_start[384]           qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
        pflash_post_load[1038]         pfl->vmstate = qemu_add_vm_change_state_handler(postload_update_cb,
        qxl_realize_common[2202]       qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl);
        kvmclock_realize[233]          qemu_add_vm_change_state_handler(kvmclock_vm_state_change, s);
        kvm_pit_realizefn[298]         qemu_add_vm_change_state_handler(kvm_pit_vm_state_change, s);
        vapic_post_load[796]           qemu_add_vm_change_state_handler(kvmvapic_vm_state_change, s);
        ide_bus_register_restart_cb[2767] bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
        kvm_arm_its_realize[122]       qemu_add_vm_change_state_handler(vm_change_state_handler, s);
        kvm_arm_gicv3_realize[888]     qemu_add_vm_change_state_handler(vm_change_state_handler, s);
        kvmppc_xive_connect[794]       xive->change = qemu_add_vm_change_state_handler(
        via1_post_load[971]            v1s->vmstate = qemu_add_vm_change_state_handler(
        e1000e_core_pci_realize[3379]  qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);
        igb_core_pci_realize[4012]     core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);
        spapr_nvram_post_load[235]     nvram->vmstate = qemu_add_vm_change_state_handler(postload_update_cb,
        ppc_booke_timers_init[366]     qemu_add_vm_change_state_handler(cpu_state_change_handler, cpu);
        spapr_machine_init[3070]       qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
        kvm_s390_tod_realize[133]      qemu_add_vm_change_state_handler(kvm_s390_tod_vm_state_change, td);
        usb_ehci_realize[2540]         s->vmstate = qemu_add_vm_change_state_handler(usb_ehci_vm_state_change, s);
        usb_host_auto_check[1912]      usb_vmstate = qemu_add_vm_change_state_handler(usb_host_vm_state, NULL);
        usbredir_realize[1466]         qemu_add_vm_change_state_handler(usbredir_vm_state_change, dev);
        virtio_rng_device_realize[226] vrng->vmstate = qemu_add_vm_change_state_handler(virtio_rng_vm_state_change,
        xen_do_ioreq_register[825]     qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
        net_init_clients[1644]         qemu_add_vm_change_state_handler(net_vm_change_state_handler, NULL);
        memory_global_dirty_log_stop[2978] vmstate_change = qemu_add_vm_change_state_handler(
        hvf_arch_init[2036]            qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
        kvm_arch_init_vcpu[567]        qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);
        kvm_arch_init_vcpu[2191]       cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
        sev_kvm_init[1014]             qemu_add_vm_change_state_handler(sev_vm_state_change, sev);
        whpx_init_vcpu[2248]           qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr);
        kvm_arch_init_vcpu[70]         qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
        kvm_arch_init_vcpu[891]        qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);
        gtk_display_init[2410]         qemu_add_vm_change_state_handler(gd_change_runstate, s);
        qemu_spice_display_init_done[651] qemu_add_vm_change_state_handler(vm_change_state_handler, NULL);
        qemu_spice_add_interface[868]  qemu_add_vm_change_state_handler(vm_change_state_handler, NULL);

Looks all correct:

Reviewed-by: Peter Xu <peterx@redhat.com>
Steve Sistare Aug. 30, 2023, 3:56 p.m. UTC | #2
On 8/30/2023 11:52 AM, Peter Xu wrote:
> On Tue, Aug 29, 2023 at 11:17:56AM -0700, Steve Sistare wrote:
>> When a vm in the suspended state is migrated, we must call vm_prepare_start
>> on the destination, so a later system_wakeup properly resumes the guest,
>> when main_loop_should_exit calls resume_all_vcpus.  However, the runstate
>> should remain suspended until system_wakeup is called, so allow the caller
>> to pass the new state to vm_prepare_start, rather than assume the new state
>> is RUN_STATE_RUNNING.  Modify vm state change handlers that check
>> RUN_STATE_RUNNING to instead use the running parameter.
>>
>> No functional change.
>>
>> Suggested-by: Peter Xu <peterx@redhat.com>
>> Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
> 
> I think all the call sites should be covered indeed, via:

Indeed, I laboriously checked every handler, like you, and then I realized
that searching for RUN_STATE_RUNNING outside of runstate.c yielded many
fewer call sites to check!

- Steve


Patch

diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c
index 402a2d6..a8e559a 100644
--- a/backends/tpm/tpm_emulator.c
+++ b/backends/tpm/tpm_emulator.c
@@ -907,7 +907,7 @@  static void tpm_emulator_vm_state_change(void *opaque, bool running,
 
     trace_tpm_emulator_vm_state_change(running, state);
 
-    if (!running || state != RUN_STATE_RUNNING || !tpm_emu->relock_storage) {
+    if (!running || !tpm_emu->relock_storage) {
         return;
     }
 
diff --git a/gdbstub/softmmu.c b/gdbstub/softmmu.c
index f509b72..a43e832 100644
--- a/gdbstub/softmmu.c
+++ b/gdbstub/softmmu.c
@@ -565,7 +565,7 @@  int gdb_continue_partial(char *newstates)
             }
         }
 
-        if (vm_prepare_start(step_requested)) {
+        if (vm_prepare_start(step_requested, RUN_STATE_RUNNING)) {
             return 0;
         }
 
diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c
index c930c60..e436f5c 100644
--- a/hw/usb/hcd-ehci.c
+++ b/hw/usb/hcd-ehci.c
@@ -2451,7 +2451,7 @@  static void usb_ehci_vm_state_change(void *opaque, bool running, RunState state)
      * USB-devices which have async handled packages have a packet in the
      * ep queue to match the completion with.
      */
-    if (state == RUN_STATE_RUNNING) {
+    if (running) {
         ehci_advance_async_state(ehci);
     }
 
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index 39fbaaa..1ec5909 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -1403,7 +1403,7 @@  static void usbredir_vm_state_change(void *priv, bool running, RunState state)
 {
     USBRedirDevice *dev = priv;
 
-    if (state == RUN_STATE_RUNNING && dev->parser != NULL) {
+    if (running && dev->parser != NULL) {
         usbredirparser_do_write(dev->parser); /* Flush any pending writes */
     }
 }
diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
index 565dc39..47e6cb1 100644
--- a/hw/xen/xen-hvm-common.c
+++ b/hw/xen/xen-hvm-common.c
@@ -623,7 +623,7 @@  void xen_hvm_change_state_handler(void *opaque, bool running,
 
     xen_set_ioreq_server_state(xen_domid,
                                state->ioservid,
-                               (rstate == RUN_STATE_RUNNING));
+                               running);
 }
 
 void xen_exit_notifier(Notifier *n, void *data)
diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h
index 7beb29c..7d889ab 100644
--- a/include/sysemu/runstate.h
+++ b/include/sysemu/runstate.h
@@ -39,8 +39,9 @@  void vm_start(void);
  * vm_prepare_start: Prepare for starting/resuming the VM
  *
  * @step_pending: whether any of the CPUs is about to be single-stepped by gdb
+ * @state: the vm state to setup
  */
-int vm_prepare_start(bool step_pending);
+int vm_prepare_start(bool step_pending, RunState state);
 int vm_stop(RunState state);
 int vm_stop_force_state(RunState state);
 int vm_shutdown(void);
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
index fed20ff..0a082d3 100644
--- a/softmmu/cpus.c
+++ b/softmmu/cpus.c
@@ -681,7 +681,7 @@  int vm_stop(RunState state)
  * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
  * running or in case of an error condition), 0 otherwise.
  */
-int vm_prepare_start(bool step_pending)
+int vm_prepare_start(bool step_pending, RunState state)
 {
     RunState requested;
 
@@ -713,14 +713,14 @@  int vm_prepare_start(bool step_pending)
     qapi_event_send_resume();
 
     cpu_enable_ticks();
-    runstate_set(RUN_STATE_RUNNING);
-    vm_state_notify(1, RUN_STATE_RUNNING);
+    runstate_set(state);
+    vm_state_notify(1, state);
     return 0;
 }
 
 void vm_start(void)
 {
-    if (!vm_prepare_start(false)) {
+    if (!vm_prepare_start(false, RUN_STATE_RUNNING)) {
         resume_all_vcpus();
     }
 }
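
For reviewers, a hedged sketch of the handler-side convention this patch relies on:
handlers registered with qemu_add_vm_change_state_handler() key off the running flag
rather than comparing state against RUN_STATE_RUNNING, so a start into
RUN_STATE_SUSPENDED takes the same handler path as a start into RUN_STATE_RUNNING.
The MyDevice type and the resume/pause helpers below are hypothetical, not code from
this series.

    #include "sysemu/runstate.h"

    typedef struct MyDevice MyDevice;            /* hypothetical device state */
    static void my_device_resume(MyDevice *s);   /* hypothetical: restart I/O, timers */
    static void my_device_pause(MyDevice *s);    /* hypothetical: quiesce the device */

    static void my_device_vm_state_change(void *opaque, bool running, RunState state)
    {
        MyDevice *s = opaque;

        if (running) {
            my_device_resume(s);
        } else {
            my_device_pause(s);
        }
    }

    /*
     * Registered from the device's realize function, e.g.:
     *     qemu_add_vm_change_state_handler(my_device_vm_state_change, s);
     */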