
[v3,1/2] vhost: Change the sequence of device start

Message ID 20221017064452.1226514-2-yajunw@nvidia.com
State New
Series vhost-user: Support vhost_dev_start

Commit Message

Yajun Wu Oct. 17, 2022, 6:44 a.m. UTC
This patch is part of adding vhost-user vhost_dev_start support. The
motivation is to improve backend configuration speed and reduce live
migration VM downtime.

Move the device start routines to after all the necessary device and
VQ configuration has finished, further aligning with the virtio
specification's "device initialization sequence".

The following patch will add vhost-user vhost_dev_start support.

Signed-off-by: Yajun Wu <yajunw@nvidia.com>
Acked-by: Parav Pandit <parav@nvidia.com>

---
 hw/block/vhost-user-blk.c | 18 +++++++++++-------
 hw/net/vhost_net.c        | 12 ++++++------
 2 files changed, 17 insertions(+), 13 deletions(-)
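
The reordering described above amounts to: finish notifier and virtqueue
configuration first, and only then call vhost_dev_start(). Below is a
minimal, self-contained C sketch of that ordering; the stub functions are
hypothetical stand-ins, not QEMU APIs.

#include <stdio.h>

/* Stubs standing in for the real steps; these are not QEMU APIs. */
static int setup_notifiers(void)        { puts("host/guest notifiers ready"); return 0; }
static void teardown_notifiers(void)    { puts("notifiers torn down"); }
static void unmask_virtqueues(int nvqs) { printf("unmasked %d VQs\n", nvqs); }
static int device_start(void)           { puts("device started"); return 0; }

/* Start order after the patch: the device starts only once every
 * notifier and virtqueue is configured, matching the virtio spec's
 * "device initialization sequence". */
static int start(int nvqs)
{
    if (setup_notifiers() < 0) {
        return -1;
    }
    unmask_virtqueues(nvqs);
    if (device_start() < 0) {   /* previously this ran before unmasking */
        teardown_notifiers();
        return -1;
    }
    return 0;
}

int main(void)
{
    return start(2) == 0 ? 0 : 1;
}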

Comments

Michael S. Tsirkin Nov. 5, 2022, 4:43 p.m. UTC | #1
On Mon, Oct 17, 2022 at 02:44:51PM +0800, Yajun Wu wrote:
> This patch is part of adding vhost-user vhost_dev_start support. The
> motivation is to improve backend configuration speed and reduce live
> migration VM downtime.
> 
> Move the device start routines to after all the necessary device and
> VQ configuration has finished, further aligning with the virtio
> specification's "device initialization sequence".
> 
> The following patch will add vhost-user vhost_dev_start support.
> 
> Signed-off-by: Yajun Wu <yajunw@nvidia.com>
> Acked-by: Parav Pandit <parav@nvidia.com>
> 
> ---
>  hw/block/vhost-user-blk.c | 18 +++++++++++-------
>  hw/net/vhost_net.c        | 12 ++++++------
>  2 files changed, 17 insertions(+), 13 deletions(-)
> 
> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
> index 84902dde17..f4deb8cd5d 100644
> --- a/hw/block/vhost-user-blk.c
> +++ b/hw/block/vhost-user-blk.c
> @@ -164,13 +164,6 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
>          goto err_guest_notifiers;
>      }
>  
> -    ret = vhost_dev_start(&s->dev, vdev);
> -    if (ret < 0) {
> -        error_setg_errno(errp, -ret, "Error starting vhost");
> -        goto err_guest_notifiers;
> -    }
> -    s->started_vu = true;
> -
>      /* guest_notifier_mask/pending not used yet, so just unmask
>       * everything here. virtio-pci will do the right thing by
>       * enabling/disabling irqfd.
> @@ -179,9 +172,20 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
>          vhost_virtqueue_mask(&s->dev, vdev, i, false);
>      }
>  
> +    s->dev.vq_index_end = s->dev.nvqs;
> +    ret = vhost_dev_start(&s->dev, vdev);
> +    if (ret < 0) {
> +        error_setg_errno(errp, -ret, "Error starting vhost");
> +        goto err_guest_notifiers;
> +    }
> +    s->started_vu = true;
> +
>      return ret;
>  
>  err_guest_notifiers:
> +    for (i = 0; i < s->dev.nvqs; i++) {
> +        vhost_virtqueue_mask(&s->dev, vdev, i, true);
> +    }
>      k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
>  err_host_notifiers:
>      vhost_dev_disable_notifiers(&s->dev, vdev);
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index d28f8b974b..d6924f5e57 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -387,21 +387,21 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
>          } else {
>              peer = qemu_get_peer(ncs, n->max_queue_pairs);
>          }
> -        r = vhost_net_start_one(get_vhost_net(peer), dev);
> -
> -        if (r < 0) {
> -            goto err_start;
> -        }
>  
>          if (peer->vring_enable) {
>              /* restore vring enable state */
>              r = vhost_set_vring_enable(peer, peer->vring_enable);
>  
>              if (r < 0) {
> -                vhost_net_stop_one(get_vhost_net(peer), dev);
>                  goto err_start;
>              }
>          }
> +
> +        r = vhost_net_start_one(get_vhost_net(peer), dev);
> +        if (r < 0) {
> +            vhost_net_stop_one(get_vhost_net(peer), dev);

Error handling broken here. Corrupts memory if triggered.
I fixed it up when applying just because of the freeze
but I won't do this generally.

> +            goto err_start;
> +        }
>      }
>  
>      return 0;
> -- 
> 2.27.0
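
To unpack the comment above: the likely issue is that vhost_net_start_one()
already unwinds its own partial setup when it fails, so the added
vhost_net_stop_one() call on the failed peer tears the same resources down
a second time, while the err_start path only needs to roll back the peers
that started successfully. A plausible fix, consistent with Michael's
fixup, is simply to drop that extra call. Below is a minimal,
self-contained C model of the double-cleanup hazard; every name in it is
hypothetical, not a QEMU API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical model of the hazard; none of these names are QEMU APIs. */
struct peer {
    char *state;
};

/* Fails after allocating, and cleans up its own allocation on the way
 * out, but (like many error paths) leaves the stale pointer behind. */
static int start_one(struct peer *p)
{
    p->state = malloc(16);
    if (p->state == NULL) {
        return -1;
    }
    /* simulate a late failure: the internal error path frees the state */
    free(p->state);
    return -1;
}

static void stop_one(struct peer *p)
{
    free(p->state);  /* frees again if start_one() already cleaned up */
    p->state = NULL;
}

int main(void)
{
    struct peer p = { NULL };

    if (start_one(&p) < 0) {
        /* The v3 hunk effectively did: stop_one(&p);
         * That would be a double free here. The caller must only roll
         * back peers whose start actually succeeded. */
        fprintf(stderr, "start failed; skip the extra stop\n");
        return 1;
    }
    stop_one(&p);
    return 0;
}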

Patch

diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index 84902dde17..f4deb8cd5d 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -164,13 +164,6 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
         goto err_guest_notifiers;
     }
 
-    ret = vhost_dev_start(&s->dev, vdev);
-    if (ret < 0) {
-        error_setg_errno(errp, -ret, "Error starting vhost");
-        goto err_guest_notifiers;
-    }
-    s->started_vu = true;
-
     /* guest_notifier_mask/pending not used yet, so just unmask
      * everything here. virtio-pci will do the right thing by
      * enabling/disabling irqfd.
@@ -179,9 +172,20 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
         vhost_virtqueue_mask(&s->dev, vdev, i, false);
     }
 
+    s->dev.vq_index_end = s->dev.nvqs;
+    ret = vhost_dev_start(&s->dev, vdev);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret, "Error starting vhost");
+        goto err_guest_notifiers;
+    }
+    s->started_vu = true;
+
     return ret;
 
 err_guest_notifiers:
+    for (i = 0; i < s->dev.nvqs; i++) {
+        vhost_virtqueue_mask(&s->dev, vdev, i, true);
+    }
     k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
 err_host_notifiers:
     vhost_dev_disable_notifiers(&s->dev, vdev);
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index d28f8b974b..d6924f5e57 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -387,21 +387,21 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         } else {
             peer = qemu_get_peer(ncs, n->max_queue_pairs);
         }
-        r = vhost_net_start_one(get_vhost_net(peer), dev);
-
-        if (r < 0) {
-            goto err_start;
-        }
 
         if (peer->vring_enable) {
             /* restore vring enable state */
             r = vhost_set_vring_enable(peer, peer->vring_enable);
 
             if (r < 0) {
-                vhost_net_stop_one(get_vhost_net(peer), dev);
                 goto err_start;
             }
         }
+
+        r = vhost_net_start_one(get_vhost_net(peer), dev);
+        if (r < 0) {
+            vhost_net_stop_one(get_vhost_net(peer), dev);
+            goto err_start;
+        }
     }
 
     return 0;