
[v2,1/2] vdpa: rename vhost_vdpa_net_cvq_add()

Message ID fbaa1630a3cf8d08c59c2e273d53f055da8292b3.1683371965.git.yin31149@gmail.com
State New
Series Send all the SVQ control commands in parallel

Commit Message

Hawkins Jiawei May 6, 2023, 2:06 p.m. UTC
We want to introduce a new version of vhost_vdpa_net_cvq_add() that
does not poll immediately after forwarding custom buffers
to the device, so that QEMU can send all the SVQ control commands
in parallel instead of serially.

Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
 net/vhost-vdpa.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
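
In other words, the series is working toward a split along these lines
(a rough sketch only, based on the SVQ helpers visible in this patch;
the enqueue-only body and the wrapper below are assumptions about the
eventual shape, not code from this series):

/*
 * Sketch: an enqueue-only variant.  It exposes the buffers to the
 * device and kicks it via vhost_svq_add(), but returns without
 * polling, so a caller may queue several control commands before
 * collecting their results.
 */
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = in_len,
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    /* Returns 0 on success, a negative errno (e.g. -ENOSPC) on failure */
    return vhost_svq_add(svq, &out, 1, &in, 1, NULL);
}

/*
 * Sketch: the renamed polling variant from this patch then reduces to
 * "add, then wait for the used buffer".
 */
static ssize_t vhost_vdpa_net_cvq_add_and_wait(VhostVDPAState *s,
                                               size_t out_len, size_t in_len)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    ssize_t r = vhost_vdpa_net_cvq_add(s, out_len, in_len);

    if (unlikely(r < 0)) {
        return r;
    }
    /* Blocks until the device marks the buffer as used */
    return vhost_svq_poll(svq);
}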

Comments

Jason Wang May 17, 2023, 4:12 a.m. UTC | #1
On Sat, May 6, 2023 at 10:07 PM Hawkins Jiawei <yin31149@gmail.com> wrote:
>
> We want to introduce a new version of vhost_vdpa_net_cvq_add() that
> does not poll immediately after forwarding custom buffers
> to the device, so that QEMU can send all the SVQ control commands
> in parallel instead of serially.
>
> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
> ---
>  net/vhost-vdpa.c | 15 +++++++++++----
>  1 file changed, 11 insertions(+), 4 deletions(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 99904a0da7..10804c7200 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -590,8 +590,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
>      vhost_vdpa_net_client_stop(nc);
>  }
>
> -static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> -                                      size_t in_len)
> +/**
> + * vhost_vdpa_net_cvq_add_and_wait() adds SVQ control commands to SVQ,
> + * kicks the device and polls the device used buffers.
> + *
> + * Return the length written by the device.
> + */
> +static ssize_t vhost_vdpa_net_cvq_add_and_wait(VhostVDPAState *s,

Nit: would it be better to use "poll" or "sync" rather than "wait"?

Other than this:

Acked-by: Jason Wang <jasowang@redhat.com>

Thanks

> +                                    size_t out_len, size_t in_len)
>  {
>      /* Buffers for the device */
>      const struct iovec out = {
> @@ -636,7 +642,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
>      memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
>      memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
>
> -    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
> +    return vhost_vdpa_net_cvq_add_and_wait(s, sizeof(ctrl) + data_size,
>                                    sizeof(virtio_net_ctrl_ack));
>  }
>
> @@ -753,7 +759,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>          dev_written = sizeof(status);
>          *s->status = VIRTIO_NET_OK;
>      } else {
> -        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
> +        dev_written = vhost_vdpa_net_cvq_add_and_wait(s, out.iov_len,
> +                                                      sizeof(status));
>          if (unlikely(dev_written < 0)) {
>              goto out;
>          }
> --
> 2.25.1
>
Hawkins Jiawei May 17, 2023, 3:11 p.m. UTC | #2
Sorry for forgetting to add the cc list when replying to this email.
I will resend it with the cc list included.

On Wed, 17 May 2023 at 12:12, Jason Wang <jasowang@redhat.com> wrote:
>
> On Sat, May 6, 2023 at 10:07 PM Hawkins Jiawei <yin31149@gmail.com> wrote:
> >
> > We want to introduce a new version of vhost_vdpa_net_cvq_add() that
> > does not poll immediately after forwarding custom buffers
> > to the device, so that QEMU can send all the SVQ control commands
> > in parallel instead of serially.
> >
> > Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
> > ---
> >  net/vhost-vdpa.c | 15 +++++++++++----
> >  1 file changed, 11 insertions(+), 4 deletions(-)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index 99904a0da7..10804c7200 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -590,8 +590,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
> >      vhost_vdpa_net_client_stop(nc);
> >  }
> >
> > -static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> > -                                      size_t in_len)
> > +/**
> > + * vhost_vdpa_net_cvq_add_and_wait() adds SVQ control commands to SVQ,
> > + * kicks the device and polls the device used buffers.
> > + *
> > + * Return the length written by the device.
> > + */
> > +static ssize_t vhost_vdpa_net_cvq_add_and_wait(VhostVDPAState *s,
>
> Nit: would it be better to use "poll" or "sync" rather than "wait"?
>
> Other than this:
>
> Acked-by: Jason Wang <jasowang@redhat.com>

Hi Jason,

Thanks for your suggestion. I prefer "poll", which makes it clearer
that this function polls immediately, in contrast to the new version
of vhost_vdpa_net_cvq_add().

I will rename it accordingly in the v2 patch and carry the Acked-by tag.
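
Just to make the naming concrete, the v2 signature would look
something like this (the exact spelling is hypothetical, shown here
for illustration only):

    /* v2 (hypothetical name): the immediate poll is now explicit */
    static ssize_t vhost_vdpa_net_cvq_add_and_poll(VhostVDPAState *s,
                                                   size_t out_len,
                                                   size_t in_len);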

Thanks!

>
> Thanks
>
> > +                                    size_t out_len, size_t in_len)
> >  {
> >      /* Buffers for the device */
> >      const struct iovec out = {
> > @@ -636,7 +642,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
> >      memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
> >      memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
> >
> > -    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
> > +    return vhost_vdpa_net_cvq_add_and_wait(s, sizeof(ctrl) + data_size,
> >                                    sizeof(virtio_net_ctrl_ack));
> >  }
> >
> > @@ -753,7 +759,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
> >          dev_written = sizeof(status);
> >          *s->status = VIRTIO_NET_OK;
> >      } else {
> > -        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
> > +        dev_written = vhost_vdpa_net_cvq_add_and_wait(s, out.iov_len,
> > +                                                      sizeof(status));
> >          if (unlikely(dev_written < 0)) {
> >              goto out;
> >          }
> > --
> > 2.25.1
> >
>

Patch

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 99904a0da7..10804c7200 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -590,8 +590,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
     vhost_vdpa_net_client_stop(nc);
 }
 
-static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
-                                      size_t in_len)
+/**
+ * vhost_vdpa_net_cvq_add_and_wait() adds SVQ control commands to SVQ,
+ * kicks the device and polls the device used buffers.
+ *
+ * Return the length written by the device.
+ */
+static ssize_t vhost_vdpa_net_cvq_add_and_wait(VhostVDPAState *s,
+                                    size_t out_len, size_t in_len)
 {
     /* Buffers for the device */
     const struct iovec out = {
@@ -636,7 +642,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
     memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
     memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
 
-    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
+    return vhost_vdpa_net_cvq_add_and_wait(s, sizeof(ctrl) + data_size,
                                   sizeof(virtio_net_ctrl_ack));
 }
 
@@ -753,7 +759,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
         dev_written = sizeof(status);
         *s->status = VIRTIO_NET_OK;
     } else {
-        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
+        dev_written = vhost_vdpa_net_cvq_add_and_wait(s, out.iov_len,
+                                                      sizeof(status));
         if (unlikely(dev_written < 0)) {
             goto out;
         }