
vhost-user: multiqueue support

Message ID 20141206165241.4064.61867.stgit@i3820
State New

Commit Message

Nikolay Nikolaev Dec. 6, 2014, 4:52 p.m. UTC
Vhost-user will implement multiqueue support in a similar way to what
vhost already has - a separate thread for each queue.

To enable multiqueue functionality, a new command line parameter
"queues" is introduced for the vhost-user netdev.

Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
---
 docs/specs/vhost-user.txt |    7 +++++++
 hw/virtio/vhost-user.c    |    6 +++++-
 net/vhost-user.c          |   35 +++++++++++++++++++++++------------
 qapi-schema.json          |    5 ++++-
 qemu-options.hx           |    5 +++--
 5 files changed, 42 insertions(+), 16 deletions(-)

Comments

Eric Blake Dec. 8, 2014, 3:58 p.m. UTC | #1
On 12/06/2014 09:52 AM, Nikolay Nikolaev wrote:
> Vhost-user will implement multiqueue support in a similar way to what
> vhost already has - a separate thread for each queue.
> 
> To enable multiqueue functionality, a new command line parameter
> "queues" is introduced for the vhost-user netdev.
> 
> Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> ---

> @@ -2208,12 +2208,15 @@
>  #
>  # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
>  #
> +# @queues: #optional number of queues to be created for multiqueue vhost-user

Missing a '(since 2.3)' designation.  What is the default when it is
omitted?
Michael S. Tsirkin Jan. 21, 2015, 2:25 p.m. UTC | #2
On Sat, Dec 06, 2014 at 06:52:56PM +0200, Nikolay Nikolaev wrote:
> Vhost-user will implement multiqueue support in a similar way to what
> vhost already has - a separate thread for each queue.
> 
> To enable multiqueue functionality, a new command line parameter
> "queues" is introduced for the vhost-user netdev.
> 
> Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>

Nikolay - plan to repost addressing comments?

> ---
>  docs/specs/vhost-user.txt |    7 +++++++
>  hw/virtio/vhost-user.c    |    6 +++++-
>  net/vhost-user.c          |   35 +++++++++++++++++++++++------------
>  qapi-schema.json          |    5 ++++-
>  qemu-options.hx           |    5 +++--
>  5 files changed, 42 insertions(+), 16 deletions(-)
> 
> diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> index 650bb18..d3857f5 100644
> --- a/docs/specs/vhost-user.txt
> +++ b/docs/specs/vhost-user.txt
> @@ -127,6 +127,13 @@ in the ancillary data:
>  If Master is unable to send the full message or receives a wrong reply it will
>  close the connection. An optional reconnection mechanism can be implemented.
>  
> +Multi queue support
> +---------------------
> +The protocol supports multiple queues by setting all index fields in the sent
> +messages to a value calculated by the following formula:
> +<queue idx> + <vring idx>
> +The <queue idx> is increased by 2.
> +
>  Message types
>  -------------
>  

How is the support negotiated though?
What if I set queues=N with a legacy backend that
does not support multiqueue?

Nikolay Nikolaev Jan. 22, 2015, 8:14 a.m. UTC | #3
On Wed, Jan 21, 2015 at 4:25 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Sat, Dec 06, 2014 at 06:52:56PM +0200, Nikolay Nikolaev wrote:
> > Vhost-user will implement multiqueue support in a similar way to what
> > vhost already has - a separate thread for each queue.
> >
> > To enable multiqueue functionality, a new command line parameter
> > "queues" is introduced for the vhost-user netdev.
> >
> > Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
>
> Nikolay - plan to repost addressing comments?


I can send v2 with the small polishing fixes.

However we currently don't have the resources to fix sending
VHOST_USER_SET_OWNER, VHOST_USER_SET_FEATURES, VHOST_SET_MEM_TABLE
messages per virtq. It works like this as it follows the tap backend
behavior, which AFAIK assigns a different tap device per virtq and
initializes them separately. At least I don't see an obvious and quick
patch around this.
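
Roughly, the current behaviour looks like this (a standalone sketch,
not the actual QEMU code; names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the per-device setup messages that are
     * currently re-sent for every queue pair (in the real protocol:
     * VHOST_USER_SET_OWNER, VHOST_USER_SET_FEATURES,
     * VHOST_USER_SET_MEM_TABLE). */
    static void send_setup_messages(uint32_t queue_pair)
    {
        printf("qp %u: SET_OWNER, SET_FEATURES, SET_MEM_TABLE\n",
               queue_pair);
    }

    int main(void)
    {
        uint32_t queues = 2;

        /* Like the tap backend: one vhost_dev per queue pair, so the
         * "global" messages go out once per pair instead of once per
         * device. Each pair owns vrings 2*qp (RX) and 2*qp + 1 (TX),
         * which is where the "<queue idx> is increased by 2" rule in
         * the spec text comes from. */
        for (uint32_t qp = 0; qp < queues; qp++) {
            send_setup_messages(qp);
        }
        return 0;
    }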


>
>
> > ---
> >  docs/specs/vhost-user.txt |    7 +++++++
> >  hw/virtio/vhost-user.c    |    6 +++++-
> >  net/vhost-user.c          |   35 +++++++++++++++++++++++------------
> >  qapi-schema.json          |    5 ++++-
> >  qemu-options.hx           |    5 +++--
> >  5 files changed, 42 insertions(+), 16 deletions(-)
> >
> > diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> > index 650bb18..d3857f5 100644
> > --- a/docs/specs/vhost-user.txt
> > +++ b/docs/specs/vhost-user.txt
> > @@ -127,6 +127,13 @@ in the ancillary data:
> >  If Master is unable to send the full message or receives a wrong reply it will
> >  close the connection. An optional reconnection mechanism can be implemented.
> >
> > +Multi queue support
> > +---------------------
> > +The protocol supports multiple queues by setting all index fields in the sent
> > +messages to a value calculated by the following formula:
> > +<queue idx> + <vring idx>
> > +The <queue idx> is increased by 2.
> > +
> >  Message types
> >  -------------
> >
>
> How is the support negotiated though?
Not negotiated. Will a version bump be enough?
> What if I set queues=N with a legacy backend that
> does not support multiqueue?
A device that does not support MQ is not supposed to advertise
VIRTIO_NET_F_MQ, and thus won't utilise more than 1 queue. Still,
the initialisation of the multiple virtqueues will happen.
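
A legacy backend would just keep that bit masked in the features it
reports, something like this (sketch only; VIRTIO_NET_F_MQ is bit 22
per the virtio spec):

    #include <stdint.h>

    #define VIRTIO_NET_F_MQ 22   /* virtio-net multiqueue feature bit */

    /* Sketch: a backend without MQ support strips the feature bit from
     * what it advertises, so the guest never enables more than one
     * queue pair even if QEMU created several queues. */
    uint64_t advertised_features(uint64_t supported)
    {
        return supported & ~(1ULL << VIRTIO_NET_F_MQ);
    }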

regards,
Nikolay Nikolaev


Patch

diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
index 650bb18..d3857f5 100644
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -127,6 +127,13 @@  in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
+Multi queue support
+---------------------
+The protocol supports multiple queues by setting all index fields in the sent
+messages to a value calculated by the following formula:
+<queue idx> + <vring idx>
+The <queue idx> is increased by 2.
+
 Message types
 -------------
 
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index aefe0bb..83ebcaa 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -253,17 +253,20 @@  static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_NUM:
     case VHOST_SET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         break;
 
     case VHOST_GET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         need_reply = 1;
         break;
 
     case VHOST_SET_VRING_ADDR:
         memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
+        msg.addr.index += dev->vq_index;
         msg.size = sizeof(m.addr);
         break;
 
@@ -271,7 +274,7 @@  static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_CALL:
     case VHOST_SET_VRING_ERR:
         file = arg;
-        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
+        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
         msg.size = sizeof(m.u64);
         if (ioeventfd_enabled() && file->fd > 0) {
             fds[fd_num++] = file->fd;
@@ -313,6 +316,7 @@  static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
                 error_report("Received bad msg size.\n");
                 return -1;
             }
+            msg.state.index -= dev->vq_index;
             memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
             break;
         default:
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 24e050c..1ea2f98 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -134,25 +134,27 @@  static void net_vhost_user_event(void *opaque, int event)
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
                                const char *name, CharDriverState *chr,
-                               bool vhostforce)
+                               bool vhostforce, uint32_t queues)
 {
     NetClientState *nc;
     VhostUserState *s;
+    int i;
 
-    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    for (i = 0; i < queues; i++) {
+        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
 
-    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
-             chr->label);
+        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
+                 i, chr->label);
 
-    s = DO_UPCAST(VhostUserState, nc, nc);
+        s = DO_UPCAST(VhostUserState, nc, nc);
 
-    /* We don't provide a receive callback */
-    s->nc.receive_disabled = 1;
-    s->chr = chr;
-    s->vhostforce = vhostforce;
-
-    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+        /* We don't provide a receive callback */
+        s->nc.receive_disabled = 1;
+        s->chr = chr;
+        s->vhostforce = vhostforce;
 
+        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+    }
     return 0;
 }
 
@@ -228,6 +230,7 @@  static int net_vhost_check_net(QemuOpts *opts, void *opaque)
 int net_init_vhost_user(const NetClientOptions *opts, const char *name,
                         NetClientState *peer)
 {
+    uint32_t queues;
     const NetdevVhostUserOptions *vhost_user_opts;
     CharDriverState *chr;
     bool vhostforce;
@@ -254,5 +257,13 @@  int net_init_vhost_user(const NetClientOptions *opts, const char *name,
         vhostforce = false;
     }
 
-    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce);
+    /* number of queues for multiqueue */
+    if (vhost_user_opts->has_queues) {
+        queues = vhost_user_opts->queues;
+    } else {
+        queues = 1;
+    }
+
+    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce,
+                               queues);
 }
diff --git a/qapi-schema.json b/qapi-schema.json
index 9ffdcf8..aa3bb6f 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -2208,12 +2208,15 @@ 
 #
 # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
 #
+# @queues: #optional number of queues to be created for multiqueue vhost-user
+#
 # Since 2.1
 ##
 { 'type': 'NetdevVhostUserOptions',
   'data': {
     'chardev':        'str',
-    '*vhostforce':    'bool' } }
+    '*vhostforce':    'bool',
+    '*queues':        'uint32' } }
 
 ##
 # @NetClientOptions
diff --git a/qemu-options.hx b/qemu-options.hx
index 64af16d..23f010f 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1893,13 +1893,14 @@  The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev.  @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.
 
-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
 
 Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
 be a unix domain socket backed one. The vhost-user uses a specifically defined
 protocol to pass vhost ioctl replacement messages to an application on the other
 end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}.
+@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
+be created for multiqueue vhost-user.
 
 Example:
 @example