@@ -304,6 +304,17 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
vhost_virtqueue_mask(&net->dev, dev, idx, mask);
}
+int vhost_net_link_status(VHostNetState *net)
+{
+    const VhostOps *ops = net->dev.vhost_ops;
+
+    if (ops->vhost_status) {
+        return ops->vhost_status(&net->dev);
+    }
+
+    return 0;
+}
+
VHostNetState *get_vhost_net(NetClientState *nc)
{
VHostNetState *vhost_net = 0;
@@ -372,6 +383,11 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
{
}
+int vhost_net_link_status(VHostNetState *net)
+{
+ return 0;
+}
+
VHostNetState *get_vhost_net(NetClientState *nc)
{
return 0;
@@ -360,12 +360,12 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
}
if (vhost_user_send_fds(fd, &msg, fds, fd_num) < 0) {
- return -1;
+ goto fail;
}
if (need_reply) {
if (vhost_user_recv(fd, &msg) < 0) {
- return -1;
+ goto fail;
}
if (msg_request != msg.request) {
@@ -398,6 +398,25 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
}
return 0;
+
+fail:
+    /* mark the backend non-operational */
+    error_report("Disconnect detected");
+    dev->vhost_ops->vhost_backend_cleanup(dev);
+    return -1;
+}
+
+static int vhost_user_status(struct vhost_dev *dev)
+{
+    int result = 1;
+
+    if (vhost_user_echo(dev) < 0) {
+        error_report("Disconnect detected");
+        dev->vhost_ops->vhost_backend_cleanup(dev);
+        result = 0;
+    }
+
+    return result;
}
static int vhost_user_init(struct vhost_dev *dev, const char *devpath)
@@ -479,6 +498,7 @@ static int vhost_user_cleanup(struct vhost_dev *dev)
static const VhostOps user_ops = {
.backend_type = VHOST_BACKEND_TYPE_USER,
.vhost_call = vhost_user_call,
+ .vhost_status = vhost_user_status,
.vhost_backend_init = vhost_user_init,
.vhost_backend_cleanup = vhost_user_cleanup
};
@@ -511,6 +531,7 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev)
static const VhostOps kernel_ops = {
.backend_type = VHOST_BACKEND_TYPE_KERNEL,
.vhost_call = vhost_kernel_call,
+ .vhost_status = 0,
.vhost_backend_init = vhost_kernel_init,
.vhost_backend_cleanup = vhost_kernel_cleanup
};
@@ -22,12 +22,14 @@ struct vhost_dev;
typedef int (*vhost_call)(struct vhost_dev *dev, unsigned long int request,
void *arg);
+typedef int (*vhost_status)(struct vhost_dev *dev);
typedef int (*vhost_backend_init)(struct vhost_dev *dev, const char *devpath);
typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
typedef struct VhostOps {
VhostBackendType backend_type;
vhost_call vhost_call;
+ vhost_status vhost_status;
vhost_backend_init vhost_backend_init;
vhost_backend_cleanup vhost_backend_cleanup;
} VhostOps;
@@ -31,5 +31,6 @@ void vhost_net_ack_features(VHostNetState *net, unsigned features);
bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
int idx, bool mask);
+int vhost_net_link_status(VHostNetState *net);
VHostNetState *get_vhost_net(NetClientState *nc);
#endif
@@ -12,13 +12,18 @@
#include "net/vhost_net.h"
#include "net/vhost-user.h"
#include "qemu/error-report.h"
+#include "qemu/timer.h"
typedef struct VhostUserState {
NetClientState nc;
VHostNetState *vhost_net;
char *devpath;
+ int64_t poll_time;
} VhostUserState;
+static QEMUTimer *vhost_user_timer; /* NOTE(review): single file-global timer, overwritten per init — breaks with more than one vhost-user netdev; confirm or move into VhostUserState */
+#define VHOST_USER_DEFAULT_POLL_TIME (1*1000) /* ms */
+
VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
{
VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
@@ -31,6 +36,11 @@ static int vhost_user_running(VhostUserState *s)
return (s->vhost_net) ? 1 : 0;
}
+static int vhost_user_link_status(VhostUserState *s)
+{
+ return (!s->nc.link_down) && vhost_net_link_status(s->vhost_net);
+}
+
static int vhost_user_start(VhostUserState *s)
{
VhostNetOptions options;
@@ -59,6 +69,48 @@ static void vhost_user_stop(VhostUserState *s)
s->vhost_net = 0;
}
+static void vhost_user_timer_handler(void *opaque)
+{
+    VhostUserState *s = opaque;
+    int link_down = 0;
+
+    if (vhost_user_running(s)) {
+        if (!vhost_user_link_status(s)) {
+            link_down = 1;
+        }
+    } else {
+        vhost_user_start(s);
+        if (!vhost_user_running(s)) {
+            link_down = 1;
+        }
+    }
+
+    if (link_down != s->nc.link_down) {
+
+        s->nc.link_down = link_down;
+
+        if (s->nc.peer) {
+            s->nc.peer->link_down = link_down;
+        }
+
+        if (s->nc.info->link_status_changed) {
+            s->nc.info->link_status_changed(&s->nc);
+        }
+
+        if (s->nc.peer && s->nc.peer->info->link_status_changed) {
+            s->nc.peer->info->link_status_changed(s->nc.peer);
+        }
+
+        if (link_down) {
+            vhost_user_stop(s);
+        }
+    }
+
+    /* reschedule probe; NOTE(review): vhost_user_timer is file-global, so this assumes a single vhost-user netdev — confirm */
+    timer_mod(vhost_user_timer,
+              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + s->poll_time);
+}
+
static void vhost_user_cleanup(NetClientState *nc)
{
VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
@@ -74,7 +126,8 @@ static NetClientInfo net_vhost_user_info = {
};
static int net_vhost_user_init(NetClientState *peer, const char *device,
- const char *name, const char *path)
+ const char *name, const char *path,
+ int64_t poll_time)
{
NetClientState *nc;
VhostUserState *s;
@@ -90,9 +143,15 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
s->nc.receive_disabled = 1;
s->devpath = g_strdup(path);
+ s->poll_time = poll_time;
r = vhost_user_start(s);
+ vhost_user_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
+ vhost_user_timer_handler, s);
+ timer_mod(vhost_user_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + s->poll_time);
+
return r;
}
@@ -101,11 +160,18 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
{
const char *path;
const NetdevVhostUserOptions *vhost_user;
+ int64_t poll_time;
assert(opts->kind == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
vhost_user = opts->vhost_user;
path = vhost_user->path;
- return net_vhost_user_init(peer, "vhost_user", name, path);
+ if (vhost_user->has_poll_time) {
+ poll_time = vhost_user->poll_time;
+ } else {
+ poll_time = VHOST_USER_DEFAULT_POLL_TIME;
+ }
+
+ return net_vhost_user_init(peer, "vhost_user", name, path, poll_time);
}
@@ -3036,11 +3036,14 @@
#
# @path: control socket path
#
+# @poll_time: #optional interval, in milliseconds, at which the connection is probed (default: 1000)
+#
# Since 2.0
##
{ 'type': 'NetdevVhostUserOptions',
'data': {
- 'path': 'str' } }
+ 'path': 'str',
+ '*poll_time': 'int' } }
##
@@ -1768,15 +1768,16 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
netdev. @code{-net} and @code{-device} with parameter @option{vlan} create the
required hub automatically.
-@item -netdev vhost-user,path=@var{path}
+@item -netdev vhost-user,path=@var{path}[,poll_time=poll_time]
Connect to a unix domain socket @var{path} on which listens a server that
-implements vhost-user backend.
+implements the vhost-user backend. The connection is probed every @var{poll_time} milliseconds
+to detect disconnects. The default @var{poll_time} is 1000 ms.
Example:
@example
qemu -m 1024 -mem-path /hugetlbfs,prealloc=on,share=on \
- -netdev type=vhost-user,id=net0,path=/path/to/sock \
+ -netdev type=vhost-user,id=net0,path=/path/to/sock,poll_time=2500 \
-device virtio-net-pci,netdev=net0
@end example