@@ -298,9 +298,108 @@ static struct vhost_dev *vuf_get_vhost(VirtIODevice *vdev)
     return &fs->vhost_dev;
 }
 
+/**
+ * Fetch the internal state from virtiofsd and save it to `f`.
+ */
+static int vuf_save_state(QEMUFile *f, void *pv, size_t size,
+                          const VMStateField *field, JSONWriter *vmdesc)
+{
+    VirtIODevice *vdev = pv;
+    VHostUserFS *fs = VHOST_USER_FS(vdev);
+    Error *local_error = NULL;
+    int ret;
+
+    ret = vhost_save_backend_state(&fs->vhost_dev, f, &local_error);
+    if (ret < 0) {
+        error_reportf_err(local_error,
+                          "Error saving back-end state of %s device %s "
+                          "(tag: \"%s\"): ",
+                          vdev->name, vdev->parent_obj.canonical_path,
+                          fs->conf.tag ?: "<none>");
+        return ret;
+    }
+
+    return 0;
+}
+
+/**
+ * Load virtiofsd's internal state from `f` and send it over to virtiofsd.
+ */
+static int vuf_load_state(QEMUFile *f, void *pv, size_t size,
+                          const VMStateField *field)
+{
+    VirtIODevice *vdev = pv;
+    VHostUserFS *fs = VHOST_USER_FS(vdev);
+    Error *local_error = NULL;
+    int ret;
+
+    ret = vhost_load_backend_state(&fs->vhost_dev, f, &local_error);
+    if (ret < 0) {
+        error_reportf_err(local_error,
+                          "Error loading back-end state of %s device %s "
+                          "(tag: \"%s\"): ",
+                          vdev->name, vdev->parent_obj.canonical_path,
+                          fs->conf.tag ?: "<none>");
+        return ret;
+    }
+
+    return 0;
+}
+
+static bool vuf_is_internal_migration(void *opaque)
+{
+    /* TODO: Return false when an external migration is requested */
+    return true;
+}
+
+static int vuf_check_migration_support(void *opaque)
+{
+    VirtIODevice *vdev = opaque;
+    VHostUserFS *fs = VHOST_USER_FS(vdev);
+
+    if (!vhost_supports_migratory_state(&fs->vhost_dev)) {
+        error_report("Back-end of %s device %s (tag: \"%s\") does not support "
+                     "migration through qemu",
+                     vdev->name, vdev->parent_obj.canonical_path,
+                     fs->conf.tag ?: "<none>");
+        return -ENOTSUP;
+    }
+
+    return 0;
+}
+
+static const VMStateDescription vuf_backend_vmstate;
+
 static const VMStateDescription vuf_vmstate = {
     .name = "vhost-user-fs",
-    .unmigratable = 1,
+    .version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_VIRTIO_DEVICE,
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription * []) {
+        &vuf_backend_vmstate,
+        NULL,
+    }
+};
+
+static const VMStateDescription vuf_backend_vmstate = {
+    .name = "vhost-user-fs-backend",
+    .version_id = 0,
+    .needed = vuf_is_internal_migration,
+    .pre_load = vuf_check_migration_support,
+    .pre_save = vuf_check_migration_support,
+    .fields = (VMStateField[]) {
+        {
+            .name = "back-end",
+            .info = &(const VMStateInfo) {
+                .name = "virtio-fs back-end state",
+                .get = vuf_load_state,
+                .put = vuf_save_state,
+            },
+        },
+        VMSTATE_END_OF_LIST()
+    },
 };
 
 static Property vuf_properties[] = {
A virtio-fs device's VM state consists of:
- the virtio device (vring) state (VMSTATE_VIRTIO_DEVICE)
- the back-end's (virtiofsd's) internal state

We get/set the latter via the new vhost operations to transfer migratory
state.  It is its own dedicated subsection, so that for external
migration, it can be disabled.

Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 hw/virtio/vhost-user-fs.c | 101 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 100 insertions(+), 1 deletion(-)
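
For reviewers less familiar with the VMState machinery, below is a minimal,
self-contained sketch of how the pieces above are meant to interact on the
save side.  It is a simplified model only, not QEMU's actual vmstate code;
all demo_* names are hypothetical stand-ins.  The idea it illustrates: the
migration core emits the "vhost-user-fs-backend" subsection only if its
.needed callback (vuf_is_internal_migration) returns true, runs .pre_save
(vuf_check_migration_support), and then serializes the "back-end" field by
calling .put (vuf_save_state), which forwards to vhost_save_backend_state().
The load side mirrors this through .get (vuf_load_state) and
vhost_load_backend_state().

/*
 * Simplified, self-contained model of the save path -- not QEMU's actual
 * vmstate code.  All demo_* names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct DemoSubsection {
    const char *name;
    bool (*needed)(void *opaque);    /* cf. vuf_is_internal_migration */
    int (*pre_save)(void *opaque);   /* cf. vuf_check_migration_support */
    int (*put)(void *opaque);        /* cf. vuf_save_state */
} DemoSubsection;

/* Stand-in for vhost_save_backend_state(): pull the back-end's state. */
static int demo_vhost_save_backend_state(void *dev)
{
    printf("transferring back-end-internal state into the stream\n");
    return 0;
}

static bool demo_is_internal_migration(void *opaque)
{
    return true;  /* would be false for an external migration */
}

static int demo_check_migration_support(void *opaque)
{
    return 0;     /* back-end advertises migratory-state support */
}

static int demo_save_state(void *opaque)
{
    return demo_vhost_save_backend_state(opaque);
}

/* Roughly what the migration core does with one subsection on save. */
static int demo_save_subsection(const DemoSubsection *sub, void *opaque)
{
    if (sub->needed && !sub->needed(opaque)) {
        return 0;  /* subsection skipped entirely (external migration) */
    }
    if (sub->pre_save) {
        int ret = sub->pre_save(opaque);
        if (ret < 0) {
            return ret;  /* e.g. back-end cannot migrate through qemu */
        }
    }
    return sub->put(opaque);  /* serializes the "back-end" field */
}

int main(void)
{
    DemoSubsection sub = {
        .name = "vhost-user-fs-backend",
        .needed = demo_is_internal_migration,
        .pre_save = demo_check_migration_support,
        .put = demo_save_state,
    };

    return demo_save_subsection(&sub, NULL) < 0 ? 1 : 0;
}

Compiling and running this prints the single line from the stand-in back-end
transfer; flipping demo_is_internal_migration() to return false skips the
subsection entirely, which is the behaviour the TODO in
vuf_is_internal_migration() is reserved for.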