[v2,2/2] vfio/migration: Make VFIO migration non-experimental

Message ID 20230628073112.7958-3-avihaih@nvidia.com
State New
Series vfio/migration: Make VFIO migration non-experimental

Commit Message

Avihai Horon June 28, 2023, 7:31 a.m. UTC
The major parts of VFIO migration are supported today in QEMU. This
includes basic VFIO migration, device dirty page tracking and precopy
support.

Thus, at this point in time, it seems appropriate to make VFIO migration
non-experimental: remove the x prefix from enable_migration property,
change it to ON_OFF_AUTO and let the default value be AUTO.

In addition, make the following adjustments:
1. When enable_migration is ON and migration is not supported, fail VFIO
   device realization.
2. When enable_migration is AUTO (i.e., not explicitly enabled), require
   device dirty tracking support. This is because device dirty tracking
   is currently the only method to do dirty page tracking, which is
   essential for migrating in a reasonable downtime. Setting
   enable_migration to ON will not require device dirty tracking.
3. Make migration error and blocker messages more elaborate.
4. Remove error prints in vfio_migration_query_flags().
5. Rename trace_vfio_migration_probe() to
   trace_vfio_migration_realize().

Signed-off-by: Avihai Horon <avihaih@nvidia.com>
---
 include/hw/vfio/vfio-common.h |  6 +--
 hw/vfio/common.c              | 16 ++++++-
 hw/vfio/migration.c           | 79 +++++++++++++++++++++++------------
 hw/vfio/pci.c                 |  4 +-
 hw/vfio/trace-events          |  2 +-
 5 files changed, 73 insertions(+), 34 deletions(-)
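
With this change applied, the property is configured per device on the QEMU
command line. A minimal usage sketch, not part of the patch itself (the PCI
host address below is a placeholder):

  -device vfio-pci,host=0000:65:00.0                        # enable-migration defaults to auto
  -device vfio-pci,host=0000:65:00.0,enable-migration=on    # device realization fails if migration cannot be set up
  -device vfio-pci,host=0000:65:00.0,enable-migration=off   # a migration blocker is added for this device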

Comments

Cédric Le Goater June 28, 2023, 12:54 p.m. UTC | #1
On 6/28/23 09:31, Avihai Horon wrote:
> The major parts of VFIO migration are supported today in QEMU. This
> includes basic VFIO migration, device dirty page tracking and precopy
> support.
> 
> Thus, at this point in time, it seems appropriate to make VFIO migration
> non-experimental: remove the x prefix from enable_migration property,
> change it to ON_OFF_AUTO and let the default value be AUTO.
> 
> In addition, make the following adjustments:
> 1. When enable_migration is ON and migration is not supported, fail VFIO
>     device realization.
> 2. When enable_migration is AUTO (i.e., not explicitly enabled), require
>     device dirty tracking support. This is because device dirty tracking
>     is currently the only method to do dirty page tracking, which is
>     essential for migrating in a reasonable downtime. Setting
>     enable_migration to ON will not require device dirty tracking.
> 3. Make migration error and blocker messages more elaborate.
> 4. Remove error prints in vfio_migration_query_flags().
> 5. Rename trace_vfio_migration_probe() to
>     trace_vfio_migration_realize().
> 
> Signed-off-by: Avihai Horon <avihaih@nvidia.com>


We should rework the return value of most of the routines called by
vfio_migration_realize() and simply use a bool. I think Zhenzhong is
working on it.

Zhenzhong,

When you resend v4 of the "VFIO migration related refactor and bug fix"
series, please rebase on this patch since it should be merged.

Reviewed-by: Cédric Le Goater <clg@redhat.com>

Thanks,

C.
      

> ---
>   include/hw/vfio/vfio-common.h |  6 +--
>   hw/vfio/common.c              | 16 ++++++-
>   hw/vfio/migration.c           | 79 +++++++++++++++++++++++------------
>   hw/vfio/pci.c                 |  4 +-
>   hw/vfio/trace-events          |  2 +-
>   5 files changed, 73 insertions(+), 34 deletions(-)
> 
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index b4c28f318f..0ded0e73e2 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -139,7 +139,7 @@ typedef struct VFIODevice {
>       bool needs_reset;
>       bool no_mmap;
>       bool ram_block_discard_allowed;
> -    bool enable_migration;
> +    OnOffAuto enable_migration;
>       VFIODeviceOps *ops;
>       unsigned int num_irqs;
>       unsigned int num_regions;
> @@ -224,9 +224,9 @@ typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
>   extern VFIOGroupList vfio_group_list;
>   
>   bool vfio_mig_active(void);
> -int vfio_block_multiple_devices_migration(Error **errp);
> +int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp);
>   void vfio_unblock_multiple_devices_migration(void);
> -int vfio_block_giommu_migration(Error **errp);
> +int vfio_block_giommu_migration(VFIODevice *vbasedev, Error **errp);
>   int64_t vfio_mig_bytes_transferred(void);
>   void vfio_reset_bytes_transferred(void);
>   
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 25801de173..8c73f84581 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -381,7 +381,7 @@ static unsigned int vfio_migratable_device_num(void)
>       return device_num;
>   }
>   
> -int vfio_block_multiple_devices_migration(Error **errp)
> +int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
>   {
>       int ret;
>   
> @@ -390,6 +390,12 @@ int vfio_block_multiple_devices_migration(Error **errp)
>           return 0;
>       }
>   
> +    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
> +        error_setg(errp, "Migration is currently not supported with multiple "
> +                         "VFIO devices");
> +        return -EINVAL;
> +    }
> +
>       error_setg(&multiple_devices_migration_blocker,
>                  "Migration is currently not supported with multiple "
>                  "VFIO devices");
> @@ -427,7 +433,7 @@ static bool vfio_viommu_preset(void)
>       return false;
>   }
>   
> -int vfio_block_giommu_migration(Error **errp)
> +int vfio_block_giommu_migration(VFIODevice *vbasedev, Error **errp)
>   {
>       int ret;
>   
> @@ -436,6 +442,12 @@ int vfio_block_giommu_migration(Error **errp)
>           return 0;
>       }
>   
> +    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
> +        error_setg(errp,
> +                   "Migration is currently not supported with vIOMMU enabled");
> +        return -EINVAL;
> +    }
> +
>       error_setg(&giommu_migration_blocker,
>                  "Migration is currently not supported with vIOMMU enabled");
>       ret = migrate_add_blocker(giommu_migration_blocker, errp);
> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
> index 7cf143926c..1db7d52ab2 100644
> --- a/hw/vfio/migration.c
> +++ b/hw/vfio/migration.c
> @@ -724,14 +724,6 @@ static int vfio_migration_query_flags(VFIODevice *vbasedev, uint64_t *mig_flags)
>       feature->argsz = sizeof(buf);
>       feature->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;
>       if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
> -        if (errno == ENOTTY) {
> -            error_report("%s: VFIO migration is not supported in kernel",
> -                         vbasedev->name);
> -        } else {
> -            error_report("%s: Failed to query VFIO migration support, err: %s",
> -                         vbasedev->name, strerror(errno));
> -        }
> -
>           return -errno;
>       }
>   
> @@ -810,6 +802,27 @@ static int vfio_migration_init(VFIODevice *vbasedev)
>       return 0;
>   }
>   
> +static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
> +{
> +    int ret;
> +
> +    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
> +        error_propagate(errp, err);
> +        return -EINVAL;
> +    }
> +
> +    vbasedev->migration_blocker = error_copy(err);
> +    error_free(err);
> +
> +    ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
> +    if (ret < 0) {
> +        error_free(vbasedev->migration_blocker);
> +        vbasedev->migration_blocker = NULL;
> +    }
> +
> +    return ret;
> +}
> +
>   /* ---------------------------------------------------------------------- */
>   
>   int64_t vfio_mig_bytes_transferred(void)
> @@ -824,40 +837,54 @@ void vfio_reset_bytes_transferred(void)
>   
>   int vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
>   {
> -    int ret = -ENOTSUP;
> +    Error *err = NULL;
> +    int ret;
>   
> -    if (!vbasedev->enable_migration) {
> -        goto add_blocker;
> +    if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
> +        error_setg(&err, "%s: Migration is disabled for VFIO device",
> +                   vbasedev->name);
> +        return vfio_block_migration(vbasedev, err, errp);
>       }
>   
>       ret = vfio_migration_init(vbasedev);
>       if (ret) {
> -        goto add_blocker;
> +        if (ret == -ENOTTY) {
> +            error_setg(&err, "%s: VFIO migration is not supported in kernel",
> +                       vbasedev->name);
> +        } else {
> +            error_setg(&err,
> +                       "%s: Migration couldn't be initialized for VFIO device, "
> +                       "err: %d (%s)",
> +                       vbasedev->name, ret, strerror(-ret));
> +        }
> +
> +        return vfio_block_migration(vbasedev, err, errp);
> +    }
> +
> +    if (!vbasedev->dirty_pages_supported) {
> +        if (vbasedev->enable_migration == ON_OFF_AUTO_AUTO) {
> +            error_setg(&err,
> +                       "%s: VFIO device doesn't support device dirty tracking",
> +                       vbasedev->name);
> +            return vfio_block_migration(vbasedev, err, errp);
> +        }
> +
> +        warn_report("%s: VFIO device doesn't support device dirty tracking",
> +                    vbasedev->name);
>       }
>   
> -    ret = vfio_block_multiple_devices_migration(errp);
> +    ret = vfio_block_multiple_devices_migration(vbasedev, errp);
>       if (ret) {
>           return ret;
>       }
>   
> -    ret = vfio_block_giommu_migration(errp);
> +    ret = vfio_block_giommu_migration(vbasedev, errp);
>       if (ret) {
>           return ret;
>       }
>   
> -    trace_vfio_migration_probe(vbasedev->name);
> +    trace_vfio_migration_realize(vbasedev->name);
>       return 0;
> -
> -add_blocker:
> -    error_setg(&vbasedev->migration_blocker,
> -               "VFIO device doesn't support migration");
> -
> -    ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
> -    if (ret < 0) {
> -        error_free(vbasedev->migration_blocker);
> -        vbasedev->migration_blocker = NULL;
> -    }
> -    return ret;
>   }
>   
>   void vfio_migration_exit(VFIODevice *vbasedev)
> diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
> index 73874a94de..48584e3b01 100644
> --- a/hw/vfio/pci.c
> +++ b/hw/vfio/pci.c
> @@ -3347,8 +3347,8 @@ static Property vfio_pci_dev_properties[] = {
>                       VFIO_FEATURE_ENABLE_REQ_BIT, true),
>       DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
>                       VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
> -    DEFINE_PROP_BOOL("x-enable-migration", VFIOPCIDevice,
> -                     vbasedev.enable_migration, false),
> +    DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
> +                            vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
>       DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
>       DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
>                        vbasedev.ram_block_discard_allowed, false),
> diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
> index e328d644d2..ee7509e68e 100644
> --- a/hw/vfio/trace-events
> +++ b/hw/vfio/trace-events
> @@ -155,7 +155,7 @@ vfio_load_cleanup(const char *name) " (%s)"
>   vfio_load_device_config_state(const char *name) " (%s)"
>   vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
>   vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size 0x%"PRIx64" ret %d"
> -vfio_migration_probe(const char *name) " (%s)"
> +vfio_migration_realize(const char *name) " (%s)"
>   vfio_migration_set_state(const char *name, const char *state) " (%s) state %s"
>   vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s"
>   vfio_save_block(const char *name, int data_size) " (%s) data_size %d"
Joao Martins June 28, 2023, 2:51 p.m. UTC | #2
On 28/06/2023 13:54, Cédric Le Goater wrote:
> On 6/28/23 09:31, Avihai Horon wrote:
>> The major parts of VFIO migration are supported today in QEMU. This
>> includes basic VFIO migration, device dirty page tracking and precopy
>> support.
>>
>> Thus, at this point in time, it seems appropriate to make VFIO migration
>> non-experimental: remove the x prefix from enable_migration property,
>> change it to ON_OFF_AUTO and let the default value be AUTO.
>>
>> In addition, make the following adjustments:
>> 1. When enable_migration is ON and migration is not supported, fail VFIO
>>     device realization.
>> 2. When enable_migration is AUTO (i.e., not explicitly enabled), require
>>     device dirty tracking support. This is because device dirty tracking
>>     is currently the only method to do dirty page tracking, which is
>>     essential for migrating in a reasonable downtime. Setting
>>     enable_migration to ON will not require device dirty tracking.
>> 3. Make migration error and blocker messages more elaborate.
>> 4. Remove error prints in vfio_migration_query_flags().
>> 5. Rename trace_vfio_migration_probe() to
>>     trace_vfio_migration_realize().
>>
>> Signed-off-by: Avihai Horon <avihaih@nvidia.com>
> 
> 
> We should rework the return value of most of the routines called by
> vfio_migration_realize() and simply use a bool. I think Zhenzhong is
> working on it.
> 
> Zhenzhong,
> 
> When you resend v4 of the "VFIO migration related refactor and bug fix"
> series, please rebase on this patch since it should be merged.
> 

This, and the switchover-ack series from Avihai that precedes it.

Perhaps it might be easier to point to your tree:branch where you are queueing
all the patches?
Joao Martins June 28, 2023, 3:17 p.m. UTC | #3
On 28/06/2023 08:31, Avihai Horon wrote:
> The major parts of VFIO migration are supported today in QEMU. This
> includes basic VFIO migration, device dirty page tracking and precopy
> support.
> 
> Thus, at this point in time, it seems appropriate to make VFIO migration
> non-experimental: remove the x prefix from enable_migration property,
> change it to ON_OFF_AUTO and let the default value be AUTO.
> 
> In addition, make the following adjustments:
> 1. When enable_migration is ON and migration is not supported, fail VFIO
>    device realization.
> 2. When enable_migration is AUTO (i.e., not explicitly enabled), require
>    device dirty tracking support. This is because device dirty tracking
>    is currently the only method to do dirty page tracking, which is
>    essential for migrating in a reasonable downtime. Setting
>    enable_migration to ON will not require device dirty tracking.
> 3. Make migration error and blocker messages more elaborate.
> 4. Remove error prints in vfio_migration_query_flags().
> 5. Rename trace_vfio_migration_probe() to
>    trace_vfio_migration_realize().
> 
> Signed-off-by: Avihai Horon <avihaih@nvidia.com>

Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Cédric Le Goater June 28, 2023, 4:03 p.m. UTC | #4
On 6/28/23 16:51, Joao Martins wrote:
> On 28/06/2023 13:54, Cédric Le Goater wrote:
>> On 6/28/23 09:31, Avihai Horon wrote:
>>> The major parts of VFIO migration are supported today in QEMU. This
>>> includes basic VFIO migration, device dirty page tracking and precopy
>>> support.
>>>
>>> Thus, at this point in time, it seems appropriate to make VFIO migration
>>> non-experimental: remove the x prefix from enable_migration property,
>>> change it to ON_OFF_AUTO and let the default value be AUTO.
>>>
>>> In addition, make the following adjustments:
>>> 1. When enable_migration is ON and migration is not supported, fail VFIO
>>>      device realization.
>>> 2. When enable_migration is AUTO (i.e., not explicitly enabled), require
>>>      device dirty tracking support. This is because device dirty tracking
>>>      is currently the only method to do dirty page tracking, which is
>>>      essential for migrating in a reasonable downtime. Setting
>>>      enable_migration to ON will not require device dirty tracking.
>>> 3. Make migration error and blocker messages more elaborate.
>>> 4. Remove error prints in vfio_migration_query_flags().
>>> 5. Rename trace_vfio_migration_probe() to
>>>      trace_vfio_migration_realize().
>>>
>>> Signed-off-by: Avihai Horon <avihaih@nvidia.com>
>>
>>
>> We should rework the return value of most of the routines called by
>> vfio_migration_realize() and simply use a bool. I think Zhenzhong is
>> working on it.
>>
>> Zhenzhong,
>>
>> When you resend v4 of the "VFIO migration related refactor and bug fix"
>> series, please rebase on this patch since it should be merged.
>>
> 
> This, and the switchover-ack series from Avihai that precedes it.
> 
> Perhaps it might be easier to point to your tree:branch where you are queueing
> all the patches?
> 

Sure.

I track QEMU patches for various subsystems under:

  https://github.com/legoater/qemu

  
VFIO candidates are under:

   https://github.com/legoater/qemu/tree/vfio-8.1

This is a WIP tree; patches come and go. It contains the VFIO patches of
the day/week, good for testing new ideas and checking CI.


The vfio-next branch contains what I am 90% sure to send upstream:

  https://github.com/legoater/qemu/tree/vfio-next

which I rebase on master and update with new proposals and new tags.

Beware, both are force-pushed branches. Only master is not.


Cheers,

C.
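
For reference, picking up the queued patches and rebasing a local series onto
the vfio-next branch above would look roughly like this (the remote name is
arbitrary):

  git remote add legoater https://github.com/legoater/qemu
  git fetch legoater
  git rebase legoater/vfio-next

Since both branches are force-pushed, a later fetch may require rebasing again.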
Zhenzhong Duan June 29, 2023, 1:57 a.m. UTC | #5
>-----Original Message-----
>From: Cédric Le Goater <clg@redhat.com>
>Sent: Thursday, June 29, 2023 12:04 AM
>To: Martins, Joao <joao.m.martins@oracle.com>; Avihai Horon
><avihaih@nvidia.com>
>Cc: Alex Williamson <alex.williamson@redhat.com>; Juan Quintela
><quintela@redhat.com>; Peter Xu <peterx@redhat.com>; Leonardo Bras
><leobras@redhat.com>; Duan, Zhenzhong <zhenzhong.duan@intel.com>;
>Yishai Hadas <yishaih@nvidia.com>; Jason Gunthorpe <jgg@nvidia.com>;
>Maor Gottlieb <maorg@nvidia.com>; Kirti Wankhede
><kwankhede@nvidia.com>; Tarun Gupta <targupta@nvidia.com>; qemu-
>devel@nongnu.org
>Subject: Re: [PATCH v2 2/2] vfio/migration: Make VFIO migration non-
>experimental
>
>On 6/28/23 16:51, Joao Martins wrote:
>> On 28/06/2023 13:54, Cédric Le Goater wrote:
>>> On 6/28/23 09:31, Avihai Horon wrote:
>>>> The major parts of VFIO migration are supported today in QEMU. This
>>>> includes basic VFIO migration, device dirty page tracking and
>>>> precopy support.
>>>>
>>>> Thus, at this point in time, it seems appropriate to make VFIO
>>>> migration
>>>> non-experimental: remove the x prefix from enable_migration
>>>> property, change it to ON_OFF_AUTO and let the default value be AUTO.
>>>>
>>>> In addition, make the following adjustments:
>>>> 1. When enable_migration is ON and migration is not supported, fail
>>>> VFIO
>>>>      device realization.
>>>> 2. When enable_migration is AUTO (i.e., not explicitly enabled),
>>>> require
>>>>      device dirty tracking support. This is because device dirty
>>>> tracking
>>>>      is currently the only method to do dirty page tracking, which
>>>> is
>>>>      essential for migrating in a reasonable downtime. Setting
>>>>      enable_migration to ON will not require device dirty tracking.
>>>> 3. Make migration error and blocker messages more elaborate.
>>>> 4. Remove error prints in vfio_migration_query_flags().
>>>> 5. Rename trace_vfio_migration_probe() to
>>>>      trace_vfio_migration_realize().
>>>>
>>>> Signed-off-by: Avihai Horon <avihaih@nvidia.com>
>>>
>>>
>>> We should rework the return value of most of the routines called by
>>> vfio_migration_realize() and simply use a bool. I think Zhenzhong is
>>> working on it.
>>>
>>> Zhenzhong,
>>>
>>> When you resend v4 of the "VFIO migration related refactor and bug fix"
>>> series, please rebase on this patch since it should be merged.
>>>
>>
>> This, and the switchover-ack series from Avihai that precedes it.
>>
>> Perhaps it might be easier to point to your tree:branch where you are
>> queueing all the patches?
>>
>
>Sure.
>
>I track QEMU patches for various subsystems under :
>
>  https://github.com/legoater/qemu
>
>
>VFIO candidates are under :
>
>   https://github.com/legoater/qemu/tree/vfio-8.1
>
>This is a wip tree, patches come and go. It contains the VFIO patches of the
>day/week, good for testing new ideas and checking CI.
>
>
>The vfio-next branch contains what I am 90% sure to send upstream :
>
>  https://github.com/legoater/qemu/tree/vfio-next

Great, I'll rebase on this branch, thanks for sharing.

Regards
Zhenzhong

Patch

diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index b4c28f318f..0ded0e73e2 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -139,7 +139,7 @@  typedef struct VFIODevice {
     bool needs_reset;
     bool no_mmap;
     bool ram_block_discard_allowed;
-    bool enable_migration;
+    OnOffAuto enable_migration;
     VFIODeviceOps *ops;
     unsigned int num_irqs;
     unsigned int num_regions;
@@ -224,9 +224,9 @@  typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
 extern VFIOGroupList vfio_group_list;
 
 bool vfio_mig_active(void);
-int vfio_block_multiple_devices_migration(Error **errp);
+int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp);
 void vfio_unblock_multiple_devices_migration(void);
-int vfio_block_giommu_migration(Error **errp);
+int vfio_block_giommu_migration(VFIODevice *vbasedev, Error **errp);
 int64_t vfio_mig_bytes_transferred(void);
 void vfio_reset_bytes_transferred(void);
 
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 25801de173..8c73f84581 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -381,7 +381,7 @@  static unsigned int vfio_migratable_device_num(void)
     return device_num;
 }
 
-int vfio_block_multiple_devices_migration(Error **errp)
+int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
 {
     int ret;
 
@@ -390,6 +390,12 @@  int vfio_block_multiple_devices_migration(Error **errp)
         return 0;
     }
 
+    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
+        error_setg(errp, "Migration is currently not supported with multiple "
+                         "VFIO devices");
+        return -EINVAL;
+    }
+
     error_setg(&multiple_devices_migration_blocker,
                "Migration is currently not supported with multiple "
                "VFIO devices");
@@ -427,7 +433,7 @@  static bool vfio_viommu_preset(void)
     return false;
 }
 
-int vfio_block_giommu_migration(Error **errp)
+int vfio_block_giommu_migration(VFIODevice *vbasedev, Error **errp)
 {
     int ret;
 
@@ -436,6 +442,12 @@  int vfio_block_giommu_migration(Error **errp)
         return 0;
     }
 
+    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
+        error_setg(errp,
+                   "Migration is currently not supported with vIOMMU enabled");
+        return -EINVAL;
+    }
+
     error_setg(&giommu_migration_blocker,
                "Migration is currently not supported with vIOMMU enabled");
     ret = migrate_add_blocker(giommu_migration_blocker, errp);
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index 7cf143926c..1db7d52ab2 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -724,14 +724,6 @@  static int vfio_migration_query_flags(VFIODevice *vbasedev, uint64_t *mig_flags)
     feature->argsz = sizeof(buf);
     feature->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;
     if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
-        if (errno == ENOTTY) {
-            error_report("%s: VFIO migration is not supported in kernel",
-                         vbasedev->name);
-        } else {
-            error_report("%s: Failed to query VFIO migration support, err: %s",
-                         vbasedev->name, strerror(errno));
-        }
-
         return -errno;
     }
 
@@ -810,6 +802,27 @@  static int vfio_migration_init(VFIODevice *vbasedev)
     return 0;
 }
 
+static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
+{
+    int ret;
+
+    if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
+        error_propagate(errp, err);
+        return -EINVAL;
+    }
+
+    vbasedev->migration_blocker = error_copy(err);
+    error_free(err);
+
+    ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
+    if (ret < 0) {
+        error_free(vbasedev->migration_blocker);
+        vbasedev->migration_blocker = NULL;
+    }
+
+    return ret;
+}
+
 /* ---------------------------------------------------------------------- */
 
 int64_t vfio_mig_bytes_transferred(void)
@@ -824,40 +837,54 @@  void vfio_reset_bytes_transferred(void)
 
 int vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
 {
-    int ret = -ENOTSUP;
+    Error *err = NULL;
+    int ret;
 
-    if (!vbasedev->enable_migration) {
-        goto add_blocker;
+    if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
+        error_setg(&err, "%s: Migration is disabled for VFIO device",
+                   vbasedev->name);
+        return vfio_block_migration(vbasedev, err, errp);
     }
 
     ret = vfio_migration_init(vbasedev);
     if (ret) {
-        goto add_blocker;
+        if (ret == -ENOTTY) {
+            error_setg(&err, "%s: VFIO migration is not supported in kernel",
+                       vbasedev->name);
+        } else {
+            error_setg(&err,
+                       "%s: Migration couldn't be initialized for VFIO device, "
+                       "err: %d (%s)",
+                       vbasedev->name, ret, strerror(-ret));
+        }
+
+        return vfio_block_migration(vbasedev, err, errp);
+    }
+
+    if (!vbasedev->dirty_pages_supported) {
+        if (vbasedev->enable_migration == ON_OFF_AUTO_AUTO) {
+            error_setg(&err,
+                       "%s: VFIO device doesn't support device dirty tracking",
+                       vbasedev->name);
+            return vfio_block_migration(vbasedev, err, errp);
+        }
+
+        warn_report("%s: VFIO device doesn't support device dirty tracking",
+                    vbasedev->name);
     }
 
-    ret = vfio_block_multiple_devices_migration(errp);
+    ret = vfio_block_multiple_devices_migration(vbasedev, errp);
     if (ret) {
         return ret;
     }
 
-    ret = vfio_block_giommu_migration(errp);
+    ret = vfio_block_giommu_migration(vbasedev, errp);
     if (ret) {
         return ret;
     }
 
-    trace_vfio_migration_probe(vbasedev->name);
+    trace_vfio_migration_realize(vbasedev->name);
     return 0;
-
-add_blocker:
-    error_setg(&vbasedev->migration_blocker,
-               "VFIO device doesn't support migration");
-
-    ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
-    if (ret < 0) {
-        error_free(vbasedev->migration_blocker);
-        vbasedev->migration_blocker = NULL;
-    }
-    return ret;
 }
 
 void vfio_migration_exit(VFIODevice *vbasedev)
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 73874a94de..48584e3b01 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3347,8 +3347,8 @@  static Property vfio_pci_dev_properties[] = {
                     VFIO_FEATURE_ENABLE_REQ_BIT, true),
     DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                     VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
-    DEFINE_PROP_BOOL("x-enable-migration", VFIOPCIDevice,
-                     vbasedev.enable_migration, false),
+    DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
+                            vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
     DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
     DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                      vbasedev.ram_block_discard_allowed, false),
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index e328d644d2..ee7509e68e 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -155,7 +155,7 @@  vfio_load_cleanup(const char *name) " (%s)"
 vfio_load_device_config_state(const char *name) " (%s)"
 vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
 vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size 0x%"PRIx64" ret %d"
-vfio_migration_probe(const char *name) " (%s)"
+vfio_migration_realize(const char *name) " (%s)"
 vfio_migration_set_state(const char *name, const char *state) " (%s) state %s"
 vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s"
 vfio_save_block(const char *name, int data_size) " (%s) data_size %d"