Migrating with vIOMMU will require either tracking maximum
IOMMU supported address space (e.g. 39/48 address width on Intel)
or range-track current mappings and dirty track the new ones
post starting dirty tracking. This will be done as a separate
series, so add a live migration blocker until that is fixed.
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
hw/vfio/common.c | 51 +++++++++++++++++++++++++++++++++++
hw/vfio/migration.c | 6 +++++
include/hw/vfio/vfio-common.h | 2 ++
3 files changed, 59 insertions(+)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 5b8456975e97..9b909f856722 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -365,6 +365,7 @@ bool vfio_mig_active(void)
}
static Error *multiple_devices_migration_blocker;
+static Error *giommu_migration_blocker;
static unsigned int vfio_migratable_device_num(void)
{
@@ -416,6 +417,56 @@ void vfio_unblock_multiple_devices_migration(void)
multiple_devices_migration_blocker = NULL;
}
+static unsigned int vfio_use_iommu_device_num(void)
+{
+ VFIOGroup *group;
+ VFIODevice *vbasedev;
+ unsigned int device_num = 0;
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (vbasedev->group->container->space->as !=
+ &address_space_memory) {
+ device_num++;
+ }
+ }
+ }
+
+ return device_num;
+}
+
+int vfio_block_giommu_migration(Error **errp)
+{
+ int ret;
+
+ if (giommu_migration_blocker ||
+ !vfio_use_iommu_device_num()) {
+ return 0;
+ }
+
+ error_setg(&giommu_migration_blocker,
+ "Migration is currently not supported with vIOMMU enabled");
+ ret = migrate_add_blocker(giommu_migration_blocker, errp);
+ if (ret < 0) {
+ error_free(giommu_migration_blocker);
+ giommu_migration_blocker = NULL;
+ }
+
+ return ret;
+}
+
+void vfio_unblock_giommu_migration(void)
+{
+ if (!giommu_migration_blocker ||
+ vfio_use_iommu_device_num()) {
+ return;
+ }
+
+ migrate_del_blocker(giommu_migration_blocker);
+ error_free(giommu_migration_blocker);
+ giommu_migration_blocker = NULL;
+}
+
static void vfio_set_migration_error(int err)
{
MigrationState *ms = migrate_get_current();
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index a2c3d9bade7f..3e75868ae7a9 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -634,6 +634,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
return ret;
}
+ ret = vfio_block_giommu_migration(errp);
+ if (ret) {
+ return ret;
+ }
+
trace_vfio_migration_probe(vbasedev->name);
return 0;
@@ -659,6 +664,7 @@ void vfio_migration_finalize(VFIODevice *vbasedev)
unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
vfio_migration_exit(vbasedev);
vfio_unblock_multiple_devices_migration();
+ vfio_unblock_giommu_migration();
}
if (vbasedev->migration_blocker) {
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 1cbbccd91e11..38e44258925b 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -233,6 +233,8 @@ extern VFIOGroupList vfio_group_list;
bool vfio_mig_active(void);
int vfio_block_multiple_devices_migration(Error **errp);
void vfio_unblock_multiple_devices_migration(void);
+int vfio_block_giommu_migration(Error **errp);
+void vfio_unblock_giommu_migration(void);
int64_t vfio_mig_bytes_transferred(void);
#ifdef CONFIG_LINUX
--
2.17.2
On Sat, 4 Mar 2023 01:43:41 +0000
Joao Martins <joao.m.martins@oracle.com> wrote:
> Migrating with vIOMMU will require either tracking maximum
> IOMMU supported address space (e.g. 39/48 address width on Intel)
> or range-track current mappings and dirty track the new ones
> post starting dirty tracking. This will be done as a separate
> series, so add a live migration blocker until that is fixed.
>
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
> hw/vfio/common.c | 51 +++++++++++++++++++++++++++++++++++
> hw/vfio/migration.c | 6 +++++
> include/hw/vfio/vfio-common.h | 2 ++
> 3 files changed, 59 insertions(+)
>
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 5b8456975e97..9b909f856722 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -365,6 +365,7 @@ bool vfio_mig_active(void)
> }
>
> static Error *multiple_devices_migration_blocker;
> +static Error *giommu_migration_blocker;
>
> static unsigned int vfio_migratable_device_num(void)
> {
> @@ -416,6 +417,56 @@ void vfio_unblock_multiple_devices_migration(void)
> multiple_devices_migration_blocker = NULL;
> }
>
> +static unsigned int vfio_use_iommu_device_num(void)
> +{
> + VFIOGroup *group;
> + VFIODevice *vbasedev;
> + unsigned int device_num = 0;
> +
> + QLIST_FOREACH(group, &vfio_group_list, next) {
> + QLIST_FOREACH(vbasedev, &group->device_list, next) {
> + if (vbasedev->group->container->space->as !=
> + &address_space_memory) {
> + device_num++;
> + }
> + }
> + }
> +
> + return device_num;
> +}
I'm not sure why we're counting devices since nobody uses that data,
but couldn't this be even more simple and efficient by walking the
vfio_address_spaces list?
static bool vfio_viommu_preset(void)
{
VFIOAddressSpace *space;
QLIST_FOREACH(space, &vfio_address_spaces, list) {
if (space->as != &address_space_memory) {
return true;
}
}
return false;
}
> +
> +int vfio_block_giommu_migration(Error **errp)
> +{
> + int ret;
> +
> + if (giommu_migration_blocker ||
> + !vfio_use_iommu_device_num()) {
> + return 0;
> + }
> +
> + error_setg(&giommu_migration_blocker,
> + "Migration is currently not supported with vIOMMU enabled");
> + ret = migrate_add_blocker(giommu_migration_blocker, errp);
> + if (ret < 0) {
> + error_free(giommu_migration_blocker);
> + giommu_migration_blocker = NULL;
> + }
> +
> + return ret;
> +}
> +
> +void vfio_unblock_giommu_migration(void)
> +{
> + if (!giommu_migration_blocker ||
> + vfio_use_iommu_device_num()) {
> + return;
> + }
> +
> + migrate_del_blocker(giommu_migration_blocker);
> + error_free(giommu_migration_blocker);
> + giommu_migration_blocker = NULL;
> +}
> +
> static void vfio_set_migration_error(int err)
> {
> MigrationState *ms = migrate_get_current();
> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
> index a2c3d9bade7f..3e75868ae7a9 100644
> --- a/hw/vfio/migration.c
> +++ b/hw/vfio/migration.c
> @@ -634,6 +634,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
> return ret;
> }
>
> + ret = vfio_block_giommu_migration(errp);
> + if (ret) {
> + return ret;
> + }
> +
> trace_vfio_migration_probe(vbasedev->name);
> return 0;
>
> @@ -659,6 +664,7 @@ void vfio_migration_finalize(VFIODevice *vbasedev)
> unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
> vfio_migration_exit(vbasedev);
> vfio_unblock_multiple_devices_migration();
> + vfio_unblock_giommu_migration();
Hmm, this actually gets called from vfio_exitfn(), doesn't all the
group, device, address space unlinking happen in
vfio_instance_finalize()? Has this actually been tested to remove the
blocker? And why is this a _finalize() function when it's called from
an exit callback? Thanks,
Alex
> }
>
> if (vbasedev->migration_blocker) {
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index 1cbbccd91e11..38e44258925b 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -233,6 +233,8 @@ extern VFIOGroupList vfio_group_list;
> bool vfio_mig_active(void);
> int vfio_block_multiple_devices_migration(Error **errp);
> void vfio_unblock_multiple_devices_migration(void);
> +int vfio_block_giommu_migration(Error **errp);
> +void vfio_unblock_giommu_migration(void);
> int64_t vfio_mig_bytes_transferred(void);
>
> #ifdef CONFIG_LINUX
On 06/03/2023 19:42, Alex Williamson wrote:
> On Sat, 4 Mar 2023 01:43:41 +0000
> Joao Martins <joao.m.martins@oracle.com> wrote:
>
>> Migrating with vIOMMU will require either tracking maximum
>> IOMMU supported address space (e.g. 39/48 address width on Intel)
>> or range-track current mappings and dirty track the new ones
>> post starting dirty tracking. This will be done as a separate
>> series, so add a live migration blocker until that is fixed.
>>
>> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
>> ---
>> hw/vfio/common.c | 51 +++++++++++++++++++++++++++++++++++
>> hw/vfio/migration.c | 6 +++++
>> include/hw/vfio/vfio-common.h | 2 ++
>> 3 files changed, 59 insertions(+)
>>
>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>> index 5b8456975e97..9b909f856722 100644
>> --- a/hw/vfio/common.c
>> +++ b/hw/vfio/common.c
>> @@ -365,6 +365,7 @@ bool vfio_mig_active(void)
>> }
>>
>> static Error *multiple_devices_migration_blocker;
>> +static Error *giommu_migration_blocker;
>>
>> static unsigned int vfio_migratable_device_num(void)
>> {
>> @@ -416,6 +417,56 @@ void vfio_unblock_multiple_devices_migration(void)
>> multiple_devices_migration_blocker = NULL;
>> }
>>
>> +static unsigned int vfio_use_iommu_device_num(void)
>> +{
>> + VFIOGroup *group;
>> + VFIODevice *vbasedev;
>> + unsigned int device_num = 0;
>> +
>> + QLIST_FOREACH(group, &vfio_group_list, next) {
>> + QLIST_FOREACH(vbasedev, &group->device_list, next) {
>> + if (vbasedev->group->container->space->as !=
>> + &address_space_memory) {
>> + device_num++;
>> + }
>> + }
>> + }
>> +
>> + return device_num;
>> +}
>
> I'm not sure why we're counting devices since nobody uses that data,
My idea was to track the case where some device is configured with the bypass_iommu
PCI device option. But that would always be caught by the same check below.
> but couldn't this be even more simple and efficient by walking the
> vfio_address_spaces list?
>
Yes, or iterating groups too as Cedric suggested.
We don't care about the number of devices per se, just whether *any* device is using
vIOMMU or not.
> static bool vfio_viommu_preset(void)
> {
> VFIOAddressSpace *space;
>
> QLIST_FOREACH(space, &vfio_address_spaces, list) {
> if (space->as != &address_space_memory) {
> return true;
> }
> }
>
> return false;
> }
>
Let me switch to the above.
>> +
>> +int vfio_block_giommu_migration(Error **errp)
>> +{
>> + int ret;
>> +
>> + if (giommu_migration_blocker ||
>> + !vfio_use_iommu_device_num()) {
>> + return 0;
>> + }
>> +
>> + error_setg(&giommu_migration_blocker,
>> + "Migration is currently not supported with vIOMMU enabled");
>> + ret = migrate_add_blocker(giommu_migration_blocker, errp);
>> + if (ret < 0) {
>> + error_free(giommu_migration_blocker);
>> + giommu_migration_blocker = NULL;
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +void vfio_unblock_giommu_migration(void)
>> +{
>> + if (!giommu_migration_blocker ||
>> + vfio_use_iommu_device_num()) {
>> + return;
>> + }
>> +
>> + migrate_del_blocker(giommu_migration_blocker);
>> + error_free(giommu_migration_blocker);
>> + giommu_migration_blocker = NULL;
>> +}
>> +
>> static void vfio_set_migration_error(int err)
>> {
>> MigrationState *ms = migrate_get_current();
>> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
>> index a2c3d9bade7f..3e75868ae7a9 100644
>> --- a/hw/vfio/migration.c
>> +++ b/hw/vfio/migration.c
>> @@ -634,6 +634,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
>> return ret;
>> }
>>
>> + ret = vfio_block_giommu_migration(errp);
>> + if (ret) {
>> + return ret;
>> + }
>> +
>> trace_vfio_migration_probe(vbasedev->name);
>> return 0;
>>
>> @@ -659,6 +664,7 @@ void vfio_migration_finalize(VFIODevice *vbasedev)
>> unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
>> vfio_migration_exit(vbasedev);
>> vfio_unblock_multiple_devices_migration();
>> + vfio_unblock_giommu_migration();
>
> Hmm, this actually gets called from vfio_exitfn(), doesn't all the
> group, device, address space unlinking happen in
> vfio_instance_finalize()? Has this actually been tested to remove the
> blocker? And why is this a _finalize() function when it's called from
> an exit callback? Thanks,
>
I didn't test it correctly, clearly.
It doesn't work correctly as vfio_viommu_preset() always returns true and thus
it won't unblock, even after device deletion. I've moved
vfio_unblock_giommu_migration() into vfio_instance_finalize() like:
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 939dcc3d4a9e..30a271eab38c 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3185,6 +3185,7 @@ static void vfio_instance_finalize(Object *obj)
*/
vfio_put_device(vdev);
vfio_put_group(group);
+ vfio_unblock_giommu_migration();
}
On 3/4/23 02:43, Joao Martins wrote:
> Migrating with vIOMMU will require either tracking maximum
> IOMMU supported address space (e.g. 39/48 address width on Intel)
> or range-track current mappings and dirty track the new ones
> post starting dirty tracking. This will be done as a separate
> series, so add a live migration blocker until that is fixed.
>
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
> hw/vfio/common.c | 51 +++++++++++++++++++++++++++++++++++
> hw/vfio/migration.c | 6 +++++
> include/hw/vfio/vfio-common.h | 2 ++
> 3 files changed, 59 insertions(+)
>
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 5b8456975e97..9b909f856722 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -365,6 +365,7 @@ bool vfio_mig_active(void)
> }
>
> static Error *multiple_devices_migration_blocker;
> +static Error *giommu_migration_blocker;
>
> static unsigned int vfio_migratable_device_num(void)
> {
> @@ -416,6 +417,56 @@ void vfio_unblock_multiple_devices_migration(void)
> multiple_devices_migration_blocker = NULL;
> }
>
> +static unsigned int vfio_use_iommu_device_num(void)
> +{
> + VFIOGroup *group;
> + VFIODevice *vbasedev;
> + unsigned int device_num = 0;
> +
> + QLIST_FOREACH(group, &vfio_group_list, next) {
> + QLIST_FOREACH(vbasedev, &group->device_list, next) {
> + if (vbasedev->group->container->space->as !=
> + &address_space_memory) {
Can't we avoid the second loop and test directly :
group->container->space->as
?
The rest looks good. So,
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Thanks,
C.
> + device_num++;
> + }
> + }
> + }
> +
> + return device_num;
> +}
> +
> +int vfio_block_giommu_migration(Error **errp)
> +{
> + int ret;
> +
> + if (giommu_migration_blocker ||
> + !vfio_use_iommu_device_num()) {
> + return 0;
> + }
> +
> + error_setg(&giommu_migration_blocker,
> + "Migration is currently not supported with vIOMMU enabled");
> + ret = migrate_add_blocker(giommu_migration_blocker, errp);
> + if (ret < 0) {
> + error_free(giommu_migration_blocker);
> + giommu_migration_blocker = NULL;
> + }
> +
> + return ret;
> +}
> +
> +void vfio_unblock_giommu_migration(void)
> +{
> + if (!giommu_migration_blocker ||
> + vfio_use_iommu_device_num()) {
> + return;
> + }
> +
> + migrate_del_blocker(giommu_migration_blocker);
> + error_free(giommu_migration_blocker);
> + giommu_migration_blocker = NULL;
> +}
> +
> static void vfio_set_migration_error(int err)
> {
> MigrationState *ms = migrate_get_current();
> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
> index a2c3d9bade7f..3e75868ae7a9 100644
> --- a/hw/vfio/migration.c
> +++ b/hw/vfio/migration.c
> @@ -634,6 +634,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
> return ret;
> }
>
> + ret = vfio_block_giommu_migration(errp);
> + if (ret) {
> + return ret;
> + }
> +
> trace_vfio_migration_probe(vbasedev->name);
> return 0;
>
> @@ -659,6 +664,7 @@ void vfio_migration_finalize(VFIODevice *vbasedev)
> unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
> vfio_migration_exit(vbasedev);
> vfio_unblock_multiple_devices_migration();
> + vfio_unblock_giommu_migration();
> }
>
> if (vbasedev->migration_blocker) {
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index 1cbbccd91e11..38e44258925b 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -233,6 +233,8 @@ extern VFIOGroupList vfio_group_list;
> bool vfio_mig_active(void);
> int vfio_block_multiple_devices_migration(Error **errp);
> void vfio_unblock_multiple_devices_migration(void);
> +int vfio_block_giommu_migration(Error **errp);
> +void vfio_unblock_giommu_migration(void);
> int64_t vfio_mig_bytes_transferred(void);
>
> #ifdef CONFIG_LINUX
On 06/03/2023 17:00, Cédric Le Goater wrote:
> On 3/4/23 02:43, Joao Martins wrote:
>> Migrating with vIOMMU will require either tracking maximum
>> IOMMU supported address space (e.g. 39/48 address width on Intel)
>> or range-track current mappings and dirty track the new ones
>> post starting dirty tracking. This will be done as a separate
>> series, so add a live migration blocker until that is fixed.
>>
>> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
>> ---
>> hw/vfio/common.c | 51 +++++++++++++++++++++++++++++++++++
>> hw/vfio/migration.c | 6 +++++
>> include/hw/vfio/vfio-common.h | 2 ++
>> 3 files changed, 59 insertions(+)
>>
>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>> index 5b8456975e97..9b909f856722 100644
>> --- a/hw/vfio/common.c
>> +++ b/hw/vfio/common.c
>> @@ -365,6 +365,7 @@ bool vfio_mig_active(void)
>> }
>> static Error *multiple_devices_migration_blocker;
>> +static Error *giommu_migration_blocker;
>> static unsigned int vfio_migratable_device_num(void)
>> {
>> @@ -416,6 +417,56 @@ void vfio_unblock_multiple_devices_migration(void)
>> multiple_devices_migration_blocker = NULL;
>> }
>> +static unsigned int vfio_use_iommu_device_num(void)
>> +{
>> + VFIOGroup *group;
>> + VFIODevice *vbasedev;
>> + unsigned int device_num = 0;
>> +
>> + QLIST_FOREACH(group, &vfio_group_list, next) {
>> + QLIST_FOREACH(vbasedev, &group->device_list, next) {
>> + if (vbasedev->group->container->space->as !=
>> + &address_space_memory) {
>
> Can't we avoid the second loop and test directly :
>
> group->container->space->as
>
> ?
>
Ah yes
>
> The rest looks good. So,
>
> Reviewed-by: Cédric Le Goater <clg@redhat.com>
>
Thanks!
> Thanks,
>
> C.
>
>> + device_num++;
>> + }
>> + }
>> + }
>> +
>> + return device_num;
>> +}
>> +
>> +int vfio_block_giommu_migration(Error **errp)
>> +{
>> + int ret;
>> +
>> + if (giommu_migration_blocker ||
>> + !vfio_use_iommu_device_num()) {
>> + return 0;
>> + }
>> +
>> + error_setg(&giommu_migration_blocker,
>> + "Migration is currently not supported with vIOMMU enabled");
>> + ret = migrate_add_blocker(giommu_migration_blocker, errp);
>> + if (ret < 0) {
>> + error_free(giommu_migration_blocker);
>> + giommu_migration_blocker = NULL;
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +void vfio_unblock_giommu_migration(void)
>> +{
>> + if (!giommu_migration_blocker ||
>> + vfio_use_iommu_device_num()) {
>> + return;
>> + }
>> +
>> + migrate_del_blocker(giommu_migration_blocker);
>> + error_free(giommu_migration_blocker);
>> + giommu_migration_blocker = NULL;
>> +}
>> +
>> static void vfio_set_migration_error(int err)
>> {
>> MigrationState *ms = migrate_get_current();
>> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
>> index a2c3d9bade7f..3e75868ae7a9 100644
>> --- a/hw/vfio/migration.c
>> +++ b/hw/vfio/migration.c
>> @@ -634,6 +634,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
>> return ret;
>> }
>> + ret = vfio_block_giommu_migration(errp);
>> + if (ret) {
>> + return ret;
>> + }
>> +
>> trace_vfio_migration_probe(vbasedev->name);
>> return 0;
>> @@ -659,6 +664,7 @@ void vfio_migration_finalize(VFIODevice *vbasedev)
>> unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
>> vfio_migration_exit(vbasedev);
>> vfio_unblock_multiple_devices_migration();
>> + vfio_unblock_giommu_migration();
>> }
>> if (vbasedev->migration_blocker) {
>> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
>> index 1cbbccd91e11..38e44258925b 100644
>> --- a/include/hw/vfio/vfio-common.h
>> +++ b/include/hw/vfio/vfio-common.h
>> @@ -233,6 +233,8 @@ extern VFIOGroupList vfio_group_list;
>> bool vfio_mig_active(void);
>> int vfio_block_multiple_devices_migration(Error **errp);
>> void vfio_unblock_multiple_devices_migration(void);
>> +int vfio_block_giommu_migration(Error **errp);
>> +void vfio_unblock_giommu_migration(void);
>> int64_t vfio_mig_bytes_transferred(void);
>> #ifdef CONFIG_LINUX
>
© 2016 - 2026 Red Hat, Inc.