On Tue, 7 Mar 2023 12:54:48 +0000 Joao Martins <[email protected]> wrote:
> Migrating with vIOMMU will require either tracking maximum
> IOMMU supported address space (e.g. 39/48 address width on Intel)
> or range-track current mappings and dirty track the new ones
> post starting dirty tracking. This will be done as a separate
> series, so add a live migration blocker until that is fixed.
> 
> Signed-off-by: Joao Martins <[email protected]>
> Reviewed-by: Cédric Le Goater <[email protected]>
> ---
>  hw/vfio/common.c              | 46 +++++++++++++++++++++++++++++++++++
>  hw/vfio/migration.c           |  5 ++++
>  hw/vfio/pci.c                 |  1 +
>  include/hw/vfio/vfio-common.h |  2 ++
>  4 files changed, 54 insertions(+)
> 
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 2639b393a781..2b9bcf70aa36 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -362,6 +362,7 @@ bool vfio_mig_active(void)
>  }
>  
>  static Error *multiple_devices_migration_blocker;
> +static Error *giommu_migration_blocker;
>  
>  static unsigned int vfio_migratable_device_num(void)
>  {
> @@ -413,6 +414,51 @@ void vfio_unblock_multiple_devices_migration(void)
>      multiple_devices_migration_blocker = NULL;
>  }
>  
> +static bool vfio_viommu_preset(void)
> +{
> +    VFIOAddressSpace *space;
> +
> +    QLIST_FOREACH(space, &vfio_address_spaces, list) {
> +        if (space->as != &address_space_memory) {
> +            return true;
> +        }
> +    }
> +
> +    return false;
> +}
> +
> +int vfio_block_giommu_migration(Error **errp)
> +{
> +    int ret;
> +
> +    if (giommu_migration_blocker ||
> +        !vfio_viommu_preset()) {
> +        return 0;
> +    }
> +
> +    error_setg(&giommu_migration_blocker,
> +               "Migration is currently not supported with vIOMMU enabled");
> +    ret = migrate_add_blocker(giommu_migration_blocker, errp);
> +    if (ret < 0) {
> +        error_free(giommu_migration_blocker);
> +        giommu_migration_blocker = NULL;
> +    }
> +
> +    return ret;
> +}
> +
> +void vfio_unblock_giommu_migration(void)
> +{
> +    if (!giommu_migration_blocker ||
> +        vfio_viommu_preset()) {
> +        return;
> +    }
> +
> +    migrate_del_blocker(giommu_migration_blocker);
> +    error_free(giommu_migration_blocker);
> +    giommu_migration_blocker = NULL;
> +}
> +
>  static void vfio_set_migration_error(int err)
>  {
>      MigrationState *ms = migrate_get_current();
> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
> index a2c3d9bade7f..776fd2d7cdf3 100644
> --- a/hw/vfio/migration.c
> +++ b/hw/vfio/migration.c
> @@ -634,6 +634,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
>          return ret;
>      }
>  
> +    ret = vfio_block_giommu_migration(errp);
> +    if (ret) {
> +        return ret;
> +    }
> +
>      trace_vfio_migration_probe(vbasedev->name);
>      return 0;
> 
> diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
> index 939dcc3d4a9e..30a271eab38c 100644
> --- a/hw/vfio/pci.c
> +++ b/hw/vfio/pci.c
> @@ -3185,6 +3185,7 @@ static void vfio_instance_finalize(Object *obj)
>       */
>      vfio_put_device(vdev);
>      vfio_put_group(group);
> +    vfio_unblock_giommu_migration();
>  }

Hmm, doesn't this mean we're adding the viommu migration blocker in
common code but only removing it in pci code?  Granted that only PCI
devices currently have IOMMUs, but ick.  Thanks,

Alex
