From: Jason Gunthorpe <jgg@nvidia.com>
Allow iommufd to bind devices without an IOMMU (noiommu mode) by creating
a dummy IOMMU group for such devices and skipping hwpt operations.
This enables noiommu devices to operate through the same iommufd API as IOMMU-
capable devices.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
---
drivers/iommu/iommufd/device.c | 113 ++++++++++++++++++++++-----------
1 file changed, 76 insertions(+), 37 deletions(-)
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 54d73016468f..c38d3efa3d6f 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -23,6 +23,11 @@ struct iommufd_attach {
struct xarray device_array;
};
+static bool is_vfio_noiommu(struct iommufd_device *idev)
+{
+ return !device_iommu_mapped(idev->dev) || !idev->dev->iommu;
+}
+
static void iommufd_group_release(struct kref *kref)
{
struct iommufd_group *igroup =
@@ -205,32 +210,17 @@ void iommufd_device_destroy(struct iommufd_object *obj)
struct iommufd_device *idev =
container_of(obj, struct iommufd_device, obj);
- iommu_device_release_dma_owner(idev->dev);
+ if (!is_vfio_noiommu(idev))
+ iommu_device_release_dma_owner(idev->dev);
iommufd_put_group(idev->igroup);
if (!iommufd_selftest_is_mock_dev(idev->dev))
iommufd_ctx_put(idev->ictx);
}
-/**
- * iommufd_device_bind - Bind a physical device to an iommu fd
- * @ictx: iommufd file descriptor
- * @dev: Pointer to a physical device struct
- * @id: Output ID number to return to userspace for this device
- *
- * A successful bind establishes an ownership over the device and returns
- * struct iommufd_device pointer, otherwise returns error pointer.
- *
- * A driver using this API must set driver_managed_dma and must not touch
- * the device until this routine succeeds and establishes ownership.
- *
- * Binding a PCI device places the entire RID under iommufd control.
- *
- * The caller must undo this with iommufd_device_unbind()
- */
-struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
- struct device *dev, u32 *id)
+static int iommufd_bind_iommu(struct iommufd_device *idev)
{
- struct iommufd_device *idev;
+ struct iommufd_ctx *ictx = idev->ictx;
+ struct device *dev = idev->dev;
struct iommufd_group *igroup;
int rc;
@@ -239,11 +229,11 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
* to restore cache coherency.
*/
if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
- igroup = iommufd_get_group(ictx, dev);
+ igroup = iommufd_get_group(idev->ictx, dev);
if (IS_ERR(igroup))
- return ERR_CAST(igroup);
+ return PTR_ERR(igroup);
/*
* For historical compat with VFIO the insecure interrupt path is
@@ -269,21 +259,66 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
if (rc)
goto out_group_put;
+ /* igroup refcount moves into iommufd_device */
+ idev->igroup = igroup;
+ return 0;
+
+out_group_put:
+ iommufd_put_group(igroup);
+ return rc;
+}
+
+/**
+ * iommufd_device_bind - Bind a physical device to an iommu fd
+ * @ictx: iommufd file descriptor
+ * @dev: Pointer to a physical device struct
+ * @id: Output ID number to return to userspace for this device
+ *
+ * A successful bind establishes an ownership over the device and returns
+ * struct iommufd_device pointer, otherwise returns error pointer.
+ *
+ * A driver using this API must set driver_managed_dma and must not touch
+ * the device until this routine succeeds and establishes ownership.
+ *
+ * Binding a PCI device places the entire RID under iommufd control.
+ *
+ * The caller must undo this with iommufd_device_unbind()
+ */
+struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
+ struct device *dev, u32 *id)
+{
+ struct iommufd_device *idev;
+ int rc;
+
idev = iommufd_object_alloc(ictx, idev, IOMMUFD_OBJ_DEVICE);
- if (IS_ERR(idev)) {
- rc = PTR_ERR(idev);
- goto out_release_owner;
- }
+ if (IS_ERR(idev))
+ return idev;
idev->ictx = ictx;
- if (!iommufd_selftest_is_mock_dev(dev))
- iommufd_ctx_get(ictx);
idev->dev = dev;
idev->enforce_cache_coherency =
device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
+
+ if (!is_vfio_noiommu(idev)) {
+ rc = iommufd_bind_iommu(idev);
+ if (rc)
+ return ERR_PTR(rc);
+ } else {
+ struct iommufd_group *igroup;
+
+ /*
+	 * Create a dummy igroup, lots of stuff expects the igroup to be
+	 * present, but a NULL igroup->group is OK
+ */
+ igroup = iommufd_alloc_group(ictx, NULL);
+ if (IS_ERR(igroup))
+ return ERR_CAST(igroup);
+ idev->igroup = igroup;
+ }
+
+ if (!iommufd_selftest_is_mock_dev(dev))
+ iommufd_ctx_get(ictx);
/* The calling driver is a user until iommufd_device_unbind() */
refcount_inc(&idev->obj.users);
- /* igroup refcount moves into iommufd_device */
- idev->igroup = igroup;
/*
* If the caller fails after this success it must call
@@ -295,11 +330,6 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
*id = idev->obj.id;
return idev;
-out_release_owner:
- iommu_device_release_dma_owner(dev);
-out_group_put:
- iommufd_put_group(igroup);
- return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, "IOMMUFD");
@@ -513,6 +543,9 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle;
int rc;
+ if (is_vfio_noiommu(idev))
+ return 0;
+
if (!iommufd_hwpt_compatible_device(hwpt, idev))
return -EINVAL;
@@ -560,6 +593,9 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
{
struct iommufd_attach_handle *handle;
+ if (is_vfio_noiommu(idev))
+ return;
+
handle = iommufd_device_get_attach_handle(idev, pasid);
if (pasid == IOMMU_NO_PASID)
iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
@@ -578,6 +614,9 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
struct iommufd_attach_handle *handle, *old_handle;
int rc;
+ if (is_vfio_noiommu(idev))
+ return 0;
+
if (!iommufd_hwpt_compatible_device(hwpt, idev))
return -EINVAL;
@@ -653,7 +692,7 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
goto err_release_devid;
}
- if (attach_resv) {
+ if (attach_resv && !is_vfio_noiommu(idev)) {
rc = iommufd_device_attach_reserved_iova(idev, hwpt_paging);
if (rc)
goto err_release_devid;
--
2.34.1
On Thu, Mar 12, 2026 at 08:56:29AM -0700, Jacob Pan wrote:
> From: Jason Gunthorpe <jgg@nvidia.com>
>
> Allow iommufd to bind devices without an IOMMU (noiommu mode) by creating
> a dummy IOMMU group for such devices and skipping hwpt operations.
>
> This enables noiommu devices to operate through the same iommufd API as IOMMU-
> capable devices.
>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
> ---
> drivers/iommu/iommufd/device.c | 113 ++++++++++++++++++++++-----------
> 1 file changed, 76 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
> index 54d73016468f..c38d3efa3d6f 100644
> --- a/drivers/iommu/iommufd/device.c
> +++ b/drivers/iommu/iommufd/device.c
> @@ -23,6 +23,11 @@ struct iommufd_attach {
> struct xarray device_array;
> };
>
> +static bool is_vfio_noiommu(struct iommufd_device *idev)
> +{
> + return !device_iommu_mapped(idev->dev) || !idev->dev->iommu;
Does this need to check for CONFIG_VFIO_NOIOMMU, and maybe the module
param enable_unsafe_noiommu_mode, similar to the legacy implementation?
> +}
> +
> static void iommufd_group_release(struct kref *kref)
> {
> struct iommufd_group *igroup =
> @@ -205,32 +210,17 @@ void iommufd_device_destroy(struct iommufd_object *obj)
> struct iommufd_device *idev =
> container_of(obj, struct iommufd_device, obj);
>
> - iommu_device_release_dma_owner(idev->dev);
> + if (!is_vfio_noiommu(idev))
> + iommu_device_release_dma_owner(idev->dev);
> iommufd_put_group(idev->igroup);
> if (!iommufd_selftest_is_mock_dev(idev->dev))
> iommufd_ctx_put(idev->ictx);
> }
>
> -/**
> - * iommufd_device_bind - Bind a physical device to an iommu fd
> - * @ictx: iommufd file descriptor
> - * @dev: Pointer to a physical device struct
> - * @id: Output ID number to return to userspace for this device
> - *
> - * A successful bind establishes an ownership over the device and returns
> - * struct iommufd_device pointer, otherwise returns error pointer.
> - *
> - * A driver using this API must set driver_managed_dma and must not touch
> - * the device until this routine succeeds and establishes ownership.
> - *
> - * Binding a PCI device places the entire RID under iommufd control.
> - *
> - * The caller must undo this with iommufd_device_unbind()
> - */
> -struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> - struct device *dev, u32 *id)
> +static int iommufd_bind_iommu(struct iommufd_device *idev)
> {
> - struct iommufd_device *idev;
> + struct iommufd_ctx *ictx = idev->ictx;
> + struct device *dev = idev->dev;
> struct iommufd_group *igroup;
> int rc;
>
> @@ -239,11 +229,11 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> * to restore cache coherency.
> */
> if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
> - return ERR_PTR(-EINVAL);
> + return -EINVAL;
>
> - igroup = iommufd_get_group(ictx, dev);
> + igroup = iommufd_get_group(idev->ictx, dev);
> if (IS_ERR(igroup))
> - return ERR_CAST(igroup);
> + return PTR_ERR(igroup);
>
> /*
> * For historical compat with VFIO the insecure interrupt path is
> @@ -269,21 +259,66 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> if (rc)
> goto out_group_put;
>
> + /* igroup refcount moves into iommufd_device */
> + idev->igroup = igroup;
> + return 0;
> +
> +out_group_put:
> + iommufd_put_group(igroup);
> + return rc;
> +}
> +
> +/**
> + * iommufd_device_bind - Bind a physical device to an iommu fd
> + * @ictx: iommufd file descriptor
> + * @dev: Pointer to a physical device struct
> + * @id: Output ID number to return to userspace for this device
> + *
> + * A successful bind establishes an ownership over the device and returns
> + * struct iommufd_device pointer, otherwise returns error pointer.
> + *
> + * A driver using this API must set driver_managed_dma and must not touch
> + * the device until this routine succeeds and establishes ownership.
> + *
> + * Binding a PCI device places the entire RID under iommufd control.
> + *
> + * The caller must undo this with iommufd_device_unbind()
> + */
> +struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> + struct device *dev, u32 *id)
> +{
> + struct iommufd_device *idev;
> + int rc;
> +
> idev = iommufd_object_alloc(ictx, idev, IOMMUFD_OBJ_DEVICE);
The next code introduces new error cases; do those need to be cleaned up
by calling iommufd_object_abort_and_destroy()?
Thanks,
Mostafa
> - if (IS_ERR(idev)) {
> - rc = PTR_ERR(idev);
> - goto out_release_owner;
> - }
> + if (IS_ERR(idev))
> + return idev;
> idev->ictx = ictx;
> - if (!iommufd_selftest_is_mock_dev(dev))
> - iommufd_ctx_get(ictx);
> idev->dev = dev;
> idev->enforce_cache_coherency =
> device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
> +
> + if (!is_vfio_noiommu(idev)) {
> + rc = iommufd_bind_iommu(idev);
> + if (rc)
> + return ERR_PTR(rc);
> + } else {
> + struct iommufd_group *igroup;
> +
> + /*
> + * Create a dummy igroup, lots of stuff expects ths igroup to be
> + * present, but a NULL igroup->group is OK
> + */
> + igroup = iommufd_alloc_group(ictx, NULL);
> + if (IS_ERR(igroup))
> + return ERR_CAST(igroup);
> + idev->igroup = igroup;
> + }
> +
> + if (!iommufd_selftest_is_mock_dev(dev))
> + iommufd_ctx_get(ictx);
> /* The calling driver is a user until iommufd_device_unbind() */
> refcount_inc(&idev->obj.users);
> - /* igroup refcount moves into iommufd_device */
> - idev->igroup = igroup;
>
> /*
> * If the caller fails after this success it must call
> @@ -295,11 +330,6 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> *id = idev->obj.id;
> return idev;
>
> -out_release_owner:
> - iommu_device_release_dma_owner(dev);
> -out_group_put:
> - iommufd_put_group(igroup);
> - return ERR_PTR(rc);
> }
> EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, "IOMMUFD");
>
> @@ -513,6 +543,9 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> struct iommufd_attach_handle *handle;
> int rc;
>
> + if (is_vfio_noiommu(idev))
> + return 0;
> +
> if (!iommufd_hwpt_compatible_device(hwpt, idev))
> return -EINVAL;
>
> @@ -560,6 +593,9 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
> {
> struct iommufd_attach_handle *handle;
>
> + if (is_vfio_noiommu(idev))
> + return;
> +
> handle = iommufd_device_get_attach_handle(idev, pasid);
> if (pasid == IOMMU_NO_PASID)
> iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
> @@ -578,6 +614,9 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
> struct iommufd_attach_handle *handle, *old_handle;
> int rc;
>
> + if (is_vfio_noiommu(idev))
> + return 0;
> +
> if (!iommufd_hwpt_compatible_device(hwpt, idev))
> return -EINVAL;
>
> @@ -653,7 +692,7 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
> goto err_release_devid;
> }
>
> - if (attach_resv) {
> + if (attach_resv && !is_vfio_noiommu(idev)) {
> rc = iommufd_device_attach_reserved_iova(idev, hwpt_paging);
> if (rc)
> goto err_release_devid;
> --
> 2.34.1
>
Hi Mostafa,
On Sun, 22 Mar 2026 09:54:15 +0000
Mostafa Saleh <smostafa@google.com> wrote:
> From: Mostafa Saleh <smostafa@google.com>
> To: Jacob Pan <jacob.pan@linux.microsoft.com>
> Cc: linux-kernel@vger.kernel.org, "iommu@lists.linux.dev"
> <iommu@lists.linux.dev>, Jason Gunthorpe <jgg@nvidia.com>, Alex
> Williamson <alex@shazbot.org>, Joerg Roedel <joro@8bytes.org>, David
> Matlack <dmatlack@google.com>, Robin Murphy <robin.murphy@arm.com>,
> Nicolin Chen <nicolinc@nvidia.com>, "Tian, Kevin"
> <kevin.tian@intel.com>, Yi Liu <yi.l.liu@intel.com>,
> skhawaja@google.com, pasha.tatashin@soleen.com, Will Deacon
> <will@kernel.org>, Baolu Lu <baolu.lu@linux.intel.com> Subject: Re:
> [PATCH V2 03/11] iommufd: Allow binding to a noiommu device Date:
> Sun, 22 Mar 2026 09:54:15 +0000
>
> On Thu, Mar 12, 2026 at 08:56:29AM -0700, Jacob Pan wrote:
> > From: Jason Gunthorpe <jgg@nvidia.com>
> >
> > Allow iommufd to bind devices without an IOMMU (noiommu mode) by
> > creating a dummy IOMMU group for such devices and skipping hwpt
> > operations.
> >
> > This enables noiommu devices to operate through the same iommufd
> > API as IOMMU- capable devices.
> >
> > Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> > Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
> > ---
> > drivers/iommu/iommufd/device.c | 113
> > ++++++++++++++++++++++----------- 1 file changed, 76 insertions(+),
> > 37 deletions(-)
> >
> > diff --git a/drivers/iommu/iommufd/device.c
> > b/drivers/iommu/iommufd/device.c index 54d73016468f..c38d3efa3d6f
> > 100644 --- a/drivers/iommu/iommufd/device.c
> > +++ b/drivers/iommu/iommufd/device.c
> > @@ -23,6 +23,11 @@ struct iommufd_attach {
> > struct xarray device_array;
> > };
> >
> > +static bool is_vfio_noiommu(struct iommufd_device *idev)
> > +{
> > + return !device_iommu_mapped(idev->dev) ||
> > !idev->dev->iommu;
>
> Does this need to check for CONFIG_VFIO_NOIOMMU, and maybe the module
> param enable_unsafe_noiommu_mode, similar to the legacy implementation?
>
Checking for CONFIG_VFIO_NOIOMMU is not needed, since none of the
conditions are restricted by CONFIG_VFIO_NOIOMMU. I felt it is cleaner
this way, not tying iommufd private code to vfio.
I guess we could do something like below, but it is not necessary IMHO.
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -23,11 +23,6 @@ struct iommufd_attach {
struct xarray device_array;
};
-static bool is_vfio_noiommu(struct iommufd_device *idev)
-{
- return !device_iommu_mapped(idev->dev) || !idev->dev->iommu;
-}
-
static void iommufd_group_release(struct kref *kref)
{
struct iommufd_group *igroup =
diff --git a/drivers/iommu/iommufd/iommufd_private.h
b/drivers/iommu/iommufd/iommufd_private.h index
3302c6a1f99e..cba5550e3f2b 100644 ---
a/drivers/iommu/iommufd/iommufd_private.h +++
b/drivers/iommu/iommufd/iommufd_private.h @@ -711,6 +711,18 @@
iommufd_get_vdevice(struct iommufd_ctx *ictx, u32 id) struct
iommufd_vdevice, obj); }
+#ifdef CONFIG_VFIO_NOIOMMU
+static inline bool is_vfio_noiommu(struct iommufd_device *idev)
+{
+ return !device_iommu_mapped(idev->dev) || !idev->dev->iommu;
+}
+#else
+static inline bool is_vfio_noiommu(struct iommufd_device *idev)
+{
+ return false;
+}
+#endif
+
On Sun, Mar 22, 2026 at 09:54:15AM +0000, Mostafa Saleh wrote:
> > +struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> > + struct device *dev, u32 *id)
> > +{
> > + struct iommufd_device *idev;
> > + int rc;
> > +
> > idev = iommufd_object_alloc(ictx, idev, IOMMUFD_OBJ_DEVICE);
>
> The next code introduces new error cases; do those need to be cleaned up
> by calling iommufd_object_abort_and_destroy()?
It should probably use iommufd_object_alloc_ucmd() so the core code
manages the lifecycle?
Jason
© 2016 - 2026 Red Hat, Inc.