From: Jason Gunthorpe <jgg@nvidia.com>
So it can be reused in the next patch which allows binding to a noiommu
device.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
---
drivers/iommu/iommufd/device.c | 48 +++++++++++++++++++++-------------
1 file changed, 30 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 344d620cdecc..54d73016468f 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -30,8 +30,9 @@ static void iommufd_group_release(struct kref *kref)
WARN_ON(!xa_empty(&igroup->pasid_attach));
- xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup,
- NULL, GFP_KERNEL);
+ if (igroup->group)
+ xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group),
+ igroup, NULL, GFP_KERNEL);
iommu_group_put(igroup->group);
mutex_destroy(&igroup->lock);
kfree(igroup);
@@ -56,6 +57,30 @@ static bool iommufd_group_try_get(struct iommufd_group *igroup,
return kref_get_unless_zero(&igroup->ref);
}
+static struct iommufd_group *iommufd_alloc_group(struct iommufd_ctx *ictx,
+ struct iommu_group *group)
+{
+ struct iommufd_group *new_igroup;
+
+ new_igroup = kzalloc(sizeof(*new_igroup), GFP_KERNEL);
+ if (!new_igroup)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&new_igroup->ref);
+ mutex_init(&new_igroup->lock);
+ xa_init(&new_igroup->pasid_attach);
+ new_igroup->sw_msi_start = PHYS_ADDR_MAX;
+ /* group reference moves into new_igroup */
+ new_igroup->group = group;
+
+ /*
+ * The ictx is not additionally refcounted here because all objects using
+ * an igroup must put it before their destroy completes.
+ */
+ new_igroup->ictx = ictx;
+ return new_igroup;
+}
+
/*
* iommufd needs to store some more data for each iommu_group, we keep a
* parallel xarray indexed by iommu_group id to hold this instead of putting it
@@ -87,25 +112,12 @@ static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
}
xa_unlock(&ictx->groups);
- new_igroup = kzalloc_obj(*new_igroup);
- if (!new_igroup) {
+ new_igroup = iommufd_alloc_group(ictx, group);
+ if (IS_ERR(new_igroup)) {
iommu_group_put(group);
- return ERR_PTR(-ENOMEM);
+ return new_igroup;
}
- kref_init(&new_igroup->ref);
- mutex_init(&new_igroup->lock);
- xa_init(&new_igroup->pasid_attach);
- new_igroup->sw_msi_start = PHYS_ADDR_MAX;
- /* group reference moves into new_igroup */
- new_igroup->group = group;
-
- /*
- * The ictx is not additionally refcounted here becase all objects using
- * an igroup must put it before their destroy completes.
- */
- new_igroup->ictx = ictx;
-
/*
* We dropped the lock so igroup is invalid. NULL is a safe and likely
* value to assume for the xa_cmpxchg algorithm.
--
2.34.1
On Thu, Mar 12, 2026 at 08:56:28AM -0700, Jacob Pan wrote:
>From: Jason Gunthorpe <jgg@nvidia.com>
>
>So it can be reused in the next patch which allows binding to noiommu
>device.
>
>Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
>Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
>---
> drivers/iommu/iommufd/device.c | 48 +++++++++++++++++++++-------------
> 1 file changed, 30 insertions(+), 18 deletions(-)
>
>diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
>index 344d620cdecc..54d73016468f 100644
>--- a/drivers/iommu/iommufd/device.c
>+++ b/drivers/iommu/iommufd/device.c
>@@ -30,8 +30,9 @@ static void iommufd_group_release(struct kref *kref)
>
> WARN_ON(!xa_empty(&igroup->pasid_attach));
>
>- xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup,
>- NULL, GFP_KERNEL);
>+ if (igroup->group)
>+ xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group),
>+ igroup, NULL, GFP_KERNEL);
> iommu_group_put(igroup->group);
> mutex_destroy(&igroup->lock);
> kfree(igroup);
>@@ -56,6 +57,30 @@ static bool iommufd_group_try_get(struct iommufd_group *igroup,
> return kref_get_unless_zero(&igroup->ref);
> }
>
>+static struct iommufd_group *iommufd_alloc_group(struct iommufd_ctx *ictx,
>+ struct iommu_group *group)
>+{
>+ struct iommufd_group *new_igroup;
>+
>+ new_igroup = kzalloc(sizeof(*new_igroup), GFP_KERNEL);
>+ if (!new_igroup)
>+ return ERR_PTR(-ENOMEM);
>+
>+ kref_init(&new_igroup->ref);
>+ mutex_init(&new_igroup->lock);
>+ xa_init(&new_igroup->pasid_attach);
>+ new_igroup->sw_msi_start = PHYS_ADDR_MAX;
>+ /* group reference moves into new_igroup */
>+ new_igroup->group = group;
>+
>+ /*
>+ * The ictx is not additionally refcounted here becase all objects using
>+ * an igroup must put it before their destroy completes.
>+ */
>+ new_igroup->ictx = ictx;
>+ return new_igroup;
>+}
>+
> /*
> * iommufd needs to store some more data for each iommu_group, we keep a
> * parallel xarray indexed by iommu_group id to hold this instead of putting it
>@@ -87,25 +112,12 @@ static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
> }
> xa_unlock(&ictx->groups);
>
>- new_igroup = kzalloc_obj(*new_igroup);
>- if (!new_igroup) {
>+ new_igroup = iommufd_alloc_group(ictx, group);
>+ if (IS_ERR(new_igroup)) {
> iommu_group_put(group);
>- return ERR_PTR(-ENOMEM);
>+ return new_igroup;
> }
>
>- kref_init(&new_igroup->ref);
>- mutex_init(&new_igroup->lock);
>- xa_init(&new_igroup->pasid_attach);
>- new_igroup->sw_msi_start = PHYS_ADDR_MAX;
>- /* group reference moves into new_igroup */
>- new_igroup->group = group;
>-
>- /*
>- * The ictx is not additionally refcounted here becase all objects using
>- * an igroup must put it before their destroy completes.
>- */
>- new_igroup->ictx = ictx;
>-
> /*
> * We dropped the lock so igroup is invalid. NULL is a safe and likely
> * value to assume for the xa_cmpxchg algorithm.
>--
>2.34.1
>
Reviewed-by: Samiullah Khawaja <skhawaja@google.com>
On Thu, Mar 12, 2026 at 08:56:28AM -0700, Jacob Pan wrote:
> From: Jason Gunthorpe <jgg@nvidia.com>
>
> So it can be reused in the next patch which allows binding to noiommu
> device.
>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
> ---
> drivers/iommu/iommufd/device.c | 48 +++++++++++++++++++++-------------
> 1 file changed, 30 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
> index 344d620cdecc..54d73016468f 100644
> --- a/drivers/iommu/iommufd/device.c
> +++ b/drivers/iommu/iommufd/device.c
> @@ -30,8 +30,9 @@ static void iommufd_group_release(struct kref *kref)
>
> WARN_ON(!xa_empty(&igroup->pasid_attach));
>
> - xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup,
> - NULL, GFP_KERNEL);
> + if (igroup->group)
> + xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group),
> + igroup, NULL, GFP_KERNEL);
Is that a separate fix or perhaps belongs to the next patch making
it possible to have NULL groups.
Thanks,
Mostafa
> iommu_group_put(igroup->group);
> mutex_destroy(&igroup->lock);
> kfree(igroup);
> @@ -56,6 +57,30 @@ static bool iommufd_group_try_get(struct iommufd_group *igroup,
> return kref_get_unless_zero(&igroup->ref);
> }
>
> +static struct iommufd_group *iommufd_alloc_group(struct iommufd_ctx *ictx,
> + struct iommu_group *group)
> +{
> + struct iommufd_group *new_igroup;
> +
> + new_igroup = kzalloc(sizeof(*new_igroup), GFP_KERNEL);
> + if (!new_igroup)
> + return ERR_PTR(-ENOMEM);
> +
> + kref_init(&new_igroup->ref);
> + mutex_init(&new_igroup->lock);
> + xa_init(&new_igroup->pasid_attach);
> + new_igroup->sw_msi_start = PHYS_ADDR_MAX;
> + /* group reference moves into new_igroup */
> + new_igroup->group = group;
> +
> + /*
> + * The ictx is not additionally refcounted here becase all objects using
> + * an igroup must put it before their destroy completes.
> + */
> + new_igroup->ictx = ictx;
> + return new_igroup;
> +}
> +
> /*
> * iommufd needs to store some more data for each iommu_group, we keep a
> * parallel xarray indexed by iommu_group id to hold this instead of putting it
> @@ -87,25 +112,12 @@ static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
> }
> xa_unlock(&ictx->groups);
>
> - new_igroup = kzalloc_obj(*new_igroup);
> - if (!new_igroup) {
> + new_igroup = iommufd_alloc_group(ictx, group);
> + if (IS_ERR(new_igroup)) {
> iommu_group_put(group);
> - return ERR_PTR(-ENOMEM);
> + return new_igroup;
> }
>
> - kref_init(&new_igroup->ref);
> - mutex_init(&new_igroup->lock);
> - xa_init(&new_igroup->pasid_attach);
> - new_igroup->sw_msi_start = PHYS_ADDR_MAX;
> - /* group reference moves into new_igroup */
> - new_igroup->group = group;
> -
> - /*
> - * The ictx is not additionally refcounted here becase all objects using
> - * an igroup must put it before their destroy completes.
> - */
> - new_igroup->ictx = ictx;
> -
> /*
> * We dropped the lock so igroup is invalid. NULL is a safe and likely
> * value to assume for the xa_cmpxchg algorithm.
> --
> 2.34.1
>
Hi Mostafa,

On Sun, 22 Mar 2026 09:41:17 +0000
Mostafa Saleh <smostafa@google.com> wrote:

> On Thu, Mar 12, 2026 at 08:56:28AM -0700, Jacob Pan wrote:
> > From: Jason Gunthorpe <jgg@nvidia.com>
> >
> > So it can be reused in the next patch which allows binding to
> > noiommu device.
> >
> > Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> > Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
> > ---
> >  drivers/iommu/iommufd/device.c | 48
> > +++++++++++++++++++++-------------
> >  1 file changed, 30 insertions(+), 18 deletions(-)
> >
> > diff --git a/drivers/iommu/iommufd/device.c
> > b/drivers/iommu/iommufd/device.c
> > index 344d620cdecc..54d73016468f 100644
> > --- a/drivers/iommu/iommufd/device.c
> > +++ b/drivers/iommu/iommufd/device.c
> > @@ -30,8 +30,9 @@ static void iommufd_group_release(struct kref
> > *kref)
> >  WARN_ON(!xa_empty(&igroup->pasid_attach));
> >
> > - xa_cmpxchg(&igroup->ictx->groups,
> > iommu_group_id(igroup->group), igroup,
> > -            NULL, GFP_KERNEL);
> > + if (igroup->group)
> > +         xa_cmpxchg(&igroup->ictx->groups,
> > iommu_group_id(igroup->group),
> > +                    igroup, NULL, GFP_KERNEL);
>
> Is that a separate fix or perhaps belongs to the next patch making
> it possible to have NULL groups.

Yes, I agree. NULL groups are not possible here. Should be merged with
the next patch.
On Thu, Mar 12, 2026 at 08:56:28AM -0700, Jacob Pan wrote:
>From: Jason Gunthorpe <jgg@nvidia.com>
>
>So it can be reused in the next patch which allows binding to noiommu
>device.
>
>Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
>Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
>---
> drivers/iommu/iommufd/device.c | 48 +++++++++++++++++++++-------------
> 1 file changed, 30 insertions(+), 18 deletions(-)
>
>diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
>index 344d620cdecc..54d73016468f 100644
>--- a/drivers/iommu/iommufd/device.c
>+++ b/drivers/iommu/iommufd/device.c
>@@ -30,8 +30,9 @@ static void iommufd_group_release(struct kref *kref)
>
> WARN_ON(!xa_empty(&igroup->pasid_attach));
>
>- xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup,
>- NULL, GFP_KERNEL);
>+ if (igroup->group)
>+ xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group),
>+ igroup, NULL, GFP_KERNEL);
> iommu_group_put(igroup->group);
> mutex_destroy(&igroup->lock);
> kfree(igroup);
>@@ -56,6 +57,30 @@ static bool iommufd_group_try_get(struct iommufd_group *igroup,
> return kref_get_unless_zero(&igroup->ref);
> }
>
>+static struct iommufd_group *iommufd_alloc_group(struct iommufd_ctx *ictx,
>+ struct iommu_group *group)
>+{
>+ struct iommufd_group *new_igroup;
>+
>+ new_igroup = kzalloc(sizeof(*new_igroup), GFP_KERNEL);
>+ if (!new_igroup)
>+ return ERR_PTR(-ENOMEM);
>+
>+ kref_init(&new_igroup->ref);
>+ mutex_init(&new_igroup->lock);
>+ xa_init(&new_igroup->pasid_attach);
>+ new_igroup->sw_msi_start = PHYS_ADDR_MAX;
>+ /* group reference moves into new_igroup */
>+ new_igroup->group = group;
>+
>+ /*
>+ * The ictx is not additionally refcounted here becase all objects using
>+ * an igroup must put it before their destroy completes.
>+ */
>+ new_igroup->ictx = ictx;
>+ return new_igroup;
>+}
>+
> /*
> * iommufd needs to store some more data for each iommu_group, we keep a
> * parallel xarray indexed by iommu_group id to hold this instead of putting it
>@@ -87,25 +112,12 @@ static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
> }
> xa_unlock(&ictx->groups);
>
>- new_igroup = kzalloc_obj(*new_igroup);
>- if (!new_igroup) {
>+ new_igroup = iommufd_alloc_group(ictx, group);
>+ if (IS_ERR(new_igroup)) {
> iommu_group_put(group);
>- return ERR_PTR(-ENOMEM);
>+ return new_igroup;
> }
>
>- kref_init(&new_igroup->ref);
>- mutex_init(&new_igroup->lock);
>- xa_init(&new_igroup->pasid_attach);
>- new_igroup->sw_msi_start = PHYS_ADDR_MAX;
>- /* group reference moves into new_igroup */
>- new_igroup->group = group;
>-
>- /*
>- * The ictx is not additionally refcounted here becase all objects using
>- * an igroup must put it before their destroy completes.
>- */
>- new_igroup->ictx = ictx;
>-
> /*
> * We dropped the lock so igroup is invalid. NULL is a safe and likely
> * value to assume for the xa_cmpxchg algorithm.
>--
>2.34.1
>
Reviewed-by: Samiullah Khawaja <skhawaja@google.com>
Thanks,
Sami
© 2016 - 2026 Red Hat, Inc.