"attach_handle" was added exclusively for the iommufd_fault_iopf_handler()
used by IOPF/PRI use cases, along with the "fault_data". Now, the iommufd
version of sw_msi function will resue the attach_handle and fault_data for
a non-fault case.
Move the attach_handle part out of the fault.c file to make it generic for
all cases. Simplify the remaining fault specific routine to attach/detach.
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
drivers/iommu/iommufd/iommufd_private.h | 40 +-------
drivers/iommu/iommufd/device.c | 105 +++++++++++++++++++++
drivers/iommu/iommufd/fault.c | 120 +++---------------------
3 files changed, 122 insertions(+), 143 deletions(-)
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index b6d706cf2c66..063c0a42f54f 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -472,42 +472,12 @@ void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev);
+ struct iommufd_device *idev,
+ bool enable_iopf);
void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev);
-int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old);
-
-static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- if (hwpt->fault)
- return iommufd_fault_domain_attach_dev(hwpt, idev);
-
- return iommu_attach_group(hwpt->domain, idev->igroup->group);
-}
-
-static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- if (hwpt->fault) {
- iommufd_fault_domain_detach_dev(hwpt, idev);
- return;
- }
-
- iommu_detach_group(hwpt->domain, idev->igroup->group);
-}
-
-static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- if (old->fault || hwpt->fault)
- return iommufd_fault_domain_replace_dev(idev, hwpt, old);
-
- return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
-}
+ struct iommufd_device *idev,
+ struct iommufd_attach_handle *handle,
+ bool disable_iopf);
static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index dfd0898fb6c1..38b31b652147 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -352,6 +352,111 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
return 0;
}
+/* The device attach/detach/replace helpers for attach_handle */
+
+static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct iommufd_attach_handle *handle;
+ int rc;
+
+ if (hwpt->fault) {
+ rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
+ if (rc)
+ return rc;
+ }
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle) {
+ rc = -ENOMEM;
+ goto out_fault_detach;
+ }
+
+ handle->idev = idev;
+ rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
+ &handle->handle);
+ if (rc)
+ goto out_free_handle;
+
+ return 0;
+
+out_free_handle:
+ kfree(handle);
+ handle = NULL;
+out_fault_detach:
+ if (hwpt->fault)
+ iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
+ return rc;
+}
+
+static struct iommufd_attach_handle *
+iommufd_device_get_attach_handle(struct iommufd_device *idev)
+{
+ struct iommu_attach_handle *handle;
+
+ handle =
+ iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
+ if (IS_ERR(handle))
+ return NULL;
+ return to_iommufd_handle(handle);
+}
+
+static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct iommufd_attach_handle *handle;
+
+ handle = iommufd_device_get_attach_handle(idev);
+ iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
+ if (hwpt->fault)
+ iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
+ kfree(handle);
+}
+
+static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_hw_pagetable *old)
+{
+ struct iommufd_attach_handle *old_handle =
+ iommufd_device_get_attach_handle(idev);
+ struct iommufd_attach_handle *handle;
+ int rc;
+
+ if (hwpt->fault) {
+ rc = iommufd_fault_domain_attach_dev(hwpt, idev, !old->fault);
+ if (rc)
+ return rc;
+ }
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle) {
+ rc = -ENOMEM;
+ goto out_fault_detach;
+ }
+
+ handle->idev = idev;
+ rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain,
+ &handle->handle);
+ if (rc)
+ goto out_free_handle;
+
+ if (old->fault)
+ iommufd_fault_domain_detach_dev(old, idev, old_handle,
+ !hwpt->fault);
+ kfree(old_handle);
+
+ return 0;
+
+out_free_handle:
+ kfree(handle);
+ handle = NULL;
+out_fault_detach:
+ if (hwpt->fault)
+ iommufd_fault_domain_detach_dev(hwpt, idev, handle,
+ !old->fault);
+ return rc;
+}
+
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index 06aa83a75e94..1d9bd3024b57 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -60,42 +60,17 @@ static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
mutex_unlock(&idev->iopf_lock);
}
-static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- struct iommufd_attach_handle *handle;
- int ret;
-
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->idev = idev;
- ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
- &handle->handle);
- if (ret)
- kfree(handle);
-
- return ret;
-}
-
int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
+ struct iommufd_device *idev,
+ bool enable_iopf)
{
- int ret;
+ int rc = 0;
if (!hwpt->fault)
return -EINVAL;
-
- ret = iommufd_fault_iopf_enable(idev);
- if (ret)
- return ret;
-
- ret = __fault_domain_attach_dev(hwpt, idev);
- if (ret)
- iommufd_fault_iopf_disable(idev);
-
- return ret;
+ if (enable_iopf)
+ rc = iommufd_fault_iopf_enable(idev);
+ return rc;
}
static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
@@ -127,86 +102,15 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
mutex_unlock(&fault->mutex);
}
-static struct iommufd_attach_handle *
-iommufd_device_get_attach_handle(struct iommufd_device *idev)
-{
- struct iommu_attach_handle *handle;
-
- handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
- if (IS_ERR(handle))
- return NULL;
-
- return to_iommufd_handle(handle);
-}
-
void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
+ struct iommufd_device *idev,
+ struct iommufd_attach_handle *handle,
+ bool disable_iopf)
{
- struct iommufd_attach_handle *handle;
-
- handle = iommufd_device_get_attach_handle(idev);
- iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
- iommufd_auto_response_faults(hwpt, handle);
- iommufd_fault_iopf_disable(idev);
- kfree(handle);
-}
-
-static int __fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- struct iommufd_attach_handle *handle, *curr = NULL;
- int ret;
-
- if (old->fault)
- curr = iommufd_device_get_attach_handle(idev);
-
- if (hwpt->fault) {
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->idev = idev;
- ret = iommu_replace_group_handle(idev->igroup->group,
- hwpt->domain, &handle->handle);
- } else {
- ret = iommu_replace_group_handle(idev->igroup->group,
- hwpt->domain, NULL);
- }
-
- if (!ret && curr) {
- iommufd_auto_response_faults(old, curr);
- kfree(curr);
- }
-
- return ret;
-}
-
-int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- bool iopf_off = !hwpt->fault && old->fault;
- bool iopf_on = hwpt->fault && !old->fault;
- int ret;
-
- if (iopf_on) {
- ret = iommufd_fault_iopf_enable(idev);
- if (ret)
- return ret;
- }
-
- ret = __fault_domain_replace_dev(idev, hwpt, old);
- if (ret) {
- if (iopf_on)
- iommufd_fault_iopf_disable(idev);
- return ret;
- }
-
- if (iopf_off)
+ if (handle)
+ iommufd_auto_response_faults(hwpt, handle);
+ if (disable_iopf)
iommufd_fault_iopf_disable(idev);
-
- return 0;
}
void iommufd_fault_destroy(struct iommufd_object *obj)
--
2.43.0
Hi,
On 1/11/25 4:32 AM, Nicolin Chen wrote:
> "attach_handle" was added exclusively for the iommufd_fault_iopf_handler()
> used by IOPF/PRI use cases, along with the "fault_data". Now, the iommufd
> version of sw_msi function will resue the attach_handle and fault_data for
reuse
> a non-fault case.
>
> Move the attach_handle part out of the fault.c file to make it generic for
> all cases. Simplify the remaining fault specific routine to attach/detach.
>
> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
> ---
> drivers/iommu/iommufd/iommufd_private.h | 40 +-------
> drivers/iommu/iommufd/device.c | 105 +++++++++++++++++++++
> drivers/iommu/iommufd/fault.c | 120 +++---------------------
> 3 files changed, 122 insertions(+), 143 deletions(-)
>
> diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
> index b6d706cf2c66..063c0a42f54f 100644
> --- a/drivers/iommu/iommufd/iommufd_private.h
> +++ b/drivers/iommu/iommufd/iommufd_private.h
> @@ -472,42 +472,12 @@ void iommufd_fault_destroy(struct iommufd_object *obj);
> int iommufd_fault_iopf_handler(struct iopf_group *group);
>
> int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev);
> + struct iommufd_device *idev,
> + bool enable_iopf);
> void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev);
> -int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
> - struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_hw_pagetable *old);
> -
> -static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> -{
> - if (hwpt->fault)
> - return iommufd_fault_domain_attach_dev(hwpt, idev);
> -
> - return iommu_attach_group(hwpt->domain, idev->igroup->group);
> -}
> -
> -static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> -{
> - if (hwpt->fault) {
> - iommufd_fault_domain_detach_dev(hwpt, idev);
> - return;
> - }
> -
> - iommu_detach_group(hwpt->domain, idev->igroup->group);
> -}
> -
> -static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
> - struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_hw_pagetable *old)
> -{
> - if (old->fault || hwpt->fault)
> - return iommufd_fault_domain_replace_dev(idev, hwpt, old);
> -
> - return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
> -}
> + struct iommufd_device *idev,
> + struct iommufd_attach_handle *handle,
> + bool disable_iopf);
>
> static inline struct iommufd_viommu *
> iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
> diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
> index dfd0898fb6c1..38b31b652147 100644
> --- a/drivers/iommu/iommufd/device.c
> +++ b/drivers/iommu/iommufd/device.c
> @@ -352,6 +352,111 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
> return 0;
> }
>
> +/* The device attach/detach/replace helpers for attach_handle */
> +
> +static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> + struct iommufd_device *idev)
> +{
> + struct iommufd_attach_handle *handle;
> + int rc;
> +
> + if (hwpt->fault) {
> + rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
Why don't we simply call iommufd_fault_iopf_enable(idev)?
Also, it looks like there is a redundant check of hwpt->fault here and in
iommufd_fault_domain_attach_dev().
Besides, the addition of the enable_iopf param is not documented anywhere.
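For illustration, a rough sketch of that simplification, assuming
iommufd_fault_iopf_enable()/iommufd_fault_iopf_disable() were made callable
from device.c (they are static in fault.c today); this is only a sketch, not
code from the patch:

static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
				      struct iommufd_device *idev)
{
	struct iommufd_attach_handle *handle;
	int rc;

	/* Enable IOPF directly instead of going through the fault.c wrapper */
	if (hwpt->fault) {
		rc = iommufd_fault_iopf_enable(idev);
		if (rc)
			return rc;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		rc = -ENOMEM;
		goto out_iopf_disable;
	}

	handle->idev = idev;
	rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
				       &handle->handle);
	if (rc)
		goto out_free_handle;

	return 0;

out_free_handle:
	kfree(handle);
out_iopf_disable:
	if (hwpt->fault)
		iommufd_fault_iopf_disable(idev);
	return rc;
}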
> + if (rc)
> + return rc;
> + }
> +
> + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> + if (!handle) {
> + rc = -ENOMEM;
> + goto out_fault_detach;
> + }
> +
> + handle->idev = idev;
> + rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
> + &handle->handle);
> + if (rc)
> + goto out_free_handle;
> +
> + return 0;
> +
> +out_free_handle:
> + kfree(handle);
> + handle = NULL;
> +out_fault_detach:
> + if (hwpt->fault)
> + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
> + return rc;
> +}
> +
> +static struct iommufd_attach_handle *
> +iommufd_device_get_attach_handle(struct iommufd_device *idev)
> +{
> + struct iommu_attach_handle *handle;
> +
> + handle =
> + iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
> + if (IS_ERR(handle))
> + return NULL;
> + return to_iommufd_handle(handle);
> +}
> +
> +static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
> + struct iommufd_device *idev)
> +{
> + struct iommufd_attach_handle *handle;
> +
> + handle = iommufd_device_get_attach_handle(idev);
> + iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
> + if (hwpt->fault)
> + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
Same here: it is pretty difficult to understand what this
iommufd_fault_domain_detach_dev() does.
To me, calling iommufd_auto_response_faults() and iommufd_fault_iopf_disable() directly would be more readable; otherwise, rename iommufd_fault_domain_detach_dev().
Also, compared to the original code, there is a new check on handle. Why is it necessary?
Globally, I find this patch pretty hard to read. It would be nice to split it, if possible, to ease the review process.
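As a rough illustration of the readability point, a sketch of the detach path
with the fault-specific steps spelled out inline (again assuming
iommufd_auto_response_faults() and iommufd_fault_iopf_disable() were visible
to device.c; not code from the patch):

static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
				       struct iommufd_device *idev)
{
	struct iommufd_attach_handle *handle;

	handle = iommufd_device_get_attach_handle(idev);
	iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
	if (hwpt->fault) {
		/* Flush pending PRIs and turn IOPF off, without a wrapper */
		iommufd_auto_response_faults(hwpt, handle);
		iommufd_fault_iopf_disable(idev);
	}
	kfree(handle);
}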
Thanks
Eric
> + kfree(handle);
> +}
> +
> +static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
> + struct iommufd_hw_pagetable *hwpt,
> + struct iommufd_hw_pagetable *old)
> +{
> + struct iommufd_attach_handle *old_handle =
> + iommufd_device_get_attach_handle(idev);
> + struct iommufd_attach_handle *handle;
> + int rc;
> +
> + if (hwpt->fault) {
> + rc = iommufd_fault_domain_attach_dev(hwpt, idev, !old->fault);
> + if (rc)
> + return rc;
> + }
> +
> + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> + if (!handle) {
> + rc = -ENOMEM;
> + goto out_fault_detach;
> + }
> +
> + handle->idev = idev;
> + rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain,
> + &handle->handle);
> + if (rc)
> + goto out_free_handle;
> +
> + if (old->fault)
> + iommufd_fault_domain_detach_dev(old, idev, old_handle,
> + !hwpt->fault);
> + kfree(old_handle);
> +
> + return 0;
> +
> +out_free_handle:
> + kfree(handle);
> + handle = NULL;
> +out_fault_detach:
> + if (hwpt->fault)
> + iommufd_fault_domain_detach_dev(hwpt, idev, handle,
> + !old->fault);
> + return rc;
> +}
> +
> int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
> struct iommufd_device *idev)
> {
> diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
> index 06aa83a75e94..1d9bd3024b57 100644
> --- a/drivers/iommu/iommufd/fault.c
> +++ b/drivers/iommu/iommufd/fault.c
> @@ -60,42 +60,17 @@ static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
> mutex_unlock(&idev->iopf_lock);
> }
>
> -static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> -{
> - struct iommufd_attach_handle *handle;
> - int ret;
> -
> - handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> - if (!handle)
> - return -ENOMEM;
> -
> - handle->idev = idev;
> - ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
> - &handle->handle);
> - if (ret)
> - kfree(handle);
> -
> - return ret;
> -}
> -
> int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> + struct iommufd_device *idev,
> + bool enable_iopf)
> {
> - int ret;
> + int rc = 0;
>
> if (!hwpt->fault)
> return -EINVAL;
> -
> - ret = iommufd_fault_iopf_enable(idev);
> - if (ret)
> - return ret;
> -
> - ret = __fault_domain_attach_dev(hwpt, idev);
> - if (ret)
> - iommufd_fault_iopf_disable(idev);
> -
> - return ret;
> + if (enable_iopf)
> + rc = iommufd_fault_iopf_enable(idev);
> + return rc;
> }
>
> static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
> @@ -127,86 +102,15 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
> mutex_unlock(&fault->mutex);
> }
>
> -static struct iommufd_attach_handle *
> -iommufd_device_get_attach_handle(struct iommufd_device *idev)
> -{
> - struct iommu_attach_handle *handle;
> -
> - handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
> - if (IS_ERR(handle))
> - return NULL;
> -
> - return to_iommufd_handle(handle);
> -}
> -
> void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> + struct iommufd_device *idev,
> + struct iommufd_attach_handle *handle,
> + bool disable_iopf)
> {
> - struct iommufd_attach_handle *handle;
> -
> - handle = iommufd_device_get_attach_handle(idev);
> - iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
> - iommufd_auto_response_faults(hwpt, handle);
> - iommufd_fault_iopf_disable(idev);
> - kfree(handle);
> -}
> -
> -static int __fault_domain_replace_dev(struct iommufd_device *idev,
> - struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_hw_pagetable *old)
> -{
> - struct iommufd_attach_handle *handle, *curr = NULL;
> - int ret;
> -
> - if (old->fault)
> - curr = iommufd_device_get_attach_handle(idev);
> -
> - if (hwpt->fault) {
> - handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> - if (!handle)
> - return -ENOMEM;
> -
> - handle->idev = idev;
> - ret = iommu_replace_group_handle(idev->igroup->group,
> - hwpt->domain, &handle->handle);
> - } else {
> - ret = iommu_replace_group_handle(idev->igroup->group,
> - hwpt->domain, NULL);
> - }
> -
> - if (!ret && curr) {
> - iommufd_auto_response_faults(old, curr);
> - kfree(curr);
> - }
> -
> - return ret;
> -}
> -
> -int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
> - struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_hw_pagetable *old)
> -{
> - bool iopf_off = !hwpt->fault && old->fault;
> - bool iopf_on = hwpt->fault && !old->fault;
> - int ret;
> -
> - if (iopf_on) {
> - ret = iommufd_fault_iopf_enable(idev);
> - if (ret)
> - return ret;
> - }
> -
> - ret = __fault_domain_replace_dev(idev, hwpt, old);
> - if (ret) {
> - if (iopf_on)
> - iommufd_fault_iopf_disable(idev);
> - return ret;
> - }
> -
> - if (iopf_off)
> + if (handle)
> + iommufd_auto_response_faults(hwpt, handle);
> + if (disable_iopf)
> iommufd_fault_iopf_disable(idev);
> -
> - return 0;
> }
>
> void iommufd_fault_destroy(struct iommufd_object *obj)
On Wed, Jan 29, 2025 at 02:14:47PM +0100, Eric Auger wrote:
> > @@ -352,6 +352,111 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
> > return 0;
> > }
> >
> > +/* The device attach/detach/replace helpers for attach_handle */
> > +
> > +static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> > + struct iommufd_device *idev)
> > +{
> > + struct iommufd_attach_handle *handle;
> > + int rc;
> > +
> > + if (hwpt->fault) {
> > + rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
> why don't we simply call iommufd_fault_iopf_enable(idev)
> also it looks there is a redundant check of hwpt_fault here and in
>
> iommufd_fault_domain_attach_dev
>
> Besides the addition of enable_iopf param is not documented anywhere
OK. I will try unwrapping that.
> > +static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
> > + struct iommufd_device *idev)
> > +{
> > + struct iommufd_attach_handle *handle;
> > +
> > + handle = iommufd_device_get_attach_handle(idev);
> > + iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
> > + if (hwpt->fault)
> > + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
> same here, pretty difficult to understand what this
>
> iommufd_fault_domain_detach_dev does
> To me calling iommufd_auto_response_faults and iommufd_fault_iopf_disable would be more readable or rename iommufd_fault_domain_detach_dev().
> Also compared to the original code,
This is basically a cleanup call for the fault-specific items, as
the patch's commit message describes. And you read it correctly.
I will see what I can do with the naming.
> there is a new check on handle. Why is it necessary.
It was to keep the error path, where handle is NULL, from entering the
auto response function. We can change that a bit and drop the check
to make it slightly clearer, though it would waste some extra CPU
cycles on scanning the two fault lists against an empty handle.
> Globally I feel that patch pretty hard to read. Would be nice to split if possible to ease the review process.
This patch is needed by both this series and Yi's PASID series, so I
was planning to send it individually. I will see what I can do to make
it easier to read.
Thanks
Nicolin
On 2025/1/11 11:32, Nicolin Chen wrote:
> "attach_handle" was added exclusively for the iommufd_fault_iopf_handler()
> used by IOPF/PRI use cases, along with the "fault_data". Now, the iommufd
> version of sw_msi function will resue the attach_handle and fault_data for
> a non-fault case.
>
> Move the attach_handle part out of the fault.c file to make it generic for
> all cases. Simplify the remaining fault specific routine to attach/detach.
I guess you can send it separately since both of our series need it. :)
> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
> ---
> drivers/iommu/iommufd/iommufd_private.h | 40 +-------
> drivers/iommu/iommufd/device.c | 105 +++++++++++++++++++++
> drivers/iommu/iommufd/fault.c | 120 +++---------------------
> 3 files changed, 122 insertions(+), 143 deletions(-)
>
> diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
> index b6d706cf2c66..063c0a42f54f 100644
> --- a/drivers/iommu/iommufd/iommufd_private.h
> +++ b/drivers/iommu/iommufd/iommufd_private.h
> @@ -472,42 +472,12 @@ void iommufd_fault_destroy(struct iommufd_object *obj);
> int iommufd_fault_iopf_handler(struct iopf_group *group);
>
> int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev);
> + struct iommufd_device *idev,
> + bool enable_iopf);
> void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev);
> -int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
> - struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_hw_pagetable *old);
> -
> -static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> -{
> - if (hwpt->fault)
> - return iommufd_fault_domain_attach_dev(hwpt, idev);
> -
> - return iommu_attach_group(hwpt->domain, idev->igroup->group);
> -}
> -
> -static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> -{
> - if (hwpt->fault) {
> - iommufd_fault_domain_detach_dev(hwpt, idev);
> - return;
> - }
> -
> - iommu_detach_group(hwpt->domain, idev->igroup->group);
> -}
> -
> -static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
> - struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_hw_pagetable *old)
> -{
> - if (old->fault || hwpt->fault)
> - return iommufd_fault_domain_replace_dev(idev, hwpt, old);
> -
> - return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
> -}
> + struct iommufd_device *idev,
> + struct iommufd_attach_handle *handle,
> + bool disable_iopf);
>
> static inline struct iommufd_viommu *
> iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
> diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
> index dfd0898fb6c1..38b31b652147 100644
> --- a/drivers/iommu/iommufd/device.c
> +++ b/drivers/iommu/iommufd/device.c
> @@ -352,6 +352,111 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
> return 0;
> }
>
> +/* The device attach/detach/replace helpers for attach_handle */
> +
> +static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> + struct iommufd_device *idev)
> +{
> + struct iommufd_attach_handle *handle;
> + int rc;
> +
> + if (hwpt->fault) {
> + rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
> + if (rc)
> + return rc;
> + }
> +
> + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> + if (!handle) {
> + rc = -ENOMEM;
> + goto out_fault_detach;
> + }
> +
> + handle->idev = idev;
> + rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
> + &handle->handle);
> + if (rc)
> + goto out_free_handle;
> +
> + return 0;
> +
> +out_free_handle:
> + kfree(handle);
> + handle = NULL;
> +out_fault_detach:
> + if (hwpt->fault)
> + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
> + return rc;
> +}
> +
> +static struct iommufd_attach_handle *
> +iommufd_device_get_attach_handle(struct iommufd_device *idev)
> +{
> + struct iommu_attach_handle *handle;
> +
> + handle =
> + iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
> + if (IS_ERR(handle))
> + return NULL;
> + return to_iommufd_handle(handle);
> +}
> +
> +static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
> + struct iommufd_device *idev)
> +{
> + struct iommufd_attach_handle *handle;
> +
> + handle = iommufd_device_get_attach_handle(idev);
> + iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
> + if (hwpt->fault)
> + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
> + kfree(handle);
> +}
> +
> +static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
> + struct iommufd_hw_pagetable *hwpt,
> + struct iommufd_hw_pagetable *old)
> +{
> + struct iommufd_attach_handle *old_handle =
> + iommufd_device_get_attach_handle(idev);
> + struct iommufd_attach_handle *handle;
> + int rc;
> +
> + if (hwpt->fault) {
> + rc = iommufd_fault_domain_attach_dev(hwpt, idev, !old->fault);
> + if (rc)
> + return rc;
> + }
> +
> + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> + if (!handle) {
> + rc = -ENOMEM;
> + goto out_fault_detach;
> + }
> +
> + handle->idev = idev;
> + rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain,
> + &handle->handle);
> + if (rc)
> + goto out_free_handle;
> +
> + if (old->fault)
> + iommufd_fault_domain_detach_dev(old, idev, old_handle,
> + !hwpt->fault);
> + kfree(old_handle);
> +
> + return 0;
> +
> +out_free_handle:
> + kfree(handle);
> + handle = NULL;
> +out_fault_detach:
> + if (hwpt->fault)
> + iommufd_fault_domain_detach_dev(hwpt, idev, handle,
> + !old->fault);
> + return rc;
> +}
> +
> int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
> struct iommufd_device *idev)
> {
> diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
> index 06aa83a75e94..1d9bd3024b57 100644
> --- a/drivers/iommu/iommufd/fault.c
> +++ b/drivers/iommu/iommufd/fault.c
> @@ -60,42 +60,17 @@ static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
> mutex_unlock(&idev->iopf_lock);
> }
>
> -static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> -{
> - struct iommufd_attach_handle *handle;
> - int ret;
> -
> - handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> - if (!handle)
> - return -ENOMEM;
> -
> - handle->idev = idev;
> - ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
> - &handle->handle);
> - if (ret)
> - kfree(handle);
> -
> - return ret;
> -}
> -
> int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> + struct iommufd_device *idev,
> + bool enable_iopf)
> {
> - int ret;
> + int rc = 0;
>
> if (!hwpt->fault)
> return -EINVAL;
> -
> - ret = iommufd_fault_iopf_enable(idev);
> - if (ret)
> - return ret;
> -
> - ret = __fault_domain_attach_dev(hwpt, idev);
> - if (ret)
> - iommufd_fault_iopf_disable(idev);
> -
> - return ret;
> + if (enable_iopf)
> + rc = iommufd_fault_iopf_enable(idev);
> + return rc;
> }
>
> static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
> @@ -127,86 +102,15 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
> mutex_unlock(&fault->mutex);
> }
>
> -static struct iommufd_attach_handle *
> -iommufd_device_get_attach_handle(struct iommufd_device *idev)
> -{
> - struct iommu_attach_handle *handle;
> -
> - handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
> - if (IS_ERR(handle))
> - return NULL;
> -
> - return to_iommufd_handle(handle);
> -}
> -
> void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_device *idev)
> + struct iommufd_device *idev,
> + struct iommufd_attach_handle *handle,
> + bool disable_iopf)
> {
> - struct iommufd_attach_handle *handle;
> -
> - handle = iommufd_device_get_attach_handle(idev);
> - iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
> - iommufd_auto_response_faults(hwpt, handle);
> - iommufd_fault_iopf_disable(idev);
> - kfree(handle);
> -}
> -
> -static int __fault_domain_replace_dev(struct iommufd_device *idev,
> - struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_hw_pagetable *old)
> -{
> - struct iommufd_attach_handle *handle, *curr = NULL;
> - int ret;
> -
> - if (old->fault)
> - curr = iommufd_device_get_attach_handle(idev);
> -
> - if (hwpt->fault) {
> - handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> - if (!handle)
> - return -ENOMEM;
> -
> - handle->idev = idev;
> - ret = iommu_replace_group_handle(idev->igroup->group,
> - hwpt->domain, &handle->handle);
> - } else {
> - ret = iommu_replace_group_handle(idev->igroup->group,
> - hwpt->domain, NULL);
> - }
> -
> - if (!ret && curr) {
> - iommufd_auto_response_faults(old, curr);
> - kfree(curr);
> - }
> -
> - return ret;
> -}
> -
> -int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
> - struct iommufd_hw_pagetable *hwpt,
> - struct iommufd_hw_pagetable *old)
> -{
> - bool iopf_off = !hwpt->fault && old->fault;
> - bool iopf_on = hwpt->fault && !old->fault;
> - int ret;
> -
> - if (iopf_on) {
> - ret = iommufd_fault_iopf_enable(idev);
> - if (ret)
> - return ret;
> - }
> -
> - ret = __fault_domain_replace_dev(idev, hwpt, old);
> - if (ret) {
> - if (iopf_on)
> - iommufd_fault_iopf_disable(idev);
> - return ret;
> - }
> -
> - if (iopf_off)
> + if (handle)
> + iommufd_auto_response_faults(hwpt, handle);
No need to check handle. After this patch, both the non-fault and the fault
paths allocate a handle, so it cannot be used to distinguish the fault path
from the non-fault path. Also, the callers of iommufd_fault_domain_detach_dev()
check hwpt->fault before calling it. So just call iommufd_auto_response_faults().
> + if (disable_iopf)
> iommufd_fault_iopf_disable(idev);
> -
> - return 0;
> }
>
> void iommufd_fault_destroy(struct iommufd_object *obj)
--
Regards,
Yi Liu
On Sat, Jan 18, 2025 at 04:23:22PM +0800, Yi Liu wrote:
> On 2025/1/11 11:32, Nicolin Chen wrote:
> > "attach_handle" was added exclusively for the iommufd_fault_iopf_handler()
> > used by IOPF/PRI use cases, along with the "fault_data". Now, the iommufd
> > version of sw_msi function will resue the attach_handle and fault_data for
> > a non-fault case.
> >
> > Move the attach_handle part out of the fault.c file to make it generic for
> > all cases. Simplify the remaining fault specific routine to attach/detach.
>
> I guess you can send it separately since both of our series need it. :)
Jason, would you like to take this patch separately? I can send
it prior to the two big series for a quick review after rc1. It'll
likely impact the vEVENTQ series too.
> > +static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> > + struct iommufd_device *idev)
> > +{
> > + struct iommufd_attach_handle *handle;
> > + int rc;
> > +
> > + if (hwpt->fault) {
> > + rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
> > + if (rc)
> > + return rc;
> > + }
> > +
> > + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> > + if (!handle) {
> > + rc = -ENOMEM;
> > + goto out_fault_detach;
> > + }
> > +
> > + handle->idev = idev;
> > + rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
> > + &handle->handle);
> > + if (rc)
> > + goto out_free_handle;
> > +
> > + return 0;
> > +
> > +out_free_handle:
> > + kfree(handle);
> > + handle = NULL;
> > +out_fault_detach:
> > + if (hwpt->fault)
> > + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
> > + return rc;
> > +}
Here the revert path passes in a handle=NULL..
> > void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
> > - struct iommufd_device *idev)
> > + struct iommufd_device *idev,
> > + struct iommufd_attach_handle *handle,
> > + bool disable_iopf)
> > {
> > + if (handle)
> > + iommufd_auto_response_faults(hwpt, handle);
>
> no need to check handle. After this patch, both the non-fault and the fault
> path will allocate handle. It cannot be used to isolate fault and non-fault
> path. Also, the callers of iommufd_fault_domain_detach_dev() will check
> hwpt->fault before calling it. So just call iommufd_auto_response_faults().
..so we still need this !NULL validation? :-/
Thanks
Nicolin
On Sat, Jan 18, 2025 at 12:32:49PM -0800, Nicolin Chen wrote:
> On Sat, Jan 18, 2025 at 04:23:22PM +0800, Yi Liu wrote:
> > On 2025/1/11 11:32, Nicolin Chen wrote:
> > > "attach_handle" was added exclusively for the iommufd_fault_iopf_handler()
> > > used by IOPF/PRI use cases, along with the "fault_data". Now, the iommufd
> > > version of sw_msi function will resue the attach_handle and fault_data for
> > > a non-fault case.
> > >
> > > Move the attach_handle part out of the fault.c file to make it generic for
> > > all cases. Simplify the remaining fault specific routine to attach/detach.
> >
> > I guess you can send it separately since both of our series need it. :)
>
> Jason, would you like to take this patch separately? I can send
> it prior to two big series for a quick review after rc1. It'll
> likely impact the vEVENTQ series too.

If it helps you can put it in its own series and I will take it with
pasid or vevent, whichever goes first

Jason
On 2025/1/19 04:32, Nicolin Chen wrote:
> On Sat, Jan 18, 2025 at 04:23:22PM +0800, Yi Liu wrote:
>> On 2025/1/11 11:32, Nicolin Chen wrote:
>>> "attach_handle" was added exclusively for the iommufd_fault_iopf_handler()
>>> used by IOPF/PRI use cases, along with the "fault_data". Now, the iommufd
>>> version of sw_msi function will resue the attach_handle and fault_data for
>>> a non-fault case.
>>>
>>> Move the attach_handle part out of the fault.c file to make it generic for
>>> all cases. Simplify the remaining fault specific routine to attach/detach.
>>
>> I guess you can send it separately since both of our series need it. :)
>
> Jason, would you like to take this patch separately? I can send
> it prior to two big series for a quick review after rc1. It'll
> likely impact the vEVENTQ series too.
>
>>> +static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
>>> + struct iommufd_device *idev)
>>> +{
>>> + struct iommufd_attach_handle *handle;
>>> + int rc;
>>> +
>>> + if (hwpt->fault) {
>>> + rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
>>> + if (rc)
>>> + return rc;
>>> + }
>>> +
>>> + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
>>> + if (!handle) {
>>> + rc = -ENOMEM;
>>> + goto out_fault_detach;
>>> + }
>>> +
>>> + handle->idev = idev;
>>> + rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
>>> + &handle->handle);
>>> + if (rc)
>>> + goto out_free_handle;
>>> +
>>> + return 0;
>>> +
>>> +out_free_handle:
>>> + kfree(handle);
>>> + handle = NULL;
>>> +out_fault_detach:
>>> + if (hwpt->fault)
>>> + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
>>> + return rc;
>>> +}
>
> Here the revert path passes in a handle=NULL..
Aha, got it. Perhaps we can allocate the handle first. In the thread below,
it is possible that a failed domain may have pending PRIs, which would
require the caller to call the auto response. Although we are likely to
swap the order, it is still nice for the caller to be able to do it.
https://lore.kernel.org/linux-iommu/f685daca-081a-4ede-b1e1-559009fa9ebc@intel.com/
--
Regards,
Yi Liu
On Sun, Jan 19, 2025 at 06:40:57PM +0800, Yi Liu wrote:
> On 2025/1/19 04:32, Nicolin Chen wrote:
> > On Sat, Jan 18, 2025 at 04:23:22PM +0800, Yi Liu wrote:
> > > On 2025/1/11 11:32, Nicolin Chen wrote:
> > > > +static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> > > > + struct iommufd_device *idev)
> > > > +{
> > > > + struct iommufd_attach_handle *handle;
> > > > + int rc;
> > > > +
> > > > + if (hwpt->fault) {
> > > > + rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
> > > > + if (rc)
> > > > + return rc;
> > > > + }
> > > > +
> > > > + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> > > > + if (!handle) {
> > > > + rc = -ENOMEM;
> > > > + goto out_fault_detach;
> > > > + }
> > > > +
> > > > + handle->idev = idev;
> > > > + rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
> > > > + &handle->handle);
> > > > + if (rc)
> > > > + goto out_free_handle;
> > > > +
> > > > + return 0;
> > > > +
> > > > +out_free_handle:
> > > > + kfree(handle);
> > > > + handle = NULL;
> > > > +out_fault_detach:
> > > > + if (hwpt->fault)
> > > > + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
> > > > + return rc;
> > > > +}
> >
> > Here the revert path passes in a handle=NULL..
>
> aha. got it. Perhaps we can allocate handle first. In the below thread, it
> is possible that a failed domain may have pending PRIs, it would require
> the caller to call the auto response. Although, we are likely to swap the
> order, but it is nice to have for the caller to do it.
>
> https://lore.kernel.org/linux-iommu/f685daca-081a-4ede-b1e1-559009fa9ebc@intel.com/
Hmm, I don't really see a point in letting the detach flow scan
the two lists in hwpt->fault against a zeroed handle...
which feels like a waste of CPU cycles?
And I am not sure how that xa_insert part is related?
Thanks
Nic
On 2025/1/20 13:54, Nicolin Chen wrote:
> On Sun, Jan 19, 2025 at 06:40:57PM +0800, Yi Liu wrote:
>> On 2025/1/19 04:32, Nicolin Chen wrote:
>>> On Sat, Jan 18, 2025 at 04:23:22PM +0800, Yi Liu wrote:
>>>> On 2025/1/11 11:32, Nicolin Chen wrote:
>>>>> +static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
>>>>> + struct iommufd_device *idev)
>>>>> +{
>>>>> + struct iommufd_attach_handle *handle;
>>>>> + int rc;
>>>>> +
>>>>> + if (hwpt->fault) {
>>>>> + rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
>>>>> + if (rc)
>>>>> + return rc;
>>>>> + }
>>>>> +
>>>>> + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
>>>>> + if (!handle) {
>>>>> + rc = -ENOMEM;
>>>>> + goto out_fault_detach;
>>>>> + }
>>>>> +
>>>>> + handle->idev = idev;
>>>>> + rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
>>>>> + &handle->handle);
>>>>> + if (rc)
>>>>> + goto out_free_handle;
>>>>> +
>>>>> + return 0;
>>>>> +
>>>>> +out_free_handle:
>>>>> + kfree(handle);
>>>>> + handle = NULL;
>>>>> +out_fault_detach:
>>>>> + if (hwpt->fault)
>>>>> + iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
>>>>> + return rc;
>>>>> +}
>>>
>>> Here the revert path passes in a handle=NULL..
>>
>> aha. got it. Perhaps we can allocate handle first. In the below thread, it
>> is possible that a failed domain may have pending PRIs, it would require
>> the caller to call the auto response. Although, we are likely to swap the
>> order, but it is nice to have for the caller to do it.
>>
>> https://lore.kernel.org/linux-iommu/f685daca-081a-4ede-b1e1-559009fa9ebc@intel.com/
>
> Hmm, I don't really see a point in letting the detach flow to
> scan the two lists in hwpt->fault against a zero-ed handle...
> which feels like a waste of CPU cycles?
I meant you may call iommufd_fault_domain_attach_dev() after allocating the
handle. Then in the error path, the handle is not zeroed when calling
iommufd_fault_domain_detach_dev(). The CPU cycles will not be wasted if
the two lists are empty, but the flush would be required if the lists are
not empty. :)
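For clarity, a rough sketch of that ordering, reusing the helpers as modified
by this patch (the new enable_iopf/disable_iopf parameters); a sketch only,
not the final code:

static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
				      struct iommufd_device *idev)
{
	struct iommufd_attach_handle *handle;
	int rc;

	/* Allocate the handle first, so the error path never sees NULL */
	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;
	handle->idev = idev;

	if (hwpt->fault) {
		rc = iommufd_fault_domain_attach_dev(hwpt, idev, true);
		if (rc)
			goto out_free_handle;
	}

	rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
				       &handle->handle);
	if (rc)
		goto out_fault_detach;

	return 0;

out_fault_detach:
	/* A real handle lets the detach helper flush any PRIs that raced in */
	if (hwpt->fault)
		iommufd_fault_domain_detach_dev(hwpt, idev, handle, true);
out_free_handle:
	kfree(handle);
	return rc;
}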
> And I am not sure how that xa_insert part is realted?
Maybe I failed to make it clear. That thread discussed a case where the
PRIs may be forwarded to the hwpt before the attach succeeds, so the PRIs
pending in hwpt->fault need to be flushed. Although I would swap the order
of xa_insert() and __iommu_set_group_pasid(), it is still nice if the
iommufd side flushes the two lists in the error handling path.
Regards,
Yi Liu