From: Mykola Kvach <mykola_kvach@epam.com>
Before suspending the SMMU, we want to ensure that all commands (especially
ATC_INV) have been consumed from the CMDQ, i.e. the command queue is empty.
The suspend callback configures the SMMU to abort new transactions,
disables the main translation unit and then drains the command queue
to ensure completion of any in-flight commands.
The resume callback performs a full device reset via arm_smmu_device_reset()
to bring the SMMU back to an operational state.
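In outline, the suspend path performs the following sequence (a simplified
sketch using the helpers from the diff below; error handling omitted):

    /* Abort incoming transactions rather than bypass them while disabled */
    arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
    /* Clear SMMUEN/EVTQEN/PRIQEN but keep the command queue enabled */
    arm_smmu_write_reg_sync(smmu, CR0_CMDQEN, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
    /* Wait until cons catches up with prod, i.e. the CMDQ is drained */
    queue_poll_cons(&smmu->cmdq.q, true, false);
    /* Finally clear CR0 entirely, including CMDQEN */
    arm_smmu_device_disable(smmu);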
Link: https://lore.kernel.org/linux-iommu/20251117191433.3360130-1-praan@google.com/
Based-on-patch-by: Pranjal Shrivastava <praan@google.com>
Signed-off-by: Mykola Kvach <mykola_kvach@epam.com>
---
xen/drivers/passthrough/arm/smmu-v3.c | 170 ++++++++++++++++++++------
1 file changed, 134 insertions(+), 36 deletions(-)
diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
index bf153227db..10c4c5dee0 100644
--- a/xen/drivers/passthrough/arm/smmu-v3.c
+++ b/xen/drivers/passthrough/arm/smmu-v3.c
@@ -1814,8 +1814,7 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
}
/* GBPA is "special" */
-static int __init arm_smmu_update_gbpa(struct arm_smmu_device *smmu,
- u32 set, u32 clr)
+static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
int ret;
u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
@@ -1995,10 +1994,29 @@ err_free_evtq_irq:
return ret;
}
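+/*
+ * Enable interrupt generation on the SMMU. Split out of
+ * arm_smmu_setup_irqs() so it can run on every device reset (including
+ * resume), while the IRQ lines and tasklets are set up only once at
+ * probe time.
+ */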
+static int arm_smmu_enable_irqs(struct arm_smmu_device *smmu)
+{
+ int ret;
+ u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
+
+ if ( smmu->features & ARM_SMMU_FEAT_PRI )
+ irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
+
+ /* Enable interrupt generation on the SMMU */
+ ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
+ ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
+ if ( ret )
+ {
+ dev_warn(smmu->dev, "failed to enable irqs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int __init arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
int ret, irq;
- u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
/* Disable IRQs first */
ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
@@ -2028,22 +2046,7 @@ static int __init arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
}
}
- if (smmu->features & ARM_SMMU_FEAT_PRI)
- irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
-
- /* Enable interrupt generation on the SMMU */
- ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
- ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
- if (ret) {
- dev_warn(smmu->dev, "failed to enable irqs\n");
- goto err_free_irqs;
- }
-
return 0;
-
-err_free_irqs:
- arm_smmu_free_irqs(smmu);
- return ret;
}
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
@@ -2057,7 +2060,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
return ret;
}
-static int __init arm_smmu_device_reset(struct arm_smmu_device *smmu)
+static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
int ret;
u32 reg, enables;
@@ -2163,17 +2166,9 @@ static int __init arm_smmu_device_reset(struct arm_smmu_device *smmu)
}
}
- ret = arm_smmu_setup_irqs(smmu);
- if (ret) {
- dev_err(smmu->dev, "failed to setup irqs\n");
+ ret = arm_smmu_enable_irqs(smmu);
+ if ( ret )
return ret;
- }
-
- /* Initialize tasklets for threaded IRQs*/
- tasklet_init(&smmu->evtq_irq_tasklet, arm_smmu_evtq_tasklet, smmu);
- tasklet_init(&smmu->priq_irq_tasklet, arm_smmu_priq_tasklet, smmu);
- tasklet_init(&smmu->combined_irq_tasklet, arm_smmu_combined_irq_tasklet,
- smmu);
/* Enable the SMMU interface, or ensure bypass */
if (disable_bypass) {
@@ -2181,20 +2176,16 @@ static int __init arm_smmu_device_reset(struct arm_smmu_device *smmu)
} else {
ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
if (ret)
- goto err_free_irqs;
+ return ret;
}
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
if (ret) {
dev_err(smmu->dev, "failed to enable SMMU interface\n");
- goto err_free_irqs;
+ return ret;
}
return 0;
-
-err_free_irqs:
- arm_smmu_free_irqs(smmu);
- return ret;
}
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
@@ -2558,10 +2549,23 @@ static int __init arm_smmu_device_probe(struct platform_device *pdev)
if (ret)
goto out_free;
+ ret = arm_smmu_setup_irqs(smmu);
+ if ( ret )
+ {
+ dev_err(smmu->dev, "failed to setup irqs\n");
+ goto out_free;
+ }
+
+ /* Initialize tasklets for threaded IRQs */
+ tasklet_init(&smmu->evtq_irq_tasklet, arm_smmu_evtq_tasklet, smmu);
+ tasklet_init(&smmu->priq_irq_tasklet, arm_smmu_priq_tasklet, smmu);
+ tasklet_init(&smmu->combined_irq_tasklet, arm_smmu_combined_irq_tasklet,
+ smmu);
+
/* Reset the device */
ret = arm_smmu_device_reset(smmu);
if (ret)
- goto out_free;
+ goto out_free_irqs;
/*
* Keep a list of all probed devices. This will be used to query
@@ -2575,6 +2579,8 @@ static int __init arm_smmu_device_probe(struct platform_device *pdev)
return 0;
+out_free_irqs:
+ arm_smmu_free_irqs(smmu);
out_free:
arm_smmu_free_structures(smmu);
@@ -2855,6 +2861,94 @@ static void arm_smmu_iommu_xen_domain_teardown(struct domain *d)
xfree(xen_domain);
}
+#ifdef CONFIG_SYSTEM_SUSPEND
+
+static int arm_smmu_suspend(void)
+{
+ struct arm_smmu_device *smmu;
+ int ret = 0;
+
+ list_for_each_entry(smmu, &arm_smmu_devices, devices)
+ {
+ /* Abort all incoming transactions before disabling to avoid spurious bypass */
+ ret = arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+ if ( ret )
+ goto fail;
+
+ /* Disable the SMMU (CR0.SMMUEN) and all queues except the CMDQ */
+ ret = arm_smmu_write_reg_sync(smmu, CR0_CMDQEN, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if ( ret )
+ {
+ dev_err(smmu->dev, "Timed-out while disabling smmu\n");
+ goto fail;
+ }
+
+ /*
+ * At this point the SMMU is completely disabled and won't access
+ * any translation/config structures, even speculative accesses
+ * aren't performed as per the IHI0070 spec (section 6.3.9.6).
+ */
+
+ /* Wait for the CMDQ to drain so that any pending commands complete */
+ ret = queue_poll_cons(&smmu->cmdq.q, true, false);
+ if ( ret )
+ {
+ dev_err(smmu->dev, "Draining the command queue timed out\n");
+ goto fail;
+ }
+
+ /* Disable everything */
+ ret = arm_smmu_device_disable(smmu);
+ if ( ret )
+ goto fail;
+
+ dev_dbg(smmu->dev, "Suspended SMMU\n");
+ }
+
+ return 0;
+
+ fail:
+ {
+ int rc;
+
+ /* Reset the device that failed as well as any already-suspended ones. */
+ rc = arm_smmu_device_reset(smmu);
+ if ( rc )
+ dev_err(smmu->dev, "Failed to reset during resume operation: %d\n", rc);
+
+ list_for_each_entry_continue_reverse(smmu, &arm_smmu_devices, devices)
+ {
+ rc = arm_smmu_device_reset(smmu);
+ if ( rc )
+ dev_err(smmu->dev, "Failed to reset during resume operation: %d\n", rc);
+ }
+ }
+
+ return ret;
+}
+
+static void arm_smmu_resume(void)
+{
+ int ret;
+ struct arm_smmu_device *smmu;
+
+ list_for_each_entry(smmu, &arm_smmu_devices, devices)
+ {
+ dev_dbg(smmu->dev, "Resuming device\n");
+
+ /*
+ * The reset re-initializes all the base address registers and the
+ * queue prod/cons pointers maintained within struct arm_smmu_device,
+ * and re-enables interrupt generation.
+ */
+ ret = arm_smmu_device_reset(smmu);
+ if ( ret )
+ dev_err(smmu->dev, "Failed to reset during resume operation: %d\n", ret);
+ }
+}
+#endif /* CONFIG_SYSTEM_SUSPEND */
+
static const struct iommu_ops arm_smmu_iommu_ops = {
.page_sizes = PAGE_SIZE_4K,
.init = arm_smmu_iommu_xen_domain_init,
@@ -2867,6 +2961,10 @@ static const struct iommu_ops arm_smmu_iommu_ops = {
.unmap_page = arm_iommu_unmap_page,
.dt_xlate = arm_smmu_dt_xlate,
.add_device = arm_smmu_add_device,
+#ifdef CONFIG_SYSTEM_SUSPEND
+ .suspend = arm_smmu_suspend,
+ .resume = arm_smmu_resume,
+#endif
};
static __init int arm_smmu_dt_init(struct dt_device_node *dev,
--
2.43.0