On guest reboot or on GBPA update, attach a nested HWPT based on the
GBPA.ABORT bit which either aborts all incoming transactions or bypasses
them.
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
---
hw/arm/smmuv3-accel.c | 36 ++++++++++++++++++++++++++++++++++++
hw/arm/smmuv3-accel.h | 9 +++++++++
hw/arm/smmuv3.c | 2 ++
3 files changed, 47 insertions(+)
diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index 2e42d2d484..65b577f49a 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -498,6 +498,42 @@ static const PCIIOMMUOps smmuv3_accel_ops = {
.unset_iommu_device = smmuv3_accel_unset_iommu_device,
};
+/* Based on SMMUv3 GBPA.ABORT configuration, attach a corresponding HWPT */
+bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp)
+{
+ SMMUv3AccelState *accel = s->s_accel;
+ SMMUv3AccelDevice *accel_dev;
+ Error *local_err = NULL;
+ bool all_ok = true;
+ uint32_t hwpt_id;
+
+ if (!accel) {
+ return true;
+ }
+
+ hwpt_id = smmuv3_accel_gbpa_hwpt(s, accel);
+ QLIST_FOREACH(accel_dev, &accel->device_list, next) {
+ if (!host_iommu_device_iommufd_attach_hwpt(accel_dev->idev, hwpt_id,
+ &local_err)) {
+ error_append_hint(&local_err, "Failed to attach GBPA hwpt %u for "
+ "idev devid %u", hwpt_id, accel_dev->idev->devid);
+ error_report_err(local_err);
+ local_err = NULL;
+ all_ok = false;
+ }
+ }
+ if (!all_ok) {
+ error_setg(errp, "Failed to attach all GBPA based HWPTs properly");
+ }
+ return all_ok;
+}
+
+void smmuv3_accel_reset(SMMUv3State *s)
+{
+ /* Attach a HWPT based on GBPA reset value */
+ smmuv3_accel_attach_gbpa_hwpt(s, NULL);
+}
+
static void smmuv3_accel_as_init(SMMUv3State *s)
{
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index ae896cfa8b..2d2d005658 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -44,6 +44,8 @@ bool smmuv3_accel_install_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
Error **errp);
bool smmuv3_accel_install_ste_range(SMMUv3State *s, SMMUSIDRange *range,
Error **errp);
+bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp);
+void smmuv3_accel_reset(SMMUv3State *s);
#else
static inline void smmuv3_accel_init(SMMUv3State *s)
{
@@ -60,6 +62,13 @@ smmuv3_accel_install_ste_range(SMMUv3State *s, SMMUSIDRange *range,
{
return true;
}
+static inline bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp)
+{
+ return true;
+}
+static inline void smmuv3_accel_reset(SMMUv3State *s)
+{
+}
#endif
#endif /* HW_ARM_SMMUV3_ACCEL_H */
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index bfb41b8866..42c60b1ec8 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1600,6 +1600,7 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
if (data & R_GBPA_UPDATE_MASK) {
/* Ignore update bit as write is synchronous. */
s->gbpa = data & ~R_GBPA_UPDATE_MASK;
+ smmuv3_accel_attach_gbpa_hwpt(s, &local_err);
}
break;
case A_STRTAB_BASE: /* 64b */
@@ -1887,6 +1888,7 @@ static void smmu_reset_exit(Object *obj, ResetType type)
}
smmuv3_init_regs(s);
+ smmuv3_accel_reset(s);
}
static void smmu_realize(DeviceState *d, Error **errp)
--
2.43.0