Allocate a CMDQV-specific vEVENTQ via IOMMUFD, and add the
corresponding teardown path to free the vEVENTQ during cleanup.
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
---
hw/arm/smmuv3-accel.h | 2 ++
hw/arm/tegra241-cmdqv.h | 1 +
hw/arm/smmuv3-accel.c | 10 ++++++++-
hw/arm/tegra241-cmdqv.c | 47 +++++++++++++++++++++++++++++++++++++++++
4 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index 7d6e4c6b76..4bff90e2c1 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -28,6 +28,8 @@ typedef struct SMMUv3AccelCmdqvOps {
uint32_t *out_viommu_id,
Error **errp);
void (*free_viommu)(SMMUv3State *s);
+ bool (*alloc_veventq)(SMMUv3State *s, Error **errp);
+ void (*free_veventq)(SMMUv3State *s);
void (*reset)(SMMUv3State *s);
} SMMUv3AccelCmdqvOps;
diff --git a/hw/arm/tegra241-cmdqv.h b/hw/arm/tegra241-cmdqv.h
index 914977c2ef..01d446474a 100644
--- a/hw/arm/tegra241-cmdqv.h
+++ b/hw/arm/tegra241-cmdqv.h
@@ -37,6 +37,7 @@ typedef struct Tegra241CMDQV {
MemoryRegion mmio_cmdqv;
qemu_irq irq;
IOMMUFDHWqueue *vcmdq[TEGRA241_CMDQV_MAX_CMDQ];
+ IOMMUFDVeventq *veventq;
void *vintf_page0;
MemoryRegion *mr_vintf_page0;
diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index 4373bbd97b..f6602f51aa 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -576,13 +576,21 @@ smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
goto free_bypass_hwpt;
}
+ if (cmdqv_ops && !cmdqv_ops->alloc_veventq(s, errp)) {
+ goto free_veventq;
+ }
+
/* Attach a HWPT based on SMMUv3 GBPA.ABORT value */
hwpt_id = smmuv3_accel_gbpa_hwpt(s, accel);
if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, errp)) {
- goto free_veventq;
+ goto free_cmdqv_veventq;
}
return true;
+free_cmdqv_veventq:
+ if (cmdqv_ops && cmdqv_ops->free_veventq) {
+ cmdqv_ops->free_veventq(s);
+ }
free_veventq:
smmuv3_accel_free_veventq(accel);
free_bypass_hwpt:
diff --git a/hw/arm/tegra241-cmdqv.c b/hw/arm/tegra241-cmdqv.c
index ce144add54..8cde459b4f 100644
--- a/hw/arm/tegra241-cmdqv.c
+++ b/hw/arm/tegra241-cmdqv.c
@@ -487,6 +487,51 @@ static void tegra241_cmdqv_write(void *opaque, hwaddr offset, uint64_t value,
}
}
+/* Tear down the CMDQV vEVENTQ allocated by tegra241_cmdqv_alloc_veventq(). */
+static void tegra241_cmdqv_free_veventq(SMMUv3State *s)
+{
+    SMMUv3AccelState *accel = s->s_accel;
+    Tegra241CMDQV *cmdqv = accel->cmdqv;
+    IOMMUFDVeventq *veventq = cmdqv->veventq;
+
+    if (!veventq) {
+        return; /* nothing allocated; safe to call repeatedly */
+    }
+    close(veventq->veventq_fd); /* event fd handed out by the veventq alloc */
+    iommufd_backend_free_id(veventq->viommu->iommufd, veventq->veventq_id);
+    g_free(veventq);
+    cmdqv->veventq = NULL; /* mark freed so cleanup paths can't double-free */
+}
+
+/*
+ * Allocate a Tegra241 CMDQV vEVENTQ against the vIOMMU via IOMMUFD.
+ * Returns true on success (or if one already exists); on failure sets
+ * @errp and returns false.  Freed by tegra241_cmdqv_free_veventq().
+ */
+static bool tegra241_cmdqv_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+    SMMUv3AccelState *accel = s->s_accel;
+    IOMMUFDViommu *viommu = accel->viommu;
+    Tegra241CMDQV *cmdqv = accel->cmdqv;
+    IOMMUFDVeventq *veventq;
+    uint32_t veventq_id;
+    uint32_t veventq_fd;
+
+    if (cmdqv->veventq) {
+        return true; /* already allocated; idempotent */
+    }
+
+    /* 1 << 16: requested queue depth -- TODO confirm units against uAPI */
+    if (!iommufd_backend_alloc_veventq(viommu->iommufd, viommu->viommu_id,
+                                       IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV,
+                                       1 << 16, &veventq_id, &veventq_fd,
+                                       errp)) {
+        /* QEMU convention: error_append_hint() messages end with '\n' */
+        error_append_hint(errp, "Tegra241 CMDQV: failed to alloc veventq\n");
+        return false;
+    }
+
+    veventq = g_new(IOMMUFDVeventq, 1);
+    veventq->veventq_id = veventq_id;
+    veventq->veventq_fd = veventq_fd;
+    veventq->viommu = accel->viommu;
+    cmdqv->veventq = veventq; /* ownership held by cmdqv until freed */
+
+    return true;
+}
+
static void tegra241_cmdqv_free_viommu(SMMUv3State *s)
{
SMMUv3AccelState *accel = s->s_accel;
@@ -580,6 +625,8 @@ static const SMMUv3AccelCmdqvOps tegra241_cmdqv_ops = {
.init = tegra241_cmdqv_init,
.alloc_viommu = tegra241_cmdqv_alloc_viommu,
.free_viommu = tegra241_cmdqv_free_viommu,
+ .alloc_veventq = tegra241_cmdqv_alloc_veventq,
+ .free_veventq = tegra241_cmdqv_free_veventq,
.reset = tegra241_cmdqv_reset,
};
--
2.43.0