From: Nicolin Chen <nicolinc@nvidia.com>

When the guest enables the Event Queue and a vIOMMU is present, allocate a
vEVENTQ object so that host-side events related to the vIOMMU can be
received and propagated back to the guest.

For cold-plugged devices using SMMUv3 acceleration, the vIOMMU is created
before the guest boots. In this case, the vEVENTQ is allocated when the
guest writes to SMMU_CR0 and sets EVENTQEN = 1.

If no cold-plugged device exists at boot (i.e. no vIOMMU initially), the
vEVENTQ is allocated when a vIOMMU is created, i.e. during the first
device hot-plug.

Event read and propagation will be added in a later patch.

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
---
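[ Note for reviewers, not part of the patch: event read and propagation
  come in a later patch. As a rough illustration only, the veventq_fd
  returned here could be hooked into QEMU's main loop along the lines
  below, assuming the iommufd vEVENTQ fd is read()-able;
  smmuv3_accel_propagate_events() is a hypothetical helper name for
  whatever the follow-up patch uses to inject records into the guest
  EVENTQ. ]

    static void smmuv3_accel_veventq_handler(void *opaque)
    {
        SMMUv3State *s = opaque;
        IOMMUFDVeventq *veventq = s->s_accel->vsmmu->veventq;
        uint8_t buf[4096];
        ssize_t len;

        /* Drain whatever the kernel has queued on the vEVENTQ fd */
        len = read(veventq->veventq_fd, buf, sizeof(buf));
        if (len <= 0) {
            return;
        }
        /* Parse the raw SMMUv3 event records and forward them to the
         * guest EVENTQ (hypothetical helper, added by the later patch). */
        smmuv3_accel_propagate_events(s, buf, len);
    }

    /* e.g. once vsmmu->veventq has been set up: */
    qemu_set_fd_handler(veventq->veventq_fd, smmuv3_accel_veventq_handler,
                        NULL, s);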
hw/arm/smmuv3-accel.c | 61 +++++++++++++++++++++++++++++++++++++++++++
hw/arm/smmuv3-accel.h | 6 +++++
hw/arm/smmuv3.c | 7 +++++
3 files changed, 74 insertions(+)
diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index 1f206be8e4..210e7ebf36 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -383,6 +383,59 @@ static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs, SMMUPciBus *sbus,
return accel_dev;
}
+static void smmuv3_accel_free_veventq(SMMUViommu *vsmmu)
+{
+ IOMMUFDVeventq *veventq = vsmmu->veventq;
+
+ if (!veventq) {
+ return;
+ }
+ iommufd_backend_free_id(vsmmu->iommufd, veventq->veventq_id);
+ g_free(veventq);
+ vsmmu->veventq = NULL;
+}
+
+bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+ SMMUv3AccelState *s_accel = s->s_accel;
+ IOMMUFDVeventq *veventq;
+ SMMUViommu *vsmmu;
+ uint32_t veventq_id;
+ uint32_t veventq_fd;
+
+ if (!s_accel || !s_accel->vsmmu) {
+ return true;
+ }
+
+ vsmmu = s_accel->vsmmu;
+ if (vsmmu->veventq) {
+ return true;
+ }
+
+ /*
+ * Check whether the guest has enabled the Event Queue. An enabled queue
+ * means EVENTQ_BASE has been programmed with a valid base address and size.
+ * If it's not yet configured, return and retry later.
+ */
+ if (!smmuv3_eventq_enabled(s)) {
+ return true;
+ }
+
+ if (!iommufd_backend_alloc_veventq(vsmmu->iommufd, vsmmu->viommu.viommu_id,
+ IOMMU_VEVENTQ_TYPE_ARM_SMMUV3,
+ 1 << s->eventq.log2size, &veventq_id,
+ &veventq_fd, errp)) {
+ return false;
+ }
+
+ veventq = g_new(IOMMUFDVeventq, 1);
+ veventq->veventq_id = veventq_id;
+ veventq->veventq_fd = veventq_fd;
+ veventq->viommu = &vsmmu->viommu;
+ vsmmu->veventq = veventq;
+ return true;
+}
+
static bool
smmuv3_accel_dev_alloc_viommu(SMMUv3AccelDevice *accel_dev,
HostIOMMUDeviceIOMMUFD *idev, Error **errp)
@@ -438,8 +491,15 @@ smmuv3_accel_dev_alloc_viommu(SMMUv3AccelDevice *accel_dev,
vsmmu->iommufd = idev->iommufd;
s_accel->vsmmu = vsmmu;
accel_dev->vsmmu = vsmmu;
+
+ /* Allocate a vEVENTQ if the guest has enabled the Event Queue */
+ if (!smmuv3_accel_alloc_veventq(s, errp)) {
+ goto free_bypass_hwpt;
+ }
return true;
+free_bypass_hwpt:
+ iommufd_backend_free_id(idev->iommufd, vsmmu->bypass_hwpt_id);
free_abort_hwpt:
iommufd_backend_free_id(idev->iommufd, vsmmu->abort_hwpt_id);
free_viommu:
@@ -536,6 +596,7 @@ static void smmuv3_accel_unset_iommu_device(PCIBus *bus, void *opaque,
}
if (QLIST_EMPTY(&vsmmu->device_list)) {
+ smmuv3_accel_free_veventq(vsmmu);
iommufd_backend_free_id(vsmmu->iommufd, vsmmu->bypass_hwpt_id);
iommufd_backend_free_id(vsmmu->iommufd, vsmmu->abort_hwpt_id);
iommufd_backend_free_id(vsmmu->iommufd, vsmmu->viommu.viommu_id);
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index 4f5b672712..740253bc34 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -22,6 +22,7 @@
typedef struct SMMUViommu {
IOMMUFDBackend *iommufd;
IOMMUFDViommu viommu;
+ IOMMUFDVeventq *veventq;
uint32_t bypass_hwpt_id;
uint32_t abort_hwpt_id;
QLIST_HEAD(, SMMUv3AccelDevice) device_list;
@@ -56,6 +57,7 @@ bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
void smmuv3_accel_gbpa_update(SMMUv3State *s);
void smmuv3_accel_reset(SMMUv3State *s);
void smmuv3_accel_idr_override(SMMUv3State *s);
+bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp);
#else
static inline void smmuv3_accel_init(SMMUv3State *s)
{
@@ -87,6 +89,10 @@ static inline void smmuv3_accel_reset(SMMUv3State *s)
static inline void smmuv3_accel_idr_override(SMMUv3State *s)
{
}
+static inline bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+ return true;
+}
#endif
#endif /* HW_ARM_SMMUV3_ACCEL_H */
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index e1140fe087..976a436bd4 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1616,12 +1616,19 @@ static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
uint64_t data, MemTxAttrs attrs)
{
+ Error *local_err = NULL;
+
switch (offset) {
case A_CR0:
s->cr[0] = data;
s->cr0ack = data & ~SMMU_CR0_RESERVED;
/* in case the command queue has been enabled */
smmuv3_cmdq_consume(s);
+ /* Allocate vEVENTQ if guest enables EventQ and vIOMMU is ready */
+ if (!smmuv3_accel_alloc_veventq(s, &local_err)) {
+ error_report_err(local_err);
+ /* TODO: Should we return err? */
+ }
return MEMTX_OK;
case A_CR1:
s->cr[1] = data;
--
2.43.0