From: Nicolin Chen <nicolinc@nvidia.com>
When the guest enables the Event Queue and a vIOMMU is present, allocate a
vEVENTQ object so that host-side events related to the vIOMMU can be
received and propagated back to the guest.
For cold-plugged devices using SMMUv3 acceleration, the vIOMMU is created
before the guest boots. In this case, the vEVENTQ is allocated when the
guest writes to SMMU_CR0 and sets EVENTQEN = 1.
If no cold-plugged device exists at boot (i.e. no vIOMMU initially), the
vEVENTQ is allocated when a vIOMMU is created, i.e. during the first
device hot-plug.
Event read and propagation will be added in a later patch.
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
---
hw/arm/smmuv3-accel.c | 61 +++++++++++++++++++++++++++++++++++++++++--
hw/arm/smmuv3-accel.h | 6 +++++
hw/arm/smmuv3.c | 4 +++
3 files changed, 69 insertions(+), 2 deletions(-)
diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index c19c526fca..2ca0d1deac 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -390,6 +390,19 @@ bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
sizeof(Cmd), &entry_num, cmd, errp);
}
+static void smmuv3_accel_free_veventq(SMMUv3AccelState *accel)
+{
+ IOMMUFDVeventq *veventq = accel->veventq;
+
+ if (!veventq) {
+ return;
+ }
+ close(veventq->veventq_fd);
+ iommufd_backend_free_id(accel->viommu->iommufd, veventq->veventq_id);
+ g_free(veventq);
+ accel->veventq = NULL;
+}
+
static void smmuv3_accel_free_viommu(SMMUv3AccelState *accel)
{
IOMMUFDViommu *viommu = accel->viommu;
@@ -397,6 +410,7 @@ static void smmuv3_accel_free_viommu(SMMUv3AccelState *accel)
if (!viommu) {
return;
}
+ smmuv3_accel_free_veventq(accel);
iommufd_backend_free_id(viommu->iommufd, accel->bypass_hwpt_id);
iommufd_backend_free_id(viommu->iommufd, accel->abort_hwpt_id);
iommufd_backend_free_id(viommu->iommufd, accel->viommu->viommu_id);
@@ -404,6 +418,41 @@ static void smmuv3_accel_free_viommu(SMMUv3AccelState *accel)
accel->viommu = NULL;
}
+bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+ SMMUv3AccelState *accel = s->s_accel;
+ IOMMUFDVeventq *veventq;
+ uint32_t veventq_id;
+ uint32_t veventq_fd;
+
+ if (!accel->viommu) {
+ return true;
+ }
+
+ if (accel->veventq) {
+ return true;
+ }
+
+ if (!smmuv3_eventq_enabled(s)) {
+ return true;
+ }
+
+ if (!iommufd_backend_alloc_veventq(accel->viommu->iommufd,
+ accel->viommu->viommu_id,
+ IOMMU_VEVENTQ_TYPE_ARM_SMMUV3,
+ 1 << s->eventq.log2size, &veventq_id,
+ &veventq_fd, errp)) {
+ return false;
+ }
+
+ veventq = g_new(IOMMUFDVeventq, 1);
+ veventq->veventq_id = veventq_id;
+ veventq->veventq_fd = veventq_fd;
+ veventq->viommu = accel->viommu;
+ accel->veventq = veventq;
+ return true;
+}
+
static bool
smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
Error **errp)
@@ -429,6 +478,7 @@ smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
viommu->viommu_id = viommu_id;
viommu->s2_hwpt_id = s2_hwpt_id;
viommu->iommufd = idev->iommufd;
+ accel->viommu = viommu;
/*
* Pre-allocate HWPTs for S1 bypass and abort cases. These will be attached
@@ -448,14 +498,20 @@ smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
goto free_abort_hwpt;
}
+ /* Allocate a vEVENTQ if guest has enabled event queue */
+ if (!smmuv3_accel_alloc_veventq(s, errp)) {
+ goto free_bypass_hwpt;
+ }
+
/* Attach a HWPT based on SMMUv3 GBPA.ABORT value */
hwpt_id = smmuv3_accel_gbpa_hwpt(s, accel);
if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, errp)) {
- goto free_bypass_hwpt;
+ goto free_veventq;
}
- accel->viommu = viommu;
return true;
+free_veventq:
+ smmuv3_accel_free_veventq(accel);
free_bypass_hwpt:
iommufd_backend_free_id(idev->iommufd, accel->bypass_hwpt_id);
free_abort_hwpt:
@@ -463,6 +519,7 @@ free_abort_hwpt:
free_viommu:
iommufd_backend_free_id(idev->iommufd, viommu->viommu_id);
g_free(viommu);
+ accel->viommu = NULL;
return false;
}
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index a8a64802ec..dba6c71de5 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -22,6 +22,7 @@
*/
typedef struct SMMUv3AccelState {
IOMMUFDViommu *viommu;
+ IOMMUFDVeventq *veventq;
uint32_t bypass_hwpt_id;
uint32_t abort_hwpt_id;
QLIST_HEAD(, SMMUv3AccelDevice) device_list;
@@ -50,6 +51,7 @@ bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp);
bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
Error **errp);
void smmuv3_accel_idr_override(SMMUv3State *s);
+bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp);
void smmuv3_accel_reset(SMMUv3State *s);
#else
static inline void smmuv3_accel_init(SMMUv3State *s)
@@ -80,6 +82,10 @@ smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
static inline void smmuv3_accel_idr_override(SMMUv3State *s)
{
}
+static inline bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+ return true;
+}
static inline void smmuv3_accel_reset(SMMUv3State *s)
{
}
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index c08d58c579..210ac038fe 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1605,6 +1605,10 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
s->cr0ack = data & ~SMMU_CR0_RESERVED;
/* in case the command queue has been enabled */
smmuv3_cmdq_consume(s, &local_err);
+ /* Allocate vEVENTQ if EventQ is enabled and a vIOMMU is available */
+ if (local_err == NULL) {
+ smmuv3_accel_alloc_veventq(s, &local_err);
+ }
break;
case A_CR1:
s->cr[1] = data;
--
2.43.0
Hi Shameer,
On 2/4/26 8:00 PM, Shameer Kolothum wrote:
[...]
> +bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
> +{
> + SMMUv3AccelState *accel = s->s_accel;
> + IOMMUFDVeventq *veventq;
> + uint32_t veventq_id;
> + uint32_t veventq_fd;
You need to check !accel because, for a non-accelerated SMMU, this
segfaults in the smmu_writel/smmuv3_accel_alloc_veventq path.
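For instance, something like this at the top of
smmuv3_accel_alloc_veventq() (untested sketch) would cover it:

    if (!accel || !accel->viommu) {
        return true;
    }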
Thanks
Eric
> On 2/4/26 8:00 PM, Shameer Kolothum wrote:
> > [...]
> > +bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
> > +{
> > + SMMUv3AccelState *accel = s->s_accel;
> > + IOMMUFDVeventq *veventq;
> > + uint32_t veventq_id;
> > + uint32_t veventq_fd;
>
> You need to check !accel because, for a non-accelerated SMMU, this
> segfaults in the smmu_writel/smmuv3_accel_alloc_veventq path.
Oops. Right. Will add.
Thanks,
Shameer
On Wed, Feb 04, 2026 at 07:00:06PM +0000, Shameer Kolothum wrote:
> +bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
> +{
> + SMMUv3AccelState *accel = s->s_accel;
> + IOMMUFDVeventq *veventq;
> + uint32_t veventq_id;
> + uint32_t veventq_fd;
> +
> + if (!accel->viommu) {
> + return true;
> + }
This check seems meaningless, or should it be an assert?
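i.e. if a vIOMMU is guaranteed to exist whenever this runs, just (a sketch):

    g_assert(accel->viommu);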
Otherwise looks okay.
Nicolin
> On Wed, Feb 04, 2026 at 07:00:06PM +0000, Shameer Kolothum wrote:
> > +bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
> > +{
> > + SMMUv3AccelState *accel = s->s_accel;
> > + IOMMUFDVeventq *veventq;
> > + uint32_t veventq_id;
> > + uint32_t veventq_fd;
> > +
> > + if (!accel->viommu) {
> > + return true;
> > + }
>
> This check seems meaningless, or should it be an assert?
Hmm... We need this check because, if there are no cold-plugged devices,
the vIOMMU may not exist yet when the guest boots and enables the EventQ.
Isn't that right?
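This patch calls it from smmu_writel() on every SMMU_CR0 write:

    /* Allocate vEVENTQ if EventQ is enabled and a vIOMMU is available */
    if (local_err == NULL) {
        smmuv3_accel_alloc_veventq(s, &local_err);
    }

so without that check we would dereference a NULL accel->viommu there.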
Thanks,
Shameer
On Thu, Feb 05, 2026 at 12:26:40AM -0800, Shameer Kolothum Thodi wrote:
> > On Wed, Feb 04, 2026 at 07:00:06PM +0000, Shameer Kolothum wrote:
> > > +bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
> > > +{
> > > + SMMUv3AccelState *accel = s->s_accel;
> > > + IOMMUFDVeventq *veventq;
> > > + uint32_t veventq_id;
> > > + uint32_t veventq_fd;
> > > +
> > > + if (!accel->viommu) {
> > > + return true;
> > > + }
> >
> > This check seems meaningless, or should it be an assert?
>
> Hmm... We need this check because, if there are no cold-plugged devices,
> the vIOMMU may not exist yet when the guest boots and enables the EventQ.
>
> Isn't that right?
Oh, you are right. I missed the other call in smmu_writel().
Nicolin