Install an event handler on the vEVENTQ fd to read and propagate
host-generated vIOMMU events to the guest.
The handler runs in QEMU's main loop, using a non-blocking fd registered
via qemu_set_fd_handler().
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
---
hw/arm/smmuv3-accel.c | 62 +++++++++++++++++++++++++++++++++++++++++++
hw/arm/smmuv3-accel.h | 2 ++
2 files changed, 64 insertions(+)
diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index 2ca0d1deac..beffb8aa94 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -390,6 +390,48 @@ bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
sizeof(Cmd), &entry_num, cmd, errp);
}
+static void smmuv3_accel_event_read(void *opaque)
+{
+ SMMUv3State *s = opaque;
+ SMMUv3AccelState *accel = s->s_accel;
+ struct {
+ struct iommufd_vevent_header hdr;
+ struct iommu_vevent_arm_smmuv3 vevent;
+ } buf;
+ uint32_t last_seq = accel->last_event_seq;
+ ssize_t bytes;
+
+ bytes = read(accel->veventq->veventq_fd, &buf, sizeof(buf));
+ if (bytes <= 0) {
+ if (errno == EAGAIN || errno == EINTR) {
+ return;
+ }
+ error_report_once("vEVENTQ: read failed (%m)");
+ return;
+ }
+
+ if (bytes == sizeof(buf.hdr) &&
+ (buf.hdr.flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)) {
+ error_report_once("vEVENTQ has lost events");
+ accel->event_start = false;
+ return;
+ }
+ if (bytes < sizeof(buf)) {
+ error_report_once("vEVENTQ: incomplete read (%zd/%zu bytes)",
+ bytes, sizeof(buf));
+ return;
+ }
+
+ /* Check the sequence number in hdr to detect lost events, if any */
+ if (accel->event_start && (buf.hdr.sequence - last_seq != 1)) {
+ error_report_once("vEVENTQ: detected lost %u event(s)",
+ buf.hdr.sequence - last_seq - 1);
+ }
+ accel->last_event_seq = buf.hdr.sequence;
+ accel->event_start = true;
+ smmuv3_propagate_event(s, (Evt *)&buf.vevent);
+}
+
static void smmuv3_accel_free_veventq(SMMUv3AccelState *accel)
{
IOMMUFDVeventq *veventq = accel->veventq;
@@ -397,6 +439,7 @@ static void smmuv3_accel_free_veventq(SMMUv3AccelState *accel)
if (!veventq) {
return;
}
+ qemu_set_fd_handler(veventq->veventq_fd, NULL, NULL, NULL);
close(veventq->veventq_fd);
iommufd_backend_free_id(accel->viommu->iommufd, veventq->veventq_id);
g_free(veventq);
@@ -424,6 +467,7 @@ bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
IOMMUFDVeventq *veventq;
uint32_t veventq_id;
uint32_t veventq_fd;
+ int flags;
if (!accel->viommu) {
return true;
@@ -445,12 +489,30 @@ bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
return false;
}
+ flags = fcntl(veventq_fd, F_GETFL);
+ if (flags < 0) {
+ error_setg(errp, "Failed to get flags for vEVENTQ fd");
+ goto free_veventq;
+ }
+ if (fcntl(veventq_fd, F_SETFL, O_NONBLOCK | flags) < 0) {
+ error_setg(errp, "Failed to set O_NONBLOCK on vEVENTQ fd");
+ goto free_veventq;
+ }
+
veventq = g_new(IOMMUFDVeventq, 1);
veventq->veventq_id = veventq_id;
veventq->veventq_fd = veventq_fd;
veventq->viommu = accel->viommu;
accel->veventq = veventq;
+
+ /* Set up event handler for veventq fd */
+ qemu_set_fd_handler(veventq_fd, smmuv3_accel_event_read, NULL, s);
return true;
+
+free_veventq:
+ close(veventq_fd);
+ iommufd_backend_free_id(accel->viommu->iommufd, veventq_id);
+ return false;
}
static bool
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index dba6c71de5..c9c10e55c3 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -23,6 +23,8 @@
typedef struct SMMUv3AccelState {
IOMMUFDViommu *viommu;
IOMMUFDVeventq *veventq;
+ uint32_t last_event_seq;
+ bool event_start;
uint32_t bypass_hwpt_id;
uint32_t abort_hwpt_id;
QLIST_HEAD(, SMMUv3AccelDevice) device_list;
--
2.43.0
On Wed, Feb 04, 2026 at 07:00:08PM +0000, Shameer Kolothum wrote:
> Install an event handler on the vEVENTQ fd to read and propagate host
> generated vIOMMU events to the guest.
>
> The handler runs in QEMU’s main loop, using a non-blocking fd registered
s/’s/'s
> +static void smmuv3_accel_event_read(void *opaque)
> + /* Check sequence in hdr for lost events if any */
> + if (accel->event_start && (buf.hdr.sequence - last_seq != 1)) {
> + error_report_once("vEVENTQ: detected lost %u event(s)",
> + buf.hdr.sequence - last_seq - 1);
> + }
> + accel->last_event_seq = buf.hdr.sequence;
> + accel->event_start = true;
> + smmuv3_propagate_event(s, (Evt *)&buf.vevent);
[..]
> + /* Set up event handler for veventq fd */
> + qemu_set_fd_handler(veventq_fd, smmuv3_accel_event_read, NULL, s);
Are callbacks serialized across threads? Otherwise, we need a mutex?
Thanks
Nicolin
> -----Original Message-----
> From: Nicolin Chen <nicolinc@nvidia.com>
> Sent: 04 February 2026 20:13
> To: Shameer Kolothum Thodi <skolothumtho@nvidia.com>
> Cc: qemu-arm@nongnu.org; qemu-devel@nongnu.org;
> eric.auger@redhat.com; peter.maydell@linaro.org; Nathan Chen
> <nathanc@nvidia.com>; Matt Ochs <mochs@nvidia.com>; Jason Gunthorpe
> <jgg@nvidia.com>; jonathan.cameron@huawei.com;
> zhangfei.gao@linaro.org; zhenzhong.duan@intel.com; Krishnakant Jaju
> <kjaju@nvidia.com>
> Subject: Re: [PATCH v4 5/5] hw/arm/smmuv3-accel: Read and propagate host
> vIOMMU events
>
> On Wed, Feb 04, 2026 at 07:00:08PM +0000, Shameer Kolothum wrote:
> > Install an event handler on the vEVENTQ fd to read and propagate host
> > generated vIOMMU events to the guest.
> >
> > The handler runs in QEMU’s main loop, using a non-blocking fd
> > registered
>
> s/’s/'s
>
> > +static void smmuv3_accel_event_read(void *opaque)
> > + /* Check sequence in hdr for lost events if any */
> > + if (accel->event_start && (buf.hdr.sequence - last_seq != 1)) {
> > + error_report_once("vEVENTQ: detected lost %u event(s)",
> > + buf.hdr.sequence - last_seq - 1);
> > + }
> > + accel->last_event_seq = buf.hdr.sequence;
> > + accel->event_start = true;
> > + smmuv3_propagate_event(s, (Evt *)&buf.vevent);
> [..]
> > + /* Set up event handler for veventq fd */
> > + qemu_set_fd_handler(veventq_fd, smmuv3_accel_event_read, NULL,
> > + s);
>
> Are callbacks serialized across threads? Otherwise, we need a mutex?
The qemu_set_fd_handler() callbacks run via the AioContext in QEMU's main
loop. AFAICS, the AioContext dispatches handlers one at a time, so the callback
is never executed concurrently.
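
For reference, a minimal sketch of that dispatch model (not part of this
patch; ExampleState/example_fd_read are hypothetical names used only for
illustration): the fd is made non-blocking and registered with
qemu_set_fd_handler(), so the read callback is only ever invoked from the
main-loop thread and the state it touches needs no extra locking.

/*
 * Minimal sketch, assuming a hypothetical ExampleState owned by the
 * main loop -- not part of this patch.
 */
#include "qemu/osdep.h"      /* pulls in unistd.h, fcntl.h, etc. */
#include "qemu/main-loop.h"  /* qemu_set_fd_handler() */

typedef struct ExampleState {
    int fd;
    uint32_t events_seen;    /* only ever touched from the read handler */
} ExampleState;

static void example_fd_read(void *opaque)
{
    ExampleState *st = opaque;
    char buf[64];
    ssize_t n = read(st->fd, buf, sizeof(buf));

    if (n <= 0) {
        return;              /* EAGAIN/EINTR, or nothing to read */
    }
    /*
     * Dispatched from the main-loop AioContext: this handler never runs
     * concurrently with itself, so no lock is needed for events_seen.
     */
    st->events_seen++;
}

static void example_register(ExampleState *st)
{
    int flags = fcntl(st->fd, F_GETFL);

    /* The handler must not block the main loop, hence O_NONBLOCK. */
    fcntl(st->fd, F_SETFL, flags | O_NONBLOCK);
    qemu_set_fd_handler(st->fd, example_fd_read, NULL, st);
}

static void example_unregister(ExampleState *st)
{
    /* NULL callbacks detach the fd from the main-loop AioContext. */
    qemu_set_fd_handler(st->fd, NULL, NULL, NULL);
}

The unregister path mirrors what smmuv3_accel_free_veventq() does in the
patch: passing NULL handlers before close() so the main loop stops polling
the fd.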
Thanks,
Shameer