[PATCH 4/5] intel_iommu_accel: Accept PRQ response for passthrough device

Zhenzhong Duan posted 5 patches 6 days, 16 hours ago
[PATCH 4/5] intel_iommu_accel: Accept PRQ response for passthrough device
Posted by Zhenzhong Duan 6 days, 16 hours ago
Propagate guest's PRQ response to host by writing to fault_fd.
Create a new VTDPRQEntry to cache cookie for each fault group,
this cookie is used to mark the fault group on host side.

Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
---
 hw/i386/intel_iommu_accel.h   | 14 ++++++++
 include/hw/i386/intel_iommu.h |  6 ++++
 hw/i386/intel_iommu.c         |  4 +++
 hw/i386/intel_iommu_accel.c   | 65 +++++++++++++++++++++++++++++++++++
 hw/i386/trace-events          |  1 +
 5 files changed, 90 insertions(+)

diff --git a/hw/i386/intel_iommu_accel.h b/hw/i386/intel_iommu_accel.h
index 10e6ee5722..b46c7126f7 100644
--- a/hw/i386/intel_iommu_accel.h
+++ b/hw/i386/intel_iommu_accel.h
@@ -19,6 +19,9 @@ typedef struct VTDAccelPASIDCacheEntry {
     uint32_t fs_hwpt_id;
     uint32_t fault_id;
     int fault_fd;
+    QLIST_HEAD(, VTDPRQEntry) vtd_prq_list;
+    IOMMUPRINotifier pri_notifier_entry;
+    IOMMUPRINotifier *pri_notifier;
     QLIST_ENTRY(VTDAccelPASIDCacheEntry) next;
 } VTDAccelPASIDCacheEntry;
 
@@ -31,6 +34,9 @@ void vtd_flush_host_piotlb_all_accel(IntelIOMMUState *s, uint16_t domain_id,
                                      uint64_t npages, bool ih);
 void vtd_pasid_cache_sync_accel(IntelIOMMUState *s, VTDPASIDCacheInfo *pc_info);
 void vtd_pasid_cache_reset_accel(IntelIOMMUState *s);
+bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,
+                                             uint16_t rid, uint32_t pasid,
+                                             IOMMUPRIResponse *response);
 void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops);
 #else
 static inline bool vtd_check_hiod_accel(IntelIOMMUState *s,
@@ -69,6 +75,14 @@ static inline void vtd_pasid_cache_reset_accel(IntelIOMMUState *s)
 {
 }
 
+static inline
+bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,
+                                             uint16_t rid, uint32_t pasid,
+                                             IOMMUPRIResponse *response)
+{
+    return false;
+}
+
 static inline void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops)
 {
 }
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index 1842ba5840..5d44eac0ed 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -100,6 +100,12 @@ typedef struct VTDPASIDCacheEntry {
     bool valid;
 } VTDPASIDCacheEntry;
 
+typedef struct VTDPRQEntry {
+    uint32_t grpid;
+    uint32_t cookie;
+    QLIST_ENTRY(VTDPRQEntry) next;
+} VTDPRQEntry;
+
 struct VTDAddressSpace {
     PCIBus *bus;
     uint8_t devfn;
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 96b4102ab9..d670a0377b 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -3390,6 +3390,10 @@ static bool vtd_process_page_group_response_desc(IntelIOMMUState *s,
         response.response_code = IOMMU_PRI_RESP_FAILURE;
     }
 
+    if (vtd_propagate_page_group_response_accel(s, rid, pasid, &response)) {
+        return true;
+    }
+
     if (vtd_dev_as->pri_notifier) {
         vtd_dev_as->pri_notifier->notify(vtd_dev_as->pri_notifier, &response);
     }
diff --git a/hw/i386/intel_iommu_accel.c b/hw/i386/intel_iommu_accel.c
index 0fce62ff75..44af534c55 100644
--- a/hw/i386/intel_iommu_accel.c
+++ b/hw/i386/intel_iommu_accel.c
@@ -102,6 +102,30 @@ VTDHostIOMMUDevice *vtd_find_hiod_iommufd(VTDAddressSpace *as)
     return NULL;
 }
 
+bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,
+                                             uint16_t rid, uint32_t pasid,
+                                             IOMMUPRIResponse *response)
+{
+    VTDAddressSpace *vtd_as = vtd_get_as_by_sid(s, rid);
+    VTDAccelPASIDCacheEntry *vtd_pce;
+    VTDHostIOMMUDevice *vtd_hiod = vtd_find_hiod_iommufd(vtd_as);
+
+    if (!vtd_hiod) {
+        return false;
+    }
+
+    QLIST_FOREACH(vtd_pce, &vtd_hiod->pasid_cache_list, next) {
+        if (vtd_pce->pasid == pasid) {
+            if (vtd_pce->pri_notifier) {
+                vtd_pce->pri_notifier->notify(vtd_pce->pri_notifier, response);
+            }
+            return true;
+        }
+    }
+
+    return false;
+}
+
 static void vtd_prq_report_fault(VTDAccelPASIDCacheEntry *vtd_pce,
                                  struct iommu_hwpt_pgfault *fault, int cnt)
 {
@@ -117,6 +141,13 @@ static void vtd_prq_report_fault(VTDAccelPASIDCacheEntry *vtd_pce,
                                     fault->addr, last_page, fault->grpid,
                                     fault->perm & IOMMU_PGFAULT_PERM_READ,
                                     fault->perm & IOMMU_PGFAULT_PERM_WRITE);
+        if (last_page) {
+            VTDPRQEntry *prqe = g_malloc0(sizeof(*prqe));
+
+            prqe->grpid = fault->grpid;
+            prqe->cookie = fault->cookie;
+            QLIST_INSERT_HEAD(&vtd_pce->vtd_prq_list, prqe, next);
+        }
     }
 }
 
@@ -150,6 +181,36 @@ static void vtd_prq_read_fault(void *opaque)
     vtd_prq_report_fault(vtd_pce, fault, bytes / sizeof(fault[0]));
 }
 
+static void vtd_prq_response_notify(struct IOMMUPRINotifier *notifier,
+                                    IOMMUPRIResponse *response)
+{
+    VTDAccelPASIDCacheEntry *vtd_pce =
+        container_of(notifier, VTDAccelPASIDCacheEntry, pri_notifier_entry);
+    uint32_t id = vtd_pce->fault_id, fd = vtd_pce->fault_fd;
+    struct iommu_hwpt_page_response resp;
+    VTDPRQEntry *prqe, *tmp;
+    ssize_t bytes;
+
+    QLIST_FOREACH_SAFE(prqe, &vtd_pce->vtd_prq_list, next, tmp) {
+        if (prqe->grpid != response->prgi) {
+            continue;
+        }
+
+        resp.cookie = prqe->cookie;
+        resp.code = response->response_code;
+        bytes = write(fd, &resp, sizeof(resp));
+        trace_vtd_prq_response_notify(id, fd, resp.cookie, resp.code, bytes);
+        if (bytes < 0) {
+            error_report_once("FAULTQ(id %u): write failed "
+                              "[cookie 0x%x code 0x%x] (%m)",
+                              id, resp.cookie, resp.code);
+        }
+
+        QLIST_REMOVE(prqe, next);
+        g_free(prqe);
+    }
+}
+
 static void vtd_destroy_fs_faultq(VTDHostIOMMUDevice *vtd_hiod,
                                   uint32_t fault_id, uint32_t fault_fd)
 {
@@ -213,6 +274,7 @@ static void vtd_destroy_old_fs_faultq(VTDHostIOMMUDevice *vtd_hiod,
         return;
     }
 
+    vtd_pce->pri_notifier = NULL;
     qemu_set_fd_handler(vtd_pce->fault_fd, NULL, NULL, NULL);
     vtd_destroy_fs_faultq(vtd_hiod, vtd_pce->fault_id, vtd_pce->fault_fd);
     vtd_pce->fault_id = 0;
@@ -228,6 +290,8 @@ static void vtd_setup_fs_faultq(VTDAccelPASIDCacheEntry *vtd_pce,
 
     vtd_pce->fault_id = fault_id;
     vtd_pce->fault_fd = fault_fd;
+    vtd_pce->pri_notifier_entry.notify = vtd_prq_response_notify;
+    vtd_pce->pri_notifier = &vtd_pce->pri_notifier_entry;
     qemu_set_fd_handler(fault_fd, vtd_prq_read_fault, NULL, vtd_pce);
 }
 
@@ -492,6 +556,7 @@ static void vtd_accel_fill_pc(VTDHostIOMMUDevice *vtd_hiod, uint32_t pasid,
     vtd_pce->vtd_hiod = vtd_hiod;
     vtd_pce->pasid = pasid;
     vtd_pce->pasid_entry = *pe;
+    QLIST_INIT(&vtd_pce->vtd_prq_list);
     QLIST_INSERT_HEAD(&vtd_hiod->pasid_cache_list, vtd_pce, next);
 
     if (!vtd_device_attach_iommufd(vtd_pce, &local_err)) {
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index bf139338f7..52dab0b508 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -78,6 +78,7 @@ vtd_device_attach_hwpt(uint32_t dev_id, uint32_t pasid, uint32_t hwpt_id, int re
 vtd_device_detach_hwpt(uint32_t dev_id, uint32_t pasid, int ret) "dev_id %d pasid %d ret: %d"
 vtd_device_reattach_def_hwpt(uint32_t dev_id, uint32_t pasid, uint32_t hwpt_id, int ret) "dev_id %d pasid %d hwpt_id %d, ret: %d"
 vtd_prq_read_fault(uint32_t fault_id, uint32_t fault_fd, ssize_t bytes) "fault_id %d fault_fd %d ret: %zd"
+vtd_prq_response_notify(uint32_t fault_id, uint32_t fault_fd, uint32_t cookie, uint32_t code, ssize_t bytes) "fault_id %d fault_fd %d cookie %d code %d ret: %zd"
 
 # amd_iommu.c
 amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" +  offset 0x%"PRIx32
-- 
2.47.3
Re: [PATCH 4/5] intel_iommu_accel: Accept PRQ response for passthrough device
Posted by CLEMENT MATHIEU--DRIF 3 days, 11 hours ago

On Thu, 2026-03-26 at 22:52 -0400, Zhenzhong Duan wrote:
> Propagate guest's PRQ response to host by writing to fault_fd.  
> Create a new VTDPRQEntry to cache cookie for each fault group,  
> this cookie is used to mark the fault group on host side.
> 
> Signed-off-by: Zhenzhong Duan <[zhenzhong.duan@intel.com](mailto:zhenzhong.duan@intel.com)>  
> ---  
>  hw/i386/intel_iommu_accel.h   | 14 ++++++++  
>  include/hw/i386/intel_iommu.h |  6 ++++  
>  hw/i386/intel_iommu.c         |  4 +++  
>  hw/i386/intel_iommu_accel.c   | 65 +++++++++++++++++++++++++++++++++++  
>  hw/i386/trace-events          |  1 +  
>  5 files changed, 90 insertions(+)
> 
> diff --git a/hw/i386/intel_iommu_accel.h b/hw/i386/intel_iommu_accel.h  
> index 10e6ee5722..b46c7126f7 100644  
> --- a/hw/i386/intel_iommu_accel.h  
> +++ b/hw/i386/intel_iommu_accel.h  
> @@ -19,6 +19,9 @@ typedef struct VTDAccelPASIDCacheEntry {  
>      uint32_t fs_hwpt_id;  
>      uint32_t fault_id;  
>      int fault_fd;  
> +    QLIST_HEAD(, VTDPRQEntry) vtd_prq_list;  
> +    IOMMUPRINotifier pri_notifier_entry;  
> +    IOMMUPRINotifier *pri_notifier;  
>      QLIST_ENTRY(VTDAccelPASIDCacheEntry) next;  
>  } VTDAccelPASIDCacheEntry;  
>    
> @@ -31,6 +34,9 @@ void vtd_flush_host_piotlb_all_accel(IntelIOMMUState *s, uint16_t domain_id,  
>                                       uint64_t npages, bool ih);  
>  void vtd_pasid_cache_sync_accel(IntelIOMMUState *s, VTDPASIDCacheInfo *pc_info);  
>  void vtd_pasid_cache_reset_accel(IntelIOMMUState *s);  
> +bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,  
> +                                             uint16_t rid, uint32_t pasid,  
> +                                             IOMMUPRIResponse *response);  
>  void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops);  
>  #else  
>  static inline bool vtd_check_hiod_accel(IntelIOMMUState *s,  
> @@ -69,6 +75,14 @@ static inline void vtd_pasid_cache_reset_accel(IntelIOMMUState *s)  
>  {  
>  }  
>    
> +static inline  
> +bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,  
> +                                             uint16_t rid, uint32_t pasid,  
> +                                             IOMMUPRIResponse *response)  
> +{  
> +    return false;  
> +}  
> +  
>  static inline void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops)  
>  {  
>  }  
> diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h  
> index 1842ba5840..5d44eac0ed 100644  
> --- a/include/hw/i386/intel_iommu.h  
> +++ b/include/hw/i386/intel_iommu.h  
> @@ -100,6 +100,12 @@ typedef struct VTDPASIDCacheEntry {  
>      bool valid;  
>  } VTDPASIDCacheEntry;  
>    
> +typedef struct VTDPRQEntry {  
> +    uint32_t grpid;  

Hi Zhenzhong,

Maybe silly question, but why don't we keep naming it prgi?

> +    uint32_t cookie;  
> +    QLIST_ENTRY(VTDPRQEntry) next;  
> +} VTDPRQEntry;  
> +  
>  struct VTDAddressSpace {  
>      PCIBus *bus;  
>      uint8_t devfn;  
> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c  
> index 96b4102ab9..d670a0377b 100644  
> --- a/hw/i386/intel_iommu.c  
> +++ b/hw/i386/intel_iommu.c  
> @@ -3390,6 +3390,10 @@ static bool vtd_process_page_group_response_desc(IntelIOMMUState *s,  
>          response.response_code = IOMMU_PRI_RESP_FAILURE;  
>      }  
>    
> +    if (vtd_propagate_page_group_response_accel(s, rid, pasid, &response)) {  
> +        return true;  
> +    }  
> +  
>      if (vtd_dev_as->pri_notifier) {  
>          vtd_dev_as->pri_notifier->notify(vtd_dev_as->pri_notifier, &response);  
>      }  
> diff --git a/hw/i386/intel_iommu_accel.c b/hw/i386/intel_iommu_accel.c  
> index 0fce62ff75..44af534c55 100644  
> --- a/hw/i386/intel_iommu_accel.c  
> +++ b/hw/i386/intel_iommu_accel.c  
> @@ -102,6 +102,30 @@ VTDHostIOMMUDevice *vtd_find_hiod_iommufd(VTDAddressSpace *as)  
>      return NULL;  
>  }  
>    
> +bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,  
> +                                             uint16_t rid, uint32_t pasid,  
> +                                             IOMMUPRIResponse *response)  
> +{  
> +    VTDAddressSpace *vtd_as = vtd_get_as_by_sid(s, rid);  
> +    VTDAccelPASIDCacheEntry *vtd_pce;  
> +    VTDHostIOMMUDevice *vtd_hiod = vtd_find_hiod_iommufd(vtd_as);  
> +  
> +    if (!vtd_hiod) {  
> +        return false;  
> +    }  
> +  
> +    QLIST_FOREACH(vtd_pce, &vtd_hiod->pasid_cache_list, next) {  
> +        if (vtd_pce->pasid == pasid) {  
> +            if (vtd_pce->pri_notifier) {  
> +                vtd_pce->pri_notifier->notify(vtd_pce->pri_notifier, response);  
> +            }  
> +            return true;  
> +        }  
> +    }  
> +  
> +    return false;  
> +}  
> +  
>  static void vtd_prq_report_fault(VTDAccelPASIDCacheEntry *vtd_pce,  
>                                   struct iommu_hwpt_pgfault *fault, int cnt)  
>  {  
> @@ -117,6 +141,13 @@ static void vtd_prq_report_fault(VTDAccelPASIDCacheEntry *vtd_pce,  
>                                      fault->addr, last_page, fault->grpid,  
>                                      fault->perm & IOMMU_PGFAULT_PERM_READ,  
>                                      fault->perm & IOMMU_PGFAULT_PERM_WRITE);  
> +        if (last_page) {  
> +            VTDPRQEntry *prqe = g_malloc0(sizeof(*prqe));  
> +  
> +            prqe->grpid = fault->grpid;  
> +            prqe->cookie = fault->cookie;  
> +            QLIST_INSERT_HEAD(&vtd_pce->vtd_prq_list, prqe, next);  
> +        }  
>      }  
>  }  
>    
> @@ -150,6 +181,36 @@ static void vtd_prq_read_fault(void *opaque)  
>      vtd_prq_report_fault(vtd_pce, fault, bytes / sizeof(fault[0]));  
>  }  
>    
> +static void vtd_prq_response_notify(struct IOMMUPRINotifier *notifier,  
> +                                    IOMMUPRIResponse *response)  
> +{  
> +    VTDAccelPASIDCacheEntry *vtd_pce =  
> +        container_of(notifier, VTDAccelPASIDCacheEntry, pri_notifier_entry);  
> +    uint32_t id = vtd_pce->fault_id, fd = vtd_pce->fault_fd;  
> +    struct iommu_hwpt_page_response resp;  
> +    VTDPRQEntry *prqe, *tmp;  
> +    ssize_t bytes;  
> +  
> +    QLIST_FOREACH_SAFE(prqe, &vtd_pce->vtd_prq_list, next, tmp) {  
> +        if (prqe->grpid != response->prgi) {  
> +            continue;  
> +        }  
> +  
> +        resp.cookie = prqe->cookie;  
> +        resp.code = response->response_code;  
> +        bytes = write(fd, &resp, sizeof(resp));  
> +        trace_vtd_prq_response_notify(id, fd, resp.cookie, resp.code, bytes);  
> +        if (bytes < 0) {  
> +            error_report_once("FAULTQ(id %u): write failed "  
> +                              "[cookie 0x%x code 0x%x] (%m)",  
> +                              id, resp.cookie, resp.code);  
> +        }  
> +  
> +        QLIST_REMOVE(prqe, next);  
> +        g_free(prqe);  
> +    }  
> +}  
> +  
>  static void vtd_destroy_fs_faultq(VTDHostIOMMUDevice *vtd_hiod,  
>                                    uint32_t fault_id, uint32_t fault_fd)  
>  {  
> @@ -213,6 +274,7 @@ static void vtd_destroy_old_fs_faultq(VTDHostIOMMUDevice *vtd_hiod,  
>          return;  
>      }  
>    
> +    vtd_pce->pri_notifier = NULL;  
>      qemu_set_fd_handler(vtd_pce->fault_fd, NULL, NULL, NULL);  
>      vtd_destroy_fs_faultq(vtd_hiod, vtd_pce->fault_id, vtd_pce->fault_fd);  
>      vtd_pce->fault_id = 0;  
> @@ -228,6 +290,8 @@ static void vtd_setup_fs_faultq(VTDAccelPASIDCacheEntry *vtd_pce,  
>    
>      vtd_pce->fault_id = fault_id;  
>      vtd_pce->fault_fd = fault_fd;  
> +    vtd_pce->pri_notifier_entry.notify = vtd_prq_response_notify;  
> +    vtd_pce->pri_notifier = &vtd_pce->pri_notifier_entry;  
>      qemu_set_fd_handler(fault_fd, vtd_prq_read_fault, NULL, vtd_pce);  
>  }  
>    
> @@ -492,6 +556,7 @@ static void vtd_accel_fill_pc(VTDHostIOMMUDevice *vtd_hiod, uint32_t pasid,  
>      vtd_pce->vtd_hiod = vtd_hiod;  
>      vtd_pce->pasid = pasid;  
>      vtd_pce->pasid_entry = *pe;  
> +    QLIST_INIT(&vtd_pce->vtd_prq_list);  
>      QLIST_INSERT_HEAD(&vtd_hiod->pasid_cache_list, vtd_pce, next);  
>    
>      if (!vtd_device_attach_iommufd(vtd_pce, &local_err)) {  
> diff --git a/hw/i386/trace-events b/hw/i386/trace-events  
> index bf139338f7..52dab0b508 100644  
> --- a/hw/i386/trace-events  
> +++ b/hw/i386/trace-events  
> @@ -78,6 +78,7 @@ vtd_device_attach_hwpt(uint32_t dev_id, uint32_t pasid, uint32_t hwpt_id, int re  
>  vtd_device_detach_hwpt(uint32_t dev_id, uint32_t pasid, int ret) "dev_id %d pasid %d ret: %d"  
>  vtd_device_reattach_def_hwpt(uint32_t dev_id, uint32_t pasid, uint32_t hwpt_id, int ret) "dev_id %d pasid %d hwpt_id %d, ret: %d"  
>  vtd_prq_read_fault(uint32_t fault_id, uint32_t fault_fd, ssize_t bytes) "fault_id %d fault_fd %d ret: %zd"  
> +vtd_prq_response_notify(uint32_t fault_id, uint32_t fault_fd, uint32_t cookie, uint32_t code, ssize_t bytes) "fault_id %d fault_fd %d cookie %d code %d ret: %zd"  
>    
>  # amd_iommu.c  
>  amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" +  offset 0x%"PRIx32
RE: [PATCH 4/5] intel_iommu_accel: Accept PRQ response for passthrough device
Posted by Duan, Zhenzhong 2 days, 13 hours ago
Hi Clement,

>-----Original Message-----
>From: CLEMENT MATHIEU--DRIF <clement.mathieu--drif@bull.com>
>Subject: Re: [PATCH 4/5] intel_iommu_accel: Accept PRQ response for
>passthrough device
>
>
>
>On Thu, 2026-03-26 at 22:52 -0400, Zhenzhong Duan wrote:
>> Propagate guest's PRQ response to host by writing to fault_fd.
>> Create a new VTDPRQEntry to cache cookie for each fault group,
>> this cookie is used to mark the fault group on host side.
>>
>> Signed-off-by: Zhenzhong Duan
><[zhenzhong.duan@intel.com](mailto:zhenzhong.duan@intel.com)>
>> ---
>>  hw/i386/intel_iommu_accel.h   | 14 ++++++++
>>  include/hw/i386/intel_iommu.h |  6 ++++
>>  hw/i386/intel_iommu.c         |  4 +++
>>  hw/i386/intel_iommu_accel.c   | 65 +++++++++++++++++++++++++++++++++++
>>  hw/i386/trace-events          |  1 +
>>  5 files changed, 90 insertions(+)
>>
>> diff --git a/hw/i386/intel_iommu_accel.h b/hw/i386/intel_iommu_accel.h
>> index 10e6ee5722..b46c7126f7 100644
>> --- a/hw/i386/intel_iommu_accel.h
>> +++ b/hw/i386/intel_iommu_accel.h
>> @@ -19,6 +19,9 @@ typedef struct VTDAccelPASIDCacheEntry {
>>      uint32_t fs_hwpt_id;
>>      uint32_t fault_id;
>>      int fault_fd;
>> +    QLIST_HEAD(, VTDPRQEntry) vtd_prq_list;
>> +    IOMMUPRINotifier pri_notifier_entry;
>> +    IOMMUPRINotifier *pri_notifier;
>>      QLIST_ENTRY(VTDAccelPASIDCacheEntry) next;
>>  } VTDAccelPASIDCacheEntry;
>>
>> @@ -31,6 +34,9 @@ void vtd_flush_host_piotlb_all_accel(IntelIOMMUState *s,
>uint16_t domain_id,
>>                                       uint64_t npages, bool ih);
>>  void vtd_pasid_cache_sync_accel(IntelIOMMUState *s, VTDPASIDCacheInfo
>*pc_info);
>>  void vtd_pasid_cache_reset_accel(IntelIOMMUState *s);
>> +bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,
>> +                                             uint16_t rid, uint32_t pasid,
>> +                                             IOMMUPRIResponse *response);
>>  void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops);
>>  #else
>>  static inline bool vtd_check_hiod_accel(IntelIOMMUState *s,
>> @@ -69,6 +75,14 @@ static inline void
>vtd_pasid_cache_reset_accel(IntelIOMMUState *s)
>>  {
>>  }
>>
>> +static inline
>> +bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,
>> +                                             uint16_t rid, uint32_t pasid,
>> +                                             IOMMUPRIResponse *response)
>> +{
>> +    return false;
>> +}
>> +
>>  static inline void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops)
>>  {
>>  }
>> diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
>> index 1842ba5840..5d44eac0ed 100644
>> --- a/include/hw/i386/intel_iommu.h
>> +++ b/include/hw/i386/intel_iommu.h
>> @@ -100,6 +100,12 @@ typedef struct VTDPASIDCacheEntry {
>>      bool valid;
>>  } VTDPASIDCacheEntry;
>>
>> +typedef struct VTDPRQEntry {
>> +    uint32_t grpid;
>
>Hi Zhenzhong,
>
>Maybe silly question, but why don't we keep naming it prgi?

Good question, the naming is picked from uAPI, see below:

struct iommu_hwpt_pgfault {
        __u32 flags;
        __u32 dev_id;
        __u32 pasid;
        __u32 grpid;
        __u32 perm;
        __u32 __reserved;
        __aligned_u64 addr;
        __u32 length;
        __u32 cookie;
};

Because VTDPRQEntry caches page group id and cookie from kernel uAPI,
so align the name with kernel.

Thanks
Zhenzhong
Re: [PATCH 4/5] intel_iommu_accel: Accept PRQ response for passthrough device
Posted by CLEMENT MATHIEU--DRIF 2 days, 12 hours ago
Ok thanks for the clarification

Reviewed-by: Clement Mathieu--Drif <clement.mathieu--drif@bull.com>

On Tue, 2026-03-31 at 05:00 +0000, Duan, Zhenzhong wrote:
> Hi Clement,
> 
> 
> > -----Original Message-----
> > From: CLEMENT MATHIEU--DRIF <[clement.mathieu--drif@bull.com](mailto:clement.mathieu--drif@bull.com)>
> > Subject: Re: [PATCH 4/5] intel_iommu_accel: Accept PRQ response for
> > passthrough device
> > 
> > 
> > 
> > On Thu, 2026-03-26 at 22:52 -0400, Zhenzhong Duan wrote:
> > 
> > > Propagate guest's PRQ response to host by writing to fault_fd.
> > > Create a new VTDPRQEntry to cache cookie for each fault group,
> > > this cookie is used to mark the fault group on host side.
> > > 
> > > Signed-off-by: Zhenzhong Duan
> > 
> > <[zhenzhong.duan@intel.com](mailto:zhenzhong.duan@intel.com)>
> > 
> > > ---
> > >  hw/i386/intel_iommu_accel.h   | 14 ++++++++
> > >  include/hw/i386/intel_iommu.h |  6 ++++
> > >  hw/i386/intel_iommu.c         |  4 +++
> > >  hw/i386/intel_iommu_accel.c   | 65 +++++++++++++++++++++++++++++++++++
> > >  hw/i386/trace-events          |  1 +
> > >  5 files changed, 90 insertions(+)
> > > 
> > > diff --git a/hw/i386/intel_iommu_accel.h b/hw/i386/intel_iommu_accel.h
> > > index 10e6ee5722..b46c7126f7 100644
> > > --- a/hw/i386/intel_iommu_accel.h
> > > +++ b/hw/i386/intel_iommu_accel.h
> > > @@ -19,6 +19,9 @@ typedef struct VTDAccelPASIDCacheEntry {
> > >      uint32_t fs_hwpt_id;
> > >      uint32_t fault_id;
> > >      int fault_fd;
> > > +    QLIST_HEAD(, VTDPRQEntry) vtd_prq_list;
> > > +    IOMMUPRINotifier pri_notifier_entry;
> > > +    IOMMUPRINotifier *pri_notifier;
> > >      QLIST_ENTRY(VTDAccelPASIDCacheEntry) next;
> > >  } VTDAccelPASIDCacheEntry;
> > > 
> > > @@ -31,6 +34,9 @@ void vtd_flush_host_piotlb_all_accel(IntelIOMMUState *s,
> > 
> > uint16_t domain_id,
> > 
> > >                                       uint64_t npages, bool ih);
> > >  void vtd_pasid_cache_sync_accel(IntelIOMMUState *s, VTDPASIDCacheInfo
> > 
> > *pc_info);
> > 
> > >  void vtd_pasid_cache_reset_accel(IntelIOMMUState *s);
> > > +bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,
> > > +                                             uint16_t rid, uint32_t pasid,
> > > +                                             IOMMUPRIResponse *response);
> > >  void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops);
> > >  #else
> > >  static inline bool vtd_check_hiod_accel(IntelIOMMUState *s,
> > > @@ -69,6 +75,14 @@ static inline void
> > 
> > vtd_pasid_cache_reset_accel(IntelIOMMUState *s)
> > 
> > >  {
> > >  }
> > > 
> > > +static inline
> > > +bool vtd_propagate_page_group_response_accel(IntelIOMMUState *s,
> > > +                                             uint16_t rid, uint32_t pasid,
> > > +                                             IOMMUPRIResponse *response)
> > > +{
> > > +    return false;
> > > +}
> > > +
> > >  static inline void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops)
> > >  {
> > >  }
> > > diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
> > > index 1842ba5840..5d44eac0ed 100644
> > > --- a/include/hw/i386/intel_iommu.h
> > > +++ b/include/hw/i386/intel_iommu.h
> > > @@ -100,6 +100,12 @@ typedef struct VTDPASIDCacheEntry {
> > >      bool valid;
> > >  } VTDPASIDCacheEntry;
> > > 
> > > +typedef struct VTDPRQEntry {
> > > +    uint32_t grpid;
> > 
> > 
> > Hi Zhenzhong,
> > 
> > Maybe silly question, but why don't we keep naming it prgi?
> 
> 
> Good question, the naming is picked from uAPI, see below:
> 
> struct iommu_hwpt_pgfault {  
>         __u32 flags;  
>         __u32 dev_id;  
>         __u32 pasid;  
>         __u32 grpid;  
>         __u32 perm;  
>         __u32 __reserved;  
>         __aligned_u64 addr;  
>         __u32 length;  
>         __u32 cookie;  
> };
> 
> Because VTDPRQEntry caches page group id and cookie from kernel uAPI,  
> so align the name with kernel.
> 
> Thanks  
> Zhenzhong