[PATCH v2 16/19] intel_iommu: Propagate PASID-based iotlb invalidation to host

Zhenzhong Duan posted 19 patches 4 months, 4 weeks ago
Maintainers: "Michael S. Tsirkin" <mst@redhat.com>, Jason Wang <jasowang@redhat.com>, Yi Liu <yi.l.liu@intel.com>, "Clément Mathieu--Drif" <clement.mathieu--drif@eviden.com>, Marcel Apfelbaum <marcel.apfelbaum@gmail.com>, Paolo Bonzini <pbonzini@redhat.com>, Richard Henderson <richard.henderson@linaro.org>, Eduardo Habkost <eduardo@habkost.net>, Alex Williamson <alex.williamson@redhat.com>, "Cédric Le Goater" <clg@redhat.com>, Eric Auger <eric.auger@redhat.com>, Zhenzhong Duan <zhenzhong.duan@intel.com>
There is a newer version of this series
[PATCH v2 16/19] intel_iommu: Propagate PASID-based iotlb invalidation to host
Posted by Zhenzhong Duan 4 months, 4 weeks ago
From: Yi Liu <yi.l.liu@intel.com>

This traps the guest PASID-based iotlb invalidation request and propagates it
to the host.

Intel VT-d 3.0 supports nested translation at PASID granularity. Guest SVA support
could be implemented by configuring nested translation on specific PASID. This
is also known as dual stage DMA translation.

Under such configuration, guest owns the GVA->GPA translation which is
configured as stage-1 page table in host side for a specific pasid, and host
owns GPA->HPA translation. As guest owns stage-1 translation table, piotlb
invalidation should be propagated to host since host IOMMU will cache first
level page table related mappings during DMA address translation.

Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
---
 hw/i386/intel_iommu_internal.h |   6 ++
 hw/i386/intel_iommu.c          | 113 ++++++++++++++++++++++++++++++++-
 2 files changed, 117 insertions(+), 2 deletions(-)

diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index b3e4aa23f1..07bfb97499 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -589,6 +589,12 @@ typedef struct VTDPASIDCacheInfo {
     bool error_happened;
 } VTDPASIDCacheInfo;
 
+typedef struct VTDPIOTLBInvInfo {
+    uint16_t domain_id;
+    uint32_t pasid;
+    struct iommu_hwpt_vtd_s1_invalidate *inv_data;
+} VTDPIOTLBInvInfo;
+
 /* PASID Table Related Definitions */
 #define VTD_PASID_DIR_BASE_ADDR_MASK  (~0xfffULL)
 #define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 621b07aa02..d1fa395274 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -2639,12 +2639,105 @@ static int vtd_bind_guest_pasid(VTDAddressSpace *vtd_as,
 
     return ret;
 }
+
+/*
+ * Caller of this function should hold iommu_lock.
+ */
+static void vtd_invalidate_piotlb(VTDAddressSpace *vtd_as,
+                                  struct iommu_hwpt_vtd_s1_invalidate *cache)
+{
+    VTDHostIOMMUDevice *vtd_hiod;
+    HostIOMMUDeviceIOMMUFD *idev;
+    int devfn = vtd_as->devfn;
+    struct vtd_as_key key = {
+        .bus = vtd_as->bus,
+        .devfn = devfn,
+    };
+    IntelIOMMUState *s = vtd_as->iommu_state;
+    uint32_t entry_num = 1; /* Only implement one request for simplicity */
+    Error *err = NULL; /* Error API requires errp to point to NULL */
+
+    vtd_hiod = g_hash_table_lookup(s->vtd_host_iommu_dev, &key);
+    if (!vtd_hiod || !vtd_hiod->hiod) {
+        return;
+    }
+    idev = HOST_IOMMU_DEVICE_IOMMUFD(vtd_hiod->hiod);
+
+    if (!iommufd_backend_invalidate_cache(idev->iommufd, vtd_hiod->s1_hwpt,
+                                          IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+                                          sizeof(*cache), &entry_num, cache,
+                                          &err)) {
+        error_report_err(err);
+    }
+}
+
+/*
+ * Called once per vtd_as instance in s->vtd_address_spaces, with
+ * a VTDPIOTLBInvInfo argument acting as the match filter. It
+ * propagates the piotlb invalidation to the host for matching
+ * devices. Caller of this function should hold iommu_lock.
+ */
+static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
+                                  gpointer user_data)
+{
+    VTDPIOTLBInvInfo *piotlb_info = user_data;
+    VTDAddressSpace *vtd_as = value;
+    VTDPASIDCacheEntry *pc_entry = &vtd_as->pasid_cache_entry;
+    uint32_t pasid;
+    uint16_t did;
+
+    /* Replay only fills the pasid entry cache for passthrough devices */
+    if (!pc_entry->cache_filled ||
+        !vtd_pe_pgtt_is_flt(&pc_entry->pasid_entry)) {
+        return;
+    }
+
+    if (vtd_as_to_iommu_pasid_locked(vtd_as, &pasid)) {
+        return;
+    }
+
+    did = vtd_pe_get_did(&pc_entry->pasid_entry);
+
+    if (piotlb_info->domain_id == did && piotlb_info->pasid == pasid) {
+        vtd_invalidate_piotlb(vtd_as, piotlb_info->inv_data);
+    }
+}
+
+static void vtd_flush_pasid_iotlb_all(IntelIOMMUState *s,
+                                      uint16_t domain_id, uint32_t pasid,
+                                      hwaddr addr, uint64_t npages, bool ih)
+{
+    struct iommu_hwpt_vtd_s1_invalidate cache_info = { 0 };
+    VTDPIOTLBInvInfo piotlb_info;
+
+    cache_info.addr = addr;
+    cache_info.npages = npages;
+    cache_info.flags = ih ? IOMMU_VTD_INV_FLAGS_LEAF : 0;
+
+    piotlb_info.domain_id = domain_id;
+    piotlb_info.pasid = pasid;
+    piotlb_info.inv_data = &cache_info;
+
+    /*
+     * Go through each vtd_as instance in s->vtd_address_spaces
+     * to find the affected devices, since piotlb invalidation
+     * should check the pasid cache from the architecture's
+     * point of view.
+     */
+    g_hash_table_foreach(s->vtd_address_spaces,
+                         vtd_flush_pasid_iotlb, &piotlb_info);
+}
 #else
 static int vtd_bind_guest_pasid(VTDAddressSpace *vtd_as,
                                 VTDPASIDEntry *pe, VTDPASIDOp op)
 {
     return 0;
 }
+
+static void vtd_flush_pasid_iotlb_all(IntelIOMMUState *s,
+                                      uint16_t domain_id, uint32_t pasid,
+                                      hwaddr addr, uint64_t npages, bool ih)
+{
+}
 #endif
 
 /* Do a context-cache device-selective invalidation.
@@ -3300,6 +3393,13 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
     info.pasid = pasid;
 
     vtd_iommu_lock(s);
+    /*
+     * Go through each vtd_as instance in s->vtd_address_spaces
+     * to find the affected devices, since piotlb invalidation
+     * should check the pasid cache from the architecture's
+     * point of view.
+     */
+    vtd_flush_pasid_iotlb_all(s, domain_id, pasid, 0, (uint64_t)-1, 0);
+
     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid,
                                 &info);
     vtd_iommu_unlock(s);
@@ -3323,7 +3423,8 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
 }
 
 static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
-                                       uint32_t pasid, hwaddr addr, uint8_t am)
+                                       uint32_t pasid, hwaddr addr, uint8_t am,
+                                       bool ih)
 {
     VTDIOTLBPageInvInfo info;
 
@@ -3333,6 +3434,13 @@ static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
     info.mask = ~((1 << am) - 1);
 
     vtd_iommu_lock(s);
+    /*
+     * Go through each vtd_as instance in s->vtd_address_spaces
+     * to find the affected devices, since piotlb invalidation
+     * should check the pasid cache from the architecture's
+     * point of view.
+     */
+    vtd_flush_pasid_iotlb_all(s, domain_id, pasid, addr, 1 << am, ih);
+
     g_hash_table_foreach_remove(s->iotlb,
                                 vtd_hash_remove_by_page_piotlb, &info);
     vtd_iommu_unlock(s);
@@ -3366,7 +3474,8 @@ static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
     case VTD_INV_DESC_PIOTLB_PSI_IN_PASID:
         am = VTD_INV_DESC_PIOTLB_AM(inv_desc->val[1]);
         addr = (hwaddr) VTD_INV_DESC_PIOTLB_ADDR(inv_desc->val[1]);
-        vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am);
+        vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am,
+                                   VTD_INV_DESC_PIOTLB_IH(inv_desc->val[1]));
         break;
 
     default:
-- 
2.34.1
Re: [PATCH v2 16/19] intel_iommu: Propagate PASID-based iotlb invalidation to host
Posted by Eric Auger 4 months, 3 weeks ago
Hi Zhenzhong,

On 6/20/25 9:18 AM, Zhenzhong Duan wrote:
> From: Yi Liu <yi.l.liu@intel.com>
>
> This traps the guest PASID-based iotlb invalidation request and propagate it
> to host.
>
> Intel VT-d 3.0 supports nested translation in PASID granular. Guest SVA support
s/granular/granularity
> could be implemented by configuring nested translation on specific PASID. This
> is also known as dual stage DMA translation.
>
> Under such configuration, guest owns the GVA->GPA translation which is
> configured as stage-1 page table in host side for a specific pasid, and host
> owns GPA->HPA translation. As guest owns stage-1 translation table, piotlb
> invalidation should be propagated to host since host IOMMU will cache first
> level page table related mappings during DMA address translation.
>
> Signed-off-by: Yi Liu <yi.l.liu@intel.com>
> Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
> ---
>  hw/i386/intel_iommu_internal.h |   6 ++
>  hw/i386/intel_iommu.c          | 113 ++++++++++++++++++++++++++++++++-
>  2 files changed, 117 insertions(+), 2 deletions(-)
>
> diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
> index b3e4aa23f1..07bfb97499 100644
> --- a/hw/i386/intel_iommu_internal.h
> +++ b/hw/i386/intel_iommu_internal.h
> @@ -589,6 +589,12 @@ typedef struct VTDPASIDCacheInfo {
>      bool error_happened;
>  } VTDPASIDCacheInfo;
>  
> +typedef struct VTDPIOTLBInvInfo {
> +    uint16_t domain_id;
> +    uint32_t pasid;
> +    struct iommu_hwpt_vtd_s1_invalidate *inv_data;
> +} VTDPIOTLBInvInfo;
> +
>  /* PASID Table Related Definitions */
>  #define VTD_PASID_DIR_BASE_ADDR_MASK  (~0xfffULL)
>  #define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
> index 621b07aa02..d1fa395274 100644
> --- a/hw/i386/intel_iommu.c
> +++ b/hw/i386/intel_iommu.c
> @@ -2639,12 +2639,105 @@ static int vtd_bind_guest_pasid(VTDAddressSpace *vtd_as,
>  
>      return ret;
>  }
> +
> +/*
> + * Caller of this function should hold iommu_lock.
> + */
> +static void vtd_invalidate_piotlb(VTDAddressSpace *vtd_as,
> +                                  struct iommu_hwpt_vtd_s1_invalidate *cache)
> +{
> +    VTDHostIOMMUDevice *vtd_hiod;
> +    HostIOMMUDeviceIOMMUFD *idev;
> +    int devfn = vtd_as->devfn;
> +    struct vtd_as_key key = {
> +        .bus = vtd_as->bus,
> +        .devfn = devfn,
> +    };
> +    IntelIOMMUState *s = vtd_as->iommu_state;
> +    uint32_t entry_num = 1; /* Only implement one request for simplicity */
> +    Error *err;
> +
> +    vtd_hiod = g_hash_table_lookup(s->vtd_host_iommu_dev, &key);
> +    if (!vtd_hiod || !vtd_hiod->hiod) {
> +        return;
> +    }
> +    idev = HOST_IOMMU_DEVICE_IOMMUFD(vtd_hiod->hiod);
> +
> +    if (!iommufd_backend_invalidate_cache(idev->iommufd, vtd_hiod->s1_hwpt,
> +                                          IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
> +                                          sizeof(*cache), &entry_num, cache,
> +                                          &err)) {
> +        error_report_err(err);
> +    }
> +}
> +
> +/*
> + * This function is a loop function for the s->vtd_address_spaces
> + * list with VTDPIOTLBInvInfo as execution filter. It propagates
> + * the piotlb invalidation to host. Caller of this function
> + * should hold iommu_lock.
instead of having this mention everywhere may be more efficient to
postfix each function with _locked and I don't know if it exists have a
checker.
> + */
> +static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
> +                                  gpointer user_data)
> +{
> +    VTDPIOTLBInvInfo *piotlb_info = user_data;
> +    VTDAddressSpace *vtd_as = value;
> +    VTDPASIDCacheEntry *pc_entry = &vtd_as->pasid_cache_entry;
> +    uint32_t pasid;
> +    uint16_t did;
> +
> +    /* Replay only fill pasid entry cache for passthrough device */
fills
> +    if (!pc_entry->cache_filled ||
> +        !vtd_pe_pgtt_is_flt(&pc_entry->pasid_entry)) {
> +        return;
> +    }
> +
> +    if (vtd_as_to_iommu_pasid_locked(vtd_as, &pasid)) {
> +        return;
> +    }
> +
> +    did = vtd_pe_get_did(&pc_entry->pasid_entry);
> +
> +    if (piotlb_info->domain_id == did && piotlb_info->pasid == pasid) {
> +        vtd_invalidate_piotlb(vtd_as, piotlb_info->inv_data);
> +    }
> +}
> +
> +static void vtd_flush_pasid_iotlb_all(IntelIOMMUState *s,
> +                                      uint16_t domain_id, uint32_t pasid,
> +                                      hwaddr addr, uint64_t npages, bool ih)
> +{
> +    struct iommu_hwpt_vtd_s1_invalidate cache_info = { 0 };
> +    VTDPIOTLBInvInfo piotlb_info;
> +
> +    cache_info.addr = addr;
> +    cache_info.npages = npages;
> +    cache_info.flags = ih ? IOMMU_VTD_INV_FLAGS_LEAF : 0;
> +
> +    piotlb_info.domain_id = domain_id;
> +    piotlb_info.pasid = pasid;
> +    piotlb_info.inv_data = &cache_info;
> +
> +    /*
> +     * Here loops all the vtd_as instances in s->vtd_address_spaces
I am not a native english speaker but I am not sure loop something is
OK. Go though each?
Besides that comment is not that useful as it paraphrases the code.
> +     * to find out the affected devices since piotlb invalidation
> +     * should check pasid cache per architecture point of view.
> +     */
> +    g_hash_table_foreach(s->vtd_address_spaces,
> +                         vtd_flush_pasid_iotlb, &piotlb_info);
> +}
>  #else
>  static int vtd_bind_guest_pasid(VTDAddressSpace *vtd_as,
>                                  VTDPASIDEntry *pe, VTDPASIDOp op)
>  {
>      return 0;
>  }
> +
> +static void vtd_flush_pasid_iotlb_all(IntelIOMMUState *s,
> +                                      uint16_t domain_id, uint32_t pasid,
> +                                      hwaddr addr, uint64_t npages, bool ih)
> +{
> +}
>  #endif
>  
>  /* Do a context-cache device-selective invalidation.
> @@ -3300,6 +3393,13 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
>      info.pasid = pasid;
>  
>      vtd_iommu_lock(s);
> +    /*
> +     * Here loops all the vtd_as instances in s->vtd_as
would frop above.
> +     * to find out the affected devices since piotlb invalidation
Find out ...
> +     * should check pasid cache per architecture point of view.
> +     */
> +    vtd_flush_pasid_iotlb_all(s, domain_id, pasid, 0, (uint64_t)-1, 0);
> +
>      g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid,
>                                  &info);
>      vtd_iommu_unlock(s);
> @@ -3323,7 +3423,8 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
>  }
>  
>  static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
> -                                       uint32_t pasid, hwaddr addr, uint8_t am)
> +                                       uint32_t pasid, hwaddr addr, uint8_t am,
> +                                       bool ih)
>  {
>      VTDIOTLBPageInvInfo info;
>  
> @@ -3333,6 +3434,13 @@ static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
>      info.mask = ~((1 << am) - 1);
>  
>      vtd_iommu_lock(s);
> +    /*
> +     * Here loops all the vtd_as instances in s->vtd_as
> +     * to find out the affected devices since piotlb invalidation
> +     * should check pasid cache per architecture point of view.
> +     */
> +    vtd_flush_pasid_iotlb_all(s, domain_id, pasid, addr, 1 << am, ih);
> +
>      g_hash_table_foreach_remove(s->iotlb,
>                                  vtd_hash_remove_by_page_piotlb, &info);
>      vtd_iommu_unlock(s);
> @@ -3366,7 +3474,8 @@ static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
>      case VTD_INV_DESC_PIOTLB_PSI_IN_PASID:
>          am = VTD_INV_DESC_PIOTLB_AM(inv_desc->val[1]);
>          addr = (hwaddr) VTD_INV_DESC_PIOTLB_ADDR(inv_desc->val[1]);
> -        vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am);
> +        vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am,
> +                                   VTD_INV_DESC_PIOTLB_IH(inv_desc->val[1]));
>          break;
>  
>      default:
Eric
RE: [PATCH v2 16/19] intel_iommu: Propagate PASID-based iotlb invalidation to host
Posted by Duan, Zhenzhong 4 months, 3 weeks ago
Hi Eric,

>-----Original Message-----
>From: Eric Auger <eric.auger@redhat.com>
>Subject: Re: [PATCH v2 16/19] intel_iommu: Propagate PASID-based iotlb
>invalidation to host
>
>Hi Zhenzhong,
>
>On 6/20/25 9:18 AM, Zhenzhong Duan wrote:
>> From: Yi Liu <yi.l.liu@intel.com>
>>
>> This traps the guest PASID-based iotlb invalidation request and propagate it
>> to host.
>>
>> Intel VT-d 3.0 supports nested translation in PASID granular. Guest SVA
>support
>s/granular/granularity

Will do

>> could be implemented by configuring nested translation on specific PASID.
>This
>> is also known as dual stage DMA translation.
>>
>> Under such configuration, guest owns the GVA->GPA translation which is
>> configured as stage-1 page table in host side for a specific pasid, and host
>> owns GPA->HPA translation. As guest owns stage-1 translation table, piotlb
>> invalidation should be propagated to host since host IOMMU will cache first
>> level page table related mappings during DMA address translation.
>>
>> Signed-off-by: Yi Liu <yi.l.liu@intel.com>
>> Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
>> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
>> ---
>>  hw/i386/intel_iommu_internal.h |   6 ++
>>  hw/i386/intel_iommu.c          | 113
>++++++++++++++++++++++++++++++++-
>>  2 files changed, 117 insertions(+), 2 deletions(-)
>>
>> diff --git a/hw/i386/intel_iommu_internal.h
>b/hw/i386/intel_iommu_internal.h
>> index b3e4aa23f1..07bfb97499 100644
>> --- a/hw/i386/intel_iommu_internal.h
>> +++ b/hw/i386/intel_iommu_internal.h
>> @@ -589,6 +589,12 @@ typedef struct VTDPASIDCacheInfo {
>>      bool error_happened;
>>  } VTDPASIDCacheInfo;
>>
>> +typedef struct VTDPIOTLBInvInfo {
>> +    uint16_t domain_id;
>> +    uint32_t pasid;
>> +    struct iommu_hwpt_vtd_s1_invalidate *inv_data;
>> +} VTDPIOTLBInvInfo;
>> +
>>  /* PASID Table Related Definitions */
>>  #define VTD_PASID_DIR_BASE_ADDR_MASK  (~0xfffULL)
>>  #define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
>> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
>> index 621b07aa02..d1fa395274 100644
>> --- a/hw/i386/intel_iommu.c
>> +++ b/hw/i386/intel_iommu.c
>> @@ -2639,12 +2639,105 @@ static int
>vtd_bind_guest_pasid(VTDAddressSpace *vtd_as,
>>
>>      return ret;
>>  }
>> +
>> +/*
>> + * Caller of this function should hold iommu_lock.
>> + */
>> +static void vtd_invalidate_piotlb(VTDAddressSpace *vtd_as,
>> +                                  struct
>iommu_hwpt_vtd_s1_invalidate *cache)
>> +{
>> +    VTDHostIOMMUDevice *vtd_hiod;
>> +    HostIOMMUDeviceIOMMUFD *idev;
>> +    int devfn = vtd_as->devfn;
>> +    struct vtd_as_key key = {
>> +        .bus = vtd_as->bus,
>> +        .devfn = devfn,
>> +    };
>> +    IntelIOMMUState *s = vtd_as->iommu_state;
>> +    uint32_t entry_num = 1; /* Only implement one request for simplicity
>*/
>> +    Error *err;
>> +
>> +    vtd_hiod = g_hash_table_lookup(s->vtd_host_iommu_dev, &key);
>> +    if (!vtd_hiod || !vtd_hiod->hiod) {
>> +        return;
>> +    }
>> +    idev = HOST_IOMMU_DEVICE_IOMMUFD(vtd_hiod->hiod);
>> +
>> +    if (!iommufd_backend_invalidate_cache(idev->iommufd,
>vtd_hiod->s1_hwpt,
>> +
>IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
>> +                                          sizeof(*cache),
>&entry_num, cache,
>> +                                          &err)) {
>> +        error_report_err(err);
>> +    }
>> +}
>> +
>> +/*
>> + * This function is a loop function for the s->vtd_address_spaces
>> + * list with VTDPIOTLBInvInfo as execution filter. It propagates
>> + * the piotlb invalidation to host. Caller of this function
>> + * should hold iommu_lock.
>instead of having this mention everywhere may be more efficient to
>postfix each function with _locked and I don't know if it exists have a
>checker.

OK, will add _locked and checker if necessary.

>> + */
>> +static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
>> +                                  gpointer user_data)
>> +{
>> +    VTDPIOTLBInvInfo *piotlb_info = user_data;
>> +    VTDAddressSpace *vtd_as = value;
>> +    VTDPASIDCacheEntry *pc_entry = &vtd_as->pasid_cache_entry;
>> +    uint32_t pasid;
>> +    uint16_t did;
>> +
>> +    /* Replay only fill pasid entry cache for passthrough device */
>fills
>> +    if (!pc_entry->cache_filled ||
>> +        !vtd_pe_pgtt_is_flt(&pc_entry->pasid_entry)) {
>> +        return;
>> +    }
>> +
>> +    if (vtd_as_to_iommu_pasid_locked(vtd_as, &pasid)) {
>> +        return;
>> +    }
>> +
>> +    did = vtd_pe_get_did(&pc_entry->pasid_entry);
>> +
>> +    if (piotlb_info->domain_id == did && piotlb_info->pasid == pasid) {
>> +        vtd_invalidate_piotlb(vtd_as, piotlb_info->inv_data);
>> +    }
>> +}
>> +
>> +static void vtd_flush_pasid_iotlb_all(IntelIOMMUState *s,
>> +                                      uint16_t domain_id,
>uint32_t pasid,
>> +                                      hwaddr addr, uint64_t
>npages, bool ih)
>> +{
>> +    struct iommu_hwpt_vtd_s1_invalidate cache_info = { 0 };
>> +    VTDPIOTLBInvInfo piotlb_info;
>> +
>> +    cache_info.addr = addr;
>> +    cache_info.npages = npages;
>> +    cache_info.flags = ih ? IOMMU_VTD_INV_FLAGS_LEAF : 0;
>> +
>> +    piotlb_info.domain_id = domain_id;
>> +    piotlb_info.pasid = pasid;
>> +    piotlb_info.inv_data = &cache_info;
>> +
>> +    /*
>> +     * Here loops all the vtd_as instances in s->vtd_address_spaces
>I am not a native english speaker but I am not sure loop something is
>OK. Go though each?
Will do.

>Besides that comment is not that useful as it paraphrases the code.

OK, will try to simplify it.

>> +     * to find out the affected devices since piotlb invalidation
>> +     * should check pasid cache per architecture point of view.
>> +     */
>> +    g_hash_table_foreach(s->vtd_address_spaces,
>> +                         vtd_flush_pasid_iotlb, &piotlb_info);
>> +}
>>  #else
>>  static int vtd_bind_guest_pasid(VTDAddressSpace *vtd_as,
>>                                  VTDPASIDEntry *pe, VTDPASIDOp
>op)
>>  {
>>      return 0;
>>  }
>> +
>> +static void vtd_flush_pasid_iotlb_all(IntelIOMMUState *s,
>> +                                      uint16_t domain_id,
>uint32_t pasid,
>> +                                      hwaddr addr, uint64_t
>npages, bool ih)
>> +{
>> +}
>>  #endif
>>
>>  /* Do a context-cache device-selective invalidation.
>> @@ -3300,6 +3393,13 @@ static void
>vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
>>      info.pasid = pasid;
>>
>>      vtd_iommu_lock(s);
>> +    /*
>> +     * Here loops all the vtd_as instances in s->vtd_as
>would frop above.
>> +     * to find out the affected devices since piotlb invalidation
>Find out ...

Will do.

Thanks
Zhenzhong