To support PASID, we have switched from looping over vtd_as to vtd_hiod. vtd_hiod represents a host passthrough device and is never dereferenced without the BQL held, so no extra iommu lock is needed to protect it.

Rename vtd_flush_host_piotlb_all_locked() to vtd_flush_host_piotlb_all_accel() accordingly and call it outside the iommu lock.
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
---
 hw/i386/intel_iommu_accel.h | 14 +++++++-------
 hw/i386/intel_iommu.c       |  7 ++++---
 hw/i386/intel_iommu_accel.c |  6 +++---
3 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/hw/i386/intel_iommu_accel.h b/hw/i386/intel_iommu_accel.h
index 1ae46d9250..3f1b1002b8 100644
--- a/hw/i386/intel_iommu_accel.h
+++ b/hw/i386/intel_iommu_accel.h
@@ -24,9 +24,9 @@ typedef struct VTDACCELPASIDCacheEntry {
bool vtd_check_hiod_accel(IntelIOMMUState *s, VTDHostIOMMUDevice *vtd_hiod,
Error **errp);
VTDHostIOMMUDevice *vtd_find_hiod_iommufd(VTDAddressSpace *as);
-void vtd_flush_host_piotlb_all_locked(IntelIOMMUState *s, uint16_t domain_id,
- uint32_t pasid, hwaddr addr,
- uint64_t npages, bool ih);
+void vtd_flush_host_piotlb_all_accel(IntelIOMMUState *s, uint16_t domain_id,
+ uint32_t pasid, hwaddr addr,
+ uint64_t npages, bool ih);
void vtd_pasid_cache_sync_accel(IntelIOMMUState *s, VTDPASIDCacheInfo *pc_info);
void vtd_pasid_cache_reset_accel(IntelIOMMUState *s);
void vtd_iommu_ops_update_accel(PCIIOMMUOps *ops);
@@ -51,10 +51,10 @@ static inline bool vtd_propagate_guest_pasid(VTDAddressSpace *vtd_as,
return true;
}
-static inline void vtd_flush_host_piotlb_all_locked(IntelIOMMUState *s,
- uint16_t domain_id,
- uint32_t pasid, hwaddr addr,
- uint64_t npages, bool ih)
+static inline void vtd_flush_host_piotlb_all_accel(IntelIOMMUState *s,
+ uint16_t domain_id,
+ uint32_t pasid, hwaddr addr,
+ uint64_t npages, bool ih)
{
}
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 1626c8f96b..17252c804b 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -2990,11 +2990,11 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
info.domain_id = domain_id;
info.pasid = pasid;
+ vtd_flush_host_piotlb_all_accel(s, domain_id, pasid, 0, (uint64_t)-1,
+ false);
vtd_iommu_lock(s);
g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid,
&info);
- vtd_flush_host_piotlb_all_locked(s, domain_id, pasid, 0, (uint64_t)-1,
- false);
vtd_iommu_unlock(s);
QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
@@ -3024,10 +3024,11 @@ static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
info.addr = addr;
info.mask = ~((1 << am) - 1);
+ vtd_flush_host_piotlb_all_accel(s, domain_id, pasid, addr, 1 << am, ih);
+
vtd_iommu_lock(s);
g_hash_table_foreach_remove(s->iotlb,
vtd_hash_remove_by_page_piotlb, &info);
- vtd_flush_host_piotlb_all_locked(s, domain_id, pasid, addr, 1 << am, ih);
vtd_iommu_unlock(s);
vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am, pasid);
diff --git a/hw/i386/intel_iommu_accel.c b/hw/i386/intel_iommu_accel.c
index 5abc462a45..a30786e361 100644
--- a/hw/i386/intel_iommu_accel.c
+++ b/hw/i386/intel_iommu_accel.c
@@ -236,9 +236,9 @@ static void vtd_flush_host_piotlb(VTDACCELPASIDCacheEntry *vtd_pce,
}
}
-void vtd_flush_host_piotlb_all_locked(IntelIOMMUState *s, uint16_t domain_id,
- uint32_t pasid, hwaddr addr,
- uint64_t npages, bool ih)
+void vtd_flush_host_piotlb_all_accel(IntelIOMMUState *s, uint16_t domain_id,
+ uint32_t pasid, hwaddr addr,
+ uint64_t npages, bool ih)
{
struct iommu_hwpt_vtd_s1_invalidate cache_info = { 0 };
VTDPIOTLBInvInfo piotlb_info;
--
2.47.3