[PATCH v3 4/4] iommu/vt-d: Introduce batched cache invalidation

Posted by Tina Zhang 1 year, 4 months ago
Convert IOTLB and Dev-IOTLB invalidation to a batched model. Cache tag
invalidation requests for a domain are now accumulated in a qi_batch
structure before being submitted to the hardware in bulk. This replaces
the previous per-request qi_flush approach with a more efficient
batching mechanism.
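
To illustrate the pattern, here is a standalone sketch (not the driver
code; the descriptor layout, names and the batch size of 16 are
illustrative stand-ins for struct qi_desc, qi_submit_sync() and
QI_MAX_BATCHED_DESC_COUNT): descriptors accumulate in a fixed-size
buffer, the buffer is submitted automatically when it fills up, and the
caller flushes explicitly at the end of its cache-tag walk so nothing
stays deferred.

/*
 * Standalone sketch of the batching pattern (illustrative only; the
 * descriptor layout and the batch size are stand-ins for the driver's
 * struct qi_desc and QI_MAX_BATCHED_DESC_COUNT).
 */
#include <stdio.h>
#include <string.h>

#define MAX_BATCHED_DESC_COUNT 16

struct desc {
	unsigned long long lo;
	unsigned long long hi;
};

struct batch {
	struct desc descs[MAX_BATCHED_DESC_COUNT];
	unsigned int index;
};

/* Stands in for qi_submit_sync(): submit all queued descriptors at once. */
static void submit_sync(const struct desc *descs, unsigned int count)
{
	printf("submitting %u descriptor(s) in one shot\n", count);
}

static void batch_flush(struct batch *batch)
{
	if (!batch->index)
		return;

	submit_sync(batch->descs, batch->index);

	/* Reset the index and clear the buffer for the next round. */
	memset(batch, 0, sizeof(*batch));
}

/* Queue one descriptor; flush automatically once the buffer is full. */
static void batch_add(struct batch *batch, unsigned long long lo,
		      unsigned long long hi)
{
	batch->descs[batch->index].lo = lo;
	batch->descs[batch->index].hi = hi;
	if (++batch->index == MAX_BATCHED_DESC_COUNT)
		batch_flush(batch);
}

int main(void)
{
	struct batch batch = { 0 };
	int tag;

	/* Walk the "cache tags" and queue one invalidation request each. */
	for (tag = 0; tag < 20; tag++)
		batch_add(&batch, tag, 0);

	/* Explicit flush before returning, so no request stays deferred. */
	batch_flush(&batch);
	return 0;
}

With 20 requests the sketch submits one full batch of 16 plus a trailing
batch of 4, which mirrors how the flush helpers below end their cache-tag
walk with an explicit qi_batch_flush_descs() call.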

Co-developed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Tina Zhang <tina.zhang@intel.com>
---
 drivers/iommu/intel/cache.c | 122 +++++++++++++++++++++++++++++++-----
 1 file changed, 107 insertions(+), 15 deletions(-)

diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 21485c86e7381..983769de3bc90 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -262,6 +262,79 @@ static unsigned long calculate_psi_aligned_address(unsigned long start,
 	return ALIGN_DOWN(start, VTD_PAGE_SIZE << mask);
 }
 
+static void qi_batch_flush_descs(struct intel_iommu *iommu, struct qi_batch *batch)
+{
+	if (!iommu || !batch->index)
+		return;
+
+	qi_submit_sync(iommu, batch->descs, batch->index, 0);
+
+	/* Reset the index value and clean the whole batch buffer. */
+	memset(batch, 0, sizeof(*batch));
+}
+
+static void qi_batch_increment_index(struct intel_iommu *iommu, struct qi_batch *batch)
+{
+	if (++batch->index == QI_MAX_BATCHED_DESC_COUNT)
+		qi_batch_flush_descs(iommu, batch);
+}
+
+static void qi_batch_add_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+			       unsigned int size_order, u64 type,
+			       struct qi_batch *batch)
+{
+	qi_desc_iotlb(iommu, did, addr, size_order, type, &(batch->descs[batch->index]));
+	qi_batch_increment_index(iommu, batch);
+}
+
+static void qi_batch_add_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+				   u16 qdep, u64 addr, unsigned int mask,
+				   struct qi_batch *batch)
+{
+	/*
+	 * According to VT-d spec, software is recommended to not submit any Device-TLB
+	 * invalidation requests while address remapping hardware is disabled.
+	 */
+	if (!(iommu->gcmd & DMA_GCMD_TE))
+		return;
+
+	qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &(batch->descs[batch->index]));
+	qi_batch_increment_index(iommu, batch);
+}
+
+static void qi_batch_add_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid,
+				u64 addr, unsigned long npages, bool ih,
+				struct qi_batch *batch)
+{
+	/*
+	 * npages == -1 means a PASID-selective invalidation, otherwise,
+	 * a positive value for Page-selective-within-PASID invalidation.
+	 * 0 is not a valid input.
+	 */
+	if (!npages)
+		return;
+
+	qi_desc_piotlb(did, pasid, addr, npages, ih, &(batch->descs[batch->index]));
+	qi_batch_increment_index(iommu, batch);
+}
+
+static void qi_batch_add_pasid_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+					 u32 pasid,  u16 qdep, u64 addr,
+					 unsigned int size_order, struct qi_batch *batch)
+{
+	/*
+	 * According to VT-d spec, software is recommended to not submit any
+	 * Device-TLB invalidation requests while address remapping hardware
+	 * is disabled.
+	 */
+	if (!(iommu->gcmd & DMA_GCMD_TE))
+		return;
+
+	qi_desc_dev_iotlb_pasid(sid, pfsid, pasid, qdep, addr, size_order,
+				&(batch->descs[batch->index]));
+	qi_batch_increment_index(iommu, batch);
+}
+
 static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag,
 				  unsigned long addr, unsigned long pages,
 				  unsigned long mask, int ih)
@@ -270,7 +343,8 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
 	u64 type = DMA_TLB_PSI_FLUSH;
 
 	if (domain->use_first_level) {
-		qi_flush_piotlb(iommu, tag->domain_id, tag->pasid, addr, pages, ih);
+		qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
+				    pages, ih, domain->qi_batch);
 		return;
 	}
 
@@ -287,7 +361,8 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
 	}
 
 	if (ecap_qis(iommu->ecap))
-		qi_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type);
+		qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih, mask, type,
+				   domain->qi_batch);
 	else
 		__iommu_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type);
 }
@@ -303,19 +378,20 @@ static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_
 	sid = PCI_DEVID(info->bus, info->devfn);
 
 	if (tag->pasid == IOMMU_NO_PASID) {
-		qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
-				   addr, mask);
+		qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
+				       addr, mask, domain->qi_batch);
 		if (info->dtlb_extra_inval)
-			qi_flush_dev_iotlb(iommu, sid, info->pfsid,
-					   info->ats_qdep, addr, mask);
+			qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
+					       addr, mask, domain->qi_batch);
 		return;
 	}
 
-	qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid, tag->pasid,
-				 info->ats_qdep, addr, mask);
+	qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid,
+				     info->ats_qdep, addr, mask, domain->qi_batch);
 	if (info->dtlb_extra_inval)
-		qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid, tag->pasid,
-					 info->ats_qdep, addr, mask);
+		qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid,
+					     info->ats_qdep, addr, mask,
+					     domain->qi_batch);
 }
 
 static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag)
@@ -327,11 +403,11 @@ static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_
 	info = dev_iommu_priv_get(tag->dev);
 	sid = PCI_DEVID(info->bus, info->devfn);
 
-	qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
-			   MAX_AGAW_PFN_WIDTH);
+	qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
+			       MAX_AGAW_PFN_WIDTH, domain->qi_batch);
 	if (info->dtlb_extra_inval)
-		qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
-				   MAX_AGAW_PFN_WIDTH);
+		qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
+				       MAX_AGAW_PFN_WIDTH, domain->qi_batch);
 }
 
 /*
@@ -341,6 +417,7 @@ static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_
 void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 			   unsigned long end, int ih)
 {
+	struct intel_iommu *iommu = NULL;
 	unsigned long pages, mask, addr;
 	struct cache_tag *tag;
 	unsigned long flags;
@@ -349,6 +426,10 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 
 	spin_lock_irqsave(&domain->cache_lock, flags);
 	list_for_each_entry(tag, &domain->cache_tags, node) {
+		if (iommu && iommu != tag->iommu)
+			qi_batch_flush_descs(iommu, domain->qi_batch);
+		iommu = tag->iommu;
+
 		switch (tag->type) {
 		case CACHE_TAG_IOTLB:
 		case CACHE_TAG_NESTING_IOTLB:
@@ -372,6 +453,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 
 		trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
 	}
+	qi_batch_flush_descs(iommu, domain->qi_batch);
 	spin_unlock_irqrestore(&domain->cache_lock, flags);
 }
 
@@ -381,11 +463,16 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
  */
 void cache_tag_flush_all(struct dmar_domain *domain)
 {
+	struct intel_iommu *iommu = NULL;
 	struct cache_tag *tag;
 	unsigned long flags;
 
 	spin_lock_irqsave(&domain->cache_lock, flags);
 	list_for_each_entry(tag, &domain->cache_tags, node) {
+		if (iommu && iommu != tag->iommu)
+			qi_batch_flush_descs(iommu, domain->qi_batch);
+		iommu = tag->iommu;
+
 		switch (tag->type) {
 		case CACHE_TAG_IOTLB:
 		case CACHE_TAG_NESTING_IOTLB:
@@ -399,6 +486,7 @@ void cache_tag_flush_all(struct dmar_domain *domain)
 
 		trace_cache_tag_flush_all(tag);
 	}
+	qi_batch_flush_descs(iommu, domain->qi_batch);
 	spin_unlock_irqrestore(&domain->cache_lock, flags);
 }
 
@@ -416,6 +504,7 @@ void cache_tag_flush_all(struct dmar_domain *domain)
 void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
 			      unsigned long end)
 {
+	struct intel_iommu *iommu = NULL;
 	unsigned long pages, mask, addr;
 	struct cache_tag *tag;
 	unsigned long flags;
@@ -424,7 +513,9 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
 
 	spin_lock_irqsave(&domain->cache_lock, flags);
 	list_for_each_entry(tag, &domain->cache_tags, node) {
-		struct intel_iommu *iommu = tag->iommu;
+		if (iommu && iommu != tag->iommu)
+			qi_batch_flush_descs(iommu, domain->qi_batch);
+		iommu = tag->iommu;
 
 		if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
 			iommu_flush_write_buffer(iommu);
@@ -437,5 +528,6 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
 
 		trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask);
 	}
+	qi_batch_flush_descs(iommu, domain->qi_batch);
 	spin_unlock_irqrestore(&domain->cache_lock, flags);
 }
-- 
2.34.1
Re: [PATCH v3 4/4] iommu/vt-d: Introduce batched cache invalidation
Posted by Jacob Pan 1 year, 4 months ago
On Thu, 15 Aug 2024 14:52:21 +0800
Tina Zhang <tina.zhang@intel.com> wrote:

> @@ -270,7 +343,8 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
>  	u64 type = DMA_TLB_PSI_FLUSH;
>  
>  	if (domain->use_first_level) {
> -		qi_flush_piotlb(iommu, tag->domain_id, tag->pasid, addr, pages, ih);
> +		qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
> +				    pages, ih, domain->qi_batch);
>  		return;
>  	}
>  
> @@ -287,7 +361,8 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
>  	}
>  
>  	if (ecap_qis(iommu->ecap))
> -		qi_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type);
> +		qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih, mask, type,
> +				   domain->qi_batch);
>  
If I understand this correctly, IOTLB flush may be deferred until the
batch array is full, right? If so, is there a security gap where
callers think the mapping is gone after the call returns?
Re: [PATCH v3 4/4] iommu/vt-d: Introduce batched cache invalidation
Posted by Baolu Lu 1 year, 4 months ago
On 2024/8/17 0:38, Jacob Pan wrote:
> On Thu, 15 Aug 2024 14:52:21 +0800
> Tina Zhang <tina.zhang@intel.com> wrote:
> 
>> @@ -270,7 +343,8 @@ static void cache_tag_flush_iotlb(struct
>> dmar_domain *domain, struct cache_tag * u64 type = DMA_TLB_PSI_FLUSH;
>>   
>>   	if (domain->use_first_level) {
>> -		qi_flush_piotlb(iommu, tag->domain_id, tag->pasid,
>> addr, pages, ih);
>> +		qi_batch_add_piotlb(iommu, tag->domain_id,
>> tag->pasid, addr,
>> +				    pages, ih, domain->qi_batch);
>>   		return;
>>   	}
>>   
>> @@ -287,7 +361,8 @@ static void cache_tag_flush_iotlb(struct
>> dmar_domain *domain, struct cache_tag * }
>>   
>>   	if (ecap_qis(iommu->ecap))
>> -		qi_flush_iotlb(iommu, tag->domain_id, addr | ih,
>> mask, type);
>> +		qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih,
>> mask, type,
>> +				   domain->qi_batch);
>>   
> If I understand this correctly, IOTLB flush maybe deferred until the
> batch array is full, right? If so, is there a security gap where
> callers think the mapping is gone after the call returns?
No. All related caches are flushed before function return. A domain can
have multiple cache tags. Previously, we sent individual cache
invalidation requests to hardware. This change combines all necessary
invalidation requests into a single batch and raises them to hardware
together to make it more efficient.

Thanks,
baolu
Re: [PATCH v3 4/4] iommu/vt-d: Introduce batched cache invalidation
Posted by Jacob Pan 1 year, 4 months ago
On Sat, 17 Aug 2024 11:28:21 +0800
Baolu Lu <baolu.lu@linux.intel.com> wrote:

> On 2024/8/17 0:38, Jacob Pan wrote:
> > On Thu, 15 Aug 2024 14:52:21 +0800
> > Tina Zhang <tina.zhang@intel.com> wrote:
> >   
> >> @@ -270,7 +343,8 @@ static void cache_tag_flush_iotlb(struct
> >> dmar_domain *domain, struct cache_tag * u64 type =
> >> DMA_TLB_PSI_FLUSH; 
> >>   	if (domain->use_first_level) {
> >> -		qi_flush_piotlb(iommu, tag->domain_id, tag->pasid,
> >> addr, pages, ih);
> >> +		qi_batch_add_piotlb(iommu, tag->domain_id,
> >> tag->pasid, addr,
> >> +				    pages, ih, domain->qi_batch);
> >>   		return;
> >>   	}
> >>   
> >> @@ -287,7 +361,8 @@ static void cache_tag_flush_iotlb(struct
> >> dmar_domain *domain, struct cache_tag * }
> >>   
> >>   	if (ecap_qis(iommu->ecap))
> >> -		qi_flush_iotlb(iommu, tag->domain_id, addr | ih,
> >> mask, type);
> >> +		qi_batch_add_iotlb(iommu, tag->domain_id, addr |
> >> ih, mask, type,
> >> +				   domain->qi_batch);
> >>     
> > If I understand this correctly, IOTLB flush maybe deferred until the
> > batch array is full, right? If so, is there a security gap where
> > callers think the mapping is gone after the call returns?  
> No. All related caches are flushed before function return. A domain
> can have multiple cache tags. Previously, we sent individual cache
> invalidation requests to hardware. This change combines all necessary
> invalidation requests into a single batch and raise them to hardware
> together to make it more efficient.
I was looking at the code below. If the index does not reach
QI_MAX_BATCHED_DESC_COUNT, there will be no flush after
cache_tag_flush_iotlb() returns, right?

+static void qi_batch_increment_index(struct intel_iommu *iommu, struct qi_batch *batch)
+{
+	if (++batch->index == QI_MAX_BATCHED_DESC_COUNT)
+		qi_batch_flush_descs(iommu, batch);
+}
+
+static void qi_batch_add_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+			       unsigned int size_order, u64 type,
+			       struct qi_batch *batch)
+{
+	qi_desc_iotlb(iommu, did, addr, size_order, type, &(batch->descs[batch->index]));
+	qi_batch_increment_index(iommu, batch);
+}

> Thanks,
> baolu
Re: [PATCH v3 4/4] iommu/vt-d: Introduce batched cache invalidation
Posted by Baolu Lu 1 year, 4 months ago
On 8/19/24 11:40 PM, Jacob Pan wrote:
> On Sat, 17 Aug 2024 11:28:21 +0800
> Baolu Lu<baolu.lu@linux.intel.com>  wrote:
> 
>> On 2024/8/17 0:38, Jacob Pan wrote:
>>> On Thu, 15 Aug 2024 14:52:21 +0800
>>> Tina Zhang<tina.zhang@intel.com>  wrote:
>>>    
>>>> @@ -270,7 +343,8 @@ static void cache_tag_flush_iotlb(struct
>>>> dmar_domain *domain, struct cache_tag * u64 type =
>>>> DMA_TLB_PSI_FLUSH;
>>>>    	if (domain->use_first_level) {
>>>> -		qi_flush_piotlb(iommu, tag->domain_id, tag->pasid,
>>>> addr, pages, ih);
>>>> +		qi_batch_add_piotlb(iommu, tag->domain_id,
>>>> tag->pasid, addr,
>>>> +				    pages, ih, domain->qi_batch);
>>>>    		return;
>>>>    	}
>>>>    
>>>> @@ -287,7 +361,8 @@ static void cache_tag_flush_iotlb(struct
>>>> dmar_domain *domain, struct cache_tag * }
>>>>    
>>>>    	if (ecap_qis(iommu->ecap))
>>>> -		qi_flush_iotlb(iommu, tag->domain_id, addr | ih,
>>>> mask, type);
>>>> +		qi_batch_add_iotlb(iommu, tag->domain_id, addr |
>>>> ih, mask, type,
>>>> +				   domain->qi_batch);
>>>>      
>>> If I understand this correctly, IOTLB flush maybe deferred until the
>>> batch array is full, right? If so, is there a security gap where
>>> callers think the mapping is gone after the call returns?
>> No. All related caches are flushed before function return. A domain
>> can have multiple cache tags. Previously, we sent individual cache
>> invalidation requests to hardware. This change combines all necessary
>> invalidation requests into a single batch and raise them to hardware
>> together to make it more efficient.
> I was looking at the code below, if the index does not reach
> QI_MAX_BATCHED_DESC_COUNT. There will be no flush after
> cache_tag_flush_iotlb() returns, right?

No. qi_batch_flush_descs() is called explicitly before return.

@@ -341,6 +417,7 @@ static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_
 void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 			   unsigned long end, int ih)
 {
+	struct intel_iommu *iommu = NULL;
 	unsigned long pages, mask, addr;
 	struct cache_tag *tag;
 	unsigned long flags;
@@ -349,6 +426,10 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 
 	spin_lock_irqsave(&domain->cache_lock, flags);
 	list_for_each_entry(tag, &domain->cache_tags, node) {
+		if (iommu && iommu != tag->iommu)
+			qi_batch_flush_descs(iommu, domain->qi_batch);
+		iommu = tag->iommu;
+
 		switch (tag->type) {
 		case CACHE_TAG_IOTLB:
 		case CACHE_TAG_NESTING_IOTLB:
@@ -372,6 +453,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 
 		trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
 	}
+	qi_batch_flush_descs(iommu, domain->qi_batch);
 	spin_unlock_irqrestore(&domain->cache_lock, flags);
 }

Thanks,
baolu
Re: [PATCH v3 4/4] iommu/vt-d: Introduce batched cache invalidation
Posted by Jacob Pan 1 year, 4 months ago
Hi Baolu,

On Tue, 20 Aug 2024 10:06:05 +0800
Baolu Lu <baolu.lu@linux.intel.com> wrote:

> On 8/19/24 11:40 PM, Jacob Pan wrote:
> > On Sat, 17 Aug 2024 11:28:21 +0800
> > Baolu Lu<baolu.lu@linux.intel.com>  wrote:
> >   
> >> On 2024/8/17 0:38, Jacob Pan wrote:  
> >>> On Thu, 15 Aug 2024 14:52:21 +0800
> >>> Tina Zhang<tina.zhang@intel.com>  wrote:
> >>>      
> >>>> @@ -270,7 +343,8 @@ static void cache_tag_flush_iotlb(struct
> >>>> dmar_domain *domain, struct cache_tag * u64 type =
> >>>> DMA_TLB_PSI_FLUSH;
> >>>>    	if (domain->use_first_level) {
> >>>> -		qi_flush_piotlb(iommu, tag->domain_id,
> >>>> tag->pasid, addr, pages, ih);
> >>>> +		qi_batch_add_piotlb(iommu, tag->domain_id,
> >>>> tag->pasid, addr,
> >>>> +				    pages, ih,
> >>>> domain->qi_batch); return;
> >>>>    	}
> >>>>    
> >>>> @@ -287,7 +361,8 @@ static void cache_tag_flush_iotlb(struct
> >>>> dmar_domain *domain, struct cache_tag * }
> >>>>    
> >>>>    	if (ecap_qis(iommu->ecap))
> >>>> -		qi_flush_iotlb(iommu, tag->domain_id, addr | ih,
> >>>> mask, type);
> >>>> +		qi_batch_add_iotlb(iommu, tag->domain_id, addr |
> >>>> ih, mask, type,
> >>>> +				   domain->qi_batch);
> >>>>        
> >>> If I understand this correctly, IOTLB flush maybe deferred until
> >>> the batch array is full, right? If so, is there a security gap
> >>> where callers think the mapping is gone after the call returns?  
> >> No. All related caches are flushed before function return. A domain
> >> can have multiple cache tags. Previously, we sent individual cache
> >> invalidation requests to hardware. This change combines all
> >> necessary invalidation requests into a single batch and raise them
> >> to hardware together to make it more efficient.  
> > I was looking at the code below, if the index does not reach
> > QI_MAX_BATCHED_DESC_COUNT. There will be no flush after
> > cache_tag_flush_iotlb() returns, right?  
> 
> No. qi_batch_flush_descs() is called explicitly before return.
I see, cache_tag_flush_iotlb() really just adds descriptors to the
batch and does not perform any flush in most cases. IMHO, the name is
a little confusing.

Thanks,

Jacob
Re: [PATCH v3 4/4] iommu/vt-d: Introduce batched cache invalidation
Posted by Yi Liu 1 year, 4 months ago
On 2024/8/17 11:28, Baolu Lu wrote:
> On 2024/8/17 0:38, Jacob Pan wrote:
>> On Thu, 15 Aug 2024 14:52:21 +0800
>> Tina Zhang <tina.zhang@intel.com> wrote:
>>
>>> @@ -270,7 +343,8 @@ static void cache_tag_flush_iotlb(struct
>>> dmar_domain *domain, struct cache_tag * u64 type = DMA_TLB_PSI_FLUSH;
>>>       if (domain->use_first_level) {
>>> -        qi_flush_piotlb(iommu, tag->domain_id, tag->pasid,
>>> addr, pages, ih);
>>> +        qi_batch_add_piotlb(iommu, tag->domain_id,
>>> tag->pasid, addr,
>>> +                    pages, ih, domain->qi_batch);
>>>           return;
>>>       }
>>> @@ -287,7 +361,8 @@ static void cache_tag_flush_iotlb(struct
>>> dmar_domain *domain, struct cache_tag * }
>>>       if (ecap_qis(iommu->ecap))
>>> -        qi_flush_iotlb(iommu, tag->domain_id, addr | ih,
>>> mask, type);
>>> +        qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih,
>>> mask, type,
>>> +                   domain->qi_batch);
>> If I understand this correctly, IOTLB flush maybe deferred until the
>> batch array is full, right? If so, is there a security gap where
>> callers think the mapping is gone after the call returns?
> No. All related caches are flushed before function return. A domain can
> have multiple cache tags. Previously, we sent individual cache
> invalidation requests to hardware. This change combines all necessary
> invalidation requests into a single batch and raise them to hardware
> together to make it more efficient.

Hi Jacob,

Do you mean the configuration where iommu.strict==0? :) As Baolu
explained above, this patch is not for that, although it uses the
term "batched". It would also greatly reduce the VM exits due to
IOTLB/DevTLB invalidation in a virtualization environment.

-- 
Regards,
Yi Liu
Re: [PATCH v3 4/4] iommu/vt-d: Introduce batched cache invalidation
Posted by Jacob Pan 1 year, 4 months ago
Hi Yi,

On Mon, 19 Aug 2024 15:58:35 +0800
Yi Liu <yi.l.liu@intel.com> wrote:

> On 2024/8/17 11:28, Baolu Lu wrote:
> > On 2024/8/17 0:38, Jacob Pan wrote:  
> >> On Thu, 15 Aug 2024 14:52:21 +0800
> >> Tina Zhang <tina.zhang@intel.com> wrote:
> >>  
> >>> @@ -270,7 +343,8 @@ static void cache_tag_flush_iotlb(struct
> >>> dmar_domain *domain, struct cache_tag * u64 type =
> >>> DMA_TLB_PSI_FLUSH; if (domain->use_first_level) {
> >>> -        qi_flush_piotlb(iommu, tag->domain_id, tag->pasid,
> >>> addr, pages, ih);
> >>> +        qi_batch_add_piotlb(iommu, tag->domain_id,
> >>> tag->pasid, addr,
> >>> +                    pages, ih, domain->qi_batch);
> >>>           return;
> >>>       }
> >>> @@ -287,7 +361,8 @@ static void cache_tag_flush_iotlb(struct
> >>> dmar_domain *domain, struct cache_tag * }
> >>>       if (ecap_qis(iommu->ecap))
> >>> -        qi_flush_iotlb(iommu, tag->domain_id, addr | ih,
> >>> mask, type);
> >>> +        qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih,
> >>> mask, type,
> >>> +                   domain->qi_batch);  
> >> If I understand this correctly, IOTLB flush maybe deferred until
> >> the batch array is full, right? If so, is there a security gap
> >> where callers think the mapping is gone after the call returns?  
> > No. All related caches are flushed before function return. A domain
> > can have multiple cache tags. Previously, we sent individual cache
> > invalidation requests to hardware. This change combines all
> > necessary invalidation requests into a single batch and raise them
> > to hardware together to make it more efficient.  
> 
> Hi Jacob,
> 
> Do you mean the configuration that iommu.strict==0? :) As the above
> explanation from Baolu, this patch is not for that although it uses
> the term "batched". Also, it would reduce the VMExits that due to the
> IOTLB/DevTLB invalidation a lot in the virtualization environment.
> 
No, I understand this is a different "batch", not for deferred flush.
I am asking why it has to gather QI_MAX_BATCHED_DESC_COUNT descriptors
before it does the flush. See my other reply.

Thanks,

Jacob