[PATCH v10 06/24] iommu/dma: Factor out a iommu_dma_map_swiotlb helper

Posted by Leon Romanovsky 7 months, 2 weeks ago
From: Christoph Hellwig <hch@lst.de>

Split the swiotlb bouncing logic out of iommu_dma_map_page into a
separate helper.
This not only keeps the code neatly separated, but will also allow for
reuse in another caller.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/iommu/dma-iommu.c | 73 ++++++++++++++++++++++-----------------
 1 file changed, 41 insertions(+), 32 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d3211a8d755e..d7684024c439 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1138,6 +1138,43 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
+static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iova_domain *iovad = &domain->iova_cookie->iovad;
+
+	if (!is_swiotlb_active(dev)) {
+		dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+		return (phys_addr_t)DMA_MAPPING_ERROR;
+	}
+
+	trace_swiotlb_bounced(dev, phys, size);
+
+	phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
+			attrs);
+
+	/*
+	 * Untrusted devices should not see padding areas with random leftover
+	 * kernel data, so zero the pre- and post-padding.
+	 * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
+	 * the contents of the original memory buffer.
+	 */
+	if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
+		size_t start, virt = (size_t)phys_to_virt(phys);
+
+		/* Pre-padding */
+		start = iova_align_down(iovad, virt);
+		memset((void *)start, 0, virt - start);
+
+		/* Post-padding */
+		start = virt + size;
+		memset((void *)start, 0, iova_align(iovad, start) - start);
+	}
+
+	return phys;
+}
+
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	      unsigned long offset, size_t size, enum dma_data_direction dir,
 	      unsigned long attrs)
@@ -1151,42 +1188,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	dma_addr_t iova, dma_mask = dma_get_mask(dev);
 
 	/*
-	 * If both the physical buffer start address and size are
-	 * page aligned, we don't need to use a bounce page.
+	 * If both the physical buffer start address and size are page aligned,
+	 * we don't need to use a bounce page.
 	 */
 	if (dev_use_swiotlb(dev, size, dir) &&
 	    iova_offset(iovad, phys | size)) {
-		if (!is_swiotlb_active(dev)) {
-			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
-			return DMA_MAPPING_ERROR;
-		}
-
-		trace_swiotlb_bounced(dev, phys, size);
-
-		phys = swiotlb_tbl_map_single(dev, phys, size,
-					      iova_mask(iovad), dir, attrs);
-
-		if (phys == DMA_MAPPING_ERROR)
+		phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
+		if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
 			return DMA_MAPPING_ERROR;
-
-		/*
-		 * Untrusted devices should not see padding areas with random
-		 * leftover kernel data, so zero the pre- and post-padding.
-		 * swiotlb_tbl_map_single() has initialized the bounce buffer
-		 * proper to the contents of the original memory buffer.
-		 */
-		if (dev_is_untrusted(dev)) {
-			size_t start, virt = (size_t)phys_to_virt(phys);
-
-			/* Pre-padding */
-			start = iova_align_down(iovad, virt);
-			memset((void *)start, 0, virt - start);
-
-			/* Post-padding */
-			start = virt + size;
-			memset((void *)start, 0,
-			       iova_align(iovad, start) - start);
-		}
 	}
 
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-- 
2.49.0
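
For context on the "reuse in another caller" mentioned in the commit
message: later in this thread the call chain dma_iova_link() ->
iommu_dma_iova_link_swiotlb() -> iommu_dma_iova_bounce_and_link() ->
iommu_dma_map_swiotlb() comes up. A minimal sketch of that second-caller
pattern, using only helpers visible in this patch and thread (the
function name and exact signature below are illustrative, not the
actual code from the series):

static dma_addr_t bounce_and_link_sketch(struct device *dev,
		phys_addr_t phys, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Bounce the unaligned buffer through swiotlb first ... */
	phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
	if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* ... then map the bounce buffer instead of the original buffer. */
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs),
			dma_get_mask(dev));
}
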
Re: [PATCH v10 06/24] iommu/dma: Factor out a iommu_dma_map_swiotlb helper
Posted by Baolu Lu 7 months, 2 weeks ago
On 4/28/25 17:22, Leon Romanovsky wrote:
> From: Christoph Hellwig <hch@lst.de>
> 
> Split the iommu logic from iommu_dma_map_page into a separate helper.
> This not only keeps the code neatly separated, but will also allow for
> reuse in another caller.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Tested-by: Jens Axboe <axboe@kernel.dk>
> Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
> Signed-off-by: Leon Romanovsky <leonro@nvidia.com>

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>

with a nit below ...

> ---
>   drivers/iommu/dma-iommu.c | 73 ++++++++++++++++++++++-----------------
>   1 file changed, 41 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index d3211a8d755e..d7684024c439 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -1138,6 +1138,43 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
>   			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
>   }
>   
> +static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
> +		size_t size, enum dma_data_direction dir, unsigned long attrs)
> +{
> +	struct iommu_domain *domain = iommu_get_dma_domain(dev);
> +	struct iova_domain *iovad = &domain->iova_cookie->iovad;
> +
> +	if (!is_swiotlb_active(dev)) {
> +		dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
> +		return (phys_addr_t)DMA_MAPPING_ERROR;
> +	}
> +
> +	trace_swiotlb_bounced(dev, phys, size);
> +
> +	phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
> +			attrs);
> +
> +	/*
> +	 * Untrusted devices should not see padding areas with random leftover
> +	 * kernel data, so zero the pre- and post-padding.
> +	 * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
> +	 * the contents of the original memory buffer.
> +	 */
> +	if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
> +		size_t start, virt = (size_t)phys_to_virt(phys);
> +
> +		/* Pre-padding */
> +		start = iova_align_down(iovad, virt);
> +		memset((void *)start, 0, virt - start);
> +
> +		/* Post-padding */
> +		start = virt + size;
> +		memset((void *)start, 0, iova_align(iovad, start) - start);
> +	}
> +
> +	return phys;
> +}
> +
>   dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
>   	      unsigned long offset, size_t size, enum dma_data_direction dir,
>   	      unsigned long attrs)
> @@ -1151,42 +1188,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
>   	dma_addr_t iova, dma_mask = dma_get_mask(dev);
>   
>   	/*
> -	 * If both the physical buffer start address and size are
> -	 * page aligned, we don't need to use a bounce page.
> +	 * If both the physical buffer start address and size are page aligned,
> +	 * we don't need to use a bounce page.
>   	 */
>   	if (dev_use_swiotlb(dev, size, dir) &&
>   	    iova_offset(iovad, phys | size)) {
> -		if (!is_swiotlb_active(dev)) {

... Is it better to move this check into the helper? It could simply be
a no-op when a bounce page is not needed:

	if (!dev_use_swiotlb(dev, size, dir) ||
	    !iova_offset(iovad, phys | size))
		return phys;

Thanks,
baolu
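
For concreteness, Baolu's suggestion would make the helper start
roughly like this (a sketch combining the hunk above with the proposed
early return; the remainder of the helper body is unchanged from the
patch):

static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iova_domain *iovad = &domain->iova_cookie->iovad;

	/*
	 * Proposed no-op path: if both the physical buffer start address
	 * and size are page aligned, no bounce page is needed.
	 */
	if (!dev_use_swiotlb(dev, size, dir) ||
	    !iova_offset(iovad, phys | size))
		return phys;

	if (!is_swiotlb_active(dev)) {
		dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}
<...>
}
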
Re: [PATCH v10 06/24] iommu/dma: Factor out a iommu_dma_map_swiotlb helper
Posted by Leon Romanovsky 7 months, 2 weeks ago
On Tue, Apr 29, 2025 at 12:58:18PM +0800, Baolu Lu wrote:
> On 4/28/25 17:22, Leon Romanovsky wrote:
> > From: Christoph Hellwig <hch@lst.de>
> > 
> > Split the iommu logic from iommu_dma_map_page into a separate helper.
> > This not only keeps the code neatly separated, but will also allow for
> > reuse in another caller.
> > 
> > Signed-off-by: Christoph Hellwig <hch@lst.de>
> > Tested-by: Jens Axboe <axboe@kernel.dk>
> > Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
> > Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
> 
> Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
> 
> with a nit below ...
> 
> > ---
> >   drivers/iommu/dma-iommu.c | 73 ++++++++++++++++++++++-----------------
> >   1 file changed, 41 insertions(+), 32 deletions(-)
> > 
> > diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> > index d3211a8d755e..d7684024c439 100644
> > --- a/drivers/iommu/dma-iommu.c
> > +++ b/drivers/iommu/dma-iommu.c
> > @@ -1138,6 +1138,43 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
> >   			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
> >   }
> > +static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
> > +		size_t size, enum dma_data_direction dir, unsigned long attrs)
> > +{
> > +	struct iommu_domain *domain = iommu_get_dma_domain(dev);
> > +	struct iova_domain *iovad = &domain->iova_cookie->iovad;
> > +
> > +	if (!is_swiotlb_active(dev)) {
> > +		dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
> > +		return (phys_addr_t)DMA_MAPPING_ERROR;
> > +	}
> > +
> > +	trace_swiotlb_bounced(dev, phys, size);
> > +
> > +	phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
> > +			attrs);
> > +
> > +	/*
> > +	 * Untrusted devices should not see padding areas with random leftover
> > +	 * kernel data, so zero the pre- and post-padding.
> > +	 * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
> > +	 * the contents of the original memory buffer.
> > +	 */
> > +	if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
> > +		size_t start, virt = (size_t)phys_to_virt(phys);
> > +
> > +		/* Pre-padding */
> > +		start = iova_align_down(iovad, virt);
> > +		memset((void *)start, 0, virt - start);
> > +
> > +		/* Post-padding */
> > +		start = virt + size;
> > +		memset((void *)start, 0, iova_align(iovad, start) - start);
> > +	}
> > +
> > +	return phys;
> > +}
> > +
> >   dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
> >   	      unsigned long offset, size_t size, enum dma_data_direction dir,
> >   	      unsigned long attrs)
> > @@ -1151,42 +1188,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
> >   	dma_addr_t iova, dma_mask = dma_get_mask(dev);
> >   	/*
> > -	 * If both the physical buffer start address and size are
> > -	 * page aligned, we don't need to use a bounce page.
> > +	 * If both the physical buffer start address and size are page aligned,
> > +	 * we don't need to use a bounce page.
> >   	 */
> >   	if (dev_use_swiotlb(dev, size, dir) &&
> >   	    iova_offset(iovad, phys | size)) {
> > -		if (!is_swiotlb_active(dev)) {
> 
> ... Is it better to move this check into the helper? Simply no-op if a
> bounce page is not needed:
> 
> 	if (!dev_use_swiotlb(dev, size, dir) ||
> 	    !iova_offset(iovad, phys | size))
> 		return phys;

Am I missing something? iommu_dma_map_page() has more code after this
check, so it is not correct to return immediately:

  1189 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
  1190               unsigned long offset, size_t size, enum dma_data_direction dir,
  1191               unsigned long attrs)
  1192 {

<...>

  1201         /*
  1202          * If both the physical buffer start address and size are page aligned,
  1203          * we don't need to use a bounce page.
  1204          */
  1205         if (dev_use_swiotlb(dev, size, dir) &&
  1206             iova_unaligned(iovad, phys, size)) {
  1207                 phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
  1208                 if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
  1209                         return DMA_MAPPING_ERROR;
  1210         }
  1211
  1212         if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
  1213                 arch_sync_dma_for_device(phys, size, dir);
  1214
  1215         iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
  1216         if (iova == DMA_MAPPING_ERROR)
  1217                 swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
  1218         return iova;
  1219 }


> 
> Thanks,
> baolu
>
Re: [PATCH v10 06/24] iommu/dma: Factor out a iommu_dma_map_swiotlb helper
Posted by Baolu Lu 7 months, 2 weeks ago
On 4/29/25 13:53, Leon Romanovsky wrote:
> On Tue, Apr 29, 2025 at 12:58:18PM +0800, Baolu Lu wrote:
>> On 4/28/25 17:22, Leon Romanovsky wrote:
>>> From: Christoph Hellwig <hch@lst.de>
>>>
>>> Split the iommu logic from iommu_dma_map_page into a separate helper.
>>> This not only keeps the code neatly separated, but will also allow for
>>> reuse in another caller.
>>>
>>> Signed-off-by: Christoph Hellwig <hch@lst.de>
>>> Tested-by: Jens Axboe <axboe@kernel.dk>
>>> Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
>>> Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
>>
>> Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
>>
>> with a nit below ...
>>
>>> ---
>>>    drivers/iommu/dma-iommu.c | 73 ++++++++++++++++++++++-----------------
>>>    1 file changed, 41 insertions(+), 32 deletions(-)
>>>
>>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>>> index d3211a8d755e..d7684024c439 100644
>>> --- a/drivers/iommu/dma-iommu.c
>>> +++ b/drivers/iommu/dma-iommu.c
>>> @@ -1138,6 +1138,43 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
>>>    			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
>>>    }
>>> +static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
>>> +		size_t size, enum dma_data_direction dir, unsigned long attrs)
>>> +{
>>> +	struct iommu_domain *domain = iommu_get_dma_domain(dev);
>>> +	struct iova_domain *iovad = &domain->iova_cookie->iovad;
>>> +
>>> +	if (!is_swiotlb_active(dev)) {
>>> +		dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
>>> +		return (phys_addr_t)DMA_MAPPING_ERROR;
>>> +	}
>>> +
>>> +	trace_swiotlb_bounced(dev, phys, size);
>>> +
>>> +	phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
>>> +			attrs);
>>> +
>>> +	/*
>>> +	 * Untrusted devices should not see padding areas with random leftover
>>> +	 * kernel data, so zero the pre- and post-padding.
>>> +	 * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
>>> +	 * the contents of the original memory buffer.
>>> +	 */
>>> +	if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
>>> +		size_t start, virt = (size_t)phys_to_virt(phys);
>>> +
>>> +		/* Pre-padding */
>>> +		start = iova_align_down(iovad, virt);
>>> +		memset((void *)start, 0, virt - start);
>>> +
>>> +		/* Post-padding */
>>> +		start = virt + size;
>>> +		memset((void *)start, 0, iova_align(iovad, start) - start);
>>> +	}
>>> +
>>> +	return phys;
>>> +}
>>> +
>>>    dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
>>>    	      unsigned long offset, size_t size, enum dma_data_direction dir,
>>>    	      unsigned long attrs)
>>> @@ -1151,42 +1188,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
>>>    	dma_addr_t iova, dma_mask = dma_get_mask(dev);
>>>    	/*
>>> -	 * If both the physical buffer start address and size are
>>> -	 * page aligned, we don't need to use a bounce page.
>>> +	 * If both the physical buffer start address and size are page aligned,
>>> +	 * we don't need to use a bounce page.
>>>    	 */
>>>    	if (dev_use_swiotlb(dev, size, dir) &&
>>>    	    iova_offset(iovad, phys | size)) {
>>> -		if (!is_swiotlb_active(dev)) {
>>
>> ... Is it better to move this check into the helper? Simply no-op if a
>> bounce page is not needed:
>>
>> 	if (!dev_use_swiotlb(dev, size, dir) ||
>> 	    !iova_offset(iovad, phys | size))
>> 		return phys;
> 
> Am I missing something? iommu_dma_map_page() has more code after this
> check, so it is not correct to return immediately:
> 
>    1189 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
>    1190               unsigned long offset, size_t size, enum dma_data_direction dir,
>    1191               unsigned long attrs)
>    1192 {
> 
> <...>
> 
>    1201         /*
>    1202          * If both the physical buffer start address and size are page aligned,
>    1203          * we don't need to use a bounce page.
>    1204          */
>    1205         if (dev_use_swiotlb(dev, size, dir) &&
>    1206             iova_unaligned(iovad, phys, size)) {
>    1207                 phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
>    1208                 if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
>    1209                         return DMA_MAPPING_ERROR;
>    1210         }
>    1211
>    1212         if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
>    1213                 arch_sync_dma_for_device(phys, size, dir);
>    1214
>    1215         iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
>    1216         if (iova == DMA_MAPPING_ERROR)
>    1217                 swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
>    1218         return iova;
>    1219 }

static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
<...>
	/*
	 * If both the physical buffer start address and size are page aligned,
	 * we don't need to use a bounce page.
	 */
	if (!dev_use_swiotlb(dev, size, dir) ||
	    !iova_offset(iovad, phys | size))
		return phys;
<...>
}

Then,

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir,
	unsigned long attrs)
{
<...>
	phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
	if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;
<...>
}

Thanks,
baolu
Re: [PATCH v10 06/24] iommu/dma: Factor out a iommu_dma_map_swiotlb helper
Posted by Leon Romanovsky 7 months, 2 weeks ago
On Tue, Apr 29, 2025 at 01:58:06PM +0800, Baolu Lu wrote:
> On 4/29/25 13:53, Leon Romanovsky wrote:
> > On Tue, Apr 29, 2025 at 12:58:18PM +0800, Baolu Lu wrote:
> > > On 4/28/25 17:22, Leon Romanovsky wrote:
> > > > From: Christoph Hellwig <hch@lst.de>
> > > > 
> > > > Split the iommu logic from iommu_dma_map_page into a separate helper.
> > > > This not only keeps the code neatly separated, but will also allow for
> > > > reuse in another caller.
> > > > 
> > > > Signed-off-by: Christoph Hellwig <hch@lst.de>
> > > > Tested-by: Jens Axboe <axboe@kernel.dk>
> > > > Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
> > > > Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
> > > 
> > > Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
> > > 
> > > with a nit below ...
> > > 
> > > > ---
> > > >    drivers/iommu/dma-iommu.c | 73 ++++++++++++++++++++++-----------------
> > > >    1 file changed, 41 insertions(+), 32 deletions(-)
> > > > 
> > > > diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> > > > index d3211a8d755e..d7684024c439 100644
> > > > --- a/drivers/iommu/dma-iommu.c
> > > > +++ b/drivers/iommu/dma-iommu.c
> > > > @@ -1138,6 +1138,43 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
> > > >    			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
> > > >    }
> > > > +static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
> > > > +		size_t size, enum dma_data_direction dir, unsigned long attrs)
> > > > +{
> > > > +	struct iommu_domain *domain = iommu_get_dma_domain(dev);
> > > > +	struct iova_domain *iovad = &domain->iova_cookie->iovad;
> > > > +
> > > > +	if (!is_swiotlb_active(dev)) {
> > > > +		dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
> > > > +		return (phys_addr_t)DMA_MAPPING_ERROR;
> > > > +	}
> > > > +
> > > > +	trace_swiotlb_bounced(dev, phys, size);
> > > > +
> > > > +	phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
> > > > +			attrs);
> > > > +
> > > > +	/*
> > > > +	 * Untrusted devices should not see padding areas with random leftover
> > > > +	 * kernel data, so zero the pre- and post-padding.
> > > > +	 * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
> > > > +	 * the contents of the original memory buffer.
> > > > +	 */
> > > > +	if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
> > > > +		size_t start, virt = (size_t)phys_to_virt(phys);
> > > > +
> > > > +		/* Pre-padding */
> > > > +		start = iova_align_down(iovad, virt);
> > > > +		memset((void *)start, 0, virt - start);
> > > > +
> > > > +		/* Post-padding */
> > > > +		start = virt + size;
> > > > +		memset((void *)start, 0, iova_align(iovad, start) - start);
> > > > +	}
> > > > +
> > > > +	return phys;
> > > > +}
> > > > +
> > > >    dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
> > > >    	      unsigned long offset, size_t size, enum dma_data_direction dir,
> > > >    	      unsigned long attrs)
> > > > @@ -1151,42 +1188,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
> > > >    	dma_addr_t iova, dma_mask = dma_get_mask(dev);
> > > >    	/*
> > > > -	 * If both the physical buffer start address and size are
> > > > -	 * page aligned, we don't need to use a bounce page.
> > > > +	 * If both the physical buffer start address and size are page aligned,
> > > > +	 * we don't need to use a bounce page.
> > > >    	 */
> > > >    	if (dev_use_swiotlb(dev, size, dir) &&
> > > >    	    iova_offset(iovad, phys | size)) {
> > > > -		if (!is_swiotlb_active(dev)) {
> > > 
> > > ... Is it better to move this check into the helper? Simply no-op if a
> > > bounce page is not needed:
> > > 
> > > 	if (!dev_use_swiotlb(dev, size, dir) ||
> > > 	    !iova_offset(iovad, phys | size))
> > > 		return phys;
> > 
> > Am I missing something? iommu_dma_map_page() has more code after this
> > check, so it is not correct to return immediately:
> > 
> >    1189 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
> >    1190               unsigned long offset, size_t size, enum dma_data_direction dir,
> >    1191               unsigned long attrs)
> >    1192 {
> > 
> > <...>
> > 
> >    1201         /*
> >    1202          * If both the physical buffer start address and size are page aligned,
> >    1203          * we don't need to use a bounce page.
> >    1204          */
> >    1205         if (dev_use_swiotlb(dev, size, dir) &&
> >    1206             iova_unaligned(iovad, phys, size)) {
> >    1207                 phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
> >    1208                 if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
> >    1209                         return DMA_MAPPING_ERROR;
> >    1210         }
> >    1211
> >    1212         if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> >    1213                 arch_sync_dma_for_device(phys, size, dir);
> >    1214
> >    1215         iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
> >    1216         if (iova == DMA_MAPPING_ERROR)
> >    1217                 swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
> >    1218         return iova;
> >    1219 }
> 
> static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
> 		size_t size, enum dma_data_direction dir, unsigned long attrs)
> {
> <...>
> 	/*
> 	 * If both the physical buffer start address and size are page aligned,
> 	 * we don't need to use a bounce page.
> 	 */
> 	if (!dev_use_swiotlb(dev, size, dir) ||
> 	    !iova_offset(iovad, phys | size))
> 		return phys;
> <...>
> }
> 
> Then,
> 
> dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
> 	unsigned long offset, size_t size, enum dma_data_direction dir,
> 	unsigned long attrs)
> {
> <...>
> 	phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
> 	if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
> 		return DMA_MAPPING_ERROR;
> <...>
> }

Such a change would add an extra function call for everyone who doesn't
use SWIOTLB (RDMA, HMM, etc.).

In addition, iommu_dma_map_swiotlb() is called through

dma_iova_link ->
	iommu_dma_iova_link_swiotlb ->
		iommu_dma_iova_bounce_and_link() ->
			iommu_dma_map_swiotlb()

and dma_iova_link() already performs the "if (dev_use_swiotlb(dev, size, dir) &&
iova_unaligned(iovad, phys, size))" check very early in that call stack.

So, in dma_iova_link() we would end up with the same check twice.
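
Sketched against that call chain (bodies elided in the thread's <...>
style; the dma_iova_link() signature is abbreviated here, not the exact
one from the series), the duplication would look like:

int dma_iova_link(struct device *dev, ...)
{
<...>
	/* Check #1, performed very early in dma_iova_link() itself: */
	if (dev_use_swiotlb(dev, size, dir) &&
	    iova_unaligned(iovad, phys, size))
		/*
		 * -> iommu_dma_iova_link_swiotlb()
		 * -> iommu_dma_iova_bounce_and_link()
		 * -> iommu_dma_map_swiotlb(), which with the proposed
		 *    early return folded in would repeat the same test
		 *    as check #2.
		 */
		return iommu_dma_iova_link_swiotlb(...);
<...>
}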

Thanks

> 
> Thanks,
> baolu
>