[PATCH] iommu/io-pgtable-arm: Support contiguous bit in translation tables

Daniel Mentz posted 1 patch 9 months, 1 week ago
drivers/iommu/io-pgtable-arm.c | 53 +++++++++++++++++++++++++++++++---
1 file changed, 49 insertions(+), 4 deletions(-)
[PATCH] iommu/io-pgtable-arm: Support contiguous bit in translation tables
Posted by Daniel Mentz 9 months, 1 week ago
The contiguous bit in translation table entries can be used as a hint to
the SMMU that a group of adjacent translation table entries has consistent
attributes and points to a contiguous and properly aligned output address
range. This enables the SMMU to predict the properties of the remaining
translation table entries in the same group without accessing them. It
also allows an SMMU implementation to make more efficient use of its TLB
by using a single TLB entry to cover all translation table entries in
the same group.

In the case of 4KB granule size, there are 16 translation table entries
in one group.

This change sets the contiguous bit for such groups of entries that are
completely covered by a single call to map_pages. As it stands, the code
wouldn't set the contiguous bit if a group of adjacent descriptors is
completed by separate calls to map_pages.

Signed-off-by: Daniel Mentz <danielmentz@google.com>
---
 drivers/iommu/io-pgtable-arm.c | 53 +++++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7632c80edea6..07b40e928bb3 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -76,6 +76,7 @@
 
 #define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
 #define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
+#define ARM_LPAE_PTE_CONT		(((arm_lpae_iopte)1) << 52)
 #define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
 #define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
 #define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
@@ -326,6 +327,35 @@ static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
 				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
 }
 
+/*
+ * Number of translation table entries that can share the contiguous hint
+ * at @lvl, per the Arm ARM rules for each translation granule. Returns 1
+ * when the hint does not apply, so callers' alignment math still works.
+ */
+static int arm_lpae_cont_ptes(int lvl, struct arm_lpae_io_pgtable *data)
+{
+	switch (ARM_LPAE_GRANULE(data)) {
+	case SZ_4K:
+		/* Groups of 16 entries at any block/page level */
+		if (lvl >= 1)
+			return 16;
+		break;
+	case SZ_16K:
+		/* 32 x 32MB blocks (1GB) or 128 x 16KB pages (2MB) */
+		if (lvl == 2)
+			return 32;
+		else if (lvl == 3)
+			return 128;
+		break;
+	case SZ_64K:
+		/* Groups of 32 entries: 16GB at level 2, 2MB at level 3 */
+		if (lvl >= 2)
+			return 32;
+		break;
+	}
+	return 1;
+}
+
 static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
 {
 	for (int i = 0; i < num_entries; i++)
@@ -340,8 +362,43 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			       unsigned long iova, size_t size, size_t pgcount,
 			       int lvl, arm_lpae_iopte *ptep);
 
+/*
+ * Decide whether entry @i of a map_pages() run may carry the contiguous
+ * hint: IOVA and physical address must be aligned identically within the
+ * contiguous region, and the run must cover the entry's whole group.
+ */
+static bool arm_lpae_use_contpte(struct arm_lpae_io_pgtable *data,
+				 unsigned long iova, phys_addr_t paddr,
+				 int lvl, int num_entries, int i)
+{
+	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+	int cont_ptes = arm_lpae_cont_ptes(lvl, data);
+	int contmask = cont_ptes - 1;
+	/*
+	 * Must be size_t, not int: at level 1 with the 4KB granule the
+	 * contiguous region size (16 * 1GB) does not fit in a 32-bit int,
+	 * and truncating the mask would defeat the alignment check.
+	 */
+	size_t contpte_addr_mask = sz * cont_ptes - 1;
+	int map_idx_start, tbl_idx;
+
+	if ((paddr & contpte_addr_mask) != (iova & contpte_addr_mask))
+		return false;
+
+	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
+	tbl_idx = map_idx_start + i;
+	/*
+	 * The entry's group must start at or after this run's first index
+	 * and the run must extend past the last index of the group.
+	 */
+	if (((tbl_idx & contmask) <= i) &&
+	    (tbl_idx < ((map_idx_start + num_entries) & ~contmask)))
+		return true;
+
+	return false;
+}
+
 static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
-				phys_addr_t paddr, arm_lpae_iopte prot,
+				unsigned long iova, phys_addr_t paddr, arm_lpae_iopte prot,
 				int lvl, int num_entries, arm_lpae_iopte *ptep)
 {
 	arm_lpae_iopte pte = prot;
@@ -355,8 +399,9 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 
 	for (i = 0; i < num_entries; i++)
-		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
-
+		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data) |
+			  (arm_lpae_use_contpte(data, iova, paddr, lvl, num_entries, i) ?
+			  ARM_LPAE_PTE_CONT : 0);
 	if (!cfg->coherent_walk)
 		__arm_lpae_sync_pte(ptep, num_entries, cfg);
 }
@@ -389,7 +434,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 			}
 		}
 
-	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
+	__arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
 	return 0;
 }
 
-- 
2.49.0.967.g6a0df3ecc3-goog
Re: [PATCH] iommu/io-pgtable-arm: Support contiguous bit in translation tables
Posted by Jason Gunthorpe 9 months, 1 week ago
On Wed, Apr 30, 2025 at 11:19:24PM +0000, Daniel Mentz wrote:
> The contiguous bit in translation table entries can be used as a hint to
> SMMU that a group of adjacent translation table entries have consistent
> attributes and point to a contiguous and properly aligned output address
> range. This enables SMMU to predict the properties of the remaining
> translation table entries in the same group without accessing them. It
> also allows an SMMU implementation to make more efficient use of its TLB
> by using a single TLB entry to cover all translation table entries in
> the same group.
> 
> In the case of 4KB granule size, there are 16 translation table entries
> in one group.
> 
> This change sets the contiguous bit for such groups of entries that are
> completely covered by a single call to map_pages. As it stands, the code
> wouldn't set the contiguous bit if a group of adjacent descriptors is
> completed by separate calls to map_pages.

Nor should it

This seems like a pretty hacky implementation, it doesn't set the
pgsize bitmap to indicate support and it doesn't have a safety check
on unmap to protect against partial unmap of a huge page.

Wouldn't it be better to use the pgsize bit map and rely on the core
code to tell a contig page size is being used and then it can
trivially set the PTE bit without having to do all this extra work?

Jason
Re: [PATCH] iommu/io-pgtable-arm: Support contiguous bit in translation tables
Posted by Will Deacon 8 months, 4 weeks ago
[+Robin]

On Tue, May 06, 2025 at 12:40:14PM -0300, Jason Gunthorpe wrote:
> On Wed, Apr 30, 2025 at 11:19:24PM +0000, Daniel Mentz wrote:
> > The contiguous bit in translation table entries can be used as a hint to
> > SMMU that a group of adjacent translation table entries have consistent
> > attributes and point to a contiguous and properly aligned output address
> > range. This enables SMMU to predict the properties of the remaining
> > translation table entries in the same group without accessing them. It
> > also allows an SMMU implementation to make more efficient use of its TLB
> > by using a single TLB entry to cover all translation table entries in
> > the same group.
> > 
> > In the case of 4KB granule size, there are 16 translation table entries
> > in one group.
> > 
> > This change sets the contiguous bit for such groups of entries that are
> > completely covered by a single call to map_pages. As it stands, the code
> > wouldn't set the contiguous bit if a group of adjacent descriptors is
> > completed by separate calls to map_pages.
> 
> Nor should it
> 
> This seems like a pretty hacky implementation, it doesn't set the
> pgsize bitmap to indicate support and it doesn't have a safety check
> on unmap to protect against partial unmap of a huge page.
> 
> Wouldn't it be better to use the pgsize bit map and rely on the core
> code to tell a contig page size is being used and then it can
> trivially set the PTE bit without having to do all this extra work?

That sounds like it would be quite a bit cleaner and I think it aligns
with the "Large page" support in io-pgtable-arm-v7s.c which is doing
something extremely similar.

Will
Re: [PATCH] iommu/io-pgtable-arm: Support contiguous bit in translation tables
Posted by Robin Murphy 8 months, 4 weeks ago
On 15/05/2025 3:36 pm, Will Deacon wrote:
> [+Robin]
> 
> On Tue, May 06, 2025 at 12:40:14PM -0300, Jason Gunthorpe wrote:
>> On Wed, Apr 30, 2025 at 11:19:24PM +0000, Daniel Mentz wrote:
>>> The contiguous bit in translation table entries can be used as a hint to
>>> SMMU that a group of adjacent translation table entries have consistent
>>> attributes and point to a contiguous and properly aligned output address
>>> range. This enables SMMU to predict the properties of the remaining
>>> translation table entries in the same group without accessing them. It
>>> also allows an SMMU implementation to make more efficient use of its TLB
>>> by using a single TLB entry to cover all translation table entries in
>>> the same group.
>>>
>>> In the case of 4KB granule size, there are 16 translation table entries
>>> in one group.
>>>
>>> This change sets the contiguous bit for such groups of entries that are
>>> completely covered by a single call to map_pages. As it stands, the code
>>> wouldn't set the contiguous bit if a group of adjacent descriptors is
>>> completed by separate calls to map_pages.
>>
>> Nor should it
>>
>> This seems like a pretty hacky implementation, it doesn't set the
>> pgsize bitmap to indicate support and it doesn't have a safety check
>> on unmap to protect against partial unmap of a huge page.
>>
>> Wouldn't it be better to use the pgsize bit map and rely on the core
>> code to tell a contig page size is being used and then it can
>> trivially set the PTE bit without having to do all this extra work?
> 
> That sounds like it would be quite a bit cleaner and I think it aligns
> with the "Large page" support in io-pgtable-arm-v7s.c which is doing
> something extremely similar.

Indeed, much like we advertise contiguous sizes for hugetlb on the CPU, 
having them in the pgsize_bitmap gives a clue to IOMMU API users that 
there is some potential benefit in trying to align to these sizes where 
appropriate (e.g. __iommu_dma_alloc_pages()).

Secondly, there also needs to be a straightforward way for drivers to 
opt out of (or perhaps explicitly in to) this - IIRC there are some SMMU 
errata which have so far not mattered due to Linux not using the 
contiguous bit, for which the least painful workaround is probably for 
affected hardware to just continue not using the contiguous bit.

Thanks,
Robin.