[PATCH v2 04/13] arm64: mm: Push __TLBI_VADDR() into __tlbi_level()

Posted by Ryan Roberts 2 weeks, 6 days ago
From: Will Deacon <will@kernel.org>

The __TLBI_VADDR() macro takes an ASID and an address and converts them
into a single argument formatted correctly for a TLB invalidation
instruction.

Rather than have callers worry about this (especially in the case where
the ASID is zero), push the macro down into __tlbi_level() via a new
__tlbi_level_asid() helper.
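
For context, __TLBI_VADDR() currently encodes its operand roughly as
follows (paraphrased from arch/arm64/include/asm/tlbflush.h; comments
added for illustration):

	#define __TLBI_VADDR(addr, asid)				\
	({								\
		/* Shift out the 12-bit page offset... */		\
		unsigned long __ta = (addr) >> 12;			\
		/* ...keep VA bits [55:12]... */			\
		__ta &= GENMASK_ULL(43, 0);				\
		/* ...and place the ASID in bits [63:48]. */		\
		__ta |= (unsigned long)(asid) << 48;			\
		__ta;							\
	})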

Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Linu Cherian <linu.cherian@arm.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 arch/arm64/include/asm/tlbflush.h | 14 ++++++++++----
 arch/arm64/kernel/sys_compat.c    |  2 +-
 arch/arm64/kvm/hyp/nvhe/mm.c      |  2 +-
 arch/arm64/kvm/hyp/pgtable.c      |  4 ++--
 4 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 0e5a30eee447..7453b6c7bd00 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -150,9 +150,10 @@ static __always_inline void ipas2e1is(u64 arg)
 	__tlbi(ipas2e1is, arg);
 }
 
-static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
+static __always_inline void __tlbi_level_asid(tlbi_op op, u64 addr, u32 level,
+					      u16 asid)
 {
-	u64 arg = addr;
+	u64 arg = __TLBI_VADDR(addr, asid);
 
 	if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && level <= 3) {
 		u64 ttl = level | (get_trans_granule() << 2);
@@ -163,6 +164,11 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
 	op(arg);
 }
 
+static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
+{
+	__tlbi_level_asid(op, addr, level, 0);
+}
+
 /*
  * This macro creates a properly formatted VA operand for the TLB RANGE. The
  * value bit assignments are:
@@ -500,8 +506,7 @@ do {									\
 		if (!system_supports_tlb_range() ||			\
 		    __flush_pages == 1 ||				\
 		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
-			addr = __TLBI_VADDR(__flush_start, asid);	\
-			__tlbi_level(op, addr, tlb_level);		\
+			__tlbi_level_asid(op, __flush_start, tlb_level, asid);	\
 			__flush_start += stride;			\
 			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
@@ -674,6 +679,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
 #define huge_pmd_needs_flush huge_pmd_needs_flush
 
 #undef __tlbi_user
+#undef __TLBI_VADDR
 #endif
 
 #endif
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 4a609e9b65de..ad4857df4830 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -36,7 +36,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
 			 * The workaround requires an inner-shareable tlbi.
 			 * We pick the reserved-ASID to minimise the impact.
 			 */
-			__tlbi(aside1is, __TLBI_VADDR(0, 0));
+			__tlbi(aside1is, 0UL);
 			dsb(ish);
 		}
 
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index ae8391baebc3..581385b21826 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -270,7 +270,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
 	 * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
 	 */
 	dsb(ishst);
-	__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
+	__tlbi_level(vale2is, addr, level);
 	dsb(ish);
 	isb();
 }
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 947ac1a951a5..9292c569afe6 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -472,14 +472,14 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
 
 		kvm_clear_pte(ctx->ptep);
 		dsb(ishst);
-		__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
+		__tlbi_level(vae2is, ctx->addr, TLBI_TTL_UNKNOWN);
 	} else {
 		if (ctx->end - ctx->addr < granule)
 			return -EINVAL;
 
 		kvm_clear_pte(ctx->ptep);
 		dsb(ishst);
-		__tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+		__tlbi_level(vale2is, ctx->addr, ctx->level);
 		*unmapped += granule;
 	}
 
-- 
2.43.0
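
Note on the sys_compat.c hunk: replacing __TLBI_VADDR(0, 0) with 0UL is
an identity change, since the macro expands to zero for a zero address
and zero ASID. A minimal standalone sketch of that expansion (the helper
name is illustrative, not kernel API):

	#include <linux/bits.h>	/* GENMASK_ULL() */

	/* Mirrors the __TLBI_VADDR() steps, for illustration only. */
	static inline unsigned long tlbi_vaddr_sketch(unsigned long addr,
						      unsigned short asid)
	{
		unsigned long ta = addr >> 12;		/* 0 >> 12 == 0 */

		ta &= GENMASK_ULL(43, 0);		/* still 0 */
		ta |= (unsigned long)asid << 48;	/* asid == 0: still 0 */
		return ta;
	}

	/* tlbi_vaddr_sketch(0, 0) == 0UL, matching __tlbi(aside1is, 0UL). */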
Re: [PATCH v2 04/13] arm64: mm: Push __TLBI_VADDR() into __tlbi_level()
Posted by Jonathan Cameron 1 week, 5 days ago
On Mon, 19 Jan 2026 17:21:51 +0000
Ryan Roberts <ryan.roberts@arm.com> wrote:

> From: Will Deacon <will@kernel.org>
> 
> The __TLBI_VADDR() macro takes an ASID and an address and converts them
> into a single argument formatted correctly for a TLB invalidation
> instruction.
> 
> Rather than have callers worry about this (especially in the case where
> the ASID is zero), push the macro down into __tlbi_level() via a new
> __tlbi_level_asid() helper.
> 
> Signed-off-by: Will Deacon <will@kernel.org>
> Reviewed-by: Linu Cherian <linu.cherian@arm.com>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
One comment inline, but it's not particularly important given it's only
about slightly reducing the readability of a workaround.

Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>

> @@ -674,6 +679,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
>  #define huge_pmd_needs_flush huge_pmd_needs_flush
>  
>  #undef __tlbi_user
> +#undef __TLBI_VADDR
>  #endif
>  
>  #endif
> diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
> index 4a609e9b65de..ad4857df4830 100644
> --- a/arch/arm64/kernel/sys_compat.c
> +++ b/arch/arm64/kernel/sys_compat.c
> @@ -36,7 +36,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
>  			 * The workaround requires an inner-shareable tlbi.
>  			 * We pick the reserved-ASID to minimise the impact.
>  			 */
> -			__tlbi(aside1is, __TLBI_VADDR(0, 0));
> +			__tlbi(aside1is, 0UL);

Dropping the explicit ASID loses some meaning here versus the comment just
above it. Meh, it's in a workaround, so most folk will ignore it anyway
when reading this code; I don't mind that much.

>  			dsb(ish);
>  		}
>
Re: [PATCH v2 04/13] arm64: mm: Push __TLBI_VADDR() into __tlbi_level()
Posted by Ryan Roberts 1 week, 5 days ago
On 27/01/2026 11:37, Jonathan Cameron wrote:
> On Mon, 19 Jan 2026 17:21:51 +0000
> Ryan Roberts <ryan.roberts@arm.com> wrote:
> 
>> From: Will Deacon <will@kernel.org>
>>
>> The __TLBI_VADDR() macro takes an ASID and an address and converts them
>> into a single argument formatted correctly for a TLB invalidation
>> instruction.
>>
>> Rather than have callers worry about this (especially in the case where
>> the ASID is zero), push the macro down into __tlbi_level() via a new
>> __tlbi_level_asid() helper.
>>
>> Signed-off-by: Will Deacon <will@kernel.org>
>> Reviewed-by: Linu Cherian <linu.cherian@arm.com>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> One comment inline, but it's not particularly important given it's only
> about slightly reducing the readability of a workaround.
> 
> Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
> 
>> @@ -674,6 +679,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
>>  #define huge_pmd_needs_flush huge_pmd_needs_flush
>>  
>>  #undef __tlbi_user
>> +#undef __TLBI_VADDR
>>  #endif
>>  
>>  #endif
>> diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
>> index 4a609e9b65de..ad4857df4830 100644
>> --- a/arch/arm64/kernel/sys_compat.c
>> +++ b/arch/arm64/kernel/sys_compat.c
>> @@ -36,7 +36,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
>>  			 * The workaround requires an inner-shareable tlbi.
>>  			 * We pick the reserved-ASID to minimise the impact.
>>  			 */
>> -			__tlbi(aside1is, __TLBI_VADDR(0, 0));
>> +			__tlbi(aside1is, 0UL);
> 
> Dropping the explicit ASID loses some meaning here versus the comment just
> above it. Meh, it's in a workaround, so most folk will ignore it anyway
> when reading this code; I don't mind that much.

Agreed; personally I think the comment makes it clear what that zero is.

Thanks for all your reviews so far!

> 
>>  			dsb(ish);
>>  		}
>>  
>