Add the remaining TLBI_XXX_MASK macros and replace the current open-coded fields.
While here, replace hard-coded page-size-based shifts with ones derived
via ilog2(), thus adding some required context.
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
arch/arm64/include/asm/tlbflush.h | 26 ++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 131096094f5b..cf75fc2a06c3 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -57,9 +57,10 @@
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid) \
({ \
- unsigned long __ta = (addr) >> 12; \
- __ta &= GENMASK_ULL(43, 0); \
- __ta |= (unsigned long)(asid) << 48; \
+ unsigned long __ta = (addr) >> ilog2(SZ_4K); \
+ __ta &= TLBI_BADDR_MASK; \
+ __ta &= ~TLBI_ASID_MASK; \
+ __ta |= FIELD_PREP(TLBI_ASID_MASK, asid); \
__ta; \
})
@@ -100,8 +101,17 @@ static inline unsigned long get_trans_granule(void)
*
* For Stage-2 invalidation, use the level values provided to that effect
* in asm/stage2_pgtable.h.
+ *
+ * +----------+------+-------+--------------------------------------+
+ * | ASID | TG | TTL | BADDR |
+ * +----------+------+-------+--------------------------------------+
+ * |63 48|47 46|45 44|43 0|
+ * +----------+------+-------+--------------------------------------+
*/
-#define TLBI_TTL_MASK GENMASK_ULL(47, 44)
+#define TLBI_ASID_MASK GENMASK_ULL(63, 48)
+#define TLBI_TG_MASK GENMASK_ULL(47, 46)
+#define TLBI_TTL_MASK GENMASK_ULL(45, 44)
+#define TLBI_BADDR_MASK GENMASK_ULL(43, 0)
#define TLBI_TTL_UNKNOWN INT_MAX
@@ -110,10 +120,10 @@ static inline unsigned long get_trans_granule(void)
\
if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && \
level >= 0 && level <= 3) { \
- u64 ttl = level; \
- ttl |= get_trans_granule() << 2; \
+ arg &= ~TLBI_TG_MASK; \
+ arg |= FIELD_PREP(TLBI_TG_MASK, get_trans_granule()); \
arg &= ~TLBI_TTL_MASK; \
- arg |= FIELD_PREP(TLBI_TTL_MASK, ttl); \
+ arg |= FIELD_PREP(TLBI_TTL_MASK, level); \
} \
\
__tlbi(op, arg); \
@@ -383,7 +393,7 @@ do { \
typeof(pages) __flush_pages = pages; \
int num = 0; \
int scale = 3; \
- int shift = lpa2 ? 16 : PAGE_SHIFT; \
+ int shift = lpa2 ? ilog2(SZ_64K) : PAGE_SHIFT; \
unsigned long addr; \
\
while (__flush_pages > 0) { \
--
2.30.2
Hi Anshuman,
On 10/21/25 06:20, Anshuman Khandual wrote:
> Add remaining TLBI_XXX_MASK macros and replace current open encoded fields.
> While here replace hard coded page size based shifts but with derived ones
> via ilog2() thus adding some required context.
>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will@kernel.org>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
> arch/arm64/include/asm/tlbflush.h | 26 ++++++++++++++++++--------
> 1 file changed, 18 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index 131096094f5b..cf75fc2a06c3 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -57,9 +57,10 @@
> /* This macro creates a properly formatted VA operand for the TLBI */
> #define __TLBI_VADDR(addr, asid) \
> ({ \
> - unsigned long __ta = (addr) >> 12; \
> - __ta &= GENMASK_ULL(43, 0); \
> - __ta |= (unsigned long)(asid) << 48; \
> + unsigned long __ta = (addr) >> ilog2(SZ_4K); \
> + __ta &= TLBI_BADDR_MASK; \
> + __ta &= ~TLBI_ASID_MASK; \
> + __ta |= FIELD_PREP(TLBI_ASID_MASK, asid); \
> __ta; \
> })
>
> @@ -100,8 +101,17 @@ static inline unsigned long get_trans_granule(void)
> *
> * For Stage-2 invalidation, use the level values provided to that effect
> * in asm/stage2_pgtable.h.
> + *
> + * +----------+------+-------+--------------------------------------+
> + * | ASID | TG | TTL | BADDR |
> + * +-----------------+-------+--------------------------------------+
> + * |63 48|47 46|45 44|43 0|
> + * +----------+------+-------+--------------------------------------+
> */
> -#define TLBI_TTL_MASK GENMASK_ULL(47, 44)
> +#define TLBI_ASID_MASK GENMASK_ULL(63, 48)
> +#define TLBI_TG_MASK GENMASK_ULL(47, 46)
> +#define TLBI_TTL_MASK GENMASK_ULL(45, 44)
The definition of TLBI_TTL_MASK changes here. This might be the correct
thing to do but it should be mentioned in the commit message and the
other user, arch/arm64/kvm/nested.c, needs to be updated in tandem.
> +#define TLBI_BADDR_MASK GENMASK_ULL(43, 0)
>
> #define TLBI_TTL_UNKNOWN INT_MAX
>
Thanks,
Ben
On 21/10/25 2:30 PM, Ben Horgan wrote:
> Hi Anshuman,
>
> On 10/21/25 06:20, Anshuman Khandual wrote:
>> Add remaining TLBI_XXX_MASK macros and replace current open encoded fields.
>> While here replace hard coded page size based shifts but with derived ones
>> via ilog2() thus adding some required context.
>>
>> Cc: Catalin Marinas <catalin.marinas@arm.com>
>> Cc: Will Deacon <will@kernel.org>
>> Cc: linux-arm-kernel@lists.infradead.org
>> Cc: linux-kernel@vger.kernel.org
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>> arch/arm64/include/asm/tlbflush.h | 26 ++++++++++++++++++--------
>> 1 file changed, 18 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
>> index 131096094f5b..cf75fc2a06c3 100644
>> --- a/arch/arm64/include/asm/tlbflush.h
>> +++ b/arch/arm64/include/asm/tlbflush.h
>> @@ -57,9 +57,10 @@
>> /* This macro creates a properly formatted VA operand for the TLBI */
>> #define __TLBI_VADDR(addr, asid) \
>> ({ \
>> - unsigned long __ta = (addr) >> 12; \
>> - __ta &= GENMASK_ULL(43, 0); \
>> - __ta |= (unsigned long)(asid) << 48; \
>> + unsigned long __ta = (addr) >> ilog2(SZ_4K); \
>> + __ta &= TLBI_BADDR_MASK; \
>> + __ta &= ~TLBI_ASID_MASK; \
>> + __ta |= FIELD_PREP(TLBI_ASID_MASK, asid); \
>> __ta; \
>> })
>>
>> @@ -100,8 +101,17 @@ static inline unsigned long get_trans_granule(void)
>> *
>> * For Stage-2 invalidation, use the level values provided to that effect
>> * in asm/stage2_pgtable.h.
>> + *
>> + * +----------+------+-------+--------------------------------------+
>> + * | ASID | TG | TTL | BADDR |
>> + * +-----------------+-------+--------------------------------------+
>> + * |63 48|47 46|45 44|43 0|
>> + * +----------+------+-------+--------------------------------------+
>> */
>> -#define TLBI_TTL_MASK GENMASK_ULL(47, 44)
>> +#define TLBI_ASID_MASK GENMASK_ULL(63, 48)
>> +#define TLBI_TG_MASK GENMASK_ULL(47, 46)
>> +#define TLBI_TTL_MASK GENMASK_ULL(45, 44)
>
> The definition of TLBI_TTL_MASK changes here. This might be the correct
> thing to do but it should be mentioned in the commit message and the
Sure, will update the commit message.
> other user, arch/arm64/kvm/nested.c, needs to be updated in tandem.
Right, missed that one. Probably something like the following change
might do it for KVM without much code churn.
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -540,7 +540,7 @@ unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val)
unsigned long max_size;
u8 ttl;
- ttl = FIELD_GET(TLBI_TTL_MASK, val);
+ ttl = FIELD_GET(TLBI_TTL_MASK, val) | FIELD_GET(TLBI_TG_MASK, val);
if (!ttl || !kvm_has_feat(kvm, ID_AA64MMFR2_EL1, TTL, IMP)) {
/* No TTL, check the shadow S2 for a hint */
@@ -963,7 +963,7 @@ static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
case OP_TLBI_VALE1ISNXS:
case OP_TLBI_VALE1OSNXS:
scope->type = TLBI_VA;
- scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
+ scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val) | FIELD_GET(TLBI_TG_MASK, val));
if (!scope->size)
scope->size = SZ_1G;
scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
@@ -991,7 +991,7 @@ static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
case OP_TLBI_VAALE1ISNXS:
case OP_TLBI_VAALE1OSNXS:
scope->type = TLBI_VAA;
- scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
+ scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val) | FIELD_GET(TLBI_TG_MASK, val));
if (!scope->size)
scope->size = SZ_1G;
scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
>
>> +#define TLBI_BADDR_MASK GENMASK_ULL(43, 0)
>>
>> #define TLBI_TTL_UNKNOWN INT_MAX
>>
>
> Thanks,
>
> Ben
>
Hi Anshuman,
On 10/21/25 13:45, Anshuman Khandual wrote:
>
>
> On 21/10/25 2:30 PM, Ben Horgan wrote:
>> Hi Anshuman,
>>
>> On 10/21/25 06:20, Anshuman Khandual wrote:
>>> Add remaining TLBI_XXX_MASK macros and replace current open encoded fields.
>>> While here replace hard coded page size based shifts but with derived ones
>>> via ilog2() thus adding some required context.
>>>
>>> Cc: Catalin Marinas <catalin.marinas@arm.com>
>>> Cc: Will Deacon <will@kernel.org>
>>> Cc: linux-arm-kernel@lists.infradead.org
>>> Cc: linux-kernel@vger.kernel.org
>>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>>> ---
>>> arch/arm64/include/asm/tlbflush.h | 26 ++++++++++++++++++--------
>>> 1 file changed, 18 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
>>> index 131096094f5b..cf75fc2a06c3 100644
>>> --- a/arch/arm64/include/asm/tlbflush.h
>>> +++ b/arch/arm64/include/asm/tlbflush.h
[...]
>>> @@ -100,8 +101,17 @@ static inline unsigned long get_trans_granule(void)
>>> *
>>> * For Stage-2 invalidation, use the level values provided to that effect
>>> * in asm/stage2_pgtable.h.
>>> + *
>>> + * +----------+------+-------+--------------------------------------+
>>> + * | ASID | TG | TTL | BADDR |
>>> + * +-----------------+-------+--------------------------------------+
>>> + * |63 48|47 46|45 44|43 0|
>>> + * +----------+------+-------+--------------------------------------+
>>> */
>>> -#define TLBI_TTL_MASK GENMASK_ULL(47, 44)
>>> +#define TLBI_ASID_MASK GENMASK_ULL(63, 48)
>>> +#define TLBI_TG_MASK GENMASK_ULL(47, 46)
>>> +#define TLBI_TTL_MASK GENMASK_ULL(45, 44)
>>
>> The definition of TLBI_TTL_MASK changes here. This might be the correct
>> thing to do but it should be mentioned in the commit message and the
>
> Sure, will update the commit message.
>> other user, arch/arm64/kvm/nested.c, needs to be updated in tandem.
>
> Right, missed that one. Probably something like the following change
> might do it for KVM without much code churn.
>
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -540,7 +540,7 @@ unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val)
> unsigned long max_size;
> u8 ttl;
>
> - ttl = FIELD_GET(TLBI_TTL_MASK, val);
> + ttl = FIELD_GET(TLBI_TTL_MASK, val) | FIELD_GET(TLBI_TG_MASK, val);
This and the other changed lines are missing a shift, but otherwise
seems reasonable.
>
> if (!ttl || !kvm_has_feat(kvm, ID_AA64MMFR2_EL1, TTL, IMP)) {
> /* No TTL, check the shadow S2 for a hint */
> @@ -963,7 +963,7 @@ static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
> case OP_TLBI_VALE1ISNXS:
> case OP_TLBI_VALE1OSNXS:
> scope->type = TLBI_VA;
> - scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
> + scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val) | FIELD_GET(TLBI_TG_MASK, val));
> if (!scope->size)
> scope->size = SZ_1G;
> scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
> @@ -991,7 +991,7 @@ static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
> case OP_TLBI_VAALE1ISNXS:
> case OP_TLBI_VAALE1OSNXS:
> scope->type = TLBI_VAA;
> - scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
> + scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val) | FIELD_GET(TLBI_TG_MASK, val));
> if (!scope->size)
> scope->size = SZ_1G;
> scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
>
>>
>>> +#define TLBI_BADDR_MASK GENMASK_ULL(43, 0)
>>>
>>> #define TLBI_TTL_UNKNOWN INT_MAX
>>>
>>
>> Thanks,
>>
>> Ben
>>
>
Thanks,
Ben
© 2016 - 2026 Red Hat, Inc.