The generic lazy_mmu layer now tracks whether a task is in lazy MMU
mode. As a result we no longer need a TIF flag for that purpose -
let's use the new in_lazy_mmu_mode() helper instead.
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
---
arch/arm64/include/asm/pgtable.h | 16 +++-------------
arch/arm64/include/asm/thread_info.h | 3 +--
2 files changed, 4 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 535435248923..61ca88f94551 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -62,30 +62,21 @@ static inline void emit_pte_barriers(void)
 
 static inline void queue_pte_barriers(void)
 {
-	unsigned long flags;
-
 	if (in_interrupt()) {
 		emit_pte_barriers();
 		return;
 	}
 
-	flags = read_thread_flags();
-
-	if (flags & BIT(TIF_LAZY_MMU)) {
-		/* Avoid the atomic op if already set. */
-		if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
-			set_thread_flag(TIF_LAZY_MMU_PENDING);
-	} else {
+	if (in_lazy_mmu_mode())
+		test_and_set_thread_flag(TIF_LAZY_MMU_PENDING);
+	else
 		emit_pte_barriers();
-	}
 }
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
 	if (in_interrupt())
 		return;
-
-	set_thread_flag(TIF_LAZY_MMU);
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
@@ -103,7 +94,6 @@ static inline void arch_leave_lazy_mmu_mode(void)
 		return;
 
 	arch_flush_lazy_mmu_mode();
-	clear_thread_flag(TIF_LAZY_MMU);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index f241b8601ebd..4ff8da0767d9 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -84,8 +84,7 @@ void arch_setup_new_exec(void);
 #define TIF_SME_VL_INHERIT	28	/* Inherit SME vl_onexec across exec */
 #define TIF_KERNEL_FPSTATE	29	/* Task is in a kernel mode FPSIMD section */
 #define TIF_TSC_SIGSEGV		30	/* SIGSEGV on counter-timer access */
-#define TIF_LAZY_MMU		31	/* Task in lazy mmu mode */
-#define TIF_LAZY_MMU_PENDING	32	/* Ops pending for lazy mmu mode exit */
+#define TIF_LAZY_MMU_PENDING	31	/* Ops pending for lazy mmu mode exit */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
--
2.47.0
On 29/10/2025 10:09, Kevin Brodsky wrote:
> The generic lazy_mmu layer now tracks whether a task is in lazy MMU
> mode. As a result we no longer need a TIF flag for that purpose -
> let's use the new in_lazy_mmu_mode() helper instead.
>
> Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
> ---
> arch/arm64/include/asm/pgtable.h | 16 +++-------------
> arch/arm64/include/asm/thread_info.h | 3 +--
> 2 files changed, 4 insertions(+), 15 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 535435248923..61ca88f94551 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -62,30 +62,21 @@ static inline void emit_pte_barriers(void)
>
> static inline void queue_pte_barriers(void)
> {
> - unsigned long flags;
> -
> if (in_interrupt()) {
> emit_pte_barriers();
> return;
> }
>
> - flags = read_thread_flags();
> -
> - if (flags & BIT(TIF_LAZY_MMU)) {
> - /* Avoid the atomic op if already set. */
> - if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
> - set_thread_flag(TIF_LAZY_MMU_PENDING);
> - } else {
> + if (in_lazy_mmu_mode())
> + test_and_set_thread_flag(TIF_LAZY_MMU_PENDING);
This removes the optimization of only doing the atomic set operation when the bit
is not already set. I think that optimization should remain.
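Something along these lines (untested sketch, reusing the new in_lazy_mmu_mode()
helper together with the existing test_thread_flag()/set_thread_flag() accessors)
would keep the fast path:

	if (in_lazy_mmu_mode()) {
		/* Avoid the atomic op if already set. */
		if (!test_thread_flag(TIF_LAZY_MMU_PENDING))
			set_thread_flag(TIF_LAZY_MMU_PENDING);
	} else {
		emit_pte_barriers();
	}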
> + else
> emit_pte_barriers();
> - }
> }
>
> static inline void arch_enter_lazy_mmu_mode(void)
> {
> if (in_interrupt())
> return;
Why are you keeping this test? Surely it can go?
> -
> - set_thread_flag(TIF_LAZY_MMU);
> }
>
> static inline void arch_flush_lazy_mmu_mode(void)
> @@ -103,7 +94,6 @@ static inline void arch_leave_lazy_mmu_mode(void)
> return;
>
> arch_flush_lazy_mmu_mode();
> - clear_thread_flag(TIF_LAZY_MMU);
> }
>
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
> index f241b8601ebd..4ff8da0767d9 100644
> --- a/arch/arm64/include/asm/thread_info.h
> +++ b/arch/arm64/include/asm/thread_info.h
> @@ -84,8 +84,7 @@ void arch_setup_new_exec(void);
> #define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
> #define TIF_KERNEL_FPSTATE 29 /* Task is in a kernel mode FPSIMD section */
> #define TIF_TSC_SIGSEGV 30 /* SIGSEGV on counter-timer access */
> -#define TIF_LAZY_MMU 31 /* Task in lazy mmu mode */
> -#define TIF_LAZY_MMU_PENDING 32 /* Ops pending for lazy mmu mode exit */
> +#define TIF_LAZY_MMU_PENDING 31 /* Ops pending for lazy mmu mode exit */
>
> #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
> #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
On 29.10.25 11:09, Kevin Brodsky wrote:
> The generic lazy_mmu layer now tracks whether a task is in lazy MMU
> mode. As a result we no longer need a TIF flag for that purpose -
> let's use the new in_lazy_mmu_mode() helper instead.
>
> Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
> ---
> arch/arm64/include/asm/pgtable.h | 16 +++-------------
> arch/arm64/include/asm/thread_info.h | 3 +--
> 2 files changed, 4 insertions(+), 15 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 535435248923..61ca88f94551 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -62,30 +62,21 @@ static inline void emit_pte_barriers(void)
>
> static inline void queue_pte_barriers(void)
> {
> - unsigned long flags;
> -
> if (in_interrupt()) {
> emit_pte_barriers();
> return;
> }
>
> - flags = read_thread_flags();
> -
> - if (flags & BIT(TIF_LAZY_MMU)) {
> - /* Avoid the atomic op if already set. */
> - if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
> - set_thread_flag(TIF_LAZY_MMU_PENDING);
> - } else {
> + if (in_lazy_mmu_mode())
> + test_and_set_thread_flag(TIF_LAZY_MMU_PENDING);
You likely don't want a test_and_set here: it ends up as a
test_and_set_bit(), i.e. an atomic RMW on every call.
The point is to do the atomic write only when the bit isn't already set.
So keep the current

	/* Avoid the atomic op if already set. */
	if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
		set_thread_flag(TIF_LAZY_MMU_PENDING);
--
Cheers
David
On 03/11/2025 16:03, David Hildenbrand wrote:
> On 29.10.25 11:09, Kevin Brodsky wrote:
>> The generic lazy_mmu layer now tracks whether a task is in lazy MMU
>> mode. As a result we no longer need a TIF flag for that purpose -
>> let's use the new in_lazy_mmu_mode() helper instead.
>>
>> Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
>> ---
>> arch/arm64/include/asm/pgtable.h | 16 +++-------------
>> arch/arm64/include/asm/thread_info.h | 3 +--
>> 2 files changed, 4 insertions(+), 15 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/pgtable.h
>> b/arch/arm64/include/asm/pgtable.h
>> index 535435248923..61ca88f94551 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -62,30 +62,21 @@ static inline void emit_pte_barriers(void)
>> static inline void queue_pte_barriers(void)
>> {
>> - unsigned long flags;
>> -
>> if (in_interrupt()) {
>> emit_pte_barriers();
>> return;
>> }
>> - flags = read_thread_flags();
>> -
>> - if (flags & BIT(TIF_LAZY_MMU)) {
>> - /* Avoid the atomic op if already set. */
>> - if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
>> - set_thread_flag(TIF_LAZY_MMU_PENDING);
>> - } else {
>> + if (in_lazy_mmu_mode())
>> + test_and_set_thread_flag(TIF_LAZY_MMU_PENDING);
>
> You likely don't want a test_and_set here, which would do a
> test_and_set_bit() -- an atomic rmw.
Ah yes good point, the new version would do an atomic RMW in all cases.
Simpler code but also slower :/
>
> You only want to avoid the atomic write if already set.
>
> So keep the current
>
> /* Avoid the atomic op if already set. */
> if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
> set_thread_flag(TIF_LAZY_MMU_PENDING);
Pretty much. Since we're now only considering one flag, we can simplify it to:

	if (!test_thread_flag(TIF_LAZY_MMU_PENDING))
		set_thread_flag(TIF_LAZY_MMU_PENDING);
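i.e. queue_pte_barriers() would end up looking roughly like this (untested):

	static inline void queue_pte_barriers(void)
	{
		if (in_interrupt()) {
			emit_pte_barriers();
			return;
		}

		if (in_lazy_mmu_mode()) {
			/* Avoid the atomic op if already set. */
			if (!test_thread_flag(TIF_LAZY_MMU_PENDING))
				set_thread_flag(TIF_LAZY_MMU_PENDING);
		} else {
			emit_pte_barriers();
		}
	}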
- Kevin