Even if multiple ASIDs are not supported, using the single-ASID variant
of the sfence.vma instruction preserves TLB entries for global (kernel)
pages. So it is always more efficient to use the single-ASID code path.
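
For reference, a minimal sketch of the two sfence.vma flavors in play
(close to, but not necessarily identical to, the helpers in asm/tlbflush.h):

static inline void local_flush_tlb_all(void)
{
	/* Flushes everything, including global (G-bit) kernel mappings. */
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
}

static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	/*
	 * Flushes only entries tagged with @asid; the privileged spec
	 * permits global mappings to survive this form, which is why the
	 * single-ASID path wins even when only ASID 0 is ever used.
	 */
	__asm__ __volatile__ ("sfence.vma x0, %0" : : "r" (asid) : "memory");
}
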
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
---

Changes in v4:
- There is now only one copy of __flush_tlb_range()

Changes in v2:
- Update both copies of __flush_tlb_range()

 arch/riscv/include/asm/mmu_context.h | 2 --
 arch/riscv/mm/context.c              | 3 +--
 arch/riscv/mm/tlbflush.c             | 3 +--
 3 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 7030837adc1a..b0659413a080 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -33,8 +33,6 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
-DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
-
 #include <asm-generic/mmu_context.h>
 
 #endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 3ca9b653df7d..20057085ab8a 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,8 +18,7 @@
 
 #ifdef CONFIG_MMU
 
-DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
-
+static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 static unsigned long num_asids;
 
 static atomic_long_t current_version;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 5ec621545c69..39d80f56d292 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -84,8 +84,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 		if (cpumask_empty(cmask))
 			return;
 
-		if (static_branch_unlikely(&use_asid_allocator))
-			asid = cntx2asid(atomic_long_read(&mm->context.id));
+		asid = cntx2asid(atomic_long_read(&mm->context.id));
 	} else {
 		cmask = cpu_online_mask;
 	}
--
2.42.0
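
Why dropping the static branch is safe: when the ASID allocator is
disabled, init_new_context() leaves mm->context.id at zero and nothing
ever sets its ASID bits, so the unconditional read still yields ASID 0,
the same value programmed into satp. A sketch, assuming cntx2asid()
simply masks out the low ASID bits (the exact macro in this series may
differ):

/* Assumed encoding: allocator version in the high bits, ASID in the low. */
#define cntx2asid(cntx)	((cntx) & SATP_ASID_MASK)

With the allocator off, cntx2asid() evaluates to 0, and sfence.vma with
ASID 0 still hits every user mapping while leaving global kernel entries
intact, which is exactly the efficiency win described in the commit
message.
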
On Tue, Jan 2, 2024 at 11:01 PM Samuel Holland
<samuel.holland@sifive.com> wrote:
>
> [...]
>
You can add:
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Thanks!
Alex
On Tue, Jan 02, 2024 at 02:00:49PM -0800, Samuel Holland wrote:
> Even if multiple ASIDs are not supported, using the single-ASID variant
> of the sfence.vma instruction preserves TLB entries for global (kernel)
> pages. So it is always more efficient to use the single-ASID code path.
>
> Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
>
> [...]
>
> -DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
> -
> +static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
One of my optimizations, "riscv: tlb: avoid tlb flushing if fullmm == 1",
will make use of use_asid_allocator, so could we remove this modification?
> [...]
On 2024-01-03 9:02 AM, Jisheng Zhang wrote:
> On Tue, Jan 02, 2024 at 02:00:49PM -0800, Samuel Holland wrote:
>> [...]
>>
>> -DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
>> -
>> +static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
>
> One of my optimizations, "riscv: tlb: avoid tlb flushing if fullmm == 1",
> will make use of use_asid_allocator, so could we remove this modification?
Yes, I can leave the global declaration alone for now.
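
For readers following along, the DECLARE/DEFINE split under discussion is
the usual static-key pattern; a sketch (asids_in_use() is a hypothetical
caller, not in-tree code):

#include <linux/jump_label.h>

/* In a shared header (asm/mmu_context.h), visible to every translation
 * unit that wants to branch on the key: */
DECLARE_STATIC_KEY_FALSE(use_asid_allocator);

/* In exactly one C file (context.c), the single definition: */
DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

/* Callers compile to a patched jump instead of a load and test: */
static bool asids_in_use(void)
{
	return static_branch_unlikely(&use_asid_allocator);
}

The key is flipped once at boot via static_branch_enable(), in
asids_init(), if the hardware implements enough ASID bits to be useful;
keeping the DECLARE in the header is what lets a later change like
Jisheng's test it from tlbflush.c.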