This is required to allow the IOMMU driver to correctly flush its own
TLB.
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
---
Changes in v2:
- Rebase on top of 6.9-rc1
arch/riscv/mm/tlbflush.c | 39 +++++++++++++++++++++++----------------
1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 893566e004b7..854d984deb07 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -4,6 +4,7 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
+#include <linux/mmu_notifier.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
@@ -99,11 +100,19 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+ return (mm && static_branch_unlikely(&use_asid_allocator)) ?
+ atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+}
+
+static void __flush_tlb_range(struct mm_struct *mm,
+ struct cpumask *cmask,
unsigned long start, unsigned long size,
unsigned long stride)
{
struct flush_tlb_range_data ftd;
+ unsigned long asid = get_mm_asid(mm);
bool broadcast;
if (cpumask_empty(cmask))
@@ -137,31 +146,26 @@ static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
if (cmask != cpu_online_mask)
put_cpu();
-}
-static inline unsigned long get_mm_asid(struct mm_struct *mm)
-{
- return static_branch_unlikely(&use_asid_allocator) ?
- atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+ if (mm)
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
}
void flush_tlb_mm(struct mm_struct *mm)
{
- __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
- 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}
void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned int page_size)
{
- __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
- start, end - start, page_size);
+ __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
addr, PAGE_SIZE, PAGE_SIZE);
}
@@ -194,13 +198,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
}
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
start, end - start, stride_size);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- __flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
+ __flush_tlb_range(NULL, (struct cpumask *)cpu_online_mask,
start, end - start, PAGE_SIZE);
}
@@ -208,7 +212,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
start, end - start, PMD_SIZE);
}
#endif
@@ -222,7 +226,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm,
unsigned long uaddr)
{
+ unsigned long start = uaddr & PAGE_MASK;
+
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE);
}
void arch_flush_tlb_batched_pending(struct mm_struct *mm)
@@ -232,7 +239,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
- FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ __flush_tlb_range(NULL, &batch->cpumask,
+ 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
cpumask_clear(&batch->cpumask);
}
--
2.39.2
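For context on what consumes these notifications: a secondary-TLB user
(e.g. an IOMMU driver doing shared virtual addressing) registers an
mmu_notifier on the mm, and the mmu_notifier_arch_invalidate_secondary_tlbs()
calls added above invoke its callback with the flushed range. Below is a
minimal sketch of such a consumer; every my_iommu_* name is hypothetical,
only the mmu_notifier API (as of the v6.9 timeframe this patch targets) is
assumed:

#include <linux/mmu_notifier.h>
#include <linux/mm_types.h>

/* Hypothetical per-bind state; a real driver keeps device/domain info here. */
struct my_iommu_bond {
	struct mmu_notifier mn;
};

/* Hypothetical helper: device-specific IOTLB shootdown for [start, end). */
static void my_iommu_flush_iotlb_range(struct my_iommu_bond *bond,
				       unsigned long start, unsigned long end)
{
	/* Program the IOMMU invalidation queue/command interface here. */
}

/* Invoked via mmu_notifier_arch_invalidate_secondary_tlbs(). */
static void my_iommu_invalidate_secondary_tlbs(struct mmu_notifier *mn,
					       struct mm_struct *mm,
					       unsigned long start,
					       unsigned long end)
{
	struct my_iommu_bond *bond = container_of(mn, struct my_iommu_bond, mn);

	my_iommu_flush_iotlb_range(bond, start, end);
}

static const struct mmu_notifier_ops my_iommu_mn_ops = {
	.arch_invalidate_secondary_tlbs = my_iommu_invalidate_secondary_tlbs,
};

/* Bind a device address space to a process mm. */
static int my_iommu_bind_mm(struct my_iommu_bond *bond, struct mm_struct *mm)
{
	bond->mn.ops = &my_iommu_mn_ops;
	return mmu_notifier_register(&bond->mn, mm);
}

Without the calls added by this patch, nothing on riscv would ever reach
that callback, which is why the IOMMU SVA path depends on it.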
On Thu, Mar 28, 2024 at 08:38:38AM +0100, Alexandre Ghiti wrote:
> This is required to allow the IOMMU driver to correctly flush its own
> TLB.
>
> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
> ---
>
> Changes in v2:
> - Rebase on top of 6.9-rc1
>
> arch/riscv/mm/tlbflush.c | 39 +++++++++++++++++++++++----------------
> 1 file changed, 23 insertions(+), 16 deletions(-)
>

Hi Alex,

Is this patch still something we need?

Thanks,
drew
Hi Andrew,

On 21/11/2024 10:32, Andrew Jones wrote:
> On Thu, Mar 28, 2024 at 08:38:38AM +0100, Alexandre Ghiti wrote:
>> This is required to allow the IOMMU driver to correctly flush its own
>> TLB.
>>
>> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
>> ---
>>
>> Changes in v2:
>> - Rebase on top of 6.9-rc1
>>
>> arch/riscv/mm/tlbflush.c | 39 +++++++++++++++++++++++----------------
>> 1 file changed, 23 insertions(+), 16 deletions(-)
>>
> Hi Alex,
>
> Is this patch still something we need?
>
> Thanks,
> drew

Yes I think so, hopefully it gets merged.

Thanks,
Alex
Hi Alex,

On 2024-11-21 4:36 AM, Alexandre Ghiti wrote:
> Hi Andrew,
>
> On 21/11/2024 10:32, Andrew Jones wrote:
>> On Thu, Mar 28, 2024 at 08:38:38AM +0100, Alexandre Ghiti wrote:
>>> This is required to allow the IOMMU driver to correctly flush its own
>>> TLB.
>>>
>>> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
>>> ---
>>>
>>> Changes in v2:
>>> - Rebase on top of 6.9-rc1
>>>
>>> arch/riscv/mm/tlbflush.c | 39 +++++++++++++++++++++++----------------
>>> 1 file changed, 23 insertions(+), 16 deletions(-)
>>>
>> Hi Alex,
>>
>> Is this patch still something we need?
>>
>> Thanks,
>> drew
>
> Yes I think so, hopefully it gets merged.

Please rebase and resend this patch. It conflicts with two other changes
merged since you sent v2 (038ac18aae93 and f58e5dc45fa9), in addition to
the one you sent the resolution for.

Regards,
Samuel
On 28/03/2024 08:38, Alexandre Ghiti wrote:
> This is required to allow the IOMMU driver to correctly flush its own
> TLB.
>
> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
> ---
>
> Changes in v2:
> - Rebase on top of 6.9-rc1
>
> arch/riscv/mm/tlbflush.c | 39 +++++++++++++++++++++++----------------
> 1 file changed, 23 insertions(+), 16 deletions(-)
>
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 893566e004b7..854d984deb07 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -4,6 +4,7 @@
> #include <linux/smp.h>
> #include <linux/sched.h>
> #include <linux/hugetlb.h>
> +#include <linux/mmu_notifier.h>
> #include <asm/sbi.h>
> #include <asm/mmu_context.h>
>
> @@ -99,11 +100,19 @@ static void __ipi_flush_tlb_range_asid(void *info)
> local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
> }
>
> -static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
> +static inline unsigned long get_mm_asid(struct mm_struct *mm)
Hi Alex,
Nit: the inline attribute is probably useless.
> +{
> + return (mm && static_branch_unlikely(&use_asid_allocator)) ?
> + atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
> +}
> +
> +static void __flush_tlb_range(struct mm_struct *mm,
> + struct cpumask *cmask,
> unsigned long start, unsigned long size,
> unsigned long stride)
> {
> struct flush_tlb_range_data ftd;
> + unsigned long asid = get_mm_asid(mm);
> bool broadcast;
>
> if (cpumask_empty(cmask))
> @@ -137,31 +146,26 @@ static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
>
> if (cmask != cpu_online_mask)
> put_cpu();
> -}
>
> -static inline unsigned long get_mm_asid(struct mm_struct *mm)
> -{
> - return static_branch_unlikely(&use_asid_allocator) ?
> - atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
> + if (mm)
> + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
> }
>
> void flush_tlb_mm(struct mm_struct *mm)
> {
> - __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
> - 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
> + __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
> }
>
> void flush_tlb_mm_range(struct mm_struct *mm,
> unsigned long start, unsigned long end,
> unsigned int page_size)
> {
> - __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
> - start, end - start, page_size);
> + __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
> }
>
> void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
> {
> - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
> + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
> addr, PAGE_SIZE, PAGE_SIZE);
> }
>
> @@ -194,13 +198,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
> }
> }
>
> - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
> + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
> start, end - start, stride_size);
> }
>
> void flush_tlb_kernel_range(unsigned long start, unsigned long end)
> {
> - __flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
> + __flush_tlb_range(NULL, (struct cpumask *)cpu_online_mask,
> start, end - start, PAGE_SIZE);
> }
>
> @@ -208,7 +212,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
> void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
> unsigned long end)
> {
> - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
> + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
> start, end - start, PMD_SIZE);
> }
> #endif
> @@ -222,7 +226,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> struct mm_struct *mm,
> unsigned long uaddr)
> {
> + unsigned long start = uaddr & PAGE_MASK;
> +
> cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
> + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE);
> }
>
> void arch_flush_tlb_batched_pending(struct mm_struct *mm)
> @@ -232,7 +239,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
>
> void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> {
> - __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
> - FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
> + __flush_tlb_range(NULL, &batch->cpumask,
> + 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
> cpumask_clear(&batch->cpumask);
> }
Other than that, looks good to me,
Reviewed-by: Clément Léger <cleger@rivosinc.com>
Thanks,
Clément
Hi Palmer,
On 28/03/2024 08:38, Alexandre Ghiti wrote:
> This is required to allow the IOMMU driver to correctly flush its own
> TLB.
>
> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
> ---
>
> Changes in v2:
> - Rebase on top of 6.9-rc1
>
> arch/riscv/mm/tlbflush.c | 39 +++++++++++++++++++++++----------------
> 1 file changed, 23 insertions(+), 16 deletions(-)
>
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 893566e004b7..854d984deb07 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -4,6 +4,7 @@
> #include <linux/smp.h>
> #include <linux/sched.h>
> #include <linux/hugetlb.h>
> +#include <linux/mmu_notifier.h>
> #include <asm/sbi.h>
> #include <asm/mmu_context.h>
>
> @@ -99,11 +100,19 @@ static void __ipi_flush_tlb_range_asid(void *info)
> local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
> }
>
> -static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
> +static inline unsigned long get_mm_asid(struct mm_struct *mm)
> +{
> + return (mm && static_branch_unlikely(&use_asid_allocator)) ?
> + atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
> +}
> +
> +static void __flush_tlb_range(struct mm_struct *mm,
> + struct cpumask *cmask,
> unsigned long start, unsigned long size,
> unsigned long stride)
> {
> struct flush_tlb_range_data ftd;
> + unsigned long asid = get_mm_asid(mm);
> bool broadcast;
>
> if (cpumask_empty(cmask))
> @@ -137,31 +146,26 @@ static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
>
> if (cmask != cpu_online_mask)
> put_cpu();
> -}
>
> -static inline unsigned long get_mm_asid(struct mm_struct *mm)
> -{
> - return static_branch_unlikely(&use_asid_allocator) ?
> - atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
> + if (mm)
> + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
> }
>
> void flush_tlb_mm(struct mm_struct *mm)
> {
> - __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
> - 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
> + __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
> }
>
> void flush_tlb_mm_range(struct mm_struct *mm,
> unsigned long start, unsigned long end,
> unsigned int page_size)
> {
> - __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
> - start, end - start, page_size);
> + __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
> }
>
> void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
> {
> - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
> + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
> addr, PAGE_SIZE, PAGE_SIZE);
> }
>
> @@ -194,13 +198,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
> }
> }
>
> - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
> + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
> start, end - start, stride_size);
> }
>
> void flush_tlb_kernel_range(unsigned long start, unsigned long end)
> {
> - __flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
> + __flush_tlb_range(NULL, (struct cpumask *)cpu_online_mask,
> start, end - start, PAGE_SIZE);
> }
>
> @@ -208,7 +212,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
> void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
> unsigned long end)
> {
> - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
> + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
> start, end - start, PMD_SIZE);
> }
> #endif
> @@ -222,7 +226,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> struct mm_struct *mm,
> unsigned long uaddr)
> {
> + unsigned long start = uaddr & PAGE_MASK;
> +
> cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
> + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE);
> }
>
> void arch_flush_tlb_batched_pending(struct mm_struct *mm)
> @@ -232,7 +239,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
>
> void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> {
> - __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
> - FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
> + __flush_tlb_range(NULL, &batch->cpumask,
> + 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
> cpumask_clear(&batch->cpumask);
> }
This will conflict with Samuel's patch
https://lore.kernel.org/all/20240301201837.2826172-1-samuel.holland@sifive.com/
Here is the conflict resolution if you pull this patch as-is; otherwise
I'll send a v3:
diff --cc arch/riscv/mm/tlbflush.c
index 07d743f87b3f,854d984deb07..000000000000
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@@ -99,7 -100,14 +100,14 @@@ static void __ipi_flush_tlb_range_asid(
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
- static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
+ static inline unsigned long get_mm_asid(struct mm_struct *mm)
+ {
+ return (mm && static_branch_unlikely(&use_asid_allocator)) ?
+ atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+ }
+
+ static void __flush_tlb_range(struct mm_struct *mm,
- struct cpumask *cmask,
++ const struct cpumask *cmask,
unsigned long start, unsigned long size,
unsigned long stride)
{
@@@ -200,7 -204,7 +204,7 @@@ void flush_tlb_range(struct vm_area_str
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
- __flush_tlb_range(NULL, (struct cpumask *)cpu_online_mask,
++ __flush_tlb_range(NULL, cpu_online_mask,
start, end - start, PAGE_SIZE);
}
Thanks,
Alex
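To make the effect concrete, a sketch of the resulting call path (under
the assumption the patch is applied with either conflict resolution):

	/* CPU-side flush of a user range, e.g.: */
	flush_tlb_mm_range(mm, addr, addr + len, PAGE_SIZE);

	/*
	 * ...funnels into __flush_tlb_range(), which now finishes with:
	 *
	 *	if (mm)
	 *		mmu_notifier_arch_invalidate_secondary_tlbs(mm, start,
	 *							    start + size);
	 *
	 * so any registered secondary TLB (IOMMU) sees the same range.
	 * Kernel-range flushes pass mm == NULL and skip the notifier, and
	 * the batched path notifies eagerly from arch_tlbbatch_add_pending()
	 * instead, because the batch no longer carries the mm by the time
	 * arch_tlbbatch_flush() runs.
	 */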