Now that we maintain an array of active mms on each CPU, when
local_flush_tlb_all() is called, we can clear the current CPU from the
mm_cpumask of every mm active on that CPU.
Signed-off-by: Xu Lu <luxu.kernel@bytedance.com>
---
arch/riscv/include/asm/tlbflush.h | 6 ++++++
arch/riscv/mm/context.c | 2 +-
arch/riscv/mm/tlbflush.c | 31 +++++++++++++++++++++++++++++--
3 files changed, 36 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index c9630267c58cd..fd62b27172d4a 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -108,6 +108,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_info, tlbinfo);
void local_load_tlb_mm(struct mm_struct *mm);
void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_all_mm(void);
void __init lazy_tlb_flush_init(void);
#else /* CONFIG_RISCV_LAZY_TLB_FLUSH */
@@ -119,6 +120,11 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
local_flush_tlb_all_asid(get_mm_asid(mm));
}
+static inline void local_flush_tlb_all_mm(void)
+{
+ local_flush_tlb_all();
+}
+
static inline void lazy_tlb_flush_init(void) {}
#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index c381c4ed46bfb..b6657681948f9 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -194,7 +194,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
satp_mode);
if (need_flush_tlb)
- local_flush_tlb_all();
+ local_flush_tlb_all_mm();
}
static void set_mm_noasid(struct mm_struct *mm)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 88a1e45bcf508..73c0a7ef61cb1 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -89,13 +89,13 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
static void __ipi_flush_tlb_all(void *info)
{
- local_flush_tlb_all();
+ local_flush_tlb_all_mm();
}
void flush_tlb_all(void)
{
if (num_online_cpus() < 2)
- local_flush_tlb_all();
+ local_flush_tlb_all_mm();
else if (riscv_use_sbi_for_rfence())
sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
else
@@ -461,6 +461,33 @@ void local_flush_tlb_mm(struct mm_struct *mm)
local_flush_tlb_all_asid(asid);
}
+void local_flush_tlb_all_mm(void)
+{
+ struct tlb_info *info = this_cpu_ptr(&tlbinfo);
+ struct tlb_context *contexts = info->contexts;
+ struct mm_struct *mms[MAX_LOADED_MM];
+ unsigned int cpu = raw_smp_processor_id();
+ unsigned int i, num = 0;
+
+ write_lock(&info->rwlock);
+ for (i = 0; i < MAX_LOADED_MM; i++) {
+ if (!contexts[i].mm || contexts[i].mm == info->active_mm)
+ continue;
+
+ mms[num++] = contexts[i].mm;
+ contexts[i].mm = NULL;
+ contexts[i].gen = 0;
+ }
+ write_unlock(&info->rwlock);
+
+ for (i = 0; i < num; i++) {
+ cpumask_clear_cpu(cpu, mm_cpumask(mms[i]));
+ mmdrop_lazy_mm(mms[i]);
+ }
+
+ local_flush_tlb_all();
+}
+
void __init lazy_tlb_flush_init(void)
{
struct tlb_flush_queue *queue;
--
2.20.1