The generic lazy_mmu layer now tracks whether a task is in lazy MMU
mode. As a result, we no longer need to track whether the per-CPU TLB
batch struct is active - we know it is if in_lazy_mmu_mode() returns
true.
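
For reference, a minimal sketch of the helper this patch relies on.
This is a hypothetical shape only: the real implementation is added by
the generic lazy_mmu patches earlier in this series, and the
lazy_mmu_state field name here is an assumption, not the actual code:

	/* Sketch only: per-task tracking assumed to live in task_struct. */
	static inline bool in_lazy_mmu_mode(void)
	{
		return current->lazy_mmu_state.active;
	}

Since the generic layer maintains this state around the
arch_enter/arch_leave_lazy_mmu_mode() calls, tb->active is redundant
and tlb_batch_add_one() can test in_lazy_mmu_mode() directly, which is
what the hunks below do.
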
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
---
 arch/sparc/include/asm/tlbflush_64.h | 1 -
 arch/sparc/mm/tlb.c                  | 9 +--------
 2 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 4e1036728e2f..6133306ba59a 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -12,7 +12,6 @@ struct tlb_batch {
 	unsigned int hugepage_shift;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
-	unsigned long active;
 	unsigned long vaddrs[TLB_BATCH_NR];
 };
 
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 7b5dfcdb1243..879e22c86e5c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,11 +52,7 @@ void flush_tlb_pending(void)
 
 void arch_enter_lazy_mmu_mode(void)
 {
-	struct tlb_batch *tb;
-
 	preempt_disable();
-	tb = this_cpu_ptr(&tlb_batch);
-	tb->active = 1;
 }
 
 void arch_flush_lazy_mmu_mode(void)
@@ -69,10 +65,7 @@ void arch_flush_lazy_mmu_mode(void)
 
 void arch_leave_lazy_mmu_mode(void)
 {
-	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
-
 	arch_flush_lazy_mmu_mode();
-	tb->active = 0;
 	preempt_enable();
 }
 
@@ -93,7 +86,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 		nr = 0;
 	}
 
-	if (!tb->active) {
+	if (!in_lazy_mmu_mode()) {
 		flush_tsb_user_page(mm, vaddr, hugepage_shift);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
--
2.47.0