The hash MMU code keeps a per-CPU TLB batch (ppc64_tlb_batch) that is
marked active when entering lazy MMU mode and inactive again when
leaving it, so the batch is active for exactly the duration of the
lazy MMU section. Preemption is disabled over that interval to ensure
that the per-CPU reference remains valid.
The generic lazy_mmu layer now tracks whether a task is in lazy MMU
mode. We can therefore rely on the generic helper in_lazy_mmu_mode()
to tell whether the batch is active, instead of tracking this
explicitly in ppc64_tlb_batch::active.
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
---
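A note for reviewers: a minimal sketch of the generic state tracking
this patch relies on. Only in_lazy_mmu_mode() is taken from the
series; the enable/disable wrappers and the per-task field shown here
are hypothetical, and the real generic layer may store the state
differently (e.g. as a nesting count):

static inline void lazy_mmu_mode_enable(void)	/* hypothetical name */
{
	arch_enter_lazy_mmu_mode();		/* hash: preempt_disable() */
	current->lazy_mmu_enabled = true;	/* hypothetical field */
}

static inline void lazy_mmu_mode_disable(void)	/* hypothetical name */
{
	current->lazy_mmu_enabled = false;	/* hypothetical field */
	arch_leave_lazy_mmu_mode();		/* hash: flush batch, preempt_enable() */
}

static inline bool in_lazy_mmu_mode(void)
{
	return current->lazy_mmu_enabled;	/* hypothetical field */
}

Since the state is per-task and preemption is disabled for the whole
section, in_lazy_mmu_mode() answers exactly the question the old
ppc64_tlb_batch::active flag answered.
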
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 9 ---------
arch/powerpc/mm/book3s64/hash_tlb.c | 2 +-
2 files changed, 1 insertion(+), 10 deletions(-)
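
For context, using the hypothetical wrapper names from the sketch
above, a hash PTE update under lazy MMU mode goes roughly like this
(sketch only; the exact path into hpte_need_flush() depends on the PTE
update helper used by the caller):

	lazy_mmu_mode_enable();		/* sets the per-task state;
					 * hash: preempt_disable() */
	...
	/* PTE updates reach hpte_need_flush(): in_lazy_mmu_mode() is
	 * true, so the invalidation is queued in this CPU's
	 * ppc64_tlb_batch instead of being flushed immediately */
	...
	lazy_mmu_mode_disable();	/* hash: flush the pending batch,
					 * preempt_enable(); clears the
					 * per-task state */

Outside such a section in_lazy_mmu_mode() returns false and
hpte_need_flush() flushes right away, which is exactly what the old
!batch->active check achieved.
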
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 623a8a8b2d0e..bbc54690d374 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -12,7 +12,6 @@
#define PPC64_TLB_BATCH_NR 192
struct ppc64_tlb_batch {
- int active;
unsigned long index;
struct mm_struct *mm;
real_pte_t pte[PPC64_TLB_BATCH_NR];
@@ -26,8 +25,6 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
static inline void arch_enter_lazy_mmu_mode(void)
{
- struct ppc64_tlb_batch *batch;
-
if (radix_enabled())
return;
/*
@@ -35,8 +32,6 @@ static inline void arch_enter_lazy_mmu_mode(void)
* operating on kernel page tables.
*/
preempt_disable();
- batch = this_cpu_ptr(&ppc64_tlb_batch);
- batch->active = 1;
}
static inline void arch_flush_lazy_mmu_mode(void)
@@ -51,14 +46,10 @@ static inline void arch_flush_lazy_mmu_mode(void)
static inline void arch_leave_lazy_mmu_mode(void)
{
- struct ppc64_tlb_batch *batch;
-
if (radix_enabled())
return;
- batch = this_cpu_ptr(&ppc64_tlb_batch);
arch_flush_lazy_mmu_mode();
- batch->active = 0;
preempt_enable();
}
diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index 787f7a0e27f0..72b83f582b6d 100644
--- a/arch/powerpc/mm/book3s64/hash_tlb.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -100,7 +100,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
* Check if we have an active batch on this CPU. If not, just
* flush now and return.
*/
- if (!batch->active) {
+ if (!in_lazy_mmu_mode()) {
flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
put_cpu_var(ppc64_tlb_batch);
return;
--
2.47.0