From nobody Mon Feb 9 14:15:41 2026
From: Ryan Roberts
To: Will Deacon, Ard Biesheuvel, Catalin Marinas, Mark Rutland,
	Linus Torvalds, Oliver Upton, Marc Zyngier, Dev Jain,
	Linu Cherian, Jonathan Cameron
Cc: Ryan Roberts, linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, Linu Cherian
Subject: [PATCH v2 12/13] arm64: mm: Wrap flush_tlb_page() around ___flush_tlb_range()
Date: Mon, 19 Jan 2026 17:21:59 +0000
Message-ID: <20260119172202.1681510-13-ryan.roberts@arm.com>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20260119172202.1681510-1-ryan.roberts@arm.com>
References: <20260119172202.1681510-1-ryan.roberts@arm.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit

Flushing a page from the tlb is just a special case of flushing a
range. So let's rework flush_tlb_page() so that it simply wraps
___flush_tlb_range().

While at it, let's also update the API to take the same flags that we
use when flushing a range. This allows us to delete all the ugly
"_nosync", "_local" and "_nonotify" variants.

Thanks to constant folding, all of the complex looping and
tlbi-by-range options get eliminated so that the generated code for
flush_tlb_page() looks very similar to the previous version.
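For reference, the call-site conversions below follow this pattern (an
illustrative summary of how the removed helpers map onto the new
flag-based entry point; not a hunk from this patch):

  flush_tlb_page(vma, addr);                /* now wraps __flush_tlb_page(vma, addr, TLBF_NONE) */
  flush_tlb_page_nosync(vma, addr);         /* -> __flush_tlb_page(vma, addr, TLBF_NOSYNC) */
  local_flush_tlb_page(vma, addr);          /* -> __flush_tlb_page(vma, addr, TLBF_NOBROADCAST) */
  local_flush_tlb_page_nonotify(vma, addr); /* -> __flush_tlb_page(vma, addr,
                                                   TLBF_NOBROADCAST | TLBF_NONOTIFY) */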
Reviewed-by: Linu Cherian
Signed-off-by: Ryan Roberts
---
 arch/arm64/include/asm/pgtable.h  |  6 +--
 arch/arm64/include/asm/tlbflush.h | 81 ++++++++++---------------------
 arch/arm64/mm/fault.c             |  2 +-
 3 files changed, 29 insertions(+), 60 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 736747fbc843..b96a7ca465a1 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -136,10 +136,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
  * entries exist.
  */
 #define flush_tlb_fix_spurious_fault(vma, address, ptep)	\
-	local_flush_tlb_page_nonotify(vma, address)
+	__flush_tlb_page(vma, address, TLBF_NOBROADCAST | TLBF_NONOTIFY)
 
 #define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp)	\
-	local_flush_tlb_page_nonotify(vma, address)
+	__flush_tlb_page(vma, address, TLBF_NOBROADCAST | TLBF_NONOTIFY)
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
@@ -1351,7 +1351,7 @@ static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
 		 * context-switch, which provides a DSB to complete the TLB
 		 * invalidation.
 		 */
-		flush_tlb_page_nosync(vma, address);
+		__flush_tlb_page(vma, address, TLBF_NOSYNC);
 	}
 
 	return young;
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index f03831cd8719..88f46760e2c2 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -255,10 +255,7 @@ static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
  *	unmapping pages from vmalloc/io space.
  *
  *	flush_tlb_page(vma, addr)
- *		Invalidate a single user mapping for address 'addr' in the
- *		address space corresponding to 'vma->mm'. Note that this
- *		operation only invalidates a single, last-level page-table
- *		entry and therefore does not affect any walk-caches.
+ *		Equivalent to __flush_tlb_page(..., flags=TLBF_NONE)
  *
  *
  *	Next, we have some undocumented invalidation routines that you probably
@@ -286,13 +283,14 @@ static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
  *		TLBF_NOSYNC (don't issue trailing dsb) and TLBF_NOBROADCAST
  *		(only perform the invalidation for the local cpu).
  *
- *	local_flush_tlb_page(vma, addr)
- *		Local variant of flush_tlb_page(). Stale TLB entries may
- *		remain in remote CPUs.
- *
- *	local_flush_tlb_page_nonotify(vma, addr)
- *		Same as local_flush_tlb_page() except MMU notifier will not be
- *		called.
+ *	__flush_tlb_page(vma, addr, flags)
+ *		Invalidate a single user mapping for address 'addr' in the
+ *		address space corresponding to 'vma->mm'. Note that this
+ *		operation only invalidates a single, last-level page-table entry
+ *		and therefore does not affect any walk-caches. flags may contain
+ *		any combination of TLBF_NONOTIFY (don't call mmu notifiers),
+ *		TLBF_NOSYNC (don't issue trailing dsb) and TLBF_NOBROADCAST
+ *		(only perform the invalidation for the local cpu).
  *
  * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
  * on top of these routines, since that is our interface to the mmu_gather
@@ -326,51 +324,6 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
 
-static inline void __local_flush_tlb_page_nonotify_nosync(struct mm_struct *mm,
-							   unsigned long uaddr)
-{
-	dsb(nshst);
-	__tlbi_level_asid(vale1, uaddr, TLBI_TTL_UNKNOWN, ASID(mm));
-}
-
-static inline void local_flush_tlb_page_nonotify(struct vm_area_struct *vma,
-						 unsigned long uaddr)
-{
-	__local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr);
-	dsb(nsh);
-}
-
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
-					unsigned long uaddr)
-{
-	__local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr);
-	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK,
-						    (uaddr & PAGE_MASK) + PAGE_SIZE);
-	dsb(nsh);
-}
-
-static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
-					   unsigned long uaddr)
-{
-	dsb(ishst);
-	__tlbi_level_asid(vale1is, uaddr, TLBI_TTL_UNKNOWN, ASID(mm));
-	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
-						    (uaddr & PAGE_MASK) + PAGE_SIZE);
-}
-
-static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
-					 unsigned long uaddr)
-{
-	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long uaddr)
-{
-	flush_tlb_page_nosync(vma, uaddr);
-	dsb(ish);
-}
-
 static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 {
 	/*
@@ -633,6 +586,22 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, TLBF_NONE);
 }
 
+static inline void __flush_tlb_page(struct vm_area_struct *vma,
+				    unsigned long uaddr, tlbf_t flags)
+{
+	unsigned long start = round_down(uaddr, PAGE_SIZE);
+	unsigned long end = start + PAGE_SIZE;
+
+	___flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN,
+			   TLBF_NOWALKCACHE | flags);
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long uaddr)
+{
+	__flush_tlb_page(vma, uaddr, TLBF_NONE);
+}
+
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	const unsigned long stride = PAGE_SIZE;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index be9dab2c7d6a..f91aa686f142 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -239,7 +239,7 @@ int __ptep_set_access_flags(struct vm_area_struct *vma,
 	 * flush_tlb_fix_spurious_fault().
 	 */
 	if (dirty)
-		local_flush_tlb_page(vma, address);
+		__flush_tlb_page(vma, address, TLBF_NOBROADCAST);
 	return 1;
 }
 
-- 
2.43.0