From: Ryan Roberts <ryan.roberts@arm.com>
To: Will Deacon, Ard Biesheuvel, Catalin Marinas, Mark Rutland,
	Linus Torvalds, Oliver Upton, Marc Zyngier, Dev Jain, Linu Cherian
Cc: Ryan Roberts, linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v1 10/13] arm64: mm: Refactor __flush_tlb_range() to take flags
Date: Tue, 16 Dec 2025 14:45:55 +0000
Message-ID: <20251216144601.2106412-11-ryan.roberts@arm.com>
In-Reply-To: <20251216144601.2106412-1-ryan.roberts@arm.com>
References: <20251216144601.2106412-1-ryan.roberts@arm.com>

We have function variants with "_nosync", "_local" and "_nonotify"
suffixes, as well as a "last_level" parameter. Let's generalize and
simplify by encoding all of these variants in a single flags parameter.

As a first step, convert the "last_level" boolean parameter to a flags
parameter and introduce the first flag, TLBF_NOWALKCACHE. When the flag
is set, walk-cache entries are not evicted, matching the old
last_level=true behaviour.
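To illustrate the pattern for reviewers, below is a minimal standalone
sketch of the __bitwise flags idiom used here; the stub macros stand in
for the kernel's sparse annotations and BIT() helper, and
flush_example() is an invented consumer, not part of this patch:

	#include <stdio.h>

	/* Outside a sparse run, __bitwise and __force expand to nothing. */
	#define __bitwise
	#define __force
	#define BIT(n)			(1U << (n))

	typedef unsigned __bitwise tlbf_t;
	#define TLBF_NONE		((__force tlbf_t)0)
	#define TLBF_NOWALKCACHE	((__force tlbf_t)BIT(0))

	/* Mirrors the branch __flush_tlb_range_nosync() takes on the flag. */
	static void flush_example(tlbf_t flags)
	{
		if (flags & TLBF_NOWALKCACHE)
			printf("vale1is: leaf entries only, walk cache kept\n");
		else
			printf("vae1is: leaf and walk-cache entries\n");
	}

	int main(void)
	{
		flush_example(TLBF_NONE);		/* old last_level == false */
		flush_example(TLBF_NOWALKCACHE);	/* old last_level == true */
		return 0;
	}

Under sparse, the __bitwise annotation makes tlbf_t incompatible with
plain integers, so call sites cannot accidentally pass a bare bool or
level number where flags are expected.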
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 arch/arm64/include/asm/hugetlb.h  | 12 ++++++------
 arch/arm64/include/asm/pgtable.h  |  4 ++--
 arch/arm64/include/asm/tlb.h      |  6 +++---
 arch/arm64/include/asm/tlbflush.h | 28 ++++++++++++++++------------
 arch/arm64/mm/contpte.c           |  5 +++--
 arch/arm64/mm/hugetlbpage.c       |  4 ++--
 arch/arm64/mm/mmu.c               |  2 +-
 7 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 44c1f757bfcf..04af9499faf2 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -73,23 +73,23 @@ static inline void __flush_hugetlb_tlb_range(struct vm_area_struct *vma,
 					      unsigned long start,
 					      unsigned long end,
 					      unsigned long stride,
-					      bool last_level)
+					      tlbf_t flags)
 {
 	switch (stride) {
 #ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SIZE:
-		__flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1);
+		__flush_tlb_range(vma, start, end, PUD_SIZE, 1, flags);
 		break;
 #endif
 	case CONT_PMD_SIZE:
 	case PMD_SIZE:
-		__flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2);
+		__flush_tlb_range(vma, start, end, PMD_SIZE, 2, flags);
 		break;
 	case CONT_PTE_SIZE:
-		__flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, 3);
+		__flush_tlb_range(vma, start, end, PAGE_SIZE, 3, flags);
 		break;
 	default:
-		__flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, TLBI_TTL_UNKNOWN);
+		__flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, flags);
 	}
 }
 
@@ -100,7 +100,7 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
 {
 	unsigned long stride = huge_page_size(hstate_vma(vma));
 
-	__flush_hugetlb_tlb_range(vma, start, end, stride, false);
+	__flush_hugetlb_tlb_range(vma, start, end, stride, TLBF_NONE);
 }
 
 #endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 64d5f1d9cce9..736747fbc843 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -124,9 +124,9 @@ static inline void arch_leave_lazy_mmu_mode(void)
 
 /* Set stride and tlb_level in flush_*_tlb_range */
 #define flush_pmd_tlb_range(vma, addr, end)	\
-	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
+	__flush_tlb_range(vma, addr, end, PMD_SIZE, 2, TLBF_NONE)
 #define flush_pud_tlb_range(vma, addr, end)	\
-	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
+	__flush_tlb_range(vma, addr, end, PUD_SIZE, 1, TLBF_NONE)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 8d762607285c..10869d7731b8 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -53,7 +53,7 @@ static inline int tlb_get_level(struct mmu_gather *tlb)
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
-	bool last_level = !tlb->freed_tables;
+	tlbf_t flags = tlb->freed_tables ? TLBF_NONE : TLBF_NOWALKCACHE;
 	unsigned long stride = tlb_get_unmap_size(tlb);
 	int tlb_level = tlb_get_level(tlb);
 
@@ -63,13 +63,13 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 	 * reallocate our ASID without invalidating the entire TLB.
 	 */
 	if (tlb->fullmm) {
-		if (!last_level)
+		if (tlb->freed_tables)
 			flush_tlb_mm(tlb->mm);
 		return;
 	}
 
 	__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
-			  last_level, tlb_level);
+			  tlb_level, flags);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 37c782ddc149..9a37a6a014dc 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -267,16 +267,16 @@ static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
  *	CPUs, ensuring that any walk-cache entries associated with the
  *	translation are also invalidated.
  *
- *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
+ *	__flush_tlb_range(vma, start, end, stride, tlb_level, flags)
  *	Invalidate the virtual-address range '[start, end)' on all
  *	CPUs for the user address space corresponding to 'vma->mm'.
  *	The invalidation operations are issued at a granularity
- *	determined by 'stride' and only affect any walk-cache entries
- *	if 'last_level' is equal to false. tlb_level is the level at
+ *	determined by 'stride'. tlb_level is the level at
  *	which the invalidation must take place. If the level is wrong,
  *	no invalidation may take place. In the case where the level
  *	cannot be easily determined, the value TLBI_TTL_UNKNOWN will
- *	perform a non-hinted invalidation.
+ *	perform a non-hinted invalidation. flags may be TLBF_NONE (0) or
+ *	TLBF_NOWALKCACHE (elide eviction of walk-cache entries).
  *
  *	local_flush_tlb_page(vma, addr)
  *	Local variant of flush_tlb_page(). Stale TLB entries may
@@ -528,10 +528,14 @@ static inline bool __flush_tlb_range_limit_excess(unsigned long pages,
 	return pages >= (MAX_DVM_OPS * stride) >> PAGE_SHIFT;
 }
 
+typedef unsigned __bitwise tlbf_t;
+#define TLBF_NONE		((__force tlbf_t)0)
+#define TLBF_NOWALKCACHE	((__force tlbf_t)BIT(0))
+
 static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 				     unsigned long start, unsigned long end,
-				     unsigned long stride, bool last_level,
-				     int tlb_level)
+				     unsigned long stride, int tlb_level,
+				     tlbf_t flags)
 {
 	unsigned long asid, pages;
 
@@ -547,7 +551,7 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 	dsb(ishst);
 	asid = ASID(mm);
 
-	if (last_level)
+	if (flags & TLBF_NOWALKCACHE)
 		__flush_s1_tlb_range_op(vale1is, start, pages, stride, asid,
 					tlb_level);
 	else
@@ -559,11 +563,11 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
-				     unsigned long stride, bool last_level,
-				     int tlb_level)
+				     unsigned long stride, int tlb_level,
+				     tlbf_t flags)
 {
 	__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
-				 last_level, tlb_level);
+				 tlb_level, flags);
 	dsb(ish);
 }
 
@@ -591,7 +595,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	 * Set the tlb_level to TLBI_TTL_UNKNOWN because we can not get enough
 	 * information here.
 	 */
-	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
+	__flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, TLBF_NONE);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -632,7 +636,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 		struct mm_struct *mm, unsigned long start, unsigned long end)
 {
-	__flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
+	__flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, 3, TLBF_NOWALKCACHE);
 }
 
 static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 589bcf878938..1a12bb728ee1 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -205,7 +205,8 @@ static void contpte_convert(struct mm_struct *mm, unsigned long addr,
 	 */
 
 	if (!system_supports_bbml2_noabort())
-		__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
+		__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, 3,
+				  TLBF_NOWALKCACHE);
 
 	__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
 }
@@ -527,7 +528,7 @@ int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
 		 */
 		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
 		__flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE,
-					 PAGE_SIZE, true, 3);
+					 PAGE_SIZE, 3, TLBF_NOWALKCACHE);
 	}
 
 	return young;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 1d90a7e75333..7b95663f8c76 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -184,7 +184,7 @@ static pte_t get_clear_contig_flush(struct mm_struct *mm,
 	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
 	unsigned long end = addr + (pgsize * ncontig);
 
-	__flush_hugetlb_tlb_range(&vma, addr, end, pgsize, true);
+	__flush_hugetlb_tlb_range(&vma, addr, end, pgsize, TLBF_NOWALKCACHE);
 	return orig_pte;
 }
 
@@ -212,7 +212,7 @@ static void clear_flush(struct mm_struct *mm,
 	if (mm == &init_mm)
 		flush_tlb_kernel_range(saddr, addr);
 	else
-		__flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, true);
+		__flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, TLBF_NOWALKCACHE);
 }
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9ae7ce00a7ef..a17d617a959a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -2150,7 +2150,7 @@ pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr,
 	 */
 	if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte))
 		__flush_tlb_range(vma, addr, nr * PAGE_SIZE,
-				  PAGE_SIZE, true, 3);
+				  PAGE_SIZE, 3, TLBF_NOWALKCACHE);
 	}
 
 	return pte;
-- 
2.43.0
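As a conversion cheat-sheet for any remaining or out-of-tree callers
(illustrative call sites, not taken from this patch): the tlb_level
argument now precedes the flags argument, last_level=true becomes
TLBF_NOWALKCACHE, and last_level=false becomes TLBF_NONE:

	/* Before: ..., stride, last_level, tlb_level */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, true, 3);
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);

	/* After: ..., stride, tlb_level, flags */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, 3, TLBF_NOWALKCACHE);
	__flush_tlb_range(vma, start, end, PAGE_SIZE, 3, TLBF_NONE);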