From: Ryan Roberts
To: Will Deacon, Ard Biesheuvel, Catalin Marinas, Mark Rutland, Linus Torvalds, Oliver Upton, Marc Zyngier, Dev Jain, Linu Cherian, Jonathan Cameron
Cc: Ryan Roberts, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [PATCH v2 03/13] arm64: mm: Implicitly invalidate user ASID based on TLBI operation
Date: Mon, 19 Jan 2026 17:21:50 +0000
Message-ID: <20260119172202.1681510-4-ryan.roberts@arm.com>
In-Reply-To: <20260119172202.1681510-1-ryan.roberts@arm.com>
References: <20260119172202.1681510-1-ryan.roberts@arm.com>

When kpti is enabled, separate ASIDs are used for userspace and
kernelspace, requiring ASID-qualified TLB invalidation by virtual
address to invalidate both of them. Push the logic for invalidating the
two ASIDs down into the low-level tlbi-op-specific functions and remove
the burden from the caller to handle the kpti-specific behaviour.
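
For reference, the user-ASID invalidation relies on the existing
__tlbi_user() helper, which re-issues the TLBI with the user ASID
selected via USER_ASID_FLAG, and only when the kernel is unmapped at
EL0 (i.e. kpti is in effect). A rough sketch of that helper, shown here
purely for illustration and not part of the diff below (see tlbflush.h
for the real definition):

  /* Repeat the TLBI for the userspace ASID when kpti is in use. */
  #define __tlbi_user(op, arg) do {				\
  	if (arm64_kernel_unmapped_at_el0())			\
  		__tlbi(op, (arg) | USER_ASID_FLAG);		\
  } while (0)

With this folded into the per-op helpers (vale1is(), rvae1is(), etc.),
callers of __flush_tlb_range_op() no longer need to pass a tlbi_user
flag or invoke __tlbi_user_level() themselves.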
Co-developed-by: Will Deacon
Signed-off-by: Will Deacon
Signed-off-by: Ryan Roberts
Reviewed-by: Jonathan Cameron
---
 arch/arm64/include/asm/tlbflush.h | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index acd59bc61e00..0e5a30eee447 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -110,6 +110,7 @@ typedef void (*tlbi_op)(u64 arg);
 static __always_inline void vae1is(u64 arg)
 {
 	__tlbi(vae1is, arg);
+	__tlbi_user(vae1is, arg);
 }
 
 static __always_inline void vae2is(u64 arg)
@@ -120,11 +121,13 @@ static __always_inline void vae2is(u64 arg)
 static __always_inline void vale1(u64 arg)
 {
 	__tlbi(vale1, arg);
+	__tlbi_user(vale1, arg);
 }
 
 static __always_inline void vale1is(u64 arg)
 {
 	__tlbi(vale1is, arg);
+	__tlbi_user(vale1is, arg);
 }
 
 static __always_inline void vale2is(u64 arg)
@@ -160,11 +163,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
 	op(arg);
 }
 
-#define __tlbi_user_level(op, arg, level) do {				\
-	if (arm64_kernel_unmapped_at_el0())				\
-		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
-} while (0)
-
 /*
  * This macro creates a properly formatted VA operand for the TLB RANGE. The
  * value bit assignments are:
@@ -433,8 +431,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * @stride:	Flush granularity
  * @asid:	The ASID of the task (0 for IPA instructions)
  * @tlb_level:	Translation Table level hint, if known
- * @tlbi_user:	If 'true', call an additional __tlbi_user()
- *		(typically for user ASIDs). 'flase' for IPA instructions
  * @lpa2:	If 'true', the lpa2 scheme is used as set out below
  *
  * When the CPU does not support TLB range operations, flush the TLB
@@ -460,16 +456,19 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 static __always_inline void rvae1is(u64 arg)
 {
 	__tlbi(rvae1is, arg);
+	__tlbi_user(rvae1is, arg);
 }
 
 static __always_inline void rvale1(u64 arg)
 {
 	__tlbi(rvale1, arg);
+	__tlbi_user(rvale1, arg);
 }
 
 static __always_inline void rvale1is(u64 arg)
 {
 	__tlbi(rvale1is, arg);
+	__tlbi_user(rvale1is, arg);
 }
 
 static __always_inline void rvaale1is(u64 arg)
@@ -488,7 +487,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
 }
 
 #define __flush_tlb_range_op(op, start, pages, stride,			\
-				asid, tlb_level, tlbi_user, lpa2)	\
+				asid, tlb_level, lpa2)			\
 do {									\
 	typeof(start) __flush_start = start;				\
 	typeof(pages) __flush_pages = pages;				\
@@ -503,8 +502,6 @@ do {									\
 		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
 			addr = __TLBI_VADDR(__flush_start, asid);	\
 			__tlbi_level(op, addr, tlb_level);		\
-			if (tlbi_user)					\
-				__tlbi_user_level(op, addr, tlb_level);	\
 			__flush_start += stride;			\
 			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
@@ -515,8 +512,6 @@ do {									\
 			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid,	\
 						  scale, num, tlb_level);	\
 			__tlbi_range(r##op, addr);			\
-			if (tlbi_user)					\
-				__tlbi_user(r##op, addr);		\
 			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
 			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
 		}							\
@@ -525,7 +520,7 @@ do {									\
 } while (0)
 
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
-	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
+	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled());
 
 static inline bool
 __flush_tlb_range_limit_excess(unsigned long start, unsigned long end,
 		unsigned long pages, unsigned long stride)
@@ -565,10 +560,10 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 
 	if (last_level)
 		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
-				     tlb_level, true, lpa2_is_enabled());
+				     tlb_level, lpa2_is_enabled());
 	else
 		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
-				     tlb_level, true, lpa2_is_enabled());
+				     tlb_level, lpa2_is_enabled());
 
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
@@ -593,7 +588,7 @@ static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
 	dsb(nshst);
 	asid = ASID(vma->vm_mm);
 	__flush_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid,
-			     3, true, lpa2_is_enabled());
+			     3, lpa2_is_enabled());
 	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, addr,
 						    addr + CONT_PTE_SIZE);
 	dsb(nsh);
@@ -627,7 +622,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 
 	dsb(ishst);
 	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
-			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
+			     TLBI_TTL_UNKNOWN, lpa2_is_enabled());
 	dsb(ish);
 	isb();
 }
@@ -678,6 +673,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
 }
 #define huge_pmd_needs_flush huge_pmd_needs_flush
 
+#undef __tlbi_user
 #endif
 
 #endif
-- 
2.43.0