From: Ryan Roberts
To: Will Deacon, Ard Biesheuvel, Catalin Marinas, Mark Rutland,
	Linus Torvalds, Oliver Upton, Marc Zyngier, Dev Jain, Linu Cherian
Cc: Ryan Roberts, linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v1 03/13] arm64: mm: Implicitly invalidate user ASID based on TLBI operation
Date: Tue, 16 Dec 2025 14:45:48 +0000
Message-ID: <20251216144601.2106412-4-ryan.roberts@arm.com>
In-Reply-To: <20251216144601.2106412-1-ryan.roberts@arm.com>
References: <20251216144601.2106412-1-ryan.roberts@arm.com>

When kpti is enabled, separate ASIDs are used for userspace and
kernelspace, requiring ASID-qualified TLB invalidation by virtual
address to invalidate both of them. Push the logic for invalidating the
two ASIDs down into the low-level tlbi-op-specific functions and remove
the burden from the caller to handle the kpti-specific behaviour.
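For context, the kpti special-casing already lives in the existing
__tlbi_user() helper, which only touches the user ASID when the kernel
is unmapped at EL0, so issuing it unconditionally from the low-level
op helpers is safe. Roughly (a paraphrased sketch of that existing
helper for illustration, not part of this diff):

	/*
	 * Re-issue the TLBI with USER_ASID_FLAG set when the kernel is
	 * unmapped at EL0 (kpti); otherwise expand to nothing.
	 */
	#define __tlbi_user(op, arg) do {				\
		if (arm64_kernel_unmapped_at_el0())			\
			__tlbi(op, (arg) | USER_ASID_FLAG);		\
	} while (0)

This is what allows the tlbi_user plumbing through
__flush_tlb_range_op() to go away: each low-level op now invalidates
both ASIDs itself.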
Co-developed-by: Will Deacon
Signed-off-by: Will Deacon
Signed-off-by: Ryan Roberts
---
 arch/arm64/include/asm/tlbflush.h | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c5111d2afc66..31f43d953ce2 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -110,6 +110,7 @@ typedef void (*tlbi_op)(u64 arg);
 static __always_inline void vae1is(u64 arg)
 {
 	__tlbi(vae1is, arg);
+	__tlbi_user(vae1is, arg);
 }
 
 static __always_inline void vae2is(u64 arg)
@@ -126,6 +127,7 @@ static __always_inline void vale1(u64 arg)
 static __always_inline void vale1is(u64 arg)
 {
 	__tlbi(vale1is, arg);
+	__tlbi_user(vale1is, arg);
 }
 
 static __always_inline void vale2is(u64 arg)
@@ -162,11 +164,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
 	op(arg);
 }
 
-#define __tlbi_user_level(op, arg, level) do {				\
-	if (arm64_kernel_unmapped_at_el0())				\
-		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
-} while (0)
-
 /*
  * This macro creates a properly formatted VA operand for the TLB RANGE. The
  * value bit assignments are:
@@ -435,8 +432,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * @stride: Flush granularity
  * @asid: The ASID of the task (0 for IPA instructions)
  * @tlb_level: Translation Table level hint, if known
- * @tlbi_user: If 'true', call an additional __tlbi_user()
- *             (typically for user ASIDs). 'flase' for IPA instructions
  * @lpa2: If 'true', the lpa2 scheme is used as set out below
  *
  * When the CPU does not support TLB range operations, flush the TLB
@@ -462,6 +457,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 static __always_inline void rvae1is(u64 arg)
 {
 	__tlbi(rvae1is, arg);
+	__tlbi_user(rvae1is, arg);
 }
 
 static __always_inline void rvale1(u64 arg)
@@ -473,6 +469,7 @@ static __always_inline void rvale1(u64 arg)
 static __always_inline void rvale1is(u64 arg)
 {
 	__tlbi(rvale1is, arg);
+	__tlbi_user(rvale1is, arg);
 }
 
 static __always_inline void rvaale1is(u64 arg)
@@ -491,7 +488,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
 }
 
 #define __flush_tlb_range_op(op, start, pages, stride,			\
-				asid, tlb_level, tlbi_user, lpa2)	\
+				asid, tlb_level, lpa2)			\
 do {									\
 	typeof(start) __flush_start = start;				\
 	typeof(pages) __flush_pages = pages;				\
@@ -506,8 +503,6 @@ do {									\
 		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
 			addr = __TLBI_VADDR(__flush_start, asid);	\
 			__tlbi_level(op, addr, tlb_level);		\
-			if (tlbi_user)					\
-				__tlbi_user_level(op, addr, tlb_level);	\
 			__flush_start += stride;			\
 			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
@@ -518,8 +513,6 @@ do {									\
 			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid,	\
 					scale, num, tlb_level);		\
 			__tlbi_range(r##op, addr);			\
-			if (tlbi_user)					\
-				__tlbi_user(r##op, addr);		\
 			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
 			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
 		}							\
@@ -528,7 +521,7 @@ do {									\
 } while (0)
 
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
-	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
+	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled());
 
 static inline bool __flush_tlb_range_limit_excess(unsigned long start,
 		unsigned long end, unsigned long pages, unsigned long stride)
@@ -568,10 +561,10 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 
 	if (last_level)
 		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
-				     tlb_level, true, lpa2_is_enabled());
+				     tlb_level, lpa2_is_enabled());
 	else
 		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
-				     tlb_level, true, lpa2_is_enabled());
+				     tlb_level, lpa2_is_enabled());
 
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
@@ -630,7 +623,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 
 	dsb(ishst);
 	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
-			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
+			     TLBI_TTL_UNKNOWN, lpa2_is_enabled());
 	dsb(ish);
 	isb();
 }
@@ -681,6 +674,6 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
 }
 #define huge_pmd_needs_flush huge_pmd_needs_flush
 
+#undef __tlbi_user
 #endif
-
 #endif
-- 
2.43.0