From: Rik van Riel <riel@surriel.com>
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, bp@alien8.de, peterz@infradead.org,
	dave.hansen@linux.intel.com, zhengqi.arch@bytedance.com,
	nadav.amit@gmail.com, thomas.lendacky@amd.com, kernel-team@meta.com,
	linux-mm@kvack.org, akpm@linux-foundation.org, jannh@google.com,
	mhklinux@outlook.com, andrew.cooper3@citrix.com,
	Rik van Riel <riel@surriel.com>, Manali Shukla
Subject: [PATCH v9 10/12] x86/mm: do targeted broadcast flushing from tlbbatch code
Date: Wed, 5 Feb 2025 23:43:29 -0500
Message-ID: <20250206044346.3810242-11-riel@surriel.com>
X-Mailer: git-send-email 2.47.1
In-Reply-To: <20250206044346.3810242-1-riel@surriel.com>
References: <20250206044346.3810242-1-riel@surriel.com>

Instead of doing a system-wide TLB flush from arch_tlbbatch_flush,
queue up asynchronous, targeted flushes from arch_tlbbatch_add_pending.

This also allows us to avoid adding the CPUs of processes using broadcast
flushing to the batch->cpumask, and will hopefully further reduce TLB
flushing from the reclaim and compaction paths.
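As a rough illustration of the flow this patch introduces, the toy
userspace sketch below condenses the per-page decision made in
arch_tlbbatch_add_pending() and the final wait in arch_tlbbatch_flush().
Every type, helper and printf in it is a stand-in invented for
illustration, not kernel code; only the control flow mirrors the diff
below.

/*
 * Toy, userspace-only sketch of the batching flow.  All names here are
 * stand-in stubs; only the decision logic mirrors the real patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct mm_stub    { int global_asid; bool in_transition; };
struct batch_stub { bool cpumask_populated; };

/* Stands in for the new per-CPU cpu_tlbstate.need_tlbsync flag. */
static bool need_tlbsync;

/* Queue an asynchronous broadcast flush; remember a TLBSYNC is owed. */
static void queue_invlpgb(int pcid, unsigned long uaddr)
{
	printf("INVLPGB pcid=%d addr=%#lx (async, no sync yet)\n", pcid, uaddr);
	need_tlbsync = true;
}

static void add_pending(struct batch_stub *batch, struct mm_stub *mm,
			unsigned long uaddr)
{
	int asid = mm->global_asid;

	if (asid) {
		/* Broadcast-capable process: targeted remote flush, no IPI. */
		queue_invlpgb(asid, uaddr);
		/*
		 * While the process is still transitioning to its global
		 * ASID, some CPUs may run on a local ASID, so also fall
		 * back to the IPI path below.
		 */
		if (mm->in_transition)
			asid = 0;
	}
	if (!asid)
		batch->cpumask_populated = true;	/* classic IPI path */
}

static void flush(struct batch_stub *batch)
{
	if (batch->cpumask_populated)
		printf("flush_tlb_multi(): IPI the CPUs in batch->cpumask\n");
	if (need_tlbsync) {
		printf("tlbsync(): wait for the queued INVLPGBs\n");
		need_tlbsync = false;
	}
	batch->cpumask_populated = false;
}

int main(void)
{
	struct batch_stub batch = { 0 };
	struct mm_stub broadcast_mm = { .global_asid = 5, .in_transition = false };
	struct mm_stub legacy_mm = { .global_asid = 0, .in_transition = false };

	add_pending(&batch, &broadcast_mm, 0x1000);
	add_pending(&batch, &legacy_mm, 0x2000);
	flush(&batch);
	return 0;
}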
Signed-off-by: Rik van Riel <riel@surriel.com>
Tested-by: Manali Shukla
---
 arch/x86/include/asm/invlpgb.h  | 21 +++++----
 arch/x86/include/asm/tlbflush.h | 17 ++++---
 arch/x86/mm/tlb.c               | 80 +++++++++++++++++++++++++++++++--
 3 files changed, 95 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/invlpgb.h b/arch/x86/include/asm/invlpgb.h
index a1d5dedd5217..357e3cc417e4 100644
--- a/arch/x86/include/asm/invlpgb.h
+++ b/arch/x86/include/asm/invlpgb.h
@@ -31,9 +31,8 @@ static inline void __invlpgb(unsigned long asid, unsigned long pcid,
 }
 
 /* Wait for INVLPGB originated by this CPU to complete. */
-static inline void tlbsync(void)
+static inline void __tlbsync(void)
 {
-	cant_migrate();
 	/* TLBSYNC: supported in binutils >= 0.36. */
 	asm volatile(".byte 0x0f, 0x01, 0xff" ::: "memory");
 }
@@ -61,19 +60,19 @@ static inline void invlpgb_flush_user(unsigned long pcid,
 				      unsigned long addr)
 {
 	__invlpgb(0, pcid, addr, 0, 0, INVLPGB_PCID | INVLPGB_VA);
-	tlbsync();
+	__tlbsync();
 }
 
-static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
-						unsigned long addr,
-						u16 nr,
-						bool pmd_stride)
+static inline void __invlpgb_flush_user_nr_nosync(unsigned long pcid,
+						  unsigned long addr,
+						  u16 nr,
+						  bool pmd_stride)
 {
 	__invlpgb(0, pcid, addr, nr - 1, pmd_stride, INVLPGB_PCID | INVLPGB_VA);
 }
 
 /* Flush all mappings for a given PCID, not including globals. */
-static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
+static inline void __invlpgb_flush_single_pcid_nosync(unsigned long pcid)
 {
 	__invlpgb(0, pcid, 0, 0, 0, INVLPGB_PCID);
 }
@@ -82,11 +81,11 @@ static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
 static inline void invlpgb_flush_all(void)
 {
 	__invlpgb(0, 0, 0, 0, 0, INVLPGB_INCLUDE_GLOBAL);
-	tlbsync();
+	__tlbsync();
 }
 
 /* Flush addr, including globals, for all PCIDs. */
-static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
+static inline void __invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
 {
 	__invlpgb(0, 0, addr, nr - 1, 0, INVLPGB_INCLUDE_GLOBAL);
 }
@@ -95,7 +94,7 @@ static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
 static inline void invlpgb_flush_all_nonglobals(void)
 {
 	__invlpgb(0, 0, 0, 0, 0, 0);
-	tlbsync();
+	__tlbsync();
 }
 
 #endif /* _ASM_X86_INVLPGB */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 234277a5ef89..bf167e215e8e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -106,6 +106,7 @@ struct tlb_state {
 	 * need to be invalidated.
 	 */
 	bool invalidate_other;
+	bool need_tlbsync;
 
 #ifdef CONFIG_ADDRESS_MASKING
 	/*
@@ -310,6 +311,10 @@ static inline void broadcast_tlb_flush(struct flush_tlb_info *info)
 static inline void consider_global_asid(struct mm_struct *mm)
 {
 }
+
+static inline void tlbsync(void)
+{
+}
 #endif
 
 #ifdef CONFIG_PARAVIRT
@@ -359,21 +364,15 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 	return atomic64_inc_return(&mm->context.tlb_gen);
 }
 
-static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-					     struct mm_struct *mm,
-					     unsigned long uaddr)
-{
-	inc_mm_tlb_gen(mm);
-	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
-	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
-}
-
 static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
 {
 	flush_tlb_mm(mm);
 }
 
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+				      struct mm_struct *mm,
+				      unsigned long uaddr);
 
 static inline bool pte_flags_need_flush(unsigned long oldflags,
 					unsigned long newflags,
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 05390f0e6cb0..4253c3efd7e4 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -488,6 +488,37 @@ static void finish_asid_transition(struct flush_tlb_info *info)
 	WRITE_ONCE(mm->context.asid_transition, false);
 }
 
+static inline void tlbsync(void)
+{
+	if (!this_cpu_read(cpu_tlbstate.need_tlbsync))
+		return;
+	__tlbsync();
+	this_cpu_write(cpu_tlbstate.need_tlbsync, false);
+}
+
+static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
+						unsigned long addr,
+						u16 nr, bool pmd_stride)
+{
+	__invlpgb_flush_user_nr_nosync(pcid, addr, nr, pmd_stride);
+	if (!this_cpu_read(cpu_tlbstate.need_tlbsync))
+		this_cpu_write(cpu_tlbstate.need_tlbsync, true);
+}
+
+static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
+{
+	__invlpgb_flush_single_pcid_nosync(pcid);
+	if (!this_cpu_read(cpu_tlbstate.need_tlbsync))
+		this_cpu_write(cpu_tlbstate.need_tlbsync, true);
+}
+
+static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
+{
+	__invlpgb_flush_addr_nosync(addr, nr);
+	if (!this_cpu_read(cpu_tlbstate.need_tlbsync))
+		this_cpu_write(cpu_tlbstate.need_tlbsync, true);
+}
+
 static void broadcast_tlb_flush(struct flush_tlb_info *info)
 {
 	bool pmd = info->stride_shift == PMD_SHIFT;
@@ -794,6 +825,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
 		WARN_ON_ONCE(!irqs_disabled());
 
+	tlbsync();
+
 	/*
 	 * Verify that CR3 is what we think it is. This will catch
 	 * hypothetical buggy code that directly switches to swapper_pg_dir
@@ -973,6 +1006,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
  */
 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
+	tlbsync();
+
 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
 		return;
 
@@ -1650,9 +1685,7 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 	 * a local TLB flush is needed. Optimize this use-case by calling
 	 * flush_tlb_func_local() directly in this case.
 	 */
-	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
-		invlpgb_flush_all_nonglobals();
-	} else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
+	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
 		flush_tlb_multi(&batch->cpumask, info);
 	} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
 		lockdep_assert_irqs_enabled();
@@ -1661,12 +1694,53 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 		local_irq_enable();
 	}
 
+	/*
+	 * If we issued (asynchronous) INVLPGB flushes, wait for them here.
+	 * The cpumask above contains only CPUs that were running tasks
+	 * not using broadcast TLB flushing.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+		tlbsync();
+
 	cpumask_clear(&batch->cpumask);
 
 	put_flush_tlb_info();
 	put_cpu();
 }
 
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+			       struct mm_struct *mm,
+			       unsigned long uaddr)
+{
+	u16 asid = mm_global_asid(mm);
+
+	if (asid) {
+		invlpgb_flush_user_nr_nosync(kern_pcid(asid), uaddr, 1, false);
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_user_nr_nosync(user_pcid(asid), uaddr, 1, false);
+
+		/*
+		 * Some CPUs might still be using a local ASID for this
+		 * process, and require IPIs, while others are using the
+		 * global ASID.
+		 *
+		 * In this corner case we need to do both the broadcast
+		 * TLB invalidation, and send IPIs. The IPIs will help
+		 * stragglers transition to the broadcast ASID.
+		 */
+		if (in_asid_transition(mm))
+			asid = 0;
+	}
+
+	if (!asid) {
+		inc_mm_tlb_gen(mm);
+		cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+	}
+
+	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
+}
+
 /*
  * Blindly accessing user memory from NMI context can be dangerous
  * if we're in the middle of switching the current user task or
-- 
2.47.1