From: Lance Yang <lance.yang@linux.dev>
Enable the optimization introduced in the previous patch for x86.
native_pv_tlb_init() checks whether native_flush_tlb_multi() is in use.
On CONFIG_PARAVIRT kernels, it checks pv_ops; on non-PARAVIRT kernels,
the native flush is always in use.
It decides once at boot whether to enable the optimization: if the
native TLB flush is in use and INVLPGB is not supported, we know every
multi-CPU TLB flush is carried out with IPIs, so the redundant sync can
be skipped. The decision is frozen into a static key, as Peter
suggested[1], so the hot-path check compiles down to a patched branch
with no runtime overhead.
PV backends (KVM, Xen, Hyper-V) typically have their own implementations
and don't call native_flush_tlb_multi() directly, so they cannot be trusted
to provide the IPI guarantees we need.
Two-step plan as David suggested[2]:
Step 1 (this patch): Skip the redundant sync when we are 100% certain
the TLB flush sent IPIs. INVLPGB is excluded because, when it is
supported, remote TLBs are invalidated by a hardware broadcast rather
than by IPIs, so we cannot guarantee IPIs were sent. This keeps the
check clean and simple.
Step 2 (future work): Send targeted IPIs only to CPUs actually doing
software/lockless page table walks, benefiting all architectures.
Step 2 naturally only applies to setups where Step 1 does not, such as
x86 with INVLPGB, or arm64.
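
For context, a minimal sketch of how the generic side is expected to
consume the new hook; this is a reconstruction of the previous patch
from the description above (the generic-side function names are
assumptions), not part of this diff:

    /* asm-generic/tlb.h: fallback; architectures opt in explicitly. */
    #ifndef tlb_table_flush_implies_ipi_broadcast
    static inline bool tlb_table_flush_implies_ipi_broadcast(void)
    {
            return false;
    }
    #endif

    /* mm/mmu_gather.c: hypothetical shape of the consumer. */
    static void tlb_remove_table_one(void *table)
    {
            /*
             * The explicit sync IPI is only needed when the TLB flush
             * itself did not already interrupt every CPU that might be
             * doing a lockless (GUP-fast style) page table walk.
             */
            if (!tlb_table_flush_implies_ipi_broadcast())
                    tlb_remove_table_sync_one();
            __tlb_remove_table(table);
    }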
[1] https://lore.kernel.org/linux-mm/20260302145652.GH1395266@noisy.programming.kicks-ass.net/
[2] https://lore.kernel.org/linux-mm/bbfdf226-4660-4949-b17b-0d209ee4ef8c@kernel.org/
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Suggested-by: David Hildenbrand (Arm) <david@kernel.org>
Signed-off-by: Lance Yang <lance.yang@linux.dev>
---
arch/x86/include/asm/tlb.h | 17 ++++++++++++++++-
arch/x86/include/asm/tlbflush.h | 2 ++
arch/x86/kernel/smpboot.c | 1 +
arch/x86/mm/tlb.c | 15 +++++++++++++++
4 files changed, 34 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 866ea78ba156..99de622d3856 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -5,11 +5,21 @@
#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);
+#define tlb_table_flush_implies_ipi_broadcast tlb_table_flush_implies_ipi_broadcast
+static inline bool tlb_table_flush_implies_ipi_broadcast(void);
+
#include <asm-generic/tlb.h>
#include <linux/kernel.h>
#include <vdso/bits.h>
#include <vdso/page.h>
+DECLARE_STATIC_KEY_FALSE(tlb_ipi_broadcast_key);
+
+static inline bool tlb_table_flush_implies_ipi_broadcast(void)
+{
+ return static_branch_likely(&tlb_ipi_broadcast_key);
+}
+
static inline void tlb_flush(struct mmu_gather *tlb)
{
unsigned long start = 0UL, end = TLB_FLUSH_ALL;
@@ -20,7 +30,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
end = tlb->end;
}
- flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
+ /*
+ * Pass both freed_tables and unshared_tables so that lazy-TLB CPUs
+ * also receive IPIs during unsharing page tables.
+ */
+ flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
+ tlb->freed_tables || tlb->unshared_tables);
}
static inline void invlpg(unsigned long addr)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 5a3cdc439e38..8ba853154b46 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -18,6 +18,8 @@
DECLARE_PER_CPU(u64, tlbstate_untag_mask);
+void __init native_pv_tlb_init(void);
+
void __flush_tlb_all(void);
#define TLB_FLUSH_ALL -1UL
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5cd6950ab672..3cdb04162843 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1167,6 +1167,7 @@ void __init native_smp_prepare_boot_cpu(void)
switch_gdt_and_percpu_base(me);
native_pv_lock_init();
+ native_pv_tlb_init();
}
void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 621e09d049cb..8f5585ebaf09 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -26,6 +26,8 @@
#include "mm_internal.h"
+DEFINE_STATIC_KEY_FALSE(tlb_ipi_broadcast_key);
+
#ifdef CONFIG_PARAVIRT
# define STATIC_NOPV
#else
@@ -1834,3 +1836,16 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
+
+void __init native_pv_tlb_init(void)
+{
+#ifdef CONFIG_PARAVIRT
+ if (pv_ops.mmu.flush_tlb_multi != native_flush_tlb_multi)
+ return;
+#endif
+
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ return;
+
+ static_branch_enable(&tlb_ipi_broadcast_key);
+}
--
2.49.0
On 3/9/26 03:07, Lance Yang wrote:
> From: Lance Yang <lance.yang@linux.dev>
>
> Enable the optimization introduced in the previous patch for x86.
Best to make the patch description standalone, not referring to
"previous patch".
>
> [...]
>
> static inline void tlb_flush(struct mmu_gather *tlb)
> {
> unsigned long start = 0UL, end = TLB_FLUSH_ALL;
> @@ -20,7 +30,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
> end = tlb->end;
> }
>
> - flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
> + /*
> + * Pass both freed_tables and unshared_tables so that lazy-TLB CPUs
> + * also receive IPIs during unsharing page tables.
"unsharing of page tables" ?
I would maybe have it written as
"Treat unshared_tables just like freed_tables, such that lazy-TLB CPUs
also receive IPIs during unsharing of page tables, allowing us to
safely implement tlb_table_flush_implies_ipi_broadcast()."
> + */
> + flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
> + tlb->freed_tables || tlb->unshared_tables);
> }
In general, LGTM.
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
--
Cheers,
David
On 2026/3/23 19:10, David Hildenbrand (Arm) wrote:
> On 3/9/26 03:07, Lance Yang wrote:
>> From: Lance Yang <lance.yang@linux.dev>
>>
>> Enable the optimization introduced in the previous patch for x86.
>
> Best to make the patch description standalone, not referring to
> "previous patch".
Good point. Will make the changelog standalone ;)
>> [...]
>
> [...]
>
>> static inline void tlb_flush(struct mmu_gather *tlb)
>> {
>> unsigned long start = 0UL, end = TLB_FLUSH_ALL;
>> @@ -20,7 +30,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
>> end = tlb->end;
>> }
>>
>> - flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
>> + /*
>> + * Pass both freed_tables and unshared_tables so that lazy-TLB CPUs
>> + * also receive IPIs during unsharing page tables.
>
> "unsharing of page tables" ?
Yes, that reads better.
>
> I would maybe have it written as
>
> "Treat unshared_tables just like freed_tables, such that lazy-TLB CPUs
> also receive IPIs during unsharing of page tables, allowing us to
> safely implement tlb_table_flush_implies_ipi_broadcast()."
>
>> + */
>> + flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
>> + tlb->freed_tables || tlb->unshared_tables);
>> }
Cool, this wording is much clearer :)
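
With that wording applied, the hunk would read as below (assuming the
next revision picks up your suggestion verbatim):

	/*
	 * Treat unshared_tables just like freed_tables, such that lazy-TLB
	 * CPUs also receive IPIs during unsharing of page tables, allowing
	 * us to safely implement tlb_table_flush_implies_ipi_broadcast().
	 */
	flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
			   tlb->freed_tables || tlb->unshared_tables);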
> In general, LGTM.
>
> Acked-by: David Hildenbrand (Arm) <david@kernel.org>
Thanks for taking the time to review!
Gently ping :)
On Mon, Mar 09, 2026 at 10:07:11AM +0800, Lance Yang wrote:
>From: Lance Yang <lance.yang@linux.dev>
>
>Enable the optimization introduced in the previous patch for x86.
>
>[...]
Just following up on this series. Any feedback would be appreciated!
Thanks,
Lance