[PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes

Posted by Rik van Riel 2 weeks, 2 days ago
Use broadcast TLB invalidation, using the INVLPGB instruction, on AMD EPYC 3
and newer CPUs.

In order to not exhaust PCID space, and keep TLB flushes local for single
threaded processes, we only hand out broadcast ASIDs to processes active on
3 or more CPUs, and gradually increase the threshold as broadcast ASID space
is depleted.

Signed-off-by: Rik van Riel <riel@surriel.com>
---
 arch/x86/include/asm/mmu.h         |   6 +
 arch/x86/include/asm/mmu_context.h |  14 ++
 arch/x86/include/asm/tlbflush.h    |  72 ++++++
 arch/x86/mm/tlb.c                  | 362 ++++++++++++++++++++++++++++-
 4 files changed, 442 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 3b496cdcb74b..d71cd599fec4 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -69,6 +69,12 @@ typedef struct {
 	u16 pkey_allocation_map;
 	s16 execute_only_pkey;
 #endif
+
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+	u16 global_asid;
+	bool asid_transition;
+#endif
+
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(mm)						\
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 795fdd53bd0a..d670699d32c2 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
 #define enter_lazy_tlb enter_lazy_tlb
 extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
+extern void destroy_context_free_global_asid(struct mm_struct *mm);
+
 /*
  * Init a new mm.  Used on mm copies, like at fork()
  * and on mm's that are brand-new, like at execve().
@@ -161,6 +163,14 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.execute_only_pkey = -1;
 	}
 #endif
+
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+		mm->context.global_asid = 0;
+		mm->context.asid_transition = false;
+	}
+#endif
+
 	mm_reset_untag_mask(mm);
 	init_new_context_ldt(mm);
 	return 0;
@@ -170,6 +180,10 @@ static inline int init_new_context(struct task_struct *tsk,
 static inline void destroy_context(struct mm_struct *mm)
 {
 	destroy_context_ldt(mm);
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+	if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+		destroy_context_free_global_asid(mm);
+#endif
 }
 
 extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index dba5caa4a9f4..5eae5c1aafa5 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -239,6 +239,78 @@ void flush_tlb_one_kernel(unsigned long addr);
 void flush_tlb_multi(const struct cpumask *cpumask,
 		      const struct flush_tlb_info *info);
 
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+static inline bool is_dyn_asid(u16 asid)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+		return true;
+
+	return asid < TLB_NR_DYN_ASIDS;
+}
+
+static inline bool is_global_asid(u16 asid)
+{
+	return !is_dyn_asid(asid);
+}
+
+static inline bool in_asid_transition(const struct flush_tlb_info *info)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+		return false;
+
+	return info->mm && READ_ONCE(info->mm->context.asid_transition);
+}
+
+static inline u16 mm_global_asid(struct mm_struct *mm)
+{
+	u16 asid;
+
+	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+		return 0;
+
+	asid = READ_ONCE(mm->context.global_asid);
+
+	/* mm->context.global_asid is either 0, or a global ASID */
+	VM_WARN_ON_ONCE(is_dyn_asid(asid));
+
+	return asid;
+}
+#else
+static inline bool is_dyn_asid(u16 asid)
+{
+	return true;
+}
+
+static inline bool is_global_asid(u16 asid)
+{
+	return false;
+}
+
+static inline bool in_asid_transition(const struct flush_tlb_info *info)
+{
+	return false;
+}
+
+static inline u16 mm_global_asid(struct mm_struct *mm)
+{
+	return 0;
+}
+
+static inline bool needs_global_asid_reload(struct mm_struct *next, u16 prev_asid)
+{
+	return false;
+}
+
+static inline void broadcast_tlb_flush(struct flush_tlb_info *info)
+{
+	VM_WARN_ON_ONCE(1);
+}
+
+static inline void consider_global_asid(struct mm_struct *mm)
+{
+}
+#endif
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 9d4864db5720..08eee1f8573a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -74,13 +74,15 @@
  * use different names for each of them:
  *
  * ASID  - [0, TLB_NR_DYN_ASIDS-1]
- *         the canonical identifier for an mm
+ *         the canonical identifier for an mm, dynamically allocated on each CPU
+ *         [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
+ *         the canonical, global identifier for an mm, identical across all CPUs
  *
- * kPCID - [1, TLB_NR_DYN_ASIDS]
+ * kPCID - [1, MAX_ASID_AVAILABLE]
  *         the value we write into the PCID part of CR3; corresponds to the
  *         ASID+1, because PCID 0 is special.
  *
- * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
  *         for KPTI each mm has two address spaces and thus needs two
  *         PCID values, but we can still do with a single ASID denomination
  *         for each mm. Corresponds to kPCID + 2048.
@@ -225,6 +227,20 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 		return;
 	}
 
+	/*
+	 * TLB consistency for global ASIDs is maintained with broadcast TLB
+	 * flushing. The TLB is never outdated, and does not need flushing.
+	 */
+	if (IS_ENABLED(CONFIG_X86_BROADCAST_TLB_FLUSH) && static_cpu_has(X86_FEATURE_INVLPGB)) {
+		u16 global_asid = mm_global_asid(next);
+
+		if (global_asid) {
+			*new_asid = global_asid;
+			*need_flush = false;
+			return;
+		}
+	}
+
 	if (this_cpu_read(cpu_tlbstate.invalidate_other))
 		clear_asid_other();
 
@@ -251,6 +267,290 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 	*need_flush = true;
 }
 
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+/*
+ * Logic for broadcast TLB invalidation.
+ */
+static DEFINE_RAW_SPINLOCK(global_asid_lock);
+static u16 last_global_asid = MAX_ASID_AVAILABLE;
+static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE) = { 0 };
+static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE) = { 0 };
+static int global_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
+
+static void reset_global_asid_space(void)
+{
+	lockdep_assert_held(&global_asid_lock);
+
+	/*
+	 * A global TLB flush guarantees that any stale entries from
+	 * previously freed global ASIDs get flushed from the TLB
+	 * everywhere, making these global ASIDs safe to reuse.
+	 */
+	invlpgb_flush_all_nonglobals();
+
+	/*
+	 * Clear all the previously freed global ASIDs from the
+	 * broadcast_asid_used bitmap, now that the global TLB flush
+	 * has made them actually available for re-use.
+	 */
+	bitmap_andnot(global_asid_used, global_asid_used,
+			global_asid_freed, MAX_ASID_AVAILABLE);
+	bitmap_clear(global_asid_freed, 0, MAX_ASID_AVAILABLE);
+
+	/*
+	 * ASIDs 0-TLB_NR_DYN_ASIDS are used for CPU-local ASID
+	 * assignments, for tasks doing IPI based TLB shootdowns.
+	 * Restart the search from the start of the global ASID space.
+	 */
+	last_global_asid = TLB_NR_DYN_ASIDS;
+}
+
+static u16 get_global_asid(void)
+{
+	lockdep_assert_held(&global_asid_lock);
+
+	do {
+		u16 start = last_global_asid;
+		u16 asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, start);
+
+		if (asid >= MAX_ASID_AVAILABLE) {
+			reset_global_asid_space();
+			continue;
+		}
+
+		/* Claim this global ASID. */
+		__set_bit(asid, global_asid_used);
+		last_global_asid = asid;
+		global_asid_available--;
+		return asid;
+	} while (1);
+}
+
+/*
+ * Returns true if the mm is transitioning from a CPU-local ASID to a global
+ * (INVLPGB) ASID, or the other way around.
+ */
+static bool needs_global_asid_reload(struct mm_struct *next, u16 prev_asid)
+{
+	u16 global_asid = mm_global_asid(next);
+
+	if (global_asid && prev_asid != global_asid)
+		return true;
+
+	if (!global_asid && is_global_asid(prev_asid))
+		return true;
+
+	return false;
+}
+
+void destroy_context_free_global_asid(struct mm_struct *mm)
+{
+	if (!mm->context.global_asid)
+		return;
+
+	guard(raw_spinlock_irqsave)(&global_asid_lock);
+
+	/* The global ASID can be re-used only after flush at wrap-around. */
+	__set_bit(mm->context.global_asid, global_asid_freed);
+
+	mm->context.global_asid = 0;
+	global_asid_available++;
+}
+
+/*
+ * Check whether a process is currently active on more than "threshold" CPUs.
+ * This is a cheap estimation on whether or not it may make sense to assign
+ * a global ASID to this process, and use broadcast TLB invalidation.
+ */
+static bool mm_active_cpus_exceeds(struct mm_struct *mm, int threshold)
+{
+	int count = 0;
+	int cpu;
+
+	/* This quick check should eliminate most single threaded programs. */
+	if (cpumask_weight(mm_cpumask(mm)) <= threshold)
+		return false;
+
+	/* Slower check to make sure. */
+	for_each_cpu(cpu, mm_cpumask(mm)) {
+		/* Skip the CPUs that aren't really running this process. */
+		if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
+			continue;
+
+		if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+			continue;
+
+		if (++count > threshold)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * Assign a global ASID to the current process, protecting against
+ * races between multiple threads in the process.
+ */
+static void use_global_asid(struct mm_struct *mm)
+{
+	guard(raw_spinlock_irqsave)(&global_asid_lock);
+
+	/* This process is already using broadcast TLB invalidation. */
+	if (mm->context.global_asid)
+		return;
+
+	/* The last global ASID was consumed while waiting for the lock. */
+	if (!global_asid_available)
+		return;
+
+	/*
+	 * The transition from IPI TLB flushing, with a dynamic ASID,
+	 * and broadcast TLB flushing, using a global ASID, uses memory
+	 * ordering for synchronization.
+	 *
+	 * While the process has threads still using a dynamic ASID,
+	 * TLB invalidation IPIs continue to get sent.
+	 *
+	 * This code sets asid_transition first, before assigning the
+	 * global ASID.
+	 *
+	 * The TLB flush code will only verify the ASID transition
+	 * after it has seen the new global ASID for the process.
+	 */
+	WRITE_ONCE(mm->context.asid_transition, true);
+	WRITE_ONCE(mm->context.global_asid, get_global_asid());
+}
+
+/*
+ * Figure out whether to assign a global ASID to a process.
+ * We vary the threshold by how empty or full global ASID space is.
+ * 1/4 full: >= 4 active threads
+ * 1/2 full: >= 8 active threads
+ * 3/4 full: >= 16 active threads
+ * 7/8 full: >= 32 active threads
+ * etc
+ *
+ * This way we should never exhaust the global ASID space, even on very
+ * large systems, and the processes with the largest number of active
+ * threads should be able to use broadcast TLB invalidation.
+ */
+#define HALFFULL_THRESHOLD 8
+static bool meets_global_asid_threshold(struct mm_struct *mm)
+{
+	int avail = global_asid_available;
+	int threshold = HALFFULL_THRESHOLD;
+
+	if (!avail)
+		return false;
+
+	if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
+		threshold = HALFFULL_THRESHOLD / 4;
+	} else if (avail > MAX_ASID_AVAILABLE / 2) {
+		threshold = HALFFULL_THRESHOLD / 2;
+	} else if (avail < MAX_ASID_AVAILABLE / 3) {
+		do {
+			avail *= 2;
+			threshold *= 2;
+		} while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
+	}
+
+	return mm_active_cpus_exceeds(mm, threshold);
+}
+
+static void consider_global_asid(struct mm_struct *mm)
+{
+	if (!static_cpu_has(X86_FEATURE_INVLPGB))
+		return;
+
+	/* Check every once in a while. */
+	if ((current->pid & 0x1f) != (jiffies & 0x1f))
+		return;
+
+	if (meets_global_asid_threshold(mm))
+		use_global_asid(mm);
+}
+
+static void finish_asid_transition(struct flush_tlb_info *info)
+{
+	struct mm_struct *mm = info->mm;
+	int bc_asid = mm_global_asid(mm);
+	int cpu;
+
+	if (!READ_ONCE(mm->context.asid_transition))
+		return;
+
+	for_each_cpu(cpu, mm_cpumask(mm)) {
+		/*
+		 * The remote CPU is context switching. Wait for that to
+		 * finish, to catch the unlikely case of it switching to
+		 * the target mm with an out of date ASID.
+		 */
+		while (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) == LOADED_MM_SWITCHING)
+			cpu_relax();
+
+		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
+			continue;
+
+		/*
+		 * If at least one CPU is not using the global ASID yet,
+		 * send a TLB flush IPI. The IPI should cause stragglers
+		 * to transition soon.
+		 *
+		 * This can race with the CPU switching to another task;
+		 * that results in a (harmless) extra IPI.
+		 */
+		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm_asid, cpu)) != bc_asid) {
+			flush_tlb_multi(mm_cpumask(info->mm), info);
+			return;
+		}
+	}
+
+	/* All the CPUs running this process are using the global ASID. */
+	WRITE_ONCE(mm->context.asid_transition, false);
+}
+
+static void broadcast_tlb_flush(struct flush_tlb_info *info)
+{
+	bool pmd = info->stride_shift == PMD_SHIFT;
+	unsigned long maxnr = invlpgb_count_max;
+	unsigned long asid = info->mm->context.global_asid;
+	unsigned long addr = info->start;
+	unsigned long nr;
+
+	/* Flushing multiple pages at once is not supported with 1GB pages. */
+	if (info->stride_shift > PMD_SHIFT)
+		maxnr = 1;
+
+	/*
+	 * TLB flushes with INVLPGB are kicked off asynchronously.
+	 * The inc_mm_tlb_gen() guarantees page table updates are done
+	 * before these TLB flushes happen.
+	 */
+	if (info->end == TLB_FLUSH_ALL) {
+		invlpgb_flush_single_pcid_nosync(kern_pcid(asid));
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_single_pcid_nosync(user_pcid(asid));
+	} else for (; addr < info->end; addr += nr << info->stride_shift) {
+		/*
+		 * Calculate how many pages can be flushed at once; if the
+		 * remainder of the range is less than one page, flush one.
+		 */
+		nr = min(maxnr, (info->end - addr) >> info->stride_shift);
+		nr = max(nr, 1);
+
+		invlpgb_flush_user_nr_nosync(kern_pcid(asid), addr, nr, pmd);
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_user_nr_nosync(user_pcid(asid), addr, nr, pmd);
+	}
+
+	finish_asid_transition(info);
+
+	/* Wait for the INVLPGBs kicked off above to finish. */
+	tlbsync();
+}
+#endif /* CONFIG_X86_BROADCAST_TLB_FLUSH */
+
 /*
  * Given an ASID, flush the corresponding user ASID.  We can delay this
  * until the next time we switch to it.
@@ -556,8 +856,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 	 */
 	if (prev == next) {
 		/* Not actually switching mm's */
-		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
-			   next->context.ctx_id);
+		VM_WARN_ON(is_dyn_asid(prev_asid) &&
+				this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+				next->context.ctx_id);
 
 		/*
 		 * If this races with another thread that enables lam, 'new_lam'
@@ -573,6 +874,23 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
 			cpumask_set_cpu(cpu, mm_cpumask(next));
 
+		/*
+		 * Check if the current mm is transitioning to a new ASID.
+		 */
+		if (needs_global_asid_reload(next, prev_asid)) {
+			next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+
+			choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+			goto reload_tlb;
+		}
+
+		/*
+		 * Broadcast TLB invalidation keeps this PCID up to date
+		 * all the time.
+		 */
+		if (is_global_asid(prev_asid))
+			return;
+
 		/*
 		 * If the CPU is not in lazy TLB mode, we are just switching
 		 * from one thread in a process to another thread in the same
@@ -606,6 +924,13 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 		 */
 		cond_mitigation(tsk);
 
+		/*
+		 * Let nmi_uaccess_okay() and finish_asid_transition()
+		 * know that we're changing CR3.
+		 */
+		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+		barrier();
+
 		/*
 		 * Leave this CPU in prev's mm_cpumask. Atomic writes to
 		 * mm_cpumask can be expensive under contention. The CPU
@@ -620,14 +945,12 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
 
 		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
-
-		/* Let nmi_uaccess_okay() know that we're changing CR3. */
-		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
-		barrier();
 	}
 
+reload_tlb:
 	new_lam = mm_lam_cr3_mask(next);
 	if (need_flush) {
+		VM_WARN_ON_ONCE(is_global_asid(new_asid));
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		load_new_mm_cr3(next->pgd, new_asid, new_lam, true);
@@ -746,7 +1069,7 @@ static void flush_tlb_func(void *info)
 	const struct flush_tlb_info *f = info;
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+	u64 local_tlb_gen;
 	bool local = smp_processor_id() == f->initiating_cpu;
 	unsigned long nr_invalidate = 0;
 	u64 mm_tlb_gen;
@@ -769,6 +1092,16 @@ static void flush_tlb_func(void *info)
 	if (unlikely(loaded_mm == &init_mm))
 		return;
 
+	/* Reload the ASID if transitioning into or out of a global ASID */
+	if (needs_global_asid_reload(loaded_mm, loaded_mm_asid)) {
+		switch_mm_irqs_off(NULL, loaded_mm, NULL);
+		loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	}
+
+	/* Broadcast ASIDs are always kept up to date with INVLPGB. */
+	if (is_global_asid(loaded_mm_asid))
+		return;
+
 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
 		   loaded_mm->context.ctx_id);
 
@@ -786,6 +1119,8 @@ static void flush_tlb_func(void *info)
 		return;
 	}
 
+	local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
 	if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
 		     f->new_tlb_gen <= local_tlb_gen)) {
 		/*
@@ -953,7 +1288,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
 	 * up on the new contents of what used to be page tables, while
 	 * doing a speculative memory access.
 	 */
-	if (info->freed_tables)
+	if (info->freed_tables || in_asid_transition(info))
 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
 	else
 		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
@@ -1049,9 +1384,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	 * a local TLB flush is needed. Optimize this use-case by calling
 	 * flush_tlb_func_local() directly in this case.
 	 */
-	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+	if (mm_global_asid(mm)) {
+		broadcast_tlb_flush(info);
+	} else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
 		info->trim_cpumask = should_trim_cpumask(mm);
 		flush_tlb_multi(mm_cpumask(mm), info);
+		consider_global_asid(mm);
 	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
 		lockdep_assert_irqs_enabled();
 		local_irq_disable();
-- 
2.47.1
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Peter Zijlstra 2 weeks ago
On Sun, Jan 19, 2025 at 09:40:17PM -0500, Rik van Riel wrote:
> +#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
> +/*
> + * Logic for broadcast TLB invalidation.
> + */
> +static DEFINE_RAW_SPINLOCK(global_asid_lock);
> +static u16 last_global_asid = MAX_ASID_AVAILABLE;
> +static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE) = { 0 };
> +static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE) = { 0 };
> +static int global_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
> +
> +static void reset_global_asid_space(void)
> +{
> +	lockdep_assert_held(&global_asid_lock);
> +
> +	/*
> +	 * A global TLB flush guarantees that any stale entries from
> +	 * previously freed global ASIDs get flushed from the TLB
> +	 * everywhere, making these global ASIDs safe to reuse.
> +	 */
> +	invlpgb_flush_all_nonglobals();
> +
> +	/*
> +	 * Clear all the previously freed global ASIDs from the
> +	 * broadcast_asid_used bitmap, now that the global TLB flush
> +	 * has made them actually available for re-use.
> +	 */
> +	bitmap_andnot(global_asid_used, global_asid_used,
> +			global_asid_freed, MAX_ASID_AVAILABLE);
> +	bitmap_clear(global_asid_freed, 0, MAX_ASID_AVAILABLE);
> +
> +	/*
> +	 * ASIDs 0-TLB_NR_DYN_ASIDS are used for CPU-local ASID
> +	 * assignments, for tasks doing IPI based TLB shootdowns.
> +	 * Restart the search from the start of the global ASID space.
> +	 */
> +	last_global_asid = TLB_NR_DYN_ASIDS;
> +}
> +
> +static u16 get_global_asid(void)
> +{
> +	lockdep_assert_held(&global_asid_lock);
> +
> +	do {
> +		u16 start = last_global_asid;
> +		u16 asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, start);
> +
> +		if (asid >= MAX_ASID_AVAILABLE) {
> +			reset_global_asid_space();
> +			continue;
> +		}
> +
> +		/* Claim this global ASID. */
> +		__set_bit(asid, global_asid_used);
> +		last_global_asid = asid;
> +		global_asid_available--;
> +		return asid;
> +	} while (1);
> +}

Looking at this more... I'm left wondering, did 'we' look at any other
architecture code at all? 

For example, look at arch/arm64/mm/context.c and see how their reset
works. Notably, they are not at all limited to reclaiming free'd ASIDs,
but will very aggressively take back all ASIDs except for the current
running ones.

And IIRC more architectures are like that (at some point in the distant
past I read through the tlb and mmu context crap from every architecture
we had at that point -- but those memories are vague).

If we want to move towards relying on broadcast TLBI, we'll need to
go in that direction. Also, as argued in the old thread yesterday, we
likely want more PCID bits -- in the interest of competition we can't be
having less than ARM64, surely :-)

Anyway, please drop the crazy threshold thing, and if you run into
falling back to IPIs because you don't have enough ASIDs to go around,
we should 'borrow' some of the ARM64 code -- RISC-V seems to have
borrowed very heavily from that as well.
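
For reference, the arm64 rollover is roughly this shape (a simplified
sketch from memory, helper and variable names approximate; see
arch/arm64/mm/context.c for the real thing):

/*
 * arm64-style rollover: wipe the whole ASID bitmap and re-reserve only
 * what is live on some CPU right now. Everything else gets a fresh ASID
 * (and a TLB flush) at its next context switch.
 */
static void flush_context_sketch(void)
{
	u64 asid;
	int cpu;

	/* Every ASID becomes available again... */
	bitmap_zero(asid_map, NUM_USER_ASIDS);

	for_each_possible_cpu(cpu) {
		/* ...except the ones currently loaded on a CPU. */
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), 0);
		if (asid)
			__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, cpu) = asid;
	}

	/* Everyone gets a TLB flush before old translations can be reused. */
	cpumask_setall(&tlb_flush_pending);
}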
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Rik van Riel 1 week, 6 days ago
On Wed, 2025-01-22 at 09:38 +0100, Peter Zijlstra wrote:
> 
> Looking at this more... I'm left wondering, did 'we' look at any
> other
> architecture code at all? 
> 
> For example, look at arch/arm64/mm/context.c and see how their reset
> works. Notably, they are not at all limited to reclaiming free'd
> ASIDs,
> but will very aggressively take back all ASIDs except for the current
> running ones.
> 
I did look at the ARM64 code, and while their reset
is much nicer, it looks like that comes at a cost on
each process at context switch time.

In new_context(), there is a call to check_update_reserved_asid(),
which will iterate over all CPUs to check whether this
process's ASID is part of the reserved list that got
carried over during the rollover.

I don't know if that would scale well enough to work
on systems with thousands of CPUs.

> If we want to move towards relying on broadcast TLBI, we'll need to
> go in that direction.

For single threaded processes, which are still very
common, a local flush would likely be faster than
broadcast flushes, even if multiple broadcast flushes
can be pending simultaneously.

For very large systems with a large number of processes,
I agree we want to move in that direction, but we may
need to figure out whether or not everybody taking the 
cpu_asid_lock at rollover time, and then scanning all
other CPUs from check_update_reserved_asid(), with the
lock held, would scale to systems with thousands of CPUs.

Everybody taking the cpu_asid_lock would probably be
fine, if they didn't all have to scan over all the
CPUs.

If we can figure out a more scalable way to do the
new_context() stuff, this would definitely be the
way to go.
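
For reference, the scan I'm worried about is shaped roughly like this
(paraphrasing arch/arm64/mm/context.c, not the exact code):

/*
 * Paraphrase of arm64's check_update_reserved_asid(): after a rollover,
 * new_context() can end up walking every possible CPU, under
 * cpu_asid_lock, to see whether this ASID was carried over as reserved.
 */
static bool check_update_reserved_asid_sketch(u64 asid, u64 newasid)
{
	bool hit = false;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			/* Keep the reservation, moved to the new generation. */
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}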

-- 
All Rights Reversed.
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Peter Zijlstra 1 week, 6 days ago
On Wed, Jan 22, 2025 at 08:13:03PM -0500, Rik van Riel wrote:
> On Wed, 2025-01-22 at 09:38 +0100, Peter Zijlstra wrote:
> > 
> > Looking at this more... I'm left wondering, did 'we' look at any
> > other
> > architecture code at all? 
> > 
> > For example, look at arch/arm64/mm/context.c and see how their reset
> > works. Notably, they are not at all limited to reclaiming free'd
> > ASIDs,
> > but will very aggressively take back all ASIDs except for the current
> > running ones.
> > 
> I did look at the ARM64 code, and while their reset
> is much nicer, it looks like that comes at a cost on
> each process at context switch time.
> 
> In new_context(), there is a call to check_update_reserved_asid(),
> which will iterate over all CPUs to check whether this
> process's ASID is part of the reserved list that got
> carried over during the rollover.
> 
> I don't know if that would scale well enough to work
> on systems with thousands of CPUs.

So assuming something like 1k CPUs and !PTI, we only have like 4 PCIDs
per CPU on average, and rollover could be frequent.

While an ARM64 with 1k CPUs and !PTI would have an average of 64 ASIDs
per CPU, and rollover would be far less frequent.

That is to say, their larger ASID space (16 bits, vs our 12) definitely
helps. But at some point yeah, this will become a problem.

Notably, I think a 2-socket Epyc Turin with 192C is one of the
larger off-the-shelf systems atm, that gets you 768 CPUs and that is
already uncomfortably tight with our PCID space.
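
Back of the envelope:

	x86:    12-bit PCID ->  4096 ASIDs;  4096 / 1024 CPUs ~=  4 per CPU
	arm64:  16-bit ASID -> 65536 ASIDs; 65536 / 1024 CPUs ~= 64 per CPU

	(and 4096 / 768 CPUs ~= 5 per CPU on that Turin box)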
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Rik van Riel 1 week, 6 days ago
On Thu, 2025-01-23 at 10:07 +0100, Peter Zijlstra wrote:
> On Wed, Jan 22, 2025 at 08:13:03PM -0500, Rik van Riel wrote:
> > On Wed, 2025-01-22 at 09:38 +0100, Peter Zijlstra wrote:
> > > 
> > > Looking at this more... I'm left wondering, did 'we' look at any
> > > other
> > > architecture code at all? 
> > > 
> > > For example, look at arch/arm64/mm/context.c and see how their
> > > reset
> > > works. Notably, they are not at all limited to reclaiming free'd
> > > ASIDs,
> > > but will very aggressively take back all ASIDs except for the
> > > current
> > > running ones.
> > > 
> > I did look at the ARM64 code, and while their reset
> > is much nicer, it looks like that comes at a cost on
> > each process at context switch time.
> > 
> > In new_context(), there is a call to check_update_reserved_asid(),
> > which will iterate over all CPUs to check whether this
> > process's ASID is part of the reserved list that got
> > carried over during the rollover.
> > 
> > I don't know if that would scale well enough to work
> > on systems with thousands of CPUs.
> 
> So assuming something like 1k CPUs and !PTI, we only have like 4
> PCIDs
> per CPU on average, and rollover could be frequent.
> 
> While an ARM64 with 1k CPUs and !PTI would have an average of 64
> ASIDs
> per CPU, and rollover would be far less frequent.

Not necessarily. On ARM64, every short lived task will
get a global ASID, while on x86_64 only longer lived
processes that are simultaneously active on multiple
CPUs get a global ASID.

The situation could be fairly bad for both, which is
why I would like to solve the O(n^2) issues with the
rollover code before adding that in to our x86_64
side :)

I fully agree we should probably move in that direction,
but I would like to make the worst case in the rollover-reuse
cheaper.

> 
> That is to say, their larger ASID space (16 bits, vs our 12)
> definitely
> helps. But at some point yeah, this will become a problem.
> 
> Notably, I think a 2-socket Epyc Turin with 192C is one of the
> larger off-the-shelf systems atm, that gets you 768 CPUs and that is
> already uncomfortably tight with our PCID space.
> 
> 
> 

-- 
All Rights Reversed.
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Peter Zijlstra 2 weeks, 1 day ago
On Sun, Jan 19, 2025 at 09:40:17PM -0500, Rik van Riel wrote:
> +/*
> + * Figure out whether to assign a global ASID to a process.
> + * We vary the threshold by how empty or full global ASID space is.
> + * 1/4 full: >= 4 active threads
> + * 1/2 full: >= 8 active threads
> + * 3/4 full: >= 16 active threads
> + * 7/8 full: >= 32 active threads
> + * etc
> + *
> + * This way we should never exhaust the global ASID space, even on very
> + * large systems, and the processes with the largest number of active
> + * threads should be able to use broadcast TLB invalidation.
> + */
> +#define HALFFULL_THRESHOLD 8
> +static bool meets_global_asid_threshold(struct mm_struct *mm)
> +{
> +	int avail = global_asid_available;
> +	int threshold = HALFFULL_THRESHOLD;
> +
> +	if (!avail)
> +		return false;
> +
> +	if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
> +		threshold = HALFFULL_THRESHOLD / 4;
> +	} else if (avail > MAX_ASID_AVAILABLE / 2) {
> +		threshold = HALFFULL_THRESHOLD / 2;
> +	} else if (avail < MAX_ASID_AVAILABLE / 3) {
> +		do {
> +			avail *= 2;
> +			threshold *= 2;
> +		} while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
> +	}
> +
> +	return mm_active_cpus_exceeds(mm, threshold);
> +}

I'm still very much disliking this. Why do we need this? Yes, running
out of ASID space is a pain, but this increasing threshold also makes
things behave weird.

Suppose our most-used process starts slow, and ends up not getting an
ASID because too much irrelevant crap gets started before it spawns
enough threads, and by then it no longer qualifies.

Can't we just start with a very simple constant test and poke at things
if/when it's found to not work?
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Dave Hansen 2 weeks ago
On 1/21/25 01:55, Peter Zijlstra wrote:
> Can't we just start with a very simple constant test and poke at things
> if/when its found to not work?

I'd prefer something simpler for now, too.

Let's just pick a sane number, maybe 16 or 32 for now, make it pokeable
in debugfs and make sure we have a way to tell when the PCID space is
exhausted.

Then we try and design a solution for the _actual_ cases where folks are
exhausting it.
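
Totally untested sketch of what I mean (variable names made up, use
whatever the series ends up calling these):

#include <linux/debugfs.h>

/* Expose the threshold and an exhaustion counter under the x86 debugfs dir. */
static u32 global_asid_threshold = 16;
static u32 global_asid_exhausted;	/* bumped whenever allocation fails */

static int __init global_asid_debugfs_init(void)
{
	debugfs_create_u32("global_asid_threshold", 0644,
			   arch_debugfs_dir, &global_asid_threshold);
	debugfs_create_u32("global_asid_exhausted", 0444,
			   arch_debugfs_dir, &global_asid_exhausted);
	return 0;
}
late_initcall(global_asid_debugfs_init);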
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Peter Zijlstra 2 weeks, 1 day ago
On Tue, Jan 21, 2025 at 10:55:07AM +0100, Peter Zijlstra wrote:
> On Sun, Jan 19, 2025 at 09:40:17PM -0500, Rik van Riel wrote:
> > +/*
> > + * Figure out whether to assign a global ASID to a process.
> > + * We vary the threshold by how empty or full global ASID space is.
> > + * 1/4 full: >= 4 active threads
> > + * 1/2 full: >= 8 active threads
> > + * 3/4 full: >= 16 active threads
> > + * 7/8 full: >= 32 active threads
> > + * etc
> > + *
> > + * This way we should never exhaust the global ASID space, even on very
> > + * large systems, and the processes with the largest number of active
> > + * threads should be able to use broadcast TLB invalidation.
> > + */
> > +#define HALFFULL_THRESHOLD 8
> > +static bool meets_global_asid_threshold(struct mm_struct *mm)
> > +{
> > +	int avail = global_asid_available;
> > +	int threshold = HALFFULL_THRESHOLD;
> > +
> > +	if (!avail)
> > +		return false;
> > +
> > +	if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
> > +		threshold = HALFFULL_THRESHOLD / 4;
> > +	} else if (avail > MAX_ASID_AVAILABLE / 2) {
> > +		threshold = HALFFULL_THRESHOLD / 2;
> > +	} else if (avail < MAX_ASID_AVAILABLE / 3) {
> > +		do {
> > +			avail *= 2;
> > +			threshold *= 2;
> > +		} while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
> > +	}
> > +
> > +	return mm_active_cpus_exceeds(mm, threshold);
> > +}
> 
> I'm still very much disliking this. Why do we need this? Yes, running
> out of ASID space is a pain, but this increasing threshold also makes
> things behave weird.
> 
> Suppose our most-used process starts slow, and ends up not getting an
> ASID because too much irrelevant crap gets started before it spawns
> enough threads, and by then it no longer qualifies.
> 
> Can't we just start with a very simple constant test and poke at things
> if/when it's found to not work?

Something like so perhaps?

--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -268,7 +268,7 @@ static inline u16 mm_global_asid(struct
 	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
 		return 0;
 
-	asid = READ_ONCE(mm->context.global_asid);
+	asid = smp_load_acquire(&mm->context.global_asid);
 
 	/* mm->context.global_asid is either 0, or a global ASID */
 	VM_WARN_ON_ONCE(is_dyn_asid(asid));
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -308,13 +308,18 @@ static void reset_global_asid_space(void
 static u16 get_global_asid(void)
 {
 	lockdep_assert_held(&global_asid_lock);
+	bool done_reset = false;
 
 	do {
 		u16 start = last_global_asid;
 		u16 asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, start);
 
-		if (asid >= MAX_ASID_AVAILABLE) {
+		if (asid > MAX_ASID_AVAILABLE) {
+			if (done_reset)
+				return asid;
+
 			reset_global_asid_space();
+			done_reset = true;
 			continue;
 		}
 
@@ -392,6 +398,12 @@ static bool mm_active_cpus_exceeds(struc
  */
 static void use_global_asid(struct mm_struct *mm)
 {
+	u16 asid;
+
+	/* This process is already using broadcast TLB invalidation. */
+	if (mm->context.global_asid)
+		return;
+
 	guard(raw_spinlock_irqsave)(&global_asid_lock);
 
 	/* This process is already using broadcast TLB invalidation. */
@@ -402,58 +414,25 @@ static void use_global_asid(struct mm_st
 	if (!global_asid_available)
 		return;
 
+	asid = get_global_asid();
+	if (asid > MAX_ASID_AVAILABLE)
+		return;
+
 	/*
-	 * The transition from IPI TLB flushing, with a dynamic ASID,
-	 * and broadcast TLB flushing, using a global ASID, uses memory
-	 * ordering for synchronization.
-	 *
-	 * While the process has threads still using a dynamic ASID,
-	 * TLB invalidation IPIs continue to get sent.
-	 *
-	 * This code sets asid_transition first, before assigning the
-	 * global ASID.
-	 *
-	 * The TLB flush code will only verify the ASID transition
-	 * after it has seen the new global ASID for the process.
+	 * Notably flush_tlb_mm_range() -> broadcast_tlb_flush() ->
+	 * finish_asid_transition() needs to observe asid_transition == true
+	 * once it observes global_asid.
 	 */
-	WRITE_ONCE(mm->context.asid_transition, true);
-	WRITE_ONCE(mm->context.global_asid, get_global_asid());
+	mm->context.asid_transition = true;
+	smp_store_release(&mm->context.global_asid, asid);
 }
 
-/*
- * Figure out whether to assign a global ASID to a process.
- * We vary the threshold by how empty or full global ASID space is.
- * 1/4 full: >= 4 active threads
- * 1/2 full: >= 8 active threads
- * 3/4 full: >= 16 active threads
- * 7/8 full: >= 32 active threads
- * etc
- *
- * This way we should never exhaust the global ASID space, even on very
- * large systems, and the processes with the largest number of active
- * threads should be able to use broadcast TLB invalidation.
- */
-#define HALFFULL_THRESHOLD 8
 static bool meets_global_asid_threshold(struct mm_struct *mm)
 {
-	int avail = global_asid_available;
-	int threshold = HALFFULL_THRESHOLD;
-
-	if (!avail)
+	if (!global_asid_available)
 		return false;
 
-	if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
-		threshold = HALFFULL_THRESHOLD / 4;
-	} else if (avail > MAX_ASID_AVAILABLE / 2) {
-		threshold = HALFFULL_THRESHOLD / 2;
-	} else if (avail < MAX_ASID_AVAILABLE / 3) {
-		do {
-			avail *= 2;
-			threshold *= 2;
-		} while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
-	}
-
-	return mm_active_cpus_exceeds(mm, threshold);
+	return mm_active_cpus_exceeds(mm, 4);
 }
 
 static void consider_global_asid(struct mm_struct *mm)
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Rik van Riel 1 week, 6 days ago
On Tue, 2025-01-21 at 11:33 +0100, Peter Zijlstra wrote:
> On Tue, Jan 21, 2025 at 10:55:07AM +0100, Peter Zijlstra wrote:
> > 
> > Can't we just start with a very simple constant test and poke at
> > things
> > if/when it's found to not work?
> 
> Something like so perhaps?

I've applied your suggestions, with the exception of some
code that was already simplified further based on other
people's suggestions (get_global_asid is no longer a loop).

-- 
All Rights Reversed.
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Nadav Amit 2 weeks, 1 day ago

On 20/01/2025 4:40, Rik van Riel wrote:
> Use broadcast TLB invalidation, using the INVLPGB instruction, on AMD EPYC 3
> and newer CPUs.
> 
> In order to not exhaust PCID space, and keep TLB flushes local for single
> threaded processes, we only hand out broadcast ASIDs to processes active on
> 3 or more CPUs, and gradually increase the threshold as broadcast ASID space
> is depleted.
> 
> Signed-off-by: Rik van Riel <riel@surriel.com>
> ---
>   arch/x86/include/asm/mmu.h         |   6 +
>   arch/x86/include/asm/mmu_context.h |  14 ++
>   arch/x86/include/asm/tlbflush.h    |  72 ++++++
>   arch/x86/mm/tlb.c                  | 362 ++++++++++++++++++++++++++++-
>   4 files changed, 442 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
> index 3b496cdcb74b..d71cd599fec4 100644
> --- a/arch/x86/include/asm/mmu.h
> +++ b/arch/x86/include/asm/mmu.h
> @@ -69,6 +69,12 @@ typedef struct {
>   	u16 pkey_allocation_map;
>   	s16 execute_only_pkey;
>   #endif
> +
> +#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
> +	u16 global_asid;
> +	bool asid_transition;
> +#endif
> +
>   } mm_context_t;
>   
>   #define INIT_MM_CONTEXT(mm)						\
> diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
> index 795fdd53bd0a..d670699d32c2 100644
> --- a/arch/x86/include/asm/mmu_context.h
> +++ b/arch/x86/include/asm/mmu_context.h
> @@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
>   #define enter_lazy_tlb enter_lazy_tlb
>   extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
>   
> +extern void destroy_context_free_global_asid(struct mm_struct *mm);
> +
>   /*
>    * Init a new mm.  Used on mm copies, like at fork()
>    * and on mm's that are brand-new, like at execve().
> @@ -161,6 +163,14 @@ static inline int init_new_context(struct task_struct *tsk,
>   		mm->context.execute_only_pkey = -1;
>   	}
>   #endif
> +
> +#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
> +	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
> +		mm->context.global_asid = 0;
> +		mm->context.asid_transition = false;
> +	}
> +#endif
> +
>   	mm_reset_untag_mask(mm);
>   	init_new_context_ldt(mm);
>   	return 0;
> @@ -170,6 +180,10 @@ static inline int init_new_context(struct task_struct *tsk,
>   static inline void destroy_context(struct mm_struct *mm)
>   {
>   	destroy_context_ldt(mm);
> +#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
> +	if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
> +		destroy_context_free_global_asid(mm);
> +#endif
>   }
>   
>   extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
> diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> index dba5caa4a9f4..5eae5c1aafa5 100644
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -239,6 +239,78 @@ void flush_tlb_one_kernel(unsigned long addr);
>   void flush_tlb_multi(const struct cpumask *cpumask,
>   		      const struct flush_tlb_info *info);
>   
> +#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
> +static inline bool is_dyn_asid(u16 asid)
> +{
> +	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
> +		return true;
> +
> +	return asid < TLB_NR_DYN_ASIDS;
> +}
> +
> +static inline bool is_global_asid(u16 asid)
> +{
> +	return !is_dyn_asid(asid);
> +}
> +
> +static inline bool in_asid_transition(const struct flush_tlb_info *info)
> +{
> +	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
> +		return false;
> +
> +	return info->mm && READ_ONCE(info->mm->context.asid_transition);
> +}
> +
> +static inline u16 mm_global_asid(struct mm_struct *mm)
> +{
> +	u16 asid;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
> +		return 0;
> +
> +	asid = READ_ONCE(mm->context.global_asid);
> +
> +	/* mm->context.global_asid is either 0, or a global ASID */
> +	VM_WARN_ON_ONCE(is_dyn_asid(asid));
> +
> +	return asid;
> +}
> +#else
> +static inline bool is_dyn_asid(u16 asid)
> +{
> +	return true;
> +}
> +
> +static inline bool is_global_asid(u16 asid)
> +{
> +	return false;
> +}
> +
> +static inline bool in_asid_transition(const struct flush_tlb_info *info)
> +{
> +	return false;
> +}
> +
> +static inline u16 mm_global_asid(struct mm_struct *mm)
> +{
> +	return 0;
> +}
> +
> +static inline bool needs_global_asid_reload(struct mm_struct *next, u16 prev_asid)
> +{
> +	return false;
> +}
> +
> +static inline void broadcast_tlb_flush(struct flush_tlb_info *info)
> +{
> +	VM_WARN_ON_ONCE(1);

Not sure why not use VM_WARN_ONCE() instead, with a more informative
message (a string is allocated for it anyhow).

> +}
> +
> +static inline void consider_global_asid(struct mm_struct *mm)
> +{
> +}
> +#endif
> +
>   #ifdef CONFIG_PARAVIRT
>   #include <asm/paravirt.h>
>   #endif
> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
> index 9d4864db5720..08eee1f8573a 100644
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -74,13 +74,15 @@
>    * use different names for each of them:
>    *
>    * ASID  - [0, TLB_NR_DYN_ASIDS-1]
> - *         the canonical identifier for an mm
> + *         the canonical identifier for an mm, dynamically allocated on each CPU
> + *         [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
> + *         the canonical, global identifier for an mm, identical across all CPUs
>    *
> - * kPCID - [1, TLB_NR_DYN_ASIDS]
> + * kPCID - [1, MAX_ASID_AVAILABLE]
>    *         the value we write into the PCID part of CR3; corresponds to the
>    *         ASID+1, because PCID 0 is special.
>    *
> - * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
> + * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
>    *         for KPTI each mm has two address spaces and thus needs two
>    *         PCID values, but we can still do with a single ASID denomination
>    *         for each mm. Corresponds to kPCID + 2048.
> @@ -225,6 +227,20 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
>   		return;
>   	}
>   
> +	/*
> +	 * TLB consistency for global ASIDs is maintained with broadcast TLB
> +	 * flushing. The TLB is never outdated, and does not need flushing.
> +	 */
> +	if (IS_ENABLED(CONFIG_X86_BROADCAST_TLB_FLUSH) && static_cpu_has(X86_FEATURE_INVLPGB)) {
> +		u16 global_asid = mm_global_asid(next);
> +
> +		if (global_asid) {
> +			*new_asid = global_asid;
> +			*need_flush = false;
> +			return;
> +		}
> +	}
> +
>   	if (this_cpu_read(cpu_tlbstate.invalidate_other))
>   		clear_asid_other();
>   
> @@ -251,6 +267,290 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
>   	*need_flush = true;
>   }
>   
> +#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
> +/*
> + * Logic for broadcast TLB invalidation.
> + */
> +static DEFINE_RAW_SPINLOCK(global_asid_lock);
> +static u16 last_global_asid = MAX_ASID_AVAILABLE;
> +static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE) = { 0 };
> +static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE) = { 0 };
> +static int global_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
> +
> +static void reset_global_asid_space(void)
> +{
> +	lockdep_assert_held(&global_asid_lock);
> +
> +	/*
> +	 * A global TLB flush guarantees that any stale entries from
> +	 * previously freed global ASIDs get flushed from the TLB
> +	 * everywhere, making these global ASIDs safe to reuse.
> +	 */
> +	invlpgb_flush_all_nonglobals();
> +
> +	/*
> +	 * Clear all the previously freed global ASIDs from the
> +	 * broadcast_asid_used bitmap, now that the global TLB flush
> +	 * has made them actually available for re-use.
> +	 */
> +	bitmap_andnot(global_asid_used, global_asid_used,
> +			global_asid_freed, MAX_ASID_AVAILABLE);
> +	bitmap_clear(global_asid_freed, 0, MAX_ASID_AVAILABLE);
> +
> +	/*
> +	 * ASIDs 0-TLB_NR_DYN_ASIDS are used for CPU-local ASID
> +	 * assignments, for tasks doing IPI based TLB shootdowns.
> +	 * Restart the search from the start of the global ASID space.
> +	 */
> +	last_global_asid = TLB_NR_DYN_ASIDS;
> +}
> +
> +static u16 get_global_asid(void)
> +{
> +	lockdep_assert_held(&global_asid_lock);
> +
> +	do {
> +		u16 start = last_global_asid;
> +		u16 asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, start);
> +
> +		if (asid >= MAX_ASID_AVAILABLE) {
> +			reset_global_asid_space();
> +			continue;
> +		}

I think that unless something is awfully wrong, you are supposed to at 
most call reset_global_asid_space() once. So if that's the case, why not 
do it this way?

Instead, you can get rid of the loop and just do:

	asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, start);

If you want, you can warn if asid >= MAX_ASID_AVAILABLE and have some 
fallback. But the loop is just confusing, in my opinion, for no reason.
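
Something along these lines (just to illustrate, not tested):

static u16 get_global_asid(void)
{
	u16 asid;

	lockdep_assert_held(&global_asid_lock);

	asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE,
				  last_global_asid);
	if (asid >= MAX_ASID_AVAILABLE) {
		/* Out of space above last_global_asid: reset once and retry. */
		reset_global_asid_space();
		asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE,
					  last_global_asid);
	}

	/* Truly full; the caller has to keep using IPI flushes. */
	if (asid >= MAX_ASID_AVAILABLE) {
		VM_WARN_ON_ONCE(1);
		return 0;
	}

	/* Claim this global ASID. */
	__set_bit(asid, global_asid_used);
	last_global_asid = asid;
	global_asid_available--;

	return asid;
}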

> +
> +		/* Claim this global ASID. */
> +		__set_bit(asid, global_asid_used);
> +		last_global_asid = asid;
> +		global_asid_available--;
> +		return asid;
> +	} while (1);
> +}
> +
> +/*
> + * Returns true if the mm is transitioning from a CPU-local ASID to a global
> + * (INVLPGB) ASID, or the other way around.
> + */
> +static bool needs_global_asid_reload(struct mm_struct *next, u16 prev_asid)
> +{
> +	u16 global_asid = mm_global_asid(next);
> +
> +	if (global_asid && prev_asid != global_asid)
> +		return true;
> +
> +	if (!global_asid && is_global_asid(prev_asid))
> +		return true;
> +
> +	return false;
> +}
> +
> +void destroy_context_free_global_asid(struct mm_struct *mm)
> +{
> +	if (!mm->context.global_asid)
> +		return;
> +
> +	guard(raw_spinlock_irqsave)(&global_asid_lock);
> +
> +	/* The global ASID can be re-used only after flush at wrap-around. */
> +	__set_bit(mm->context.global_asid, global_asid_freed);
> +
> +	mm->context.global_asid = 0;
> +	global_asid_available++;
> +}
> +
> +/*
> + * Check whether a process is currently active on more than "threshold" CPUs.
> + * This is a cheap estimation on whether or not it may make sense to assign
> + * a global ASID to this process, and use broadcast TLB invalidation.
> + */
> +static bool mm_active_cpus_exceeds(struct mm_struct *mm, int threshold)
> +{
> +	int count = 0;
> +	int cpu;
> +
> +	/* This quick check should eliminate most single threaded programs. */
> +	if (cpumask_weight(mm_cpumask(mm)) <= threshold)
> +		return false;
> +
> +	/* Slower check to make sure. */
> +	for_each_cpu(cpu, mm_cpumask(mm)) {
> +		/* Skip the CPUs that aren't really running this process. */
> +		if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
> +			continue;

Then perhaps at least add a comment next to loaded_mm, that it's not 
private per-se, but rarely accessed by other cores?

> +
> +		if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
> +			continue;
> +
> +		if (++count > threshold)
> +			return true;
> +	}
> +	return false;
> +}
> +
> +/*
> + * Assign a global ASID to the current process, protecting against
> + * races between multiple threads in the process.
> + */
> +static void use_global_asid(struct mm_struct *mm)
> +{
> +	guard(raw_spinlock_irqsave)(&global_asid_lock);
> +
> +	/* This process is already using broadcast TLB invalidation. */
> +	if (mm->context.global_asid)
> +		return;
> +
> +	/* The last global ASID was consumed while waiting for the lock. */
> +	if (!global_asid_available)
> +		return;
> +
> +	/*
> +	 * The transition from IPI TLB flushing, with a dynamic ASID,
> +	 * and broadcast TLB flushing, using a global ASID, uses memory
> +	 * ordering for synchronization.
> +	 *
> +	 * While the process has threads still using a dynamic ASID,
> +	 * TLB invalidation IPIs continue to get sent.
> +	 *
> +	 * This code sets asid_transition first, before assigning the
> +	 * global ASID.
> +	 *
> +	 * The TLB flush code will only verify the ASID transition
> +	 * after it has seen the new global ASID for the process.
> +	 */
> +	WRITE_ONCE(mm->context.asid_transition, true);
> +	WRITE_ONCE(mm->context.global_asid, get_global_asid());

I know it is likely correct in practice (due to the TSO memory model), but
it is not clear, at least to me, how this write ordering affects the rest
of the code. I managed to figure out how it relates to the reads in
flush_tlb_mm_range() and native_flush_tlb_multi(), but I wouldn't say it
is trivial, and it deserves a comment (or smp_wmb()/smp_rmb()).

> +}
> +
> +/*
> + * Figure out whether to assign a global ASID to a process.
> + * We vary the threshold by how empty or full global ASID space is.
> + * 1/4 full: >= 4 active threads
> + * 1/2 full: >= 8 active threads
> + * 3/4 full: >= 16 active threads
> + * 7/8 full: >= 32 active threads
> + * etc
> + *
> + * This way we should never exhaust the global ASID space, even on very
> + * large systems, and the processes with the largest number of active
> + * threads should be able to use broadcast TLB invalidation.
> + */
> +#define HALFFULL_THRESHOLD 8
> +static bool meets_global_asid_threshold(struct mm_struct *mm)
> +{
> +	int avail = global_asid_available;
> +	int threshold = HALFFULL_THRESHOLD;
> +
> +	if (!avail)
> +		return false;
> +
> +	if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
> +		threshold = HALFFULL_THRESHOLD / 4;
> +	} else if (avail > MAX_ASID_AVAILABLE / 2) {
> +		threshold = HALFFULL_THRESHOLD / 2;
> +	} else if (avail < MAX_ASID_AVAILABLE / 3) {
> +		do {
> +			avail *= 2;
> +			threshold *= 2;
> +		} while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
> +	}
> +
> +	return mm_active_cpus_exceeds(mm, threshold);
> +}
> +
> +static void consider_global_asid(struct mm_struct *mm)
> +{
> +	if (!static_cpu_has(X86_FEATURE_INVLPGB))
> +		return;
> +
> +	/* Check every once in a while. */
> +	if ((current->pid & 0x1f) != (jiffies & 0x1f))
> +		return;
> +
> +	if (meets_global_asid_threshold(mm))
> +		use_global_asid(mm);
> +}
> +
> +static void finish_asid_transition(struct flush_tlb_info *info)
> +{
> +	struct mm_struct *mm = info->mm;
> +	int bc_asid = mm_global_asid(mm);
> +	int cpu;
> +
> +	if (!READ_ONCE(mm->context.asid_transition))
> +		return;
> +
> +	for_each_cpu(cpu, mm_cpumask(mm)) {
> +		/*
> +		 * The remote CPU is context switching. Wait for that to
> +		 * finish, to catch the unlikely case of it switching to
> +		 * the target mm with an out of date ASID.
> +		 */
> +		while (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) == LOADED_MM_SWITCHING)
> +			cpu_relax();
> +
> +		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
> +			continue;
> +
> +		/*
> +		 * If at least one CPU is not using the global ASID yet,
> +		 * send a TLB flush IPI. The IPI should cause stragglers
> +		 * to transition soon.
> +		 *
> +		 * This can race with the CPU switching to another task;
> +		 * that results in a (harmless) extra IPI.
> +		 */
> +		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm_asid, cpu)) != bc_asid) {
> +			flush_tlb_multi(mm_cpumask(info->mm), info);
> +			return;

I am trying to figure out why we return here. The transition might not 
be over? Why is it "soon"? Wouldn't flush_tlb_func() reload it 
unconditionally?

> +		}
> +	}
> +
> +	/* All the CPUs running this process are using the global ASID. */
> +	WRITE_ONCE(mm->context.asid_transition, false);
> +}
> +
> +static void broadcast_tlb_flush(struct flush_tlb_info *info)
> +{
> +	bool pmd = info->stride_shift == PMD_SHIFT;
> +	unsigned long maxnr = invlpgb_count_max;
> +	unsigned long asid = info->mm->context.global_asid;
> +	unsigned long addr = info->start;
> +	unsigned long nr;
> +
> +	/* Flushing multiple pages at once is not supported with 1GB pages. */
> +	if (info->stride_shift > PMD_SHIFT)
> +		maxnr = 1;
> +
> +	/*
> +	 * TLB flushes with INVLPGB are kicked off asynchronously.
> +	 * The inc_mm_tlb_gen() guarantees page table updates are done
> +	 * before these TLB flushes happen.
> +	 */
> +	if (info->end == TLB_FLUSH_ALL) {
> +		invlpgb_flush_single_pcid_nosync(kern_pcid(asid));
> +		/* Do any CPUs supporting INVLPGB need PTI? */
> +		if (static_cpu_has(X86_FEATURE_PTI))
> +			invlpgb_flush_single_pcid_nosync(user_pcid(asid));
> +	} else for (; addr < info->end; addr += nr << info->stride_shift) {

I guess I was wrong, and do-while was cleaner here.

And I guess this is now a bug, if info->stride_shift > PMD_SHIFT...

[ I guess the cleanest way was to change get_flush_tlb_info to mask the 
low bits of start and end based on ((1ull << stride_shift) - 1). But 
whatever... ]
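
i.e. something along the lines of this, inside get_flush_tlb_info()
(untested):

	/* Align the range to the stride so no partial stride gets dropped. */
	unsigned long stride = 1UL << stride_shift;

	start = round_down(start, stride);
	if (end != TLB_FLUSH_ALL)
		end = round_up(end, stride);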

> +		/*
> +		 * Calculate how many pages can be flushed at once; if the
> +		 * remainder of the range is less than one page, flush one.
> +		 */
> +		nr = min(maxnr, (info->end - addr) >> info->stride_shift);
> +		nr = max(nr, 1);
> +
> +		invlpgb_flush_user_nr_nosync(kern_pcid(asid), addr, nr, pmd);
> +		/* Do any CPUs supporting INVLPGB need PTI? */
> +		if (static_cpu_has(X86_FEATURE_PTI))
> +			invlpgb_flush_user_nr_nosync(user_pcid(asid), addr, nr, pmd);
> +	}
> +
> +	finish_asid_transition(info);
> +
> +	/* Wait for the INVLPGBs kicked off above to finish. */
> +	tlbsync();
> +}
> +#endif /* CONFIG_X86_BROADCAST_TLB_FLUSH */
> +
>   /*
>    * Given an ASID, flush the corresponding user ASID.  We can delay this
>    * until the next time we switch to it.
> @@ -556,8 +856,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
>   	 */
>   	if (prev == next) {
>   		/* Not actually switching mm's */
> -		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
> -			   next->context.ctx_id);
> +		VM_WARN_ON(is_dyn_asid(prev_asid) &&
> +				this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
> +				next->context.ctx_id);
>   
>   		/*
>   		 * If this races with another thread that enables lam, 'new_lam'
> @@ -573,6 +874,23 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
>   				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
>   			cpumask_set_cpu(cpu, mm_cpumask(next));
>   
> +		/*
> +		 * Check if the current mm is transitioning to a new ASID.
> +		 */
> +		if (needs_global_asid_reload(next, prev_asid)) {
> +			next_tlb_gen = atomic64_read(&next->context.tlb_gen);
> +
> +			choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
> +			goto reload_tlb;

Not a fan of gotos when they are not really needed, and I don't think one
is really needed here. Especially since the name of the label
"reload_tlb" does not really convey that the page tables are reloaded at
that point.

> +		}
> +
> +		/*
> +		 * Broadcast TLB invalidation keeps this PCID up to date
> +		 * all the time.
> +		 */
> +		if (is_global_asid(prev_asid))
> +			return;

Hard for me to convince myself

> +
>   		/*
>   		 * If the CPU is not in lazy TLB mode, we are just switching
>   		 * from one thread in a process to another thread in the same
> @@ -606,6 +924,13 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
>   		 */
>   		cond_mitigation(tsk);
>   
> +		/*
> +		 * Let nmi_uaccess_okay() and finish_asid_transition()
> +		 * know that we're changing CR3.
> +		 */
> +		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
> +		barrier();
> +
>   		/*
>   		 * Leave this CPU in prev's mm_cpumask. Atomic writes to
>   		 * mm_cpumask can be expensive under contention. The CPU
> @@ -620,14 +945,12 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
>   		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
>   
>   		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
> -
> -		/* Let nmi_uaccess_okay() know that we're changing CR3. */
> -		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
> -		barrier();
>   	}
>   
> +reload_tlb:
>   	new_lam = mm_lam_cr3_mask(next);
>   	if (need_flush) {
> +		VM_WARN_ON_ONCE(is_global_asid(new_asid));
>   		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
>   		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
>   		load_new_mm_cr3(next->pgd, new_asid, new_lam, true);
> @@ -746,7 +1069,7 @@ static void flush_tlb_func(void *info)
>   	const struct flush_tlb_info *f = info;
>   	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
>   	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
> -	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
> +	u64 local_tlb_gen;
>   	bool local = smp_processor_id() == f->initiating_cpu;
>   	unsigned long nr_invalidate = 0;
>   	u64 mm_tlb_gen;
> @@ -769,6 +1092,16 @@ static void flush_tlb_func(void *info)
>   	if (unlikely(loaded_mm == &init_mm))
>   		return;
>   
> +	/* Reload the ASID if transitioning into or out of a global ASID */
> +	if (needs_global_asid_reload(loaded_mm, loaded_mm_asid)) {
> +		switch_mm_irqs_off(NULL, loaded_mm, NULL);

I understand you want to reuse that logic, but it doesn't seem 
reasonable to me. It both doesn't convey what you want to do, and can 
lead to undesired operations - cpu_tlbstate_update_lam() for instance. 
Probably the impact on performance is minor, but it is an opening for 
future mistakes.

> +		loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
> +	}
> +
> +	/* Broadcast ASIDs are always kept up to date with INVLPGB. */
> +	if (is_global_asid(loaded_mm_asid))
> +		return;

The comment does not clarify to me, and I don't manage to clearly 
explain to myself, why it is guaranteed that all the IPI TLB flushes, 
which were potentially issued before the transition, are not needed.

> +
>   	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
>   		   loaded_mm->context.ctx_id);
>   
> @@ -786,6 +1119,8 @@ static void flush_tlb_func(void *info)
>   		return;
>   	}
>   
> +	local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
> +
>   	if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
>   		     f->new_tlb_gen <= local_tlb_gen)) {
>   		/*
> @@ -953,7 +1288,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
>   	 * up on the new contents of what used to be page tables, while
>   	 * doing a speculative memory access.
>   	 */
> -	if (info->freed_tables)
> +	if (info->freed_tables || in_asid_transition(info))
>   		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
>   	else
>   		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
> @@ -1049,9 +1384,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
>   	 * a local TLB flush is needed. Optimize this use-case by calling
>   	 * flush_tlb_func_local() directly in this case.
>   	 */
> -	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
> +	if (mm_global_asid(mm)) {
> +		broadcast_tlb_flush(info);
> +	} else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
>   		info->trim_cpumask = should_trim_cpumask(mm);
>   		flush_tlb_multi(mm_cpumask(mm), info);
> +		consider_global_asid(mm);
>   	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
>   		lockdep_assert_irqs_enabled();
>   		local_irq_disable();
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Rik van Riel 2 weeks, 1 day ago
On Mon, 2025-01-20 at 16:02 +0200, Nadav Amit wrote:
> 
> 
> On 20/01/2025 4:40, Rik van Riel wrote:
> > 
> > +static inline void broadcast_tlb_flush(struct flush_tlb_info *info)
> > +{
> > +	VM_WARN_ON_ONCE(1);
> 
> Not sure why not use VM_WARN_ONCE() instead with some more 
> informative message (anyhow, a string is allocated for it).
> 
VM_WARN_ON_ONCE only has a condition, not a message.
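
For reference, the difference is just whether a format string is
accepted. Roughly (usage sketch; the message string below is only an
example):

	/* Condition only, generic warning text: */
	VM_WARN_ON_ONCE(1);

	/* Condition plus a printf-style message: */
	VM_WARN_ONCE(1, "broadcast_tlb_flush() called without INVLPGB");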

> > 
> > +static u16 get_global_asid(void)
> > +{
> > +	lockdep_assert_held(&global_asid_lock);
> > +
> > +	do {
> > +		u16 start = last_global_asid;
> > +		u16 asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, start);
> > +
> > +		if (asid >= MAX_ASID_AVAILABLE) {
> > +			reset_global_asid_space();
> > +			continue;
> > +		}
> 
> I think that unless something is awfully wrong, you are supposed to
> at 
> most call reset_global_asid_space() once. So if that's the case, why
> not 
> do it this way?
> 
> Instead, you can get rid of the loop and just do:
> 
> 	asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, start);
> 
> If you want, you can warn if asid >= MAX_ASID_AVAILABLE and have some
> fallback. But the loop, is just confusing in my opinion for no
> reason.

I can get rid of the loop. You're right that the code
can just call find_next_zero_bit after calling
reset_global_asid_space.
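
Something along these lines, reusing the helpers already in this patch
(the claim/advance bookkeeping at the end is my guess at what the loop
body should keep doing, so treat it as a sketch):

static u16 get_global_asid(void)
{
	u16 asid;

	lockdep_assert_held(&global_asid_lock);

	asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE,
				  last_global_asid);

	/* Nothing free above last_global_asid; reset and search once more. */
	if (asid >= MAX_ASID_AVAILABLE) {
		reset_global_asid_space();
		asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE,
					  last_global_asid);

		/* Still nothing free; the caller stays on dynamic ASIDs. */
		if (WARN_ON_ONCE(asid >= MAX_ASID_AVAILABLE))
			return 0;
	}

	/* Claim the ASID and remember where to continue the search. */
	__set_bit(asid, global_asid_used);
	last_global_asid = asid;

	return asid;
}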

> 
> > +	/* Slower check to make sure. */
> > +	for_each_cpu(cpu, mm_cpumask(mm)) {
> > +		/* Skip the CPUs that aren't really running this process. */
> > +		if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
> > +			continue;
> 
> Then perhaps at least add a comment next to loaded_mm, that it's not 
> private per-se, but rarely accessed by other cores?
> 
I don't see any comment in struct tlb_state that
suggests it was ever private to begin with.

Which comment are you referring to that should
be edited?

> > 
> > +
> > +	/*
> > +	 * The transition from IPI TLB flushing, with a dynamic ASID,
> > +	 * and broadcast TLB flushing, using a global ASID, uses memory
> > +	 * ordering for synchronization.
> > +	 *
> > +	 * While the process has threads still using a dynamic ASID,
> > +	 * TLB invalidation IPIs continue to get sent.
> > +	 *
> > +	 * This code sets asid_transition first, before assigning the
> > +	 * global ASID.
> > +	 *
> > +	 * The TLB flush code will only verify the ASID transition
> > +	 * after it has seen the new global ASID for the process.
> > +	 */
> > +	WRITE_ONCE(mm->context.asid_transition, true);
> > +	WRITE_ONCE(mm->context.global_asid, get_global_asid());
> 
> I know it is likely correct in practice (due to TSO memory model),
> but 
> it is not clear, at least for me, how those write order affects the
> rest 
> of the code. I managed to figure out how it relates to the reads in 
> flush_tlb_mm_range() and native_flush_tlb_multi(), but I wouldn't say
> it 
> is trivial and doesn't worth a comment (or smp_wmb/smp_rmb).
> 

What kind of wording should we add here to make it
easier to understand?

"The TLB invalidation code reads these variables in
 the opposite order in which they are written" ?
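
Or, a bit more explicitly, something that points at the readers. As a
sketch (the smp_wmb() is only there to show where a barrier pair would
go if we wanted one; on x86 the WRITE_ONCE()/READ_ONCE() pairs are
already ordered this way):

	/*
	 * Set asid_transition before global_asid. The flush paths read
	 * them in the opposite order: mm_global_asid() first, and the
	 * asid_transition flag only after that, so a reader that
	 * observes the new global ASID is guaranteed to also observe
	 * asid_transition == true.
	 */
	WRITE_ONCE(mm->context.asid_transition, true);
	smp_wmb();	/* would pair with an smp_rmb() on the read side */
	WRITE_ONCE(mm->context.global_asid, get_global_asid());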


> > +		/*
> > +		 * If at least one CPU is not using the global ASID yet,
> > +		 * send a TLB flush IPI. The IPI should cause stragglers
> > +		 * to transition soon.
> > +		 *
> > +		 * This can race with the CPU switching to another task;
> > +		 * that results in a (harmless) extra IPI.
> > +		 */
> > +		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm_asid, cpu)) != bc_asid) {
> > +			flush_tlb_multi(mm_cpumask(info->mm), info);
> > +			return;
> 
> I am trying to figure out why we return here. The transition might
> not 
> be over? Why is it "soon"? Wouldn't flush_tlb_func() reload it 
> unconditionally?

The transition _should_ be over, but what if another
CPU got an NMI while in the middle of switch_mm_irqs_off,
and set its own bit in the mm_cpumask after we send this
IPI?

On the other hand, if it sets its mm_cpumask bit after
this point, it will also load the mm->context.global_asid
after this point, and should definitely get the new ASID.

I think we are probably fine to set asid_transition to
false here, but I've had to tweak this code so much over
the past months that I don't feel super confident any more :)

> 
> > +	/*
> > +	 * TLB flushes with INVLPGB are kicked off asynchronously.
> > +	 * The inc_mm_tlb_gen() guarantees page table updates are done
> > +	 * before these TLB flushes happen.
> > +	 */
> > +	if (info->end == TLB_FLUSH_ALL) {
> > +		invlpgb_flush_single_pcid_nosync(kern_pcid(asid));
> > +		/* Do any CPUs supporting INVLPGB need PTI? */
> > +		if (static_cpu_has(X86_FEATURE_PTI))
> > +			invlpgb_flush_single_pcid_nosync(user_pcid(asid));
> > +	} else for (; addr < info->end; addr += nr << info->stride_shift) {
> 
> I guess I was wrong, and do-while was cleaner here.
> 
> And I guess this is now a bug, if info->stride_shift > PMD_SHIFT...
> 
We set maxnr to 1 for larger stride shifts at the top of the function:

        /* Flushing multiple pages at once is not supported with 1GB pages. */
        if (info->stride_shift > PMD_SHIFT)
                maxnr = 1;

> [ I guess the cleanest way was to change get_flush_tlb_info to mask
> the 
> low bits of start and end based on ((1ull << stride_shift) - 1). But 
> whatever... ]

I'll change it back :)
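
Roughly this shape, using the variable names from the posted code
(just a sketch of the loop; the 1GB-page case falls back to one page
per iteration):

	} else do {
		unsigned long nr = 1;

		if (info->stride_shift <= PMD_SHIFT)
			nr = clamp_val((info->end - addr) >> info->stride_shift,
				       1, maxnr);

		invlpgb_flush_user_nr_nosync(kern_pcid(asid), addr, nr, pmd);
		/* Do any CPUs supporting INVLPGB need PTI? */
		if (static_cpu_has(X86_FEATURE_PTI))
			invlpgb_flush_user_nr_nosync(user_pcid(asid), addr, nr, pmd);

		addr += nr << info->stride_shift;
	} while (addr < info->end);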

I'm just happy this code is getting lots of attention,
and we're improving it with time.


> > @@ -573,6 +874,23 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
> >   				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
> >   			cpumask_set_cpu(cpu, mm_cpumask(next));
> >   
> > +		/*
> > +		 * Check if the current mm is transitioning to a new ASID.
> > +		 */
> > +		if (needs_global_asid_reload(next, prev_asid)) {
> > +			next_tlb_gen = atomic64_read(&next->context.tlb_gen);
> > +
> > +			choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
> > +			goto reload_tlb;
> 
> Not a fan of the goto's when they are not really needed, and I don't 
> think it is really needed here. Especially that the name of the tag 
> "reload_tlb" does not really convey that the page-tables are reloaded
> at 
> that point.

In this particular case, the CPU continues running with
the same page tables, but with a different PCID.

> 
> > +		}
> > +
> > +		/*
> > +		 * Broadcast TLB invalidation keeps this PCID up to date
> > +		 * all the time.
> > +		 */
> > +		if (is_global_asid(prev_asid))
> > +			return;
> 
> Hard for me to convince myself that this is correct.

When a process uses a global ASID, we always send
out TLB invalidations using INVLPGB.

The global ASID should always be up to date.

> 
> > @@ -769,6 +1092,16 @@ static void flush_tlb_func(void *info)
> >   	if (unlikely(loaded_mm == &init_mm))
> >   		return;
> >   
> > +	/* Reload the ASID if transitioning into or out of a global ASID */
> > +	if (needs_global_asid_reload(loaded_mm, loaded_mm_asid)) {
> > +		switch_mm_irqs_off(NULL, loaded_mm, NULL);
> 
> I understand you want to reuse that logic, but it doesn't seem 
> reasonable to me. It both doesn't convey what you want to do, and can
> lead to undesired operations - cpu_tlbstate_update_lam() for
> instance. 
> Probably the impact on performance is minor, but it is an opening for
> future mistakes.

My worry with having a separate code path here is
that it could bit rot, and we could introduce bugs
that way.

I would rather have a tiny performance impact in
what is a rare code path than a rare (and hard to
track down) memory corruption due to bit rot.


> 
> > +		loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
> > +	}
> > +
> > +	/* Broadcast ASIDs are always kept up to date with INVLPGB. */
> > +	if (is_global_asid(loaded_mm_asid))
> > +		return;
> 
> The comment does not clarify to me, and I don't manage to clearly 
> explain to myself, why it is guaranteed that all the IPI TLB flushes,
> which were potentially issued before the transition, are not needed.
> 
IPI TLB flushes that were issued before the transition went
to the CPUs when they were using dynamic ASIDs (numbers 1-5).

Reloading the TLB with a different PCID, even pointed at the
same page tables, means that the TLB should load the
translations fresh from the page tables, and not re-use any
that it had previously loaded under a different PCID.


-- 
All Rights Reversed.
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Nadav Amit 2 weeks, 1 day ago

> On 20 Jan 2025, at 18:09, Rik van Riel <riel@surriel.com> wrote:
> 
> On Mon, 2025-01-20 at 16:02 +0200, Nadav Amit wrote:
>> 
>> 
>> On 20/01/2025 4:40, Rik van Riel wrote:
>>> 
>>> +static inline void broadcast_tlb_flush(struct flush_tlb_info *info)
>>> +{
>>> +	VM_WARN_ON_ONCE(1);
>> 
>> Not sure why not use VM_WARN_ONCE() instead with some more 
>> informative message (anyhow, a string is allocated for it).
>> 
> VM_WARN_ON_ONCE only has a condition, not a message.

Right, my bad.

> 
>>> +	/* Slower check to make sure. */
>>> +	for_each_cpu(cpu, mm_cpumask(mm)) {
>>> +		/* Skip the CPUs that aren't really running this process. */
>>> +		if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
>>> +			continue;
>> 
>> Then perhaps at least add a comment next to loaded_mm, that it's not 
>> private per-se, but rarely accessed by other cores?
>> 
> I don't see any comment in struct tlb_state that
> suggests it was ever private to begin with.
> 
> Which comment are you referring to that should
> be edited?

You can see there is a tlb_state_shared, so one assumes tlb_state is
private... (at least that was my intention when separating them).
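
Something as small as this next to loaded_mm would already help (a
sketch of the comment, nothing more):

	/*
	 * Mostly CPU-local, but the global ASID transition code does
	 * read loaded_mm (and loaded_mm_asid) from other CPUs, so it
	 * is not strictly private to this CPU.
	 */
	struct mm_struct *loaded_mm;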

> 
>>> 
>>> +	WRITE_ONCE(mm->context.asid_transition, true);
>>> +	WRITE_ONCE(mm->context.global_asid, get_global_asid());
>> 
>> I know it is likely correct in practice (due to TSO memory model),
>> but 
>> it is not clear, at least for me, how those write order affects the
>> rest 
>> of the code. I managed to figure out how it relates to the reads in 
>> flush_tlb_mm_range() and native_flush_tlb_multi(), but I wouldn't say
>> it 
>> is trivial and doesn't worth a comment (or smp_wmb/smp_rmb).
>> 
> 
> What kind of wording should we add here to make it
> easier to understand?
> 
> "The TLB invalidation code reads these variables in
> the opposite order in which they are written" ?

Usually in such cases, you make a reference to wherever there are readers
that rely on the ordering. This is how documenting smp_wmb()/smp_rmb()
ordering is usually done.

> 
> 
>>> 
>>> +		/*
>>> +		 * If at least one CPU is not using the global ASID yet,
>>> +		 * send a TLB flush IPI. The IPI should cause stragglers
>>> +		 * to transition soon.
>>> +		 *
>>> +		 * This can race with the CPU switching to another task;
>>> +		 * that results in a (harmless) extra IPI.
>>> +		 */
>>> +		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm_asid, cpu)) != bc_asid) {
>>> +			flush_tlb_multi(mm_cpumask(info->mm), info);
>>> +			return;
>>> +		}
>> 
>> I am trying to figure out why we return here. The transition might
>> not 
>> be over? Why is it "soon"? Wouldn't flush_tlb_func() reload it 
>> unconditionally?
> 
> The transition _should_ be over, but what if another
> CPU got an NMI while in the middle of switch_mm_irqs_off,
> and set its own bit in the mm_cpumask after we send this
> IPI?
> 
> On the other hand, if it sets its mm_cpumask bit after
> this point, it will also load the mm->context.global_asid
> after this point, and should definitely get the new ASID.
> 
> I think we are probably fine to set asid_transition to
> false here, but I've had to tweak this code so much over
> the past months that I don't feel super confident any more :)

I fully relate, but I am not sure it is that great. The problem
is that nobody would have the guts to change that code later...

>> 
>> And I guess this is now a bug, if info->stride_shift > PMD_SHIFT...
>> 
> We set maxnr to 1 for larger stride shifts at the top of the function:
> 

You’re right, all safe.

> 
>>> +	goto reload_tlb;
>> 
>> Not a fan of the goto's when they are not really needed, and I don't 
>> think it is really needed here. Especially that the name of the tag 
>> "reload_tlb" does not really convey that the page-tables are reloaded
>> at 
>> that point.
> 
> In this particular case, the CPU continues running with
> the same page tables, but with a different PCID.

I understand it is “reload_tlb” from your point of view, or from the
point of view of the code that does the “goto”, but if I showed you
the code that follows the “reload_tlb”, I’m not sure you’d know it
is so.

[ snip, taking your valid points ]

>> 
>>> +		loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
>>> +	}
>>> +
>>> +	/* Broadcast ASIDs are always kept up to date with INVLPGB. */
>>> +	if (is_global_asid(loaded_mm_asid))
>>> +		return;
>> 
>> The comment does not clarify to me, and I don't manage to clearly 
>> explain to myself, why it is guaranteed that all the IPI TLB flushes,
>> which were potentially issued before the transition, are not needed.
>> 
> IPI TLB flushes that were issued before the transition went
> to the CPUs when they were using dynamic ASIDs (numbers 1-5).
> 
> Reloading the TLB with a different PCID, even pointed at the
> same page tables, means that the TLB should load the
> translations fresh from the page tables, and not re-use any
> that it had previously loaded under a different PCID.
> 

What about this scenario for instance?

CPU0                  CPU1                      CPU2
----                  ----                      ----
(1) use_global_asid(mm):        
    mm->context.asid_trans = T;
    mm->context.global_asid = G;

                      (2) switch_mm(..., next=mm):
                          *Observes global_asid = G
                          => loads CR3 with PCID=G
                          => fills TLB under G.
                          TLB caches PTE[G, V] = P
			  (for some reason)

                                             (3) flush_tlb_mm_range(mm):
                                                 *Sees global_asid == 0
                                                   (stale/old value)
                                                 => flush_tlb_multi()
                                                 => IPI flush for dyn.

                      (4) IPI arrives on CPU1:
                          flush_tlb_func(...): 
                          is_global_asid(G)? yes,
                          skip invalidate; broadcast
                          flush assumed to cover it.

                                             (5) IPI completes on CPU2:
                                                 Dyn. ASIDs are flushed, 
                                                 but CPU1’s global ASID
                                                 was never invalidated!

                      (6) CPU1 uses stale TLB entries under ASID G.
                          TLB continues to use PTE[G, V] = P, as it
                          was not invalidated.
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Rik van Riel 2 weeks, 1 day ago
On Mon, 2025-01-20 at 22:04 +0200, Nadav Amit wrote:
> 
> What about this scenario for instance?
> 
> CPU0                  CPU1                      CPU2
> ----                  ----                      ----
> (1) use_global_asid(mm):        
>     mm->context.asid_trans = T;
>     mm->context.global_asid = G;
> 
>                       (2) switch_mm(..., next=mm):
>                           *Observes global_asid = G
>                           => loads CR3 with PCID=G
>                           => fills TLB under G.
>                           TLB caches PTE[G, V] = P
> 			  (for some reason)
> 
>                                              (3) flush_tlb_mm_range(mm):
>                                                  *Sees global_asid == 0
>                                                    (stale/old value)
>                                                  => flush_tlb_multi()
>                                                  => IPI flush for dyn.
> 

If the TLB flush is about a page table change that
happened before CPUs 0 and 1 switched to the global
ASID, then CPUs 0 and 1 will not see the old page
table contents after the switch.

If the TLB flush is about a page table change that
happened after the transition to a global ASID,
flush_tlb_mm_range() should see that global ASID,
and flush accordingly.

What am I missing?

>                       (4) IPI arrives on CPU1:
>                           flush_tlb_func(...): 
>                           is_global_asid(G)? yes,
>                           skip invalidate; broadcast
>                           flush assumed to cover it.
> 
>                                              (5) IPI completes on CPU2:
>                                                  Dyn. ASIDs are flushed,
>                                                  but CPU1’s global ASID
>                                                  was never invalidated!
> 
>                       (6) CPU1 uses stale TLB entries under ASID G.
>                           TLB continues to use PTE[G, V] = P, as it
>                           was not invalidated.
> 
> 
> 
> 
> 

-- 
All Rights Reversed.
Re: [PATCH v6 09/12] x86/mm: enable broadcast TLB invalidation for multi-threaded processes
Posted by Nadav Amit 2 weeks, 1 day ago

> On 21 Jan 2025, at 0:44, Rik van Riel <riel@surriel.com> wrote:
> 
> 
> If the TLB flush is about a page table change that
> happened after the transition to a global ASID,
> flush_tlb_mm_range() should see that global ASID,
> and flush accordingly.
> 
> What am I missing?


I think the reasoning needs to be done using memory-ordering arguments
based on the kernel memory model (which builds on top of the x86 memory
model in our case) and, when necessary, “happens-before” relations. The
fact that one CPU sees a write does not by itself imply that another
CPU will see it.

So if there are memory barriers that would prevent this scenario, it
would be good to document how they synchronize. Otherwise, I think at
the very least “late” TLB shootdowns should be respected even if the
ASID is already “global”.
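
As a deliberately conservative sketch of that "respect late shootdowns"
option, the early return in flush_tlb_func() could honour the IPI's
range locally instead of skipping it (field names are from the posted
patch; the helpers are the existing local-flush primitives):

	/* Broadcast ASIDs are normally kept up to date with INVLPGB... */
	if (is_global_asid(loaded_mm_asid)) {
		unsigned long addr = f->start;

		/* ...but still honour a shootdown that raced with the transition. */
		if (f->end == TLB_FLUSH_ALL) {
			flush_tlb_local();
		} else {
			while (addr < f->end) {
				flush_tlb_one_user(addr);
				addr += 1UL << f->stride_shift;
			}
		}
		return;
	}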

I do recommend that you also check the opposite case, where a CPU that
has transitioned to the global ASID does a broadcast flush while there
is a straggler CPU that has not yet switched to the global one. While
in that case the TLB flush would eventually take place, there might be
a window of time during which it has not (and the page is already
freed).