On Tue, 3 Dec 2024 09:57:55 -0500
Mathieu Desnoyers <mathieu.desnoyers@efficios.com> wrote:
> I'd recommend to rename "last_trimmed_cpumask" to "next_trim_cpumask",
> and always update it to "jiffies + HZ". Then we can remove the addition
> from the comparison in the should_flush_tlb() fast-path:
Thanks Mathieu, I have applied your suggested improvements,
except for the one you posted as a separate patch earlier.
---8<---
From c7d04233f15ba217ce6ebd0dcf12fab91c437e96 Mon Sep 17 00:00:00 2001
From: Rik van Riel <riel@fb.com>
Date: Mon, 2 Dec 2024 09:57:31 -0800
Subject: [PATCH] x86,mm: only trim the mm_cpumask once a second
Setting and clearing CPU bits in the mm_cpumask is only ever done
by the CPU itself, from the context switch code or the TLB flush
code.
Synchronization is handled by switch_mm_irqs_off blocking interrupts.
Sending TLB flush IPIs to CPUs that are in the mm_cpumask, but no
longer running the program causes a regression in the will-it-scale
tlbflush2 test. This test is contrived, but a large regression here
might cause a small regression in some real world workload.
Instead of always sending IPIs to CPUs that are in the mm_cpumask,
but no longer running the program, send these IPIs only once a second.
The rest of the time we can skip over CPUs where the loaded_mm is
different from the target mm.
Signed-off-by: Rik van Riel <riel@surriel.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202411282207.6bd28eae-lkp@intel.com/
---
arch/x86/include/asm/mmu.h | 2 ++
arch/x86/include/asm/mmu_context.h | 1 +
arch/x86/mm/tlb.c | 27 ++++++++++++++++++++++++---
3 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index ce4677b8b735..3b496cdcb74b 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -37,6 +37,8 @@ typedef struct {
*/
atomic64_t tlb_gen;
+ unsigned long next_trim_cpumask;
+
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct rw_semaphore ldt_usr_sem;
struct ldt_struct *ldt;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 2886cb668d7f..795fdd53bd0a 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -151,6 +151,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
atomic64_set(&mm->context.tlb_gen, 0);
+ mm->context.next_trim_cpumask = jiffies + HZ;
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 1aac4fa90d3d..e90edbbf0188 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -759,8 +759,11 @@ static void flush_tlb_func(void *info)
/* Can only happen on remote CPUs */
if (f->mm && f->mm != loaded_mm) {
+ unsigned long next_jiffies = jiffies + HZ;
cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(f->mm));
trace_tlb_flush(TLB_REMOTE_WRONG_CPU, 0);
+ if (time_after(next_jiffies, READ_ONCE(f->mm->context.next_trim_cpumask)))
+ WRITE_ONCE(f->mm->context.next_trim_cpumask, next_jiffies);
return;
}
}
@@ -892,9 +895,27 @@ static void flush_tlb_func(void *info)
nr_invalidate);
}
-static bool tlb_is_not_lazy(int cpu, void *data)
+static bool should_flush_tlb(int cpu, void *data)
{
- return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
+ struct flush_tlb_info *info = data;
+
+ /* Lazy TLB will get flushed at the next context switch. */
+ if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+ return false;
+
+ /* No mm means kernel memory flush. */
+ if (!info->mm)
+ return true;
+
+ /* The target mm is loaded, and the CPU is not lazy. */
+ if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
+ return true;
+
+ /* In cpumask, but not the loaded mm? Periodically remove by flushing. */
+ if (time_after(jiffies, info->mm->context.next_trim_cpumask))
+ return true;
+
+ return false;
}
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
@@ -928,7 +949,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
if (info->freed_tables)
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
else
- on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
+ on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
(void *)info, 1, cpumask);
}
--
2.47.0
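A note on the timing logic above: the once-a-second rearm stays correct
when jiffies wraps around, because time_after() compares via signed
subtraction rather than absolute magnitude. Below is a minimal userspace
sketch of that pattern; the comparison mirrors time_after() from
<linux/jiffies.h>, and the jiffies values are invented for illustration.

#include <stdio.h>

/* Wrap-safe "a is after b" check, as in <linux/jiffies.h>. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = (unsigned long)-5;  /* counter about to wrap */
	unsigned long next_trim = jiffies + 250;    /* "jiffies + HZ" at HZ=250 */

	/* Not due yet: next_trim is in the future despite wrapping past 0. */
	printf("due now?   %d\n", time_after(jiffies, next_trim));

	jiffies += 300;  /* a bit more than one "second" later */
	printf("due later? %d\n", time_after(jiffies, next_trim));
	return 0;
}

This prints 0 then 1: the trim only becomes due once the deadline has
passed, regardless of the wrap.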
On 2024-12-03 14:48, Rik van Riel wrote:
> On Tue, 3 Dec 2024 09:57:55 -0500
> Mathieu Desnoyers <mathieu.desnoyers@efficios.com> wrote:
>
>
>> I'd recommend to rename "last_trimmed_cpumask" to "next_trim_cpumask",
>> and always update it to "jiffies + HZ". Then we can remove the addition
>> from the comparison in the should_flush_tlb() fast-path:
>
> Thanks Mathieu, I have applied your suggested improvements,
> except for the one you posted as a separate patch earlier.
>
> ---8<---
>
> From c7d04233f15ba217ce6ebd0dcf12fab91c437e96 Mon Sep 17 00:00:00 2001
> From: Rik van Riel <riel@fb.com>
> Date: Mon, 2 Dec 2024 09:57:31 -0800
> Subject: [PATCH] x86,mm: only trim the mm_cpumask once a second
>
> Setting and clearing CPU bits in the mm_cpumask is only ever done
> by the CPU itself, from the context switch code or the TLB flush
> code.
>
> Synchronization is handled by switch_mm_irqs_off blocking interrupts.
>
> Sending TLB flush IPIs to CPUs that are in the mm_cpumask, but no
> longer running the program causes a regression in the will-it-scale
> tlbflush2 test. This test is contrived, but a large regression here
> might cause a small regression in some real world workload.
We should add information detailing why tlbflush2 ends up
contending on the mmap_sem, and thus schedules often.
>
> Instead of always sending IPIs to CPUs that are in the mm_cpumask,
> but no longer running the program, send these IPIs only once a second.
>
> The rest of the time we can skip over CPUs where the loaded_mm is
> different from the target mm.
>
> Signed-off-by: Rik van Riel <riel@surriel.com>
Much better!
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
> Reported-by: kernel test robot <oliver.sang@intel.com>
> Closes: https://lore.kernel.org/oe-lkp/202411282207.6bd28eae-lkp@intel.com/
> ---
> arch/x86/include/asm/mmu.h | 2 ++
> arch/x86/include/asm/mmu_context.h | 1 +
> arch/x86/mm/tlb.c | 27 ++++++++++++++++++++++++---
> 3 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
> index ce4677b8b735..3b496cdcb74b 100644
> --- a/arch/x86/include/asm/mmu.h
> +++ b/arch/x86/include/asm/mmu.h
> @@ -37,6 +37,8 @@ typedef struct {
> */
> atomic64_t tlb_gen;
>
> + unsigned long next_trim_cpumask;
> +
> #ifdef CONFIG_MODIFY_LDT_SYSCALL
> struct rw_semaphore ldt_usr_sem;
> struct ldt_struct *ldt;
> diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
> index 2886cb668d7f..795fdd53bd0a 100644
> --- a/arch/x86/include/asm/mmu_context.h
> +++ b/arch/x86/include/asm/mmu_context.h
> @@ -151,6 +151,7 @@ static inline int init_new_context(struct task_struct *tsk,
>
> mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
> atomic64_set(&mm->context.tlb_gen, 0);
> + mm->context.next_trim_cpumask = jiffies + HZ;
>
> #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
> if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
> index 1aac4fa90d3d..e90edbbf0188 100644
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -759,8 +759,11 @@ static void flush_tlb_func(void *info)
>
> /* Can only happen on remote CPUs */
> if (f->mm && f->mm != loaded_mm) {
> + unsigned long next_jiffies = jiffies + HZ;
> cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(f->mm));
> trace_tlb_flush(TLB_REMOTE_WRONG_CPU, 0);
> + if (time_after(next_jiffies, READ_ONCE(f->mm->context.next_trim_cpumask)))
> + WRITE_ONCE(f->mm->context.next_trim_cpumask, next_jiffies);
> return;
> }
> }
> @@ -892,9 +895,27 @@ static void flush_tlb_func(void *info)
> nr_invalidate);
> }
>
> -static bool tlb_is_not_lazy(int cpu, void *data)
> +static bool should_flush_tlb(int cpu, void *data)
> {
> - return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
> + struct flush_tlb_info *info = data;
> +
> + /* Lazy TLB will get flushed at the next context switch. */
> + if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
> + return false;
> +
> + /* No mm means kernel memory flush. */
> + if (!info->mm)
> + return true;
> +
> + /* The target mm is loaded, and the CPU is not lazy. */
> + if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
> + return true;
> +
> + /* In cpumask, but not the loaded mm? Periodically remove by flushing. */
> + if (time_after(jiffies, info->mm->context.next_trim_cpumask))
> + return true;
> +
> + return false;
> }
>
> DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
> @@ -928,7 +949,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
> if (info->freed_tables)
> on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
> else
> - on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
> + on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
> (void *)info, 1, cpumask);
> }
>
--
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com
On 12/3/24 11:48, Rik van Riel wrote:
> Sending TLB flush IPIs to CPUs that are in the mm_cpumask, but no
> longer running the program causes a regression in the will-it-scale
> tlbflush2 test. This test is contrived, but a large regression here
> might cause a small regression in some real world workload.

The patch seems OK in theory, but this explanation doesn't sit right
with me.

Most of the will-it-scale tests including tlbflush2 have long-lived
CPU-bound threads. They shouldn't schedule out much at all during the
benchmark. I don't see how they could drive a significant increase in
IPIs to cause a 10%+ regression.

I'd much prefer that we understand the regression in detail before
throwing more code at fixing it.
On Tue, 2024-12-03 at 12:05 -0800, Dave Hansen wrote:
> On 12/3/24 11:48, Rik van Riel wrote:
> > Sending TLB flush IPIs to CPUs that are in the mm_cpumask, but no
> > longer running the program causes a regression in the will-it-scale
> > tlbflush2 test. This test is contrived, but a large regression here
> > might cause a small regression in some real world workload.
>
> The patch seems OK in theory, but this explanation doesn't sit right
> with me.
>
> Most of the will-it-scale tests including tlbflush2 have long-lived
> CPU-bound threads. They shouldn't schedule out much at all during the
> benchmark. I don't see how they could drive a significant increase in
> IPIs to cause a 10%+ regression.
>
> I'd much prefer that we understand the regression in detail before
> throwing more code at fixing it.
>
The tlb_flush2 threaded test does not only madvise in a
loop, but also mmap and munmap from inside every thread.

This should create massive contention on the mmap_lock,
resulting in threads going to sleep while waiting in
mmap and munmap.

https://github.com/antonblanchard/will-it-scale/blob/master/tests/tlb_flush2.c

--
All Rights Reversed.
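For context, here is a simplified sketch of the kind of loop each
tlb_flush2 thread runs, based on the description above. The exact size,
flags, and structure are assumptions; see the linked tlb_flush2.c for
the real test.

#include <sys/mman.h>
#include <unistd.h>

#define MEMSIZE (1UL << 20)  /* ~1MB, per Dave's note below; an assumption */

static void tlb_flush2_like_loop(void)
{
	long pgsize = sysconf(_SC_PAGESIZE);

	for (;;) {
		/* mmap()/munmap() take mmap_lock for write: threads sleep here. */
		char *p = mmap(NULL, MEMSIZE, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return;

		/* Dirty every page so there are TLB entries to shoot down. */
		for (unsigned long off = 0; off < MEMSIZE; off += pgsize)
			p[off] = 1;

		/* Discarding the pages triggers remote TLB flush IPIs. */
		madvise(p, MEMSIZE, MADV_DONTNEED);

		munmap(p, MEMSIZE);
	}
}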
On 12/3/24 12:07, Rik van Riel wrote:
> The tlb_flush2 threaded test does not only madvise in a
> loop, but also mmap and munmap from inside every thread.
>
> This should create massive contention on the mmap_lock,
> resulting in threads going to sleep while waiting in
> mmap and munmap.
>
> https://github.com/antonblanchard/will-it-scale/blob/master/tests/tlb_flush2.c

Oh, wow, it only madvise()'s a 1MB allocation before doing the
munmap()/mmap(). I somehow remembered it being a lot larger. And, yeah,
I see a ton of idle time which would be 100% explained by mmap_lock
contention.

Did the original workload that you care about have idle time?

I'm wondering if trimming mm_cpumask() on the way to idle but leaving
it alone on a context switch to another thread is a good idea.
On Tue, 2024-12-03 at 16:46 -0800, Dave Hansen wrote:
> On 12/3/24 12:07, Rik van Riel wrote:
> > The tlb_flush2 threaded test does not only madvise in a
> > loop, but also mmap and munmap from inside every thread.
> >
> > This should create massive contention on the mmap_lock,
> > resulting in threads going to sleep while waiting in
> > mmap and munmap.
> >
> > https://github.com/antonblanchard/will-it-scale/blob/master/tests/tlb_flush2.c
>
> Oh, wow, it only madvise()'s a 1MB allocation before doing the
> munmap()/mmap(). I somehow remembered it being a lot larger. And,
> yeah, I see a ton of idle time which would be 100% explained by
> mmap_lock contention.
>
> Did the original workload that you care about have idle time?
>
The workloads that I care about are things like memcache, web servers,
web proxies, and other workloads that typically handle very short
requests before going idle again. These programs have a LOT of context
switches to and from the idle task.

> I'm wondering if trimming mm_cpumask() on the way to idle but leaving
> it alone on a context switch to another thread is a good idea.
>
The problem with that is that you then have to set the bit again when
switching back to the program, which creates contention when a number
of CPUs are transitioning to and from idle at the same time.

Atomic operations on a contended cache line from the context switch
code end up being quite visible when profiling some workloads :)

--
All Rights Reversed.
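To make that contention concrete, here is a toy userspace model, not
kernel code: every simulated transition to and from idle is one atomic
read-modify-write on a single word shared by all threads, standing in
for cpumask_set_cpu()/cpumask_clear_cpu() on a hot mm_cpumask. The
thread and iteration counts are arbitrary.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 8
#define NITERS   (1L << 22)

static _Atomic unsigned long shared_mask;  /* one contended cache line */

static void *switcher(void *arg)
{
	unsigned long bit = 1UL << (long)arg;

	for (long i = 0; i < NITERS; i++) {
		/* "CPU rejoins the mm": atomic set bounces the cache line. */
		atomic_fetch_or(&shared_mask, bit);
		/* "CPU goes idle": atomic clear bounces it again. */
		atomic_fetch_and(&shared_mask, ~bit);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (long t = 0; t < NTHREADS; t++)
		pthread_create(&tid[t], NULL, switcher, (void *)t);
	for (long t = 0; t < NTHREADS; t++)
		pthread_join(tid[t], NULL);

	printf("final mask: %#lx\n", atomic_load(&shared_mask));
	return 0;
}

Comparing this against a variant where each thread flips a bit in its
own cache line shows the cost Rik is describing; the patch avoids
paying it on every context switch by leaving stale bits in place for up
to a second.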