From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
When a process reduces its number of threads or clears bits in its CPU
affinity mask, the mm_cid allocation should eventually converge towards
smaller values.

However, the change introduced by:

commit 7e019dcc470f ("sched: Improve cache locality of RSEQ concurrency
IDs for intermittent workloads")

adds a per-mm/CPU recent_cid which is never unset unless a thread
migrates.

This is a tradeoff between:

A) Preserving cache locality after a transition from many threads to few
   threads, or after reducing the hamming weight of the allowed CPU mask.

B) Making the mm_cid upper bounds wrt nr threads and allowed CPU mask
   easy to document and understand.

C) Allowing applications to eventually react to mm_cid compaction after
   a reduction of the nr threads or allowed CPU mask: shrinking the IDs
   back towards 0 makes the compaction easier to track from userspace
   (see the rseq sketch below).

D) Making sure applications that periodically reduce and then increase
   again the nr threads or allowed CPU mask still benefit from good
   cache locality with mm_cid.
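
As an illustration of point C, a thread can observe its current mm_cid
directly from the rseq area shared with the kernel. The following is a
minimal userspace sketch (not part of this patch); it assumes Linux >= 6.3,
a glibc >= 2.35 that registers rseq and exports __rseq_offset/__rseq_size,
a <sys/rseq.h> recent enough to declare the mm_cid field, and a compiler
providing __builtin_thread_pointer():

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <sys/rseq.h>	/* struct rseq, __rseq_offset, __rseq_size */

/* The per-thread rseq area lives at a fixed offset from the thread pointer. */
static inline struct rseq *rseq_self(void)
{
	return (struct rseq *)((uintptr_t)__builtin_thread_pointer() +
			       __rseq_offset);
}

int main(void)
{
	if (!__rseq_size) {	/* rseq not registered: too-old glibc or kernel */
		fprintf(stderr, "rseq unavailable\n");
		return 1;
	}
	/* mm_cid is only populated on kernels with rseq mm_cid support. */
	printf("mm_cid = %u\n", rseq_self()->mm_cid);
	return 0;
}

A process that shrinks its thread count can poll this value in its worker
threads; with the change below applied, the observed IDs should converge
back towards 0, per the changelog above.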
Introduce the following changes:

* After shrinking the number of threads or reducing the number of
  allowed CPUs, reduce the value of max_nr_cid so expansion of CID
  allocation will preserve cache locality if the number of threads or
  allowed CPUs increase again.

* Only re-use a recent_cid if it is within the max_nr_cid upper bound,
  else find the first available CID.
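
To make the combined effect concrete, here is a small userspace model of
the allocator after this patch. It is illustrative only: C11 atomics and a
64-bit word stand in for the kernel's atomic_t and cpumask helpers, all
names are made up, and the first-free-CID fallback that the kernel performs
after the expansion loop is folded into the same function:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int max_nr_cid;		/* models mm->max_nr_cid */
static atomic_ulong cidmask;		/* bit n set => CID n in use (<= 64 CIDs) */
static int nr_cpus_allowed = 4;		/* models mm->nr_cpus_allowed */
static int nr_users = 8;		/* models atomic_read(&mm->mm_users) */

/* Models cpumask_test_and_set_cpu(): returns the previous bit value. */
static bool test_and_set_cid(int cid)
{
	unsigned long bit = 1UL << cid;

	return atomic_fetch_or(&cidmask, bit) & bit;
}

static int cid_try_get(int recent_cid)	/* recent_cid < 0 models "unset" */
{
	/* The kernel re-evaluates this bound on each loop iteration. */
	int allowed = nr_cpus_allowed < nr_users ? nr_cpus_allowed : nr_users;
	int max = atomic_load(&max_nr_cid);
	int cid;

	/* Change 1: clamp max_nr_cid down to min(nr_cpus_allowed, nr_users). */
	while (max > allowed) {
		/* On failure, the current value is reloaded into max. */
		if (atomic_compare_exchange_weak(&max_nr_cid, &max, allowed)) {
			max = allowed;
			break;
		}
	}

	/* Change 2: re-use the recent CID only if below the clamped bound. */
	if (recent_cid >= 0 && recent_cid < max && !test_and_set_cid(recent_cid))
		return recent_cid;

	/* Unchanged: expand the allocation one CID at a time. */
	cid = max;
	while (cid < allowed) {
		/* On failure, cid is reloaded from max_nr_cid; just retry. */
		if (!atomic_compare_exchange_weak(&max_nr_cid, &cid, cid + 1))
			continue;
		if (!test_and_set_cid(cid))
			return cid;
	}

	/* Fallback: take the first free CID, as the kernel does next. */
	for (cid = 0; cid < nr_cpus_allowed; cid++)
		if (!test_and_set_cid(cid))
			return cid;
	return -1;
}

int main(void)
{
	atomic_store(&max_nr_cid, 8);	/* left over from an 8-thread phase */
	nr_users = 2;			/* the process shrank to 2 threads */
	/* The stale recent CID 5 is above the clamped bound (2) and is
	 * rejected; allocation restarts from the compacted range: prints 0. */
	printf("cid = %d\n", cid_try_get(5));
	return 0;
}
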
Fixes: 7e019dcc470f ("sched: Improve cache locality of RSEQ concurrency IDs for intermittent workloads")
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Marco Elver <elver@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Tested-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
 include/linux/mm_types.h |  7 ++++---
 kernel/sched/sched.h     | 25 ++++++++++++++++++++++---
 2 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6b27db7f94963..0234f14f2aa6b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -875,10 +875,11 @@ struct mm_struct {
 		 */
 		unsigned int nr_cpus_allowed;
 		/**
-		 * @max_nr_cid: Maximum number of concurrency IDs allocated.
+		 * @max_nr_cid: Maximum number of allowed concurrency
+		 *              IDs allocated.
 		 *
-		 * Track the highest number of concurrency IDs allocated for the
-		 * mm.
+		 * Track the highest number of allowed concurrency IDs
+		 * allocated for the mm.
 		 */
 		atomic_t max_nr_cid;
 		/**
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 38e0e323dda26..606c96b74ebfa 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3698,10 +3698,28 @@ static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm)
 {
 	struct cpumask *cidmask = mm_cidmask(mm);
 	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
-	int cid = __this_cpu_read(pcpu_cid->recent_cid);
+	int cid, max_nr_cid, allowed_max_nr_cid;
 
+	/*
+	 * After shrinking the number of threads or reducing the number
+	 * of allowed cpus, reduce the value of max_nr_cid so expansion
+	 * of cid allocation will preserve cache locality if the number
+	 * of threads or allowed cpus increase again.
+	 */
+	max_nr_cid = atomic_read(&mm->max_nr_cid);
+	while ((allowed_max_nr_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed),
+					   atomic_read(&mm->mm_users))),
+	       max_nr_cid > allowed_max_nr_cid) {
+		/* atomic_try_cmpxchg loads previous mm->max_nr_cid into max_nr_cid. */
+		if (atomic_try_cmpxchg(&mm->max_nr_cid, &max_nr_cid, allowed_max_nr_cid)) {
+			max_nr_cid = allowed_max_nr_cid;
+			break;
+		}
+	}
 	/* Try to re-use recent cid. This improves cache locality. */
-	if (!mm_cid_is_unset(cid) && !cpumask_test_and_set_cpu(cid, cidmask))
+	cid = __this_cpu_read(pcpu_cid->recent_cid);
+	if (!mm_cid_is_unset(cid) && cid < max_nr_cid &&
+	    !cpumask_test_and_set_cpu(cid, cidmask))
 		return cid;
 	/*
 	 * Expand cid allocation if the maximum number of concurrency
@@ -3709,8 +3727,9 @@ static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm)
 	 * and number of threads. Expanding cid allocation as much as
 	 * possible improves cache locality.
 	 */
-	cid = atomic_read(&mm->max_nr_cid);
+	cid = max_nr_cid;
 	while (cid < READ_ONCE(mm->nr_cpus_allowed) && cid < atomic_read(&mm->mm_users)) {
+		/* atomic_try_cmpxchg loads previous mm->max_nr_cid into cid. */
 		if (!atomic_try_cmpxchg(&mm->max_nr_cid, &cid, cid + 1))
 			continue;
 		if (!cpumask_test_and_set_cpu(cid, cidmask))
--
2.48.1
On 2025-02-10 10:32, Gabriele Monaco wrote:
> From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
>
Peter, Ingo, this patch has been ready for inclusion for a while. The
rest of this series does not seem to be quite ready yet, but can we at
least merge this patch into tip ?
Thanks,
Mathieu
--
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com
On Thu, Feb 13, 2025 at 09:56:17AM -0500, Mathieu Desnoyers wrote:
> On 2025-02-10 10:32, Gabriele Monaco wrote:
> > From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
> >
> Peter, Ingo, this patch has been ready for inclusion for a while. The
> rest of this series does not seem to be quite ready yet, but can we at
> least merge this patch into tip ?

Done; stuck it in queue/sched/urgent for the robots. If that passes
I'll push it to tip.
The following commit has been merged into the sched/urgent branch of tip:

Commit-ID:     02d954c0fdf91845169cdacc7405b120f90afe01
Gitweb:        https://git.kernel.org/tip/02d954c0fdf91845169cdacc7405b120f90afe01
Author:        Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
AuthorDate:    Mon, 10 Feb 2025 16:32:50 +01:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Tue, 18 Feb 2025 08:50:36 +01:00

sched: Compact RSEQ concurrency IDs with reduced threads and affinity

When a process reduces its number of threads or clears bits in its CPU
affinity mask, the mm_cid allocation should eventually converge towards
smaller values.

However, the change introduced by:

commit 7e019dcc470f ("sched: Improve cache locality of RSEQ concurrency
IDs for intermittent workloads")

adds a per-mm/CPU recent_cid which is never unset unless a thread
migrates.

This is a tradeoff between:

A) Preserving cache locality after a transition from many threads to few
   threads, or after reducing the hamming weight of the allowed CPU mask.

B) Making the mm_cid upper bounds wrt nr threads and allowed CPU mask
   easy to document and understand.

C) Allowing applications to eventually react to mm_cid compaction after
   a reduction of the nr threads or allowed CPU mask: shrinking the IDs
   back towards 0 makes the compaction easier to track from userspace.

D) Making sure applications that periodically reduce and then increase
   again the nr threads or allowed CPU mask still benefit from good
   cache locality with mm_cid.

Introduce the following changes:

* After shrinking the number of threads or reducing the number of
  allowed CPUs, reduce the value of max_nr_cid so expansion of CID
  allocation will preserve cache locality if the number of threads or
  allowed CPUs increase again.

* Only re-use a recent_cid if it is within the max_nr_cid upper bound,
  else find the first available CID.

Fixes: 7e019dcc470f ("sched: Improve cache locality of RSEQ concurrency IDs for intermittent workloads")
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Gabriele Monaco <gmonaco@redhat.com>
Link: https://lkml.kernel.org/r/20250210153253.460471-2-gmonaco@redhat.com
---
 include/linux/mm_types.h |  7 ++++---
 kernel/sched/sched.h     | 25 ++++++++++++++++++++++---
 2 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6b27db7..0234f14 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -875,10 +875,11 @@ struct mm_struct {
 		 */
 		unsigned int nr_cpus_allowed;
 		/**
-		 * @max_nr_cid: Maximum number of concurrency IDs allocated.
+		 * @max_nr_cid: Maximum number of allowed concurrency
+		 *              IDs allocated.
 		 *
-		 * Track the highest number of concurrency IDs allocated for the
-		 * mm.
+		 * Track the highest number of allowed concurrency IDs
+		 * allocated for the mm.
 		 */
 		atomic_t max_nr_cid;
 		/**
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b93c8c3..c8512a9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3698,10 +3698,28 @@ static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm)
 {
 	struct cpumask *cidmask = mm_cidmask(mm);
 	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
-	int cid = __this_cpu_read(pcpu_cid->recent_cid);
+	int cid, max_nr_cid, allowed_max_nr_cid;
 
+	/*
+	 * After shrinking the number of threads or reducing the number
+	 * of allowed cpus, reduce the value of max_nr_cid so expansion
+	 * of cid allocation will preserve cache locality if the number
+	 * of threads or allowed cpus increase again.
+	 */
+	max_nr_cid = atomic_read(&mm->max_nr_cid);
+	while ((allowed_max_nr_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed),
+					   atomic_read(&mm->mm_users))),
+	       max_nr_cid > allowed_max_nr_cid) {
+		/* atomic_try_cmpxchg loads previous mm->max_nr_cid into max_nr_cid. */
+		if (atomic_try_cmpxchg(&mm->max_nr_cid, &max_nr_cid, allowed_max_nr_cid)) {
+			max_nr_cid = allowed_max_nr_cid;
+			break;
+		}
+	}
 	/* Try to re-use recent cid. This improves cache locality. */
-	if (!mm_cid_is_unset(cid) && !cpumask_test_and_set_cpu(cid, cidmask))
+	cid = __this_cpu_read(pcpu_cid->recent_cid);
+	if (!mm_cid_is_unset(cid) && cid < max_nr_cid &&
+	    !cpumask_test_and_set_cpu(cid, cidmask))
 		return cid;
 	/*
 	 * Expand cid allocation if the maximum number of concurrency
@@ -3709,8 +3727,9 @@ static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm)
 	 * and number of threads. Expanding cid allocation as much as
 	 * possible improves cache locality.
 	 */
-	cid = atomic_read(&mm->max_nr_cid);
+	cid = max_nr_cid;
 	while (cid < READ_ONCE(mm->nr_cpus_allowed) && cid < atomic_read(&mm->mm_users)) {
+		/* atomic_try_cmpxchg loads previous mm->max_nr_cid into cid. */
 		if (!atomic_try_cmpxchg(&mm->max_nr_cid, &cid, cid + 1))
 			continue;
 		if (!cpumask_test_and_set_cpu(cid, cidmask))