The timer migration mechanism allows active CPUs to pull timers from
idle ones to improve the overall idle time. This is, however, undesired
when CPU-intensive workloads run on isolated cores, as the algorithm
would move timers from housekeeping to isolated cores, negatively
affecting the isolation.

Exclude isolated cores from the timer migration algorithm by extending
the concept of unavailable cores, currently used for offline ones, to
isolated ones:

* A core is unavailable if isolated or offline;
* A core is available if non-isolated and online;

A core is considered unavailable as isolated if it belongs to:

* the isolcpus (domain) list
* an isolated cpuset

except if it is:

* in the nohz_full list (already idle for the hierarchy)
* the nohz timekeeper core (must be available to handle global timers)

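To make these rules concrete, here is a minimal sketch of the combined
check. The helper name cpu_is_tmigr_available() is hypothetical and only
illustrates the rules listed above; in the code below the logic is split
between tmigr_is_isolated(), tmigr_should_isolate_cpu() and the hotplug
callbacks:

static bool cpu_is_tmigr_available(int cpu)
{
	/* Offline CPUs are never part of the hierarchy. */
	if (!cpu_online(cpu))
		return false;
	/* nohz_full CPUs stay in the hierarchy, they are always seen as idle. */
	if (!housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
		return true;
	/* The nohz timekeeper must stay available to handle global timers. */
	if (!tick_nohz_cpu_hotpluggable(cpu))
		return true;
	/* Domain isolated CPUs (isolcpus= or isolated cpusets) are excluded. */
	return housekeeping_cpu(cpu, HK_TYPE_DOMAIN) &&
	       !cpuset_cpu_is_isolated(cpu);
}
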
All online CPUs are added to the hierarchy during early boot; isolated
CPUs are removed during late boot, if boot-time isolation (isolcpus) is
configured, or whenever the cpuset isolation changes.

Due to how the timer migration algorithm works, any CPU that is part of
the hierarchy can have its global timers pulled by remote CPUs and has to
pull remote timers itself; skipping only the pulling of remote timers
would break the logic.
For this reason, prevent isolated CPUs from pulling remote global
timers, but also the other way around: any global timer started on an
isolated CPU will run there. This does not break the concept of
isolation (global timers don't come from outside the CPU) and, if
considered inappropriate, can usually be mitigated with other isolation
techniques (e.g. IRQ pinning).
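For instance, device IRQs (and the timer work their handlers queue) can
be kept off the isolated cores by pinning them to the housekeeping CPUs.
A minimal sketch, assuming the housekeeping CPUs are 0,32,64,96 as in the
oslat example below and that the IRQs are not managed by their drivers:

# for irq in /proc/irq/[0-9]*; do echo 0,32,64,96 > "$irq/smp_affinity_list"; done 2>/dev/null

The same default can also be set at boot time with the irqaffinity=
kernel parameter.
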
This effect was noticed on a 128-core machine running oslat on the
isolated cores (1-31,33-63,65-95,97-127). The tool monopolises CPUs,
and the CPU with the lowest number in a timer migration hierarchy (here
1 and 65) appears always active and continuously pulls global timers
from the housekeeping CPUs. This ends up moving driver work (e.g.
delayed work) to isolated CPUs and causes latency spikes:

Before the change:

# oslat -c 1-31,33-63,65-95,97-127 -D 62s
...
Maximum: 1203 10 3 4 ... 5 (us)

After the change:

# oslat -c 1-31,33-63,65-95,97-127 -D 62s
...
Maximum: 10 4 3 4 3 ... 5 (us)

Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
include/linux/timer.h | 9 ++++
kernel/cgroup/cpuset.c | 3 ++
kernel/time/timer_migration.c | 90 +++++++++++++++++++++++++++++++++++
3 files changed, 102 insertions(+)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 0414d9e6b4fcd..62e1cea711257 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -188,4 +188,13 @@ int timers_dead_cpu(unsigned int cpu);
#define timers_dead_cpu NULL
#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask);
+#else
+static inline int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index a946d85ce954a..ff5b66abd0474 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1392,6 +1392,9 @@ static void update_exclusion_cpumasks(bool isolcpus_updated)
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
WARN_ON_ONCE(ret < 0);
+
+ ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
+ WARN_ON_ONCE(ret < 0);
}
/**
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 878fd3af40ecb..c07cc9a2b209d 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
#include <trace/events/ipi.h>
+#include <linux/sched/isolation.h>
#include "timer_migration.h"
#include "tick-internal.h"
@@ -428,6 +429,9 @@ static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
*/
static cpumask_var_t tmigr_available_cpumask;
+/* Enabled during late initcall */
+static bool tmigr_exclude_isolated __read_mostly;
+
#define TMIGR_NONE 0xFF
#define BIT_CNT 8
@@ -436,6 +440,24 @@ static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
return !(tmc->tmgroup && tmc->available);
}
+/*
+ * Returns true if @cpu should be excluded from the hierarchy as isolated.
+ * Domain isolated CPUs don't participate in timer migration, while
+ * nohz_full CPUs are still part of the hierarchy but are always considered
+ * idle. This behaviour depends on the value of tmigr_exclude_isolated,
+ * which is disabled during early boot and only enabled at late initcall.
+ * This check is necessary, for instance, to prevent an offline isolated
+ * CPU from being incorrectly marked as available when it comes back online.
+ */
+static inline bool tmigr_is_isolated(int cpu)
+{
+ if (!tmigr_exclude_isolated)
+ return false;
+ return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) ||
+ cpuset_cpu_is_isolated(cpu)) &&
+ housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE);
+}
+
/*
* Returns true, when @childmask corresponds to the group migrator or when the
* group is not active - so no migrator is set.
@@ -1454,6 +1476,8 @@ static int tmigr_clear_cpu_available(unsigned int cpu)
cpumask_clear_cpu(cpu, tmigr_available_cpumask);
scoped_guard(raw_spinlock_irq, &tmc->lock) {
+ if (!tmc->available)
+ return 0;
tmc->available = false;
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
@@ -1481,8 +1505,12 @@ static int tmigr_set_cpu_available(unsigned int cpu)
if (WARN_ON_ONCE(!tmc->tmgroup))
return -EINVAL;
+ if (tmigr_is_isolated(cpu))
+ return 0;
cpumask_set_cpu(cpu, tmigr_available_cpumask);
scoped_guard(raw_spinlock_irq, &tmc->lock) {
+ if (tmc->available)
+ return 0;
trace_tmigr_cpu_available(tmc);
tmc->idle = timer_base_is_idle();
if (!tmc->idle)
@@ -1492,6 +1520,67 @@ static int tmigr_set_cpu_available(unsigned int cpu)
return 0;
}
+static bool tmigr_should_isolate_cpu(int cpu, void *ignored)
+{
+ /*
+ * The tick CPU can be marked as isolated by the cpuset code, however
+ * we cannot mark it as unavailable to avoid having no global migrator
+ * for the nohz_full CPUs.
+ */
+ return tick_nohz_cpu_hotpluggable(cpu);
+}
+
+static void tmigr_cpu_isolate(void *ignored)
+{
+ tmigr_clear_cpu_available(smp_processor_id());
+}
+
+static void tmigr_cpu_unisolate(void *ignored)
+{
+ tmigr_set_cpu_available(smp_processor_id());
+}
+
+int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+ cpumask_var_t cpumask;
+
+ lockdep_assert_cpus_held();
+
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask);
+ cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
+ on_each_cpu_cond_mask(tmigr_should_isolate_cpu, tmigr_cpu_isolate, NULL,
+ 1, cpumask);
+
+ cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask);
+ cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask);
+ on_each_cpu_mask(cpumask, tmigr_cpu_unisolate, NULL, 1);
+
+ free_cpumask_var(cpumask);
+ return 0;
+}
+
+static int __init tmigr_init_isolation(void)
+{
+ cpumask_var_t cpumask;
+
+ tmigr_exclude_isolated = true;
+ if (!housekeeping_enabled(HK_TYPE_DOMAIN))
+ return 0;
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_andnot(cpumask, tmigr_available_cpumask,
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
+ cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
+ on_each_cpu_cond_mask(tmigr_should_isolate_cpu, tmigr_cpu_isolate, NULL,
+ 1, cpumask);
+
+ free_cpumask_var(cpumask);
+ return 0;
+}
+
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
int node)
{
@@ -1874,3 +1963,4 @@ static int __init tmigr_init(void)
return ret;
}
early_initcall(tmigr_init);
+late_initcall(tmigr_init_isolation);
--
2.50.1
On Mon, Jul 14, 2025 at 03:30:58PM +0200, Gabriele Monaco wrote:
> diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
> index 878fd3af40ecb..c07cc9a2b209d 100644
> --- a/kernel/time/timer_migration.c
> +++ b/kernel/time/timer_migration.c
> @@ -10,6 +10,7 @@
>  #include <linux/spinlock.h>
>  #include <linux/timerqueue.h>
>  #include <trace/events/ipi.h>
> +#include <linux/sched/isolation.h>
>
>  #include "timer_migration.h"
>  #include "tick-internal.h"
> @@ -428,6 +429,9 @@ static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
>   */
>  static cpumask_var_t tmigr_available_cpumask;
>
> +/* Enabled during late initcall */
> +static bool tmigr_exclude_isolated __read_mostly;

This variable is still annoying.

> +
>  #define TMIGR_NONE	0xFF
>  #define BIT_CNT		8

[...]

> +int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
> +{
> +	cpumask_var_t cpumask;
> +
> +	lockdep_assert_cpus_held();
> +
> +	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
> +		return -ENOMEM;
> +
> +	cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask);
> +	cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
> +	on_each_cpu_cond_mask(tmigr_should_isolate_cpu, tmigr_cpu_isolate, NULL,
> +			      1, cpumask);
> +
> +	cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask);
> +	cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask);
> +	on_each_cpu_mask(cpumask, tmigr_cpu_unisolate, NULL, 1);
> +
> +	free_cpumask_var(cpumask);
> +	return 0;
> +}
> +
> +static int __init tmigr_init_isolation(void)
> +{
> +	cpumask_var_t cpumask;
> +
> +	tmigr_exclude_isolated = true;
> +	if (!housekeeping_enabled(HK_TYPE_DOMAIN))
> +		return 0;
> +	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
> +		return -ENOMEM;
> +	cpumask_andnot(cpumask, tmigr_available_cpumask,
> +		       housekeeping_cpumask(HK_TYPE_DOMAIN));
> +	cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
> +	on_each_cpu_cond_mask(tmigr_should_isolate_cpu, tmigr_cpu_isolate, NULL,
> +			      1, cpumask);

And this is basically repeating the same logic as before but in reverse.

Here is a proposal: register the online/offline callbacks later, on
late_initcall(). This solves two problems:

1) The online/offline callbacks are called for the first time in the right
   place. You don't need that tmigr_exclude_isolated anymore.

2) You don't need to make the on_each_cpu_cond_mask() call anymore in
   tmigr_init_isolation(). In fact you don't need that function. The
   online/offline callbacks already take care of everything.

Here is a patch you can use (only built tested):

commit ad21e35e05865e2d37a60bf5d77b0d6fa22a54ee
Author: Frederic Weisbecker <frederic@kernel.org>
Date:   Fri Jul 25 00:06:20 2025 +0200

    timers/migration: Postpone online/offline callbacks registration to late initcall

    During the early boot process, the default clocksource used for
    timekeeping is the jiffies. Better clocksources can only be selected
    once clocksource_done_booting() is called as an fs initcall.

    NOHZ can only be enabled after that stage, making global timer
    migration irrelevant up to that point.

    Therefore, don't bother with trashing the cache within that tree
    from the SMP bootup until NOHZ even matters.

    Make the CPUs available to the tree on late initcall, after the
    right clocksource had a chance to be selected. This will also
    simplify the handling of domain isolated CPUs on further patches.

    Signed-off-by: Frederic Weisbecker <frederic@kernel.org>

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 2f6330831f08..f730107d948d 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1484,6 +1484,17 @@ static int tmigr_cpu_online(unsigned int cpu)
 	return 0;
 }

+/*
+ * NOHZ can only be enabled after clocksource_done_booting(). Don't
+ * bother trashing the cache in the tree before.
+ */
+static int __init tmigr_late_init(void)
+{
+	return cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
+				 tmigr_cpu_online, tmigr_cpu_offline);
+}
+late_initcall(tmigr_late_init);
+
 static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
 			     int node)
 {
@@ -1846,18 +1857,9 @@ static int __init tmigr_init(void)

 	ret = cpuhp_setup_state(CPUHP_TMIGR_PREPARE, "tmigr:prepare",
 				tmigr_cpu_prepare, NULL);
-	if (ret)
-		goto err;
-
-	ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
-				tmigr_cpu_online, tmigr_cpu_offline);
-	if (ret)
-		goto err;
-
-	return 0;
-
-err:
-	pr_err("Timer migration setup failed\n");
+	if (ret)
+		pr_err("Timer migration setup failed\n");
 	return ret;
 }
 early_initcall(tmigr_init);
On Fri, 2025-07-25 at 01:05 +0200, Frederic Weisbecker wrote:
>
> And this is basically repeating the same logic as before but in
> reverse.
>
> Here is a proposal: register the online/offline callbacks later, on
> late_initcall(). This solves two problems:
>
> 1) The online/offline callbacks are called for the first time in the
> right
>    place. You don't need that tmigr_exclude_isolated anymore.
>
> 2) You don't need to make the on_each_cpu_cond_mask() call anymore in
>    tmigr_init_isolation(). In fact you don't need that function. The
>    online/offline callbacks already take care of everything.
>

Yeah, that's much neater thanks! I'm going to try it and update the patch.

> Here is a patch you can use (only built tested):
>
> commit ad21e35e05865e2d37a60bf5d77b0d6fa22a54ee
> Author: Frederic Weisbecker <frederic@kernel.org>
> Date:   Fri Jul 25 00:06:20 2025 +0200
>
>     timers/migration: Postpone online/offline callbacks registration
> to late initcall
>
>     During the early boot process, the default clocksource used for
>     timekeeping is the jiffies. Better clocksources can only be
> selected once clocksource_done_booting() is called as an fs initcall.
>
>     NOHZ can only be enabled after that stage, making global timer
>     migration irrelevant up to that point.
>
>     Therefore, don't bother with trashing the cache within that tree
>     from the SMP bootup until NOHZ even matters.
>
>     Make the CPUs available to the tree on late initcall, after the
>     right clocksource had a chance to be selected. This will also
>     simplify the handling of domain isolated CPUs on further patches.
>
>     Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
>

I assume it's cleaner if I squash it in 7/7 and add a
Co-developed-by: Frederic Weisbecker <frederic@kernel.org>
and/or
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>

Do you agree?

Thanks for the review and help,
Gabriele

> diff --git a/kernel/time/timer_migration.c
> b/kernel/time/timer_migration.c
> index 2f6330831f08..f730107d948d 100644
> --- a/kernel/time/timer_migration.c
> +++ b/kernel/time/timer_migration.c
> @@ -1484,6 +1484,17 @@ static int tmigr_cpu_online(unsigned int cpu)
> 	return 0;
> }
>
> +/*
> + * NOHZ can only be enabled after clocksource_done_booting(). Don't
> + * bother trashing the cache in the tree before.
> + */
> +static int __init tmigr_late_init(void)
> +{
> +	return cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE,
> "tmigr:online",
> +				 tmigr_cpu_online,
> tmigr_cpu_offline);
> +}
> +late_initcall(tmigr_late_init);
> +
> static void tmigr_init_group(struct tmigr_group *group, unsigned int
> lvl,
> 			     int node)
> {
> @@ -1846,18 +1857,9 @@ static int __init tmigr_init(void)
>
> 	ret = cpuhp_setup_state(CPUHP_TMIGR_PREPARE,
> "tmigr:prepare",
> 				tmigr_cpu_prepare, NULL);
> -	if (ret)
> -		goto err;
> -
> -	ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE,
> "tmigr:online",
> -		tmigr_cpu_online,
> tmigr_cpu_offline);
> -	if (ret)
> -		goto err;
> -
> -	return 0;
> -
> err:
> -	pr_err("Timer migration setup failed\n");
> +	if (ret)
> +		pr_err("Timer migration setup failed\n");
> 	return ret;
> }
> early_initcall(tmigr_init);
On Fri, Jul 25, 2025 at 08:42:19AM +0200, Gabriele Monaco wrote:
> On Fri, 2025-07-25 at 01:05 +0200, Frederic Weisbecker wrote:
> I assume it's cleaner if I squash it in 7/7 and add a
> Co-developed-by: Frederic Weisbecker <frederic@kernel.org>
> and/or
> Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
>
> Do you agree?

I would prefer to keep the patch standalone because it's already a logical
change that has its own motivation. It's also invasive and could potentially
cause regression (or improvement) so we want to be able to bisect to that.

Thanks!

--
Frederic Weisbecker
SUSE Labs