Add a randomized algorithm that runs newidle balancing proportional to
its success rate.
This improves schbench significantly:
6.18-rc4: 2.22 Mrps/s
6.18-rc4+revert: 2.04 Mrps/s
6.18-rc4+revert+random: 2.18 Mrps/s
Conversely, per Adam Li this affects SpecJBB slightly, reducing it by 1%:
6.17: -6%
6.17+revert: 0%
6.17+revert+random: -1%
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
include/linux/sched/topology.h | 3 ++
kernel/sched/core.c | 3 ++
kernel/sched/fair.c | 43 +++++++++++++++++++++++++++++++++++++----
kernel/sched/features.h | 5 ++++
kernel/sched/sched.h | 7 ++++++
kernel/sched/topology.c | 6 +++++
6 files changed, 63 insertions(+), 4 deletions(-)
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -92,6 +92,9 @@ struct sched_domain {
unsigned int nr_balance_failed; /* initialise to 0 */
/* idle_balance() stats */
+ unsigned int newidle_call;
+ unsigned int newidle_success;
+ unsigned int newidle_ratio;
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -121,6 +121,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_updat
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
#ifdef CONFIG_SCHED_PROXY_EXEC
DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
@@ -8589,6 +8590,8 @@ void __init sched_init_smp(void)
{
sched_init_numa(NUMA_NO_NODE);
+ prandom_init_once(&sched_rnd_state);
+
/*
* There's no userspace yet to cause hotplug operations; hence all the
* CPU masks are stable and all blatant races in the below code cannot
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12146,11 +12146,26 @@ void update_max_interval(void)
max_load_balance_interval = HZ*num_online_cpus()/10;
}
-static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
+{
+ sd->newidle_call++;
+ sd->newidle_success += success;
+
+ if (sd->newidle_call >= 1024) {
+ sd->newidle_ratio = sd->newidle_success;
+ sd->newidle_call /= 2;
+ sd->newidle_success /= 2;
+ }
+}
+
+static inline bool
+update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
{
unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
unsigned long now = jiffies;
+ update_newidle_stats(sd, success);
+
if (cost > sd->max_newidle_lb_cost) {
/*
* Track max cost of a domain to make sure to not delay the
@@ -12198,7 +12213,7 @@ static void sched_balance_domains(struct
* Decay the newidle max times here because this is a regular
* visit to all the domains.
*/
- need_decay = update_newidle_cost(sd, 0);
+ need_decay = update_newidle_cost(sd, 0, 0);
max_cost += sd->max_newidle_lb_cost;
/*
@@ -12843,6 +12858,22 @@ static int sched_balance_newidle(struct
break;
if (sd->flags & SD_BALANCE_NEWIDLE) {
+ unsigned int weight = 1;
+
+ if (sched_feat(NI_RANDOM)) {
+ /*
+ * Throw a 1k sided dice; and only run
+ * newidle_balance according to the success
+ * rate.
+ */
+ u32 d1k = sched_rng() % 1024;
+ weight = 1 + sd->newidle_ratio;
+ if (d1k > weight) {
+ update_newidle_stats(sd, 0);
+ continue;
+ }
+ weight = (1024 + weight/2) / weight;
+ }
pulled_task = sched_balance_rq(this_cpu, this_rq,
sd, CPU_NEWLY_IDLE,
@@ -12850,10 +12881,14 @@ static int sched_balance_newidle(struct
t1 = sched_clock_cpu(this_cpu);
domain_cost = t1 - t0;
- update_newidle_cost(sd, domain_cost);
-
curr_cost += domain_cost;
t0 = t1;
+
+ /*
+ * Track max cost of a domain to make sure to not delay the
+ * next wakeup on the CPU.
+ */
+ update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
}
/*
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -121,3 +121,8 @@ SCHED_FEAT(WA_BIAS, true)
SCHED_FEAT(UTIL_EST, true)
SCHED_FEAT(LATENCY_WARN, false)
+
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -5,6 +5,7 @@
#ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H
+#include <linux/prandom.h>
#include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h>
@@ -1348,6 +1349,12 @@ static inline bool is_migration_disabled
}
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
+
+static inline u32 sched_rng(void)
+{
+ return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
+}
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() this_cpu_ptr(&runqueues)
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1662,6 +1662,12 @@ sd_init(struct sched_domain_topology_lev
.last_balance = jiffies,
.balance_interval = sd_weight,
+
+ /* 50% success rate */
+ .newidle_call = 512,
+ .newidle_success = 256,
+ .newidle_ratio = 512,
+
.max_newidle_lb_cost = 0,
.last_decay_max_lb_cost = jiffies,
.child = child,
On 11/7/25 9:36 PM, Peter Zijlstra wrote:
> Add a randomized algorithm that runs newidle balancing proportional to
> its success rate.
>
> This improves schbench significantly:
>
> 6.18-rc4: 2.22 Mrps/s
> 6.18-rc4+revert: 2.04 Mrps/s
> 6.18-rc4+revert+random: 2.18 Mrps/s
>
Could you please share the schbench command?
I see a command like "schbench -t 90 -r 30 -i 30" regress on a 60-core system.
Will do more iterations to confirm it (to be sure it is not run-to-run variation).
> Conversely, per Adam Li this affects SpecJBB slightly, reducing it by 1%:
>
> 6.17: -6%
> 6.17+revert: 0%
> 6.17+revert+random: -1%
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
> include/linux/sched/topology.h | 3 ++
> kernel/sched/core.c | 3 ++
> kernel/sched/fair.c | 43 +++++++++++++++++++++++++++++++++++++----
> kernel/sched/features.h | 5 ++++
> kernel/sched/sched.h | 7 ++++++
> kernel/sched/topology.c | 6 +++++
> 6 files changed, 63 insertions(+), 4 deletions(-)
>
> --- a/include/linux/sched/topology.h
> +++ b/include/linux/sched/topology.h
> @@ -92,6 +92,9 @@ struct sched_domain {
> unsigned int nr_balance_failed; /* initialise to 0 */
>
> /* idle_balance() stats */
> + unsigned int newidle_call;
> + unsigned int newidle_success;
> + unsigned int newidle_ratio;
> u64 max_newidle_lb_cost;
> unsigned long last_decay_max_lb_cost;
>
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -121,6 +121,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_updat
> EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
>
> DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
> +DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
>
> #ifdef CONFIG_SCHED_PROXY_EXEC
> DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
> @@ -8589,6 +8590,8 @@ void __init sched_init_smp(void)
> {
> sched_init_numa(NUMA_NO_NODE);
>
> + prandom_init_once(&sched_rnd_state);
> +
> /*
> * There's no userspace yet to cause hotplug operations; hence all the
> * CPU masks are stable and all blatant races in the below code cannot
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -12146,11 +12146,26 @@ void update_max_interval(void)
> max_load_balance_interval = HZ*num_online_cpus()/10;
> }
>
> -static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
> +static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
> +{
> + sd->newidle_call++;
> + sd->newidle_success += success;
> +
> + if (sd->newidle_call >= 1024) {
> + sd->newidle_ratio = sd->newidle_success;
> + sd->newidle_call /= 2;
> + sd->newidle_success /= 2;
> + }
Would it be better to use >> 1? Or does the compiler take care of it?
> +}
> +
> +static inline bool
> +update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
> {
> unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
> unsigned long now = jiffies;
>
> + update_newidle_stats(sd, success);
> +
> if (cost > sd->max_newidle_lb_cost) {
> /*
> * Track max cost of a domain to make sure to not delay the
> @@ -12198,7 +12213,7 @@ static void sched_balance_domains(struct
> * Decay the newidle max times here because this is a regular
> * visit to all the domains.
> */
> - need_decay = update_newidle_cost(sd, 0);
> + need_decay = update_newidle_cost(sd, 0, 0);
> max_cost += sd->max_newidle_lb_cost;
>
> /*
> @@ -12843,6 +12858,22 @@ static int sched_balance_newidle(struct
> break;
>
> if (sd->flags & SD_BALANCE_NEWIDLE) {
> + unsigned int weight = 1;
> +
> + if (sched_feat(NI_RANDOM)) {
> + /*
> + * Throw a 1k sided dice; and only run
> + * newidle_balance according to the success
> + * rate.
> + */
> + u32 d1k = sched_rng() % 1024;
> + weight = 1 + sd->newidle_ratio;
> + if (d1k > weight) {
> + update_newidle_stats(sd, 0);
> + continue;
> + }
> + weight = (1024 + weight/2) / weight;
> + }
>
> pulled_task = sched_balance_rq(this_cpu, this_rq,
> sd, CPU_NEWLY_IDLE,
> @@ -12850,10 +12881,14 @@ static int sched_balance_newidle(struct
>
> t1 = sched_clock_cpu(this_cpu);
> domain_cost = t1 - t0;
> - update_newidle_cost(sd, domain_cost);
> -
> curr_cost += domain_cost;
> t0 = t1;
> +
> + /*
> + * Track max cost of a domain to make sure to not delay the
> + * next wakeup on the CPU.
> + */
> + update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
> }
>
> /*
> --- a/kernel/sched/features.h
> +++ b/kernel/sched/features.h
> @@ -121,3 +121,8 @@ SCHED_FEAT(WA_BIAS, true)
> SCHED_FEAT(UTIL_EST, true)
>
> SCHED_FEAT(LATENCY_WARN, false)
> +
> +/*
> + * Do newidle balancing proportional to its success rate using randomization.
> + */
> +SCHED_FEAT(NI_RANDOM, true)
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -5,6 +5,7 @@
> #ifndef _KERNEL_SCHED_SCHED_H
> #define _KERNEL_SCHED_SCHED_H
>
> +#include <linux/prandom.h>
> #include <linux/sched/affinity.h>
> #include <linux/sched/autogroup.h>
> #include <linux/sched/cpufreq.h>
> @@ -1348,6 +1349,12 @@ static inline bool is_migration_disabled
> }
>
> DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
> +DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
> +
> +static inline u32 sched_rng(void)
> +{
> + return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
> +}
>
> #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
> #define this_rq() this_cpu_ptr(&runqueues)
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -1662,6 +1662,12 @@ sd_init(struct sched_domain_topology_lev
>
> .last_balance = jiffies,
> .balance_interval = sd_weight,
> +
> + /* 50% success rate */
> + .newidle_call = 512,
> + .newidle_success = 256,
> + .newidle_ratio = 512,
> +
> .max_newidle_lb_cost = 0,
> .last_decay_max_lb_cost = jiffies,
> .child = child,
>
>
I ran hackbench with it. Looks like hackbench does better when utilization is very high.
Otherwise, it regresses slightly.
I compared the series applied versus base commit 65177ea9f64d. Let me know if I need to set anything different.
Will do numbers with more loops/iterations to iron out any run-to-run variation.
On Wed, Nov 12, 2025 at 09:12:57PM +0530, Shrikanth Hegde wrote:
>
>
> On 11/7/25 9:36 PM, Peter Zijlstra wrote:
> > Add a randomized algorithm that runs newidle balancing proportional to
> > its success rate.
> >
> > This improves schbench significantly:
> >
> > 6.18-rc4: 2.22 Mrps/s
> > 6.18-rc4+revert: 2.04 Mrps/s
> > 6.18-rc4+revert+random: 2.18 Mrps/s
> >
>
> Could you please share the schbench command?
>
> I see a command like "schbench -t 90 -r 30 -i 30" regress on a 60-core system.
> Will do more iterations to confirm it (to be sure it is not run-to-run variation).
This was:
schbench -L -m 4 -M auto -t 256 -n 0 -r 60 -s 0
from the original thread:
https://lkml.kernel.org/r/20250626144017.1510594-2-clm@fb.com
> > + if (sd->newidle_call >= 1024) {
> > + sd->newidle_ratio = sd->newidle_success;
> > + sd->newidle_call /= 2;
> > + sd->newidle_success /= 2;
> > + }
>
> Would it be better to >> 1 ? or compiler takes care of it?
I would be very disappointed if our compilers don't do this.
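For context: the halving at 1024 is what makes these counters a decaying average. newidle_ratio snapshots the (weighted) successes seen per 1024 calls, and each halving discounts older samples by 50%. Below is a minimal user-space sketch of just this bookkeeping; it is an illustration, not kernel code, and the field names and the 50% seed merely mirror the patch:

#include <stdio.h>

struct stats {
	unsigned int call;	/* calls since the last halving    */
	unsigned int success;	/* (weighted) successes since then */
	unsigned int ratio;	/* snapshot: successes per 1024    */
};

static void update_stats(struct stats *s, unsigned int success)
{
	s->call++;
	s->success += success;

	if (s->call >= 1024) {
		s->ratio = s->success;	/* success count of the last 1024 calls */
		s->call /= 2;		/* /2 and >>1 are the same shift */
		s->success /= 2;	/* for unsigned operands         */
	}
}

int main(void)
{
	struct stats s = { 512, 256, 512 };	/* sd_init() seed: 50% */

	for (int i = 0; i < 4096; i++)		/* feed a true 25% success rate */
		update_stats(&s, i % 4 == 0);

	printf("ratio = %u/1024\n", s.ratio);	/* settles near 256 */
	return 0;
}

Starting from the 50% seed and feeding a true 25% success rate, the snapshots decay geometrically toward the true value: 384, 320, 288, ..., 257 after eight halvings here.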
On 11/8/2025 12:06 AM, Peter Zijlstra wrote:
> Add a randomized algorithm that runs newidle balancing proportional to
> its success rate.
>
> This improves schbench significantly:
>
> 6.18-rc4: 2.22 Mrps/s
> 6.18-rc4+revert: 2.04 Mrps/s
> 6.18-rc4+revert+random: 2.18 Mrps/s
>
> Conversely, per Adam Li this affects SpecJBB slightly, reducing it by 1%:
>
> 6.17: -6%
> 6.17+revert: 0%
> 6.17+revert+random: -1%
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Adam Li <adamli@os.amperecomputing.com>
Please see the SpecJBB test results on an AmpereOne server below:
6.18-rc5: 0% (baseline)
6.18-rc5+patchset: +5%
6.18-rc4+patchset+NO_NI_RANDOM: +6%
6.18-rc5+revert-155213a2aed4: +6%
Could you please explain a little of the math behind the success-rate
(sd->newidle_ratio) calculation?
[...]
> @@ -12843,6 +12858,22 @@ static int sched_balance_newidle(struct
> break;
>
> if (sd->flags & SD_BALANCE_NEWIDLE) {
> + unsigned int weight = 1;
> +
> + if (sched_feat(NI_RANDOM)) {
> + /*
> + * Throw a 1k sided dice; and only run
> + * newidle_balance according to the success
> + * rate.
> + */
> + u32 d1k = sched_rng() % 1024;
> + weight = 1 + sd->newidle_ratio;
> + if (d1k > weight) {
> + update_newidle_stats(sd, 0);
> + continue;
> + }
> + weight = (1024 + weight/2) / weight;
> + }
>
E.g., why 'weight = (1024 + weight/2) / weight'?
Thanks,
-adam
On Tue, Nov 11, 2025 at 05:07:45PM +0800, Adam Li wrote:
> > @@ -12843,6 +12858,22 @@ static int sched_balance_newidle(struct
> > break;
> >
> > if (sd->flags & SD_BALANCE_NEWIDLE) {
> > + unsigned int weight = 1;
> > +
> > + if (sched_feat(NI_RANDOM)) {
> > + /*
> > + * Throw a 1k sided dice; and only run
> > + * newidle_balance according to the success
> > + * rate.
> > + */
> > + u32 d1k = sched_rng() % 1024;
> > + weight = 1 + sd->newidle_ratio;
> > + if (d1k > weight) {
> > + update_newidle_stats(sd, 0);
> > + continue;
> > + }
> > + weight = (1024 + weight/2) / weight;
> > + }
> >
> E.g., why 'weight = (1024 + weight/2) / weight'?
Not sure what you're asking, so two answers:
That's a rounding divide. We have a helper for that, but I can never
remember what it's called.
The transformation as a whole here is from a ratio to a weight: suppose
our ratio is 256, this means that we do 1-in-4 or 25% of the balance
calls. However, this also means that each success needs to be weighted as
4 (=1024/256), otherwise we under-account the successes and not even a
100% success rate can lift you out of the hole.
Now, I made it a rounding divide to make it a little easier to climb out
of said hole (I even considered ceiling divide).
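To put numbers on the ratio-to-weight transform, here is a user-space sketch that just evaluates the patch's expressions (an illustration, not kernel code): the gate passes when the d1k roll is at most weight = 1 + newidle_ratio, i.e. for weight + 1 of the 1024 possible rolls, and each success then counts for the rounding divide of 1024 by weight:

#include <stdio.h>

int main(void)
{
	for (unsigned int ratio = 0; ratio <= 1024; ratio += 256) {
		unsigned int weight = 1 + ratio;
		/* d1k <= weight runs the balance: weight + 1 of 1024 rolls */
		double p_run = weight + 1 >= 1024 ? 1.0 :
			       (double)(weight + 1) / 1024.0;
		/* rounding divide: what one success counts for */
		unsigned int w = (1024 + weight / 2) / weight;

		printf("ratio %4u: runs ~%5.1f%%, success weighted %4u\n",
		       ratio, 100.0 * p_run, w);
	}
	return 0;
}

For ratio = 256 this prints roughly 25.2% and a success weight of 4, matching the 1-in-4 example above; at ratio = 0 the domain still runs about 0.2% of the time and a single success counts as 1024, which is what lets a domain climb back out of the hole.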
On 11/11/2025 5:20 PM, Peter Zijlstra wrote:
> On Tue, Nov 11, 2025 at 05:07:45PM +0800, Adam Li wrote:
>>> @@ -12843,6 +12858,22 @@ static int sched_balance_newidle(struct
>>> break;
>>>
>>> if (sd->flags & SD_BALANCE_NEWIDLE) {
>>> + unsigned int weight = 1;
>>> +
>>> + if (sched_feat(NI_RANDOM)) {
>>> + /*
>>> + * Throw a 1k sided dice; and only run
>>> + * newidle_balance according to the success
>>> + * rate.
>>> + */
>>> + u32 d1k = sched_rng() % 1024;
>>> + weight = 1 + sd->newidle_ratio;
>>> + if (d1k > weight) {
>>> + update_newidle_stats(sd, 0);
>>> + continue;
>>> + }
>>> + weight = (1024 + weight/2) / weight;
>>> + }
>>>
>> E.g., why 'weight = (1024 + weight/2) / weight'?
>
> Not sure what you're asking, so two answers:
>
> That's a rounding divide. We have a helper for that, but I can never
> remember what it's called.
>
> The transformation as a whole here is from a ratio to a weight: suppose
> our ratio is 256, this means that we do 1-in-4 or 25% of the balance
> calls. However, this also means that each success needs to be weighted as
> 4 (=1024/256), otherwise we under-account the successes and not even a
> 100% success rate can lift you out of the hole.
>
> Now, I made it a rounding divide to make it a little easier to climb out
> of said hole (I even considered ceiling divide).
>
>
Thanks for the clarification.
If I understand correctly, (sd->newidle_ratio / 1024) is close to
(sd->newidle_success / sd->newidle_call); 'sd->newidle_ratio' represents
the success rate of newidle balance.
Shall we update the newidle stats only from sched_balance_newidle(), as in
the patch below? That way sched_balance_domains() will not update sd->newidle_call.
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12171,7 +12171,8 @@ update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
unsigned long now = jiffies;
- update_newidle_stats(sd, success);
+ if (cost)
+ update_newidle_stats(sd, success);
if (cost > sd->max_newidle_lb_cost) {
/*
I tested this change; SpecJBB performance is similar to that with your patch.
Thanks,
-adam
On Wed, Nov 12, 2025 at 08:04:05PM +0800, Adam Li wrote:
> On 11/11/2025 5:20 PM, Peter Zijlstra wrote:
> > On Tue, Nov 11, 2025 at 05:07:45PM +0800, Adam Li wrote:
> >>> @@ -12843,6 +12858,22 @@ static int sched_balance_newidle(struct
> >>> break;
> >>>
> >>> if (sd->flags & SD_BALANCE_NEWIDLE) {
> >>> + unsigned int weight = 1;
> >>> +
> >>> + if (sched_feat(NI_RANDOM)) {
> >>> + /*
> >>> + * Throw a 1k sided dice; and only run
> >>> + * newidle_balance according to the success
> >>> + * rate.
> >>> + */
> >>> + u32 d1k = sched_rng() % 1024;
> >>> + weight = 1 + sd->newidle_ratio;
> >>> + if (d1k > weight) {
> >>> + update_newidle_stats(sd, 0);
> >>> + continue;
> >>> + }
> >>> + weight = (1024 + weight/2) / weight;
> >>> + }
> >>>
> >> E.g., why 'weight = (1024 + weight/2) / weight'?
> >
> > Not sure what you're asking, so two answers:
> >
> > That's a rounding divide. We have a helper for that, but I can never
> > remember what it's called.
> >
> > The transformation as a whole here is from a ratio to a weight: suppose
> > our ratio is 256, this means that we do 1-in-4 or 25% of the balance
> > calls. However, this also means that each success needs to be weighted as
> > 4 (=1024/256), otherwise we under-account the successes and not even a
> > 100% success rate can lift you out of the hole.
> >
> > Now, I made it a rounding divide to make it a little easier to climb out
> > of said hole (I even considered ceiling divide).
> >
> >
> Thanks for the clarification.
>
> If I understand correctly, (sd->newidle_ratio / 1024) is close to
> (sd->newidle_success / sd->newidle_call); 'sd->newidle_ratio' represents
> the success rate of newidle balance.
>
> Shall we update the newidle stats only from sched_balance_newidle(), as in
> the patch below? That way sched_balance_domains() will not update sd->newidle_call.
>
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -12171,7 +12171,8 @@ update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
> unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
> unsigned long now = jiffies;
>
> - update_newidle_stats(sd, success);
> + if (cost)
> + update_newidle_stats(sd, success);
>
> if (cost > sd->max_newidle_lb_cost) {
> /*
>
> I tested this change; SpecJBB performance is similar to that with your patch.
Ah yes, that makes sense. Let me make that change.
Thanks!
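To close the loop numerically: with the gate, the weighted accounting and the decay all active, a domain whose balance attempts genuinely pull a task with probability P should settle into running roughly P of its newidle opportunities. A hedged user-space simulation of the combined mechanism (rand() stands in for sched_rng(), and P is fixed at 25%; an illustration of the algorithm, not kernel code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int call = 512, success = 256, ratio = 512; /* 50% seed */
	const double P = 0.25;	/* true probability a balance pulls a task */
	long ran = 0, total = 2000000;

	srand(42);
	for (long i = 0; i < total; i++) {
		unsigned int weight = 1 + ratio;
		unsigned int add = 0;

		if (rand() % 1024 <= weight) {		/* gate passed */
			ran++;
			if ((double)rand() / RAND_MAX < P)	/* pulled */
				add = (1024 + weight / 2) / weight;
		}
		call++;			/* every opportunity is counted */
		success += add;
		if (call >= 1024) {	/* decay, as in update_newidle_stats() */
			ratio = success;
			call /= 2;
			success /= 2;
		}
	}
	printf("ran %.1f%% of opportunities, ratio settled at %u\n",
	       100.0 * ran / total, ratio);	/* ~25%, near 256 */
	return 0;
}

This also shows why the "if (cost)" change above matters: letting the regular balance path bump newidle_call with zero success would dilute the ratio and drag the run probability below the true success rate.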
On 07.11.25 17:06, Peter Zijlstra wrote:
> Add a randomized algorithm that runs newidle balancing proportional to
> its success rate.
>
> This improves schbench significantly:
>
> 6.18-rc4: 2.22 Mrps/s
> 6.18-rc4+revert: 2.04 Mrps/s
> 6.18-rc4+revert+random: 2.18 Mrps/s
>
> Conversely, per Adam Li this affects SpecJBB slightly, reducing it by 1%:
>
> 6.17: -6%
> 6.17+revert: 0%
> 6.17+revert+random: -1%
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

Results with OLTP 'hammerdb - mysqld' on Arm64 VMs:

                                  NOPM      P50 latency
6.18-rc4                          baseline  baseline
6.18-rc4+revert-155213a2aed4      +13%      -8.8%
6.18-rc4+patchset                 +11%      -8.2%
6.18-rc4+patchset+NO_NI_RANDOM    +13%      -8.6%

Pretty consistent with the results on the previous version, although I
hadn't tested NI_TARGET+NI_RANDOM back then.

http://lkml.kernel.org/r/f6379aa6-459d-4205-96ea-9848e55d7f9c@arm.com

In case (pure wakeup) schbench configs are the only workloads profiting
from NI_RANDOM, make NO_NI_RANDOM the default?

Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>

[...]
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 33cf66d88306663d16e4759e9d24766b0aaa2e17
Gitweb: https://git.kernel.org/tip/33cf66d88306663d16e4759e9d24766b0aaa2e17
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Fri, 07 Nov 2025 17:01:31 +01:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Mon, 17 Nov 2025 17:13:16 +01:00
sched/fair: Proportional newidle balance
Add a randomized algorithm that runs newidle balancing proportional to
its success rate.
This improves schbench significantly:
6.18-rc4: 2.22 Mrps/s
6.18-rc4+revert: 2.04 Mrps/s
6.18-rc4+revert+random: 2.18 Mrps/s
Conversely, per Adam Li this affects SpecJBB slightly, reducing it by 1%:
6.17: -6%
6.17+revert: 0%
6.17+revert+random: -1%
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Chris Mason <clm@meta.com>
Link: https://lkml.kernel.org/r/6825c50d-7fa7-45d8-9b81-c6e7e25738e2@meta.com
Link: https://patch.msgid.link/20251107161739.770122091@infradead.org
---
include/linux/sched/topology.h | 3 ++-
kernel/sched/core.c | 3 ++-
kernel/sched/fair.c | 44 ++++++++++++++++++++++++++++++---
kernel/sched/features.h | 5 ++++-
kernel/sched/sched.h | 7 +++++-
kernel/sched/topology.c | 6 +++++-
6 files changed, 64 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index bbcfdf1..45c0022 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -92,6 +92,9 @@ struct sched_domain {
unsigned int nr_balance_failed; /* initialise to 0 */
/* idle_balance() stats */
+ unsigned int newidle_call;
+ unsigned int newidle_success;
+ unsigned int newidle_ratio;
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 699db3f..9f10cfb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -121,6 +121,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
#ifdef CONFIG_SCHED_PROXY_EXEC
DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
@@ -8489,6 +8490,8 @@ void __init sched_init_smp(void)
{
sched_init_numa(NUMA_NO_NODE);
+ prandom_init_once(&sched_rnd_state);
+
/*
* There's no userspace yet to cause hotplug operations; hence all the
* CPU masks are stable and all blatant races in the below code cannot
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index abcbb67..1855975 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12224,11 +12224,27 @@ void update_max_interval(void)
max_load_balance_interval = HZ*num_online_cpus()/10;
}
-static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
+{
+ sd->newidle_call++;
+ sd->newidle_success += success;
+
+ if (sd->newidle_call >= 1024) {
+ sd->newidle_ratio = sd->newidle_success;
+ sd->newidle_call /= 2;
+ sd->newidle_success /= 2;
+ }
+}
+
+static inline bool
+update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
{
unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
unsigned long now = jiffies;
+ if (cost)
+ update_newidle_stats(sd, success);
+
if (cost > sd->max_newidle_lb_cost) {
/*
* Track max cost of a domain to make sure to not delay the
@@ -12276,7 +12292,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
* Decay the newidle max times here because this is a regular
* visit to all the domains.
*/
- need_decay = update_newidle_cost(sd, 0);
+ need_decay = update_newidle_cost(sd, 0, 0);
max_cost += sd->max_newidle_lb_cost;
/*
@@ -12912,6 +12928,22 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
break;
if (sd->flags & SD_BALANCE_NEWIDLE) {
+ unsigned int weight = 1;
+
+ if (sched_feat(NI_RANDOM)) {
+ /*
+ * Throw a 1k sided dice; and only run
+ * newidle_balance according to the success
+ * rate.
+ */
+ u32 d1k = sched_rng() % 1024;
+ weight = 1 + sd->newidle_ratio;
+ if (d1k > weight) {
+ update_newidle_stats(sd, 0);
+ continue;
+ }
+ weight = (1024 + weight/2) / weight;
+ }
pulled_task = sched_balance_rq(this_cpu, this_rq,
sd, CPU_NEWLY_IDLE,
@@ -12919,10 +12951,14 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
t1 = sched_clock_cpu(this_cpu);
domain_cost = t1 - t0;
- update_newidle_cost(sd, domain_cost);
-
curr_cost += domain_cost;
t0 = t1;
+
+ /*
+ * Track max cost of a domain to make sure to not delay the
+ * next wakeup on the CPU.
+ */
+ update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
}
/*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 0607def..980d92b 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -121,3 +121,8 @@ SCHED_FEAT(WA_BIAS, true)
SCHED_FEAT(UTIL_EST, true)
SCHED_FEAT(LATENCY_WARN, false)
+
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index def9ab7..b419a4d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -5,6 +5,7 @@
#ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H
+#include <linux/prandom.h>
#include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h>
@@ -1348,6 +1349,12 @@ static inline bool is_migration_disabled(struct task_struct *p)
}
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
+
+static inline u32 sched_rng(void)
+{
+ return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
+}
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() this_cpu_ptr(&runqueues)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 711076a..cf643a5 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1669,6 +1669,12 @@ sd_init(struct sched_domain_topology_level *tl,
.last_balance = jiffies,
.balance_interval = sd_weight,
+
+ /* 50% success rate */
+ .newidle_call = 512,
+ .newidle_success = 256,
+ .newidle_ratio = 512,
+
.max_newidle_lb_cost = 0,
.last_decay_max_lb_cost = jiffies,
.child = child,
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 7c983640e4db0c1fd8ce6c6cd921c19954a8d479
Gitweb: https://git.kernel.org/tip/7c983640e4db0c1fd8ce6c6cd921c19954a8d479
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Fri, 07 Nov 2025 17:01:31 +01:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 14 Nov 2025 13:03:08 +01:00
sched/fair: Proportional newidle balance
Add a randomized algorithm that runs newidle balancing proportional to
its success rate.
This improves schbench significantly:
6.18-rc4: 2.22 Mrps/s
6.18-rc4+revert: 2.04 Mrps/s
6.18-rc4+revert+random: 2.18 Mrps/s
Conversely, per Adam Li this affects SpecJBB slightly, reducing it by 1%:
6.17: -6%
6.17+revert: 0%
6.17+revert+random: -1%
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Chris Mason <clm@meta.com>
Link: https://lkml.kernel.org/r/6825c50d-7fa7-45d8-9b81-c6e7e25738e2@meta.com
Link: https://patch.msgid.link/20251107161739.770122091@infradead.org
---
include/linux/sched/topology.h | 3 ++-
kernel/sched/core.c | 3 ++-
kernel/sched/fair.c | 44 ++++++++++++++++++++++++++++++---
kernel/sched/features.h | 5 ++++-
kernel/sched/sched.h | 7 +++++-
kernel/sched/topology.c | 6 +++++-
6 files changed, 64 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index bbcfdf1..45c0022 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -92,6 +92,9 @@ struct sched_domain {
unsigned int nr_balance_failed; /* initialise to 0 */
/* idle_balance() stats */
+ unsigned int newidle_call;
+ unsigned int newidle_success;
+ unsigned int newidle_ratio;
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 699db3f..9f10cfb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -121,6 +121,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
#ifdef CONFIG_SCHED_PROXY_EXEC
DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
@@ -8489,6 +8490,8 @@ void __init sched_init_smp(void)
{
sched_init_numa(NUMA_NO_NODE);
+ prandom_init_once(&sched_rnd_state);
+
/*
* There's no userspace yet to cause hotplug operations; hence all the
* CPU masks are stable and all blatant races in the below code cannot
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50461c9..aaa47ec 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12223,11 +12223,27 @@ void update_max_interval(void)
max_load_balance_interval = HZ*num_online_cpus()/10;
}
-static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
+{
+ sd->newidle_call++;
+ sd->newidle_success += success;
+
+ if (sd->newidle_call >= 1024) {
+ sd->newidle_ratio = sd->newidle_success;
+ sd->newidle_call /= 2;
+ sd->newidle_success /= 2;
+ }
+}
+
+static inline bool
+update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
{
unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
unsigned long now = jiffies;
+ if (cost)
+ update_newidle_stats(sd, success);
+
if (cost > sd->max_newidle_lb_cost) {
/*
* Track max cost of a domain to make sure to not delay the
@@ -12275,7 +12291,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
* Decay the newidle max times here because this is a regular
* visit to all the domains.
*/
- need_decay = update_newidle_cost(sd, 0);
+ need_decay = update_newidle_cost(sd, 0, 0);
max_cost += sd->max_newidle_lb_cost;
/*
@@ -12911,6 +12927,22 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
break;
if (sd->flags & SD_BALANCE_NEWIDLE) {
+ unsigned int weight = 1;
+
+ if (sched_feat(NI_RANDOM)) {
+ /*
+ * Throw a 1k sided dice; and only run
+ * newidle_balance according to the success
+ * rate.
+ */
+ u32 d1k = sched_rng() % 1024;
+ weight = 1 + sd->newidle_ratio;
+ if (d1k > weight) {
+ update_newidle_stats(sd, 0);
+ continue;
+ }
+ weight = (1024 + weight/2) / weight;
+ }
pulled_task = sched_balance_rq(this_cpu, this_rq,
sd, CPU_NEWLY_IDLE,
@@ -12918,10 +12950,14 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
t1 = sched_clock_cpu(this_cpu);
domain_cost = t1 - t0;
- update_newidle_cost(sd, domain_cost);
-
curr_cost += domain_cost;
t0 = t1;
+
+ /*
+ * Track max cost of a domain to make sure to not delay the
+ * next wakeup on the CPU.
+ */
+ update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
}
/*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 0607def..980d92b 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -121,3 +121,8 @@ SCHED_FEAT(WA_BIAS, true)
SCHED_FEAT(UTIL_EST, true)
SCHED_FEAT(LATENCY_WARN, false)
+
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index def9ab7..b419a4d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -5,6 +5,7 @@
#ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H
+#include <linux/prandom.h>
#include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h>
@@ -1348,6 +1349,12 @@ static inline bool is_migration_disabled(struct task_struct *p)
}
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
+
+static inline u32 sched_rng(void)
+{
+ return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
+}
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() this_cpu_ptr(&runqueues)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 711076a..cf643a5 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1669,6 +1669,12 @@ sd_init(struct sched_domain_topology_level *tl,
.last_balance = jiffies,
.balance_interval = sd_weight,
+
+ /* 50% success rate */
+ .newidle_call = 512,
+ .newidle_success = 256,
+ .newidle_ratio = 512,
+
.max_newidle_lb_cost = 0,
.last_decay_max_lb_cost = jiffies,
.child = child,