CPUs with a NULL sched domain are removed from the HKR_TYPE_SCHED
isolation mask. The following two checks are equivalent:
- !housekeeping_runtime_test_cpu(cpu, HKR_TYPE_SCHED)
- on_null_domain(rq)
Remove on_null_domain() and the redundant checks.
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
---
kernel/sched/fair.c | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e0f2a0f153f..9657c8f2176b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11830,11 +11830,6 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
}
-static inline int on_null_domain(struct rq *rq)
-{
- return unlikely(!rcu_dereference_sched(rq->sd));
-}
-
#ifdef CONFIG_NO_HZ_COMMON
/*
* NOHZ idle load balancing (ILB) details:
@@ -12040,7 +12035,7 @@ void nohz_balance_exit_idle(struct rq *rq)
SCHED_WARN_ON(rq != this_rq());
/* If we're a completely isolated CPU, we don't play: */
- if (on_null_domain(rq))
+ if (!housekeeping_runtime_test_cpu(cpu_of(rq), HKR_TYPE_SCHED))
return;
if (likely(!rq->nohz_tick_stopped))
@@ -12090,12 +12085,8 @@ void nohz_balance_enter_idle(int cpu)
*/
rq->has_blocked_load = 1;
- /* Spare idle load balancing on CPUs that don't want to be disturbed: */
- if (!housekeeping_runtime_test_cpu(cpu, HKR_TYPE_SCHED))
- return;
-
/* If we're a completely isolated CPU, we don't play: */
- if (on_null_domain(rq))
+ if (!housekeeping_runtime_test_cpu(cpu, HKR_TYPE_SCHED))
return;
/*
@@ -12504,11 +12495,14 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
*/
void sched_balance_trigger(struct rq *rq)
{
+ int cpu = cpu_of(rq);
+
/*
* Don't need to rebalance while attached to NULL domain or
* runqueue CPU is not active
*/
- if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
+ if (unlikely(!housekeeping_runtime_test_cpu(cpu, HKR_TYPE_SCHED)) ||
+ !cpu_active(cpu))
return;
if (time_after_eq(jiffies, rq->next_balance))
--
2.25.1
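
For context, a minimal sketch (not part of the patch) of the equivalence the changelog relies on: the series is assumed to clear a CPU from the HKR_TYPE_SCHED runtime mask exactly when its sched domain is detached and to set it again on re-attach, so the housekeeping test and the old rq->sd check are expected to agree. The helper below is hypothetical and only restates that assumption in code.

/*
 * Hypothetical helper, only restating the changelog's claim in code.
 * The caller is assumed to hold rcu_read_lock_sched() (or run with
 * preemption disabled), as callers of on_null_domain() did.
 */
static inline bool cpu_sched_isolated(struct rq *rq)
{
	/* New form used by the patch ... */
	bool hk = !housekeeping_runtime_test_cpu(cpu_of(rq), HKR_TYPE_SCHED);

	/* ... claimed equivalent to the removed on_null_domain(rq): */
	bool null_dom = !rcu_dereference_sched(rq->sd);

	WARN_ON_ONCE(hk != null_dom);	/* never expected to fire per the claim */

	return hk;
}
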
On Wed, Apr 03, 2024 at 05:05:38PM +0200, Pierre Gondois wrote:
> CPUs with a NULL sched domain are removed from the HKR_TYPE_SCHED
> isolation mask. The following two checks are equivalent:
> - !housekeeping_runtime_test_cpu(cpu, HKR_TYPE_SCHED)
> - on_null_domain(rq)
>
> Remove on_null_domain() and the redundant checks.
>
> Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
> ---
> kernel/sched/fair.c | 18 ++++++------------
> 1 file changed, 6 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 3e0f2a0f153f..9657c8f2176b 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -11830,11 +11830,6 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
>
> }
>
> -static inline int on_null_domain(struct rq *rq)
> -{
> - return unlikely(!rcu_dereference_sched(rq->sd));
> -}
> -
> #ifdef CONFIG_NO_HZ_COMMON
> /*
> * NOHZ idle load balancing (ILB) details:
> @@ -12040,7 +12035,7 @@ void nohz_balance_exit_idle(struct rq *rq)
> SCHED_WARN_ON(rq != this_rq());
>
> /* If we're a completely isolated CPU, we don't play: */
> - if (on_null_domain(rq))
> + if (!housekeeping_runtime_test_cpu(cpu_of(rq), HKR_TYPE_SCHED))
> return;
>
> if (likely(!rq->nohz_tick_stopped))
This seems broken: the whole null domain can happen with cpusets, but
this housekeeping nonsense is predicated on CPU_ISOLATION and none of
that is mandatory for CPUSETS.
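
A hedged sketch of that concern (the helper below is hypothetical and not from the patch): cpusets can detach rq->sd at runtime, e.g. via an isolated cpuset partition, while the HKR_TYPE_SCHED runtime mask is only maintained when CPU_ISOLATION is enabled, which CPUSETS does not require, so the two checks can disagree.

/*
 * Hypothetical sketch, not from the patch: with CONFIG_CPUSETS=y and
 * CONFIG_CPU_ISOLATION=n, a CPU can end up on a NULL sched domain while
 * the housekeeping check still reports it as a housekeeping CPU.
 */
static bool checks_can_diverge(struct rq *rq)
{
	/* Old check: a pure runtime property of the sched domain tree. */
	bool null_domain = !rcu_dereference_sched(rq->sd);

	/*
	 * New check: relies on the HKR_TYPE_SCHED runtime mask, which is
	 * only maintained under CPU_ISOLATION.
	 */
	bool hk_isolated =
		!housekeeping_runtime_test_cpu(cpu_of(rq), HKR_TYPE_SCHED);

	/* The problematic case: NULL domain, but not marked isolated. */
	return null_domain && !hk_isolated;
}
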
Hello Peter,
On 4/4/24 09:27, Peter Zijlstra wrote:
> On Wed, Apr 03, 2024 at 05:05:38PM +0200, Pierre Gondois wrote:
>> CPUs with a NULL sched domain are removed from the HKR_TYPE_SCHED
>> isolation mask. The following two checks are equivalent:
>> - !housekeeping_runtime_test_cpu(cpu, HKR_TYPE_SCHED)
>> - on_null_domain(rq)
>>
>> Remove on_null_domain() and the redundant checks.
>>
>> Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
>> ---
>> kernel/sched/fair.c | 18 ++++++------------
>> 1 file changed, 6 insertions(+), 12 deletions(-)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 3e0f2a0f153f..9657c8f2176b 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -11830,11 +11830,6 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
>>
>> }
>>
>> -static inline int on_null_domain(struct rq *rq)
>> -{
>> - return unlikely(!rcu_dereference_sched(rq->sd));
>> -}
>> -
>> #ifdef CONFIG_NO_HZ_COMMON
>> /*
>> * NOHZ idle load balancing (ILB) details:
>> @@ -12040,7 +12035,7 @@ void nohz_balance_exit_idle(struct rq *rq)
>> SCHED_WARN_ON(rq != this_rq());
>>
>> /* If we're a completely isolated CPU, we don't play: */
>> - if (on_null_domain(rq))
>> + if (!housekeeping_runtime_test_cpu(cpu_of(rq), HKR_TYPE_SCHED))
>> return;
>>
>> if (likely(!rq->nohz_tick_stopped))
>
> This seems broken: the whole null domain can happen with cpusets, but
> this housekeeping nonsense is predicated on CPU_ISOLATION and none of
> that is mandatory for CPUSETS.
Ok right,
I will try to remove this implicit dependency.
Regards,
Pierre